fix a few more memory64 bugs #52

Merged · 1 commit · Jun 12, 2024
7 changes: 0 additions & 7 deletions .github/workflows/ci.yml
@@ -22,13 +22,6 @@ jobs:
with:
version: 0.13.0

# The current default version of clang on macos runners is 14, which doesn't support the wasm64-freestanding target.
- name: Install LLVM and Clang
if: matrix.os == 'macos-latest'
uses: KyleMayes/install-llvm-action@v2
with:
version: "15.0"

- name: Setup Python
uses: actions/setup-python@v4
with:
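Note: the removed step pinned LLVM 15 on macOS runners only because the default clang 14 lacked the wasm64-freestanding target; with the Zig 0.13.0 toolchain set up above, it is presumably no longer needed. For orientation, a sketch of how a build.zig might select that target (assumed; the project's actual build script is not part of this diff):

    const std = @import("std");

    // Sketch only: selecting wasm64-freestanding via the std.Build API.
    // Wiring the target into addExecutable is omitted here.
    pub fn build(b: *std.Build) void {
        const target = b.resolveTargetQuery(.{
            .cpu_arch = .wasm64,
            .os_tag = .freestanding,
        });
        _ = target; // would be passed to b.addExecutable(...) in a real script
    }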
55 changes: 29 additions & 26 deletions src/definition.zig
@@ -698,17 +698,26 @@ pub const DataDefinition = struct {
}

var memory_index: ?u32 = null;
var index_type: ValType = .I32;
if (data_type == 0x00) {
memory_index = 0;
} else if (data_type == 0x02) {
memory_index = try common.decodeLEB128(u32, reader);
}

if (memory_index) |index| {
if (module_def.imports.memories.items.len + module_def.memories.items.len <= index) {
return error.ValidationUnknownMemory;
}
const limits = module_def.getMemoryLimits();
index_type = limits.indexType();
}

var mode = DataMode.Passive;
var offset: ?ConstantExpression = null;
if (data_type == 0x00 or data_type == 0x02) {
mode = DataMode.Active;
offset = try ConstantExpression.decode(reader, module_def, .Immutable, .I32);
offset = try ConstantExpression.decode(reader, module_def, .Immutable, index_type);
}

const num_bytes = try common.decodeLEB128(u32, reader);
@@ -719,12 +728,6 @@
return error.MalformedUnexpectedEnd;
}

if (memory_index) |index| {
if (module_def.imports.memories.items.len + module_def.memories.items.len <= index) {
return error.ValidationUnknownMemory;
}
}

return DataDefinition{
.bytes = bytes,
.memory_index = memory_index,
@@ -1520,18 +1523,6 @@ const ModuleValidator = struct {
}
}

fn getMemoryLimits(module: *const ModuleDefinition) Limits {
if (module.imports.memories.items.len > 0) {
return module.imports.memories.items[0].limits;
}

if (module.memories.items.len > 0) {
return module.memories.items[0].limits;
}

unreachable;
}

fn validateElementIndex(index: u64, module: *const ModuleDefinition) !void {
if (module.elements.items.len <= index) {
return error.ValidationUnknownElement;
@@ -1653,14 +1644,14 @@ const ModuleValidator = struct {

fn validateLoadOp(validator: *ModuleValidator, module_: *const ModuleDefinition, load_type: ValType) !void {
try validateMemoryIndex(module_);
const index_type: ValType = getMemoryLimits(module_).indexType();
const index_type: ValType = module_.getMemoryLimits().indexType();
try validator.popType(index_type);
try validator.pushType(load_type);
}

fn validateStoreOp(validator: *ModuleValidator, module_: *const ModuleDefinition, store_type: ValType) !void {
try validateMemoryIndex(module_);
const index_type: ValType = getMemoryLimits(module_).indexType();
const index_type: ValType = module_.getMemoryLimits().indexType();
try validator.popType(store_type);
try validator.popType(index_type);
}
@@ -1926,12 +1917,12 @@ const ModuleValidator = struct {
},
.Memory_Size => {
try validateMemoryIndex(module);
const index_type: ValType = getMemoryLimits(module).indexType();
const index_type: ValType = module.getMemoryLimits().indexType();
try self.pushType(index_type);
},
.Memory_Grow => {
try validateMemoryIndex(module);
const index_type: ValType = getMemoryLimits(module).indexType();
const index_type: ValType = module.getMemoryLimits().indexType();
try self.popType(index_type);
try self.pushType(index_type);
},
@@ -2143,7 +2134,7 @@ const ModuleValidator = struct {
.Memory_Init => {
try validateMemoryIndex(module);
try validateDataIndex(instruction.immediate.Index, module);
const index_type: ValType = getMemoryLimits(module).indexType();
const index_type: ValType = module.getMemoryLimits().indexType();
try self.popType(index_type);
try self.popType(index_type);
try self.popType(index_type);
@@ -2157,14 +2148,14 @@
},
.Memory_Fill => {
try validateMemoryIndex(module);
const index_type: ValType = getMemoryLimits(module).indexType();
const index_type: ValType = module.getMemoryLimits().indexType();
try self.popType(index_type);
try self.popType(.I32);
try self.popType(index_type);
},
.Memory_Copy => {
try validateMemoryIndex(module);
const index_type: ValType = getMemoryLimits(module).indexType();
const index_type: ValType = module.getMemoryLimits().indexType();
try self.popType(index_type);
try self.popType(index_type);
try self.popType(index_type);
@@ -3669,4 +3660,16 @@ pub const ModuleDefinition = struct {
return func_def.type_index;
}
}

fn getMemoryLimits(module: *const ModuleDefinition) Limits {
if (module.imports.memories.items.len > 0) {
return module.imports.memories.items[0].limits;
}

if (module.memories.items.len > 0) {
return module.memories.items[0].limits;
}

unreachable;
}
};
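Note on the change above: per the wasm binary format, a data segment flag of 0x00 means active against memory 0, 0x01 means passive, and 0x02 means active with an explicit memory index, which is what the data_type checks decode. The fix validates the memory index before decoding the offset expression, so the offset can be type-checked against that memory's index type: i64 for a memory64 memory instead of the previously hardcoded .I32. Moving getMemoryLimits from the validator onto ModuleDefinition lets DataDefinition.decode reach it too. Limits.indexType() itself is not shown in this diff; a minimal sketch of what it presumably looks like, with the is_memory64 field being an assumption about bytebox's Limits layout:

    const ValType = enum { I32, I64, F32, F64 }; // mirrors bytebox's value types

    // Sketch only: assumes the wasm limits flags byte records whether the
    // memory is 64-bit, and that indexType() maps that to a value type.
    const Limits = struct {
        min: u64,
        max: ?u64,
        is_memory64: bool, // assumed field, derived from the limits flags

        pub fn indexType(self: Limits) ValType {
            return if (self.is_memory64) .I64 else .I32;
        }
    };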
2 changes: 1 addition & 1 deletion test/mem64/main.zig
@@ -8,7 +8,7 @@ pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var allocator: std.mem.Allocator = gpa.allocator();

const wasm_data: []u8 = try std.fs.cwd().readFileAlloc(allocator, "zig-out/bin/memtest.wasm", 1024 * 128);
const wasm_data: []u8 = try std.fs.cwd().readFileAlloc(allocator, "zig-out/bin/memtest.wasm", 1024 * 512);
defer allocator.free(wasm_data);

const module_def = try bytebox.createModuleDefinition(allocator, .{});
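Note: std.fs.Dir.readFileAlloc fails with error.FileTooBig when the file exceeds the byte cap, so the cap has to keep up with the size of the test module; the new data segment and helpers in memtest.zig presumably pushed memtest.wasm past the old 128 KiB limit. A standalone sketch of the pattern (helper name and path are illustrative):

    const std = @import("std");

    // Illustrative helper: read a wasm module with an explicit size cap.
    // readFileAlloc allocates at most max_bytes and returns
    // error.FileTooBig for anything larger.
    fn loadWasm(allocator: std.mem.Allocator, path: []const u8) ![]u8 {
        const max_bytes: usize = 1024 * 512; // 512 KiB, matching the new cap
        return std.fs.cwd().readFileAlloc(allocator, path, max_bytes);
    }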
53 changes: 39 additions & 14 deletions test/mem64/memtest.zig
@@ -1,16 +1,29 @@
const std = @import("std");

const KB = 1024;
const MB = 1024 * KB;
const GB = 1024 * MB;

const PAGE_SIZE = 64 * KB;
const PAGES_PER_GB = GB / PAGE_SIZE;

const GLOBAL_DATA: []const volatile u8 = "YNDKMI*#"; // tests if data segments use index type

fn assert(cond: bool) !void {
if (!cond) {
return error.Failed;
}
}

fn alignPtr(mem: [*]volatile u8, alignment: usize) [*]volatile u8 {
return @ptrFromInt(std.mem.alignForward(usize, @intFromPtr(mem), alignment)); // volatile?
}

fn alignToSinglePtr(comptime T: type, mem: [*]volatile u8) *volatile T {
const mem_aligned = alignPtr(mem, @alignOf(T));
return @ptrCast(@alignCast(mem_aligned));
}

export fn memtest(val_i32: i32, val_i64: i64, val_f32: f32, val_f64: f64) i32 {
testInternal(val_i32, val_i64, val_f32, val_f64) catch {
return 1;
@@ -19,23 +32,23 @@ export fn memtest(val_i32: i32, val_i64: i64, val_f32: f32, val_f64: f64) i32 {
}

fn testInternal(val_i32: i32, val_i64: i64, val_f32: f32, val_f64: f64) !void {
_ = @wasmMemoryGrow(0, PAGES_PER_GB * 4);

const grow_value: isize = @wasmMemoryGrow(0, PAGES_PER_GB * 6); // memory.grow
try assert(grow_value != -1);

// volatile pointers ensure the loads and stores don't get optimized away
const start_page: [*]volatile u8 = @ptrFromInt(@as(usize, @intCast(grow_value)));

const mem = start_page + (GB * 4);
const mem_stores = mem + MB * 1; // volatile?
const mem_loads = mem + MB * 2; // volatile?
const mem_loads: [*]volatile u8 = mem + MB * 2;
const mem_stores: [*]volatile u8 = mem + MB * 1;

const num_pages: usize = @wasmMemorySize(0);
try assert(num_pages >= PAGES_PER_GB * 6);

const ptr_load_i32 = @as(*volatile i32, @ptrCast(@alignCast(mem_loads)));
const ptr_load_i64 = @as(*volatile i64, @ptrCast(@alignCast(mem_loads + 8)));
const ptr_load_f32 = @as(*volatile f32, @ptrCast(@alignCast(mem_loads + 16)));
const ptr_load_f64 = @as(*volatile f64, @ptrCast(@alignCast(mem_loads + 24)));
const ptr_load_i32 = alignToSinglePtr(i32, mem_loads + 0);
const ptr_load_i64 = alignToSinglePtr(i64, mem_loads + 64);
const ptr_load_f32 = alignToSinglePtr(f32, mem_loads + 128);
const ptr_load_f64 = alignToSinglePtr(f64, mem_loads + 192);

ptr_load_i32.* = val_i32; // i32.store
ptr_load_i64.* = val_i64; // i64.store
@@ -47,10 +60,10 @@ fn testInternal(val_i32: i32, val_i64: i64, val_f32: f32, val_f64: f64) !void {
try assert(ptr_load_f32.* == val_f32);
try assert(ptr_load_f64.* == val_f64);

const ptr_store_i32 = @as(*volatile i32, @ptrCast(@alignCast(mem_stores)));
const ptr_store_i64 = @as(*volatile i64, @ptrCast(@alignCast(mem_stores + 8)));
const ptr_store_f32 = @as(*volatile f32, @ptrCast(@alignCast(mem_stores + 16)));
const ptr_store_f64 = @as(*volatile f64, @ptrCast(@alignCast(mem_stores + 24)));
const ptr_store_i32 = alignToSinglePtr(i32, mem_stores + 0);
const ptr_store_i64 = alignToSinglePtr(i64, mem_stores + 64);
const ptr_store_f32 = alignToSinglePtr(f32, mem_stores + 128);
const ptr_store_f64 = alignToSinglePtr(f64, mem_stores + 192);

ptr_store_i32.* = ptr_load_i32.*; // i32.load && i32.store
ptr_store_i64.* = ptr_load_i64.*; // i64.load && i64.store
@@ -87,13 +100,13 @@ fn testInternal(val_i32: i32, val_i64: i64, val_f32: f32, val_f64: f64) !void {
load64 = @as(*volatile i16, @ptrCast(@alignCast(ptr_load_i64))).*; // i64.load16_s
try assert(load64 == 0x7FFF);
ptr_load_i64.* = 0xFFFF;
load64 = @as(*volatile u16, @ptrCast(@alignCast(ptr_load_i64))).*; // i64.load16_s
load64 = @as(*volatile u16, @ptrCast(@alignCast(ptr_load_i64))).*; // i64.load16_u
try assert(load64 == 0xFFFF);
ptr_load_i64.* = 0x7FFFFFFF;
load64 = @as(*volatile i32, @ptrCast(@alignCast(ptr_load_i64))).*; // i64.load32_s
try assert(load64 == 0x7FFFFFFF);
ptr_load_i64.* = 0xFFFFFFFF;
load64 = @as(*volatile u32, @ptrCast(@alignCast(ptr_load_i64))).*; // i64.load32_s
load64 = @as(*volatile u32, @ptrCast(@alignCast(ptr_load_i64))).*; // i64.load32_u
try assert(load64 == 0xFFFFFFFF);

const memset_dest = (mem + KB)[0..KB];
@@ -105,4 +118,16 @@ fn testInternal(val_i32: i32, val_i64: i64, val_f32: f32, val_f64: f64) !void {
try assert(memset_dest[KB - 1] == 0xFF);
try assert(memcpy_dest[0] == 0xFF);
try assert(memcpy_dest[KB - 1] == 0xFF);

// forces data segment to be generated
@memcpy(memcpy_dest[0..GLOBAL_DATA.len], GLOBAL_DATA);

try assert(memcpy_dest[0] == 'Y');
try assert(memcpy_dest[1] == 'N');
try assert(memcpy_dest[2] == 'D');
try assert(memcpy_dest[3] == 'K');
try assert(memcpy_dest[4] == 'M');
try assert(memcpy_dest[5] == 'I');
try assert(memcpy_dest[6] == '*');
try assert(memcpy_dest[7] == '#');
}
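Note: the volatile GLOBAL_DATA string forces the compiler to emit an active data segment, whose offset constant must use the i64 index type under wasm64, which exercises the definition.zig fix above. The alignToSinglePtr helper replaces the raw @ptrCast/@alignCast chains, and the slots now sit 64 bytes apart, presumably so alignForward can round each one up without spilling into its neighbor. A standalone usage sketch of the helpers (not part of the test):

    // Assumes the alignPtr/alignToSinglePtr definitions above are in scope.
    test "alignToSinglePtr usage sketch" {
        var scratch: [256]u8 = undefined;
        const base: [*]volatile u8 = &scratch;

        // Carve two independently aligned slots out of raw bytes.
        const p_i32 = alignToSinglePtr(i32, base + 0);
        const p_f64 = alignToSinglePtr(f64, base + 64);

        p_i32.* = 42;
        p_f64.* = 3.5;
        try std.testing.expect(p_i32.* == 42);
        try std.testing.expect(p_f64.* == 3.5);
    }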