From 1e4fe3fb0ddf5a78ea42421238044a11d6db9caa Mon Sep 17 00:00:00 2001
From: Reuben Dunnington <4075241+rdunnington@users.noreply.github.com>
Date: Sat, 18 May 2024 23:16:20 -0700
Subject: [PATCH] memory64 support (#37)
* Adds support for the memory64 spec proposal. table64 support is TODO.
* New overridable logging interface. Exposure via FFI TODO.
* Reorganized tests a bit to ensure the wasm testsuite doesn't get mixed up with the other tests
* New mem64 test that builds a C program into wasm64 with clang and ensures it can access 64-bit addresses
* Readme updates to better document test requirements/dependencies
* Fixed a bug in constexpr funcref resolution where the module instance wasn't correctly set on resolution
---
.github/workflows/ci.yml | 30 ++++-
.gitignore | 5 +-
.gitmodules | 2 +-
README.md | 41 ++++---
build.zig | 57 +++++++---
run/main.zig | 46 ++++----
src/cffi.zig | 1 +
src/common.zig | 53 +++++++++
src/core.zig | 4 +
src/definition.zig | 158 ++++++++++++++++++++------
src/instance.zig | 101 +++++++++++-----
src/tests.zig | 30 ++++-
src/vm_stack.zig | 151 ++++++++++++------------
src/zig-stable-array/stable_array.zig | 49 +++++++-
test/mem64/main.zig | 32 ++++++
test/mem64/memtest.c | 66 +++++++++++
test/mem64/memtest.zig | 25 ++++
test/testsuite | 1 -
test/wasi/run.py | 6 +-
test/{ => wasm}/main.zig | 154 +++++++++++++++++++------
test/wasm/wasm-testsuite | 1 +
21 files changed, 765 insertions(+), 248 deletions(-)
create mode 100644 test/mem64/main.zig
create mode 100644 test/mem64/memtest.c
create mode 100644 test/mem64/memtest.zig
delete mode 160000 test/testsuite
rename test/{ => wasm}/main.zig (91%)
create mode 160000 test/wasm/wasm-testsuite
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9a78b30..d73f003 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,6 +1,10 @@
name: CI
-on: [push, pull_request]
+on:
+ push:
+ branches:
+ - main
+ pull_request:
jobs:
tests:
@@ -18,19 +22,31 @@ jobs:
with:
version: 0.11.0
+ # The current default version of clang on macos runners is 14, which doesn't support the wasm64-freestanding target.
+ - name: Install LLVM and Clang
+ if: matrix.os == 'macos-latest'
+ uses: KyleMayes/install-llvm-action@v2
+ with:
+ version: "15.0"
+
- name: Setup Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
cache: pip
- - name: Install dependencies
+ - name: Install python dependencies
working-directory: test/wasi/wasi-testsuite/test-runner
run: python3 -m pip install -r requirements.txt
- - uses: mwilliamson/setup-wabt-action@v2
- with:
- wabt-version: "1.0.31"
+ # Ideally we would use this but it seems to be broken
+ # - name: Setup wasm-tools
+ # uses: jcbhmr/setup-wasm-tools@v2
+ # with:
+ # wasm-tools-version: 1.207
+
+ - name: Setup wasm-tools
+ run: cargo install wasm-tools
- name: Build
run: |
@@ -44,6 +60,10 @@ jobs:
run: |
zig build test-wasm -- --log-suite
+ - name: Run mem64 test
+ run: |
+ zig build test-mem64
+
- name: Run wasi testsuite
run: |
zig build test-wasi
diff --git a/.gitignore b/.gitignore
index 3d17a45..3470e12 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,10 +1,11 @@
zig-cache
zig-out
-test/wasm
+test/wasm/wasm-generated
*.wasm
*.wasm.o
.vs
*.sublime-project
*.sublime-workspace
*.sln
-*.rdbg
\ No newline at end of file
+*.rdbg
+.DS_Store
diff --git a/.gitmodules b/.gitmodules
index 431eb09..6b80646 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,7 +2,7 @@
path = zig-stable-array
url = https://github.com/rdunnington/zig-stable-array.git
[submodule "test/testsuite"]
- path = test/testsuite
+ path = test/wasm/wasm-testsuite
url = https://github.com/WebAssembly/testsuite.git
[submodule "test/wasi/wasi-testsuite"]
path = test/wasi/wasi-testsuite
diff --git a/README.md b/README.md
index cc686be..51e298b 100644
--- a/README.md
+++ b/README.md
@@ -3,25 +3,32 @@

-Bytebox is a Webassembly VM.
+Bytebox is a WebAssembly VM.
-## Getting started
+# Getting started
-### Requirements
+## Requirements
Bytebox currently builds with [Zig 0.11.x](https://ziglang.org/download) to avoid churn on zig master.
-### Run
+To run the tests:
+* `wasm-tools` is required to run the wasm testsuite. You can install it via the rust toolchain `cargo install wasm-tools` or directly from the [release page](https://github.com/bytecodealliance/wasm-tools/releases).
+* `clang` v15.x+ is required to build the mem64 tests. However, if you don't have a compatible version of `clang` installed, you can pass `--noclang` to `zig build` to avoid the requirement.
+* `python3` is required to run the wasi testsuite. You may need to run `python3 -m pip install -r test/wasi/wasi-testsuite/test-runner/requirements.txt` to ensure the wasi test runner has all the necessary dependencies installed.
+
+## Run Tests
```sh
git clone --recurse-submodules https://github.com/rdunnington/bytebox.git
cd bytebox
-zig build test # run the WebAssembly spec testsuite
-# run the wasi testsuite
-python3 test/wasi/wasi-testsuite/test-runner/wasi_test_runner.py -r test/wasi/bytebox_adapter.py -t ./test/wasi/wasi-testsuite/tests/assemblyscript/testsuite/ ./test/wasi/wasi-testsuite/tests/c/testsuite/ ./test/wasi/wasi-testsuite/tests/rust/testsuite/
+zig build test-unit # run builtin zig unit tests
+zig build test-wasm # run official wasm spec testsuite
+zig build test-wasi # run official wasi spec testsuite
+zig build test-mem64 # run memory64 compat test
+zig build test # run all of the above in parallel (output will not be pretty!)
```
-### Usage
+## Usage
You can use the standalone runtime to load and execute WebAssembly programs:
```sh
@@ -56,22 +63,22 @@ pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var allocator: std.mem.Allocator = gpa.allocator();
- var wasm_data: []u8 = try std.fs.cwd().readFileAlloc(allocator, "example.wasm", 1024 * 128);
+ const wasm_data: []u8 = try std.fs.cwd().readFileAlloc(allocator, "example.wasm", 1024 * 128);
defer allocator.free(wasm_data);
- var module_definition = bytebox.ModuleDefinition.init(allocator, .{});
- defer module_definition.deinit();
- try module_definition.decode(wasm_data);
+ const module_def = try bytebox.createModuleDefinition(allocator, .{});
+ defer module_def.destroy();
+ try module_def.decode(wasm_data);
- var module_instance = bytebox.ModuleInstance.init(&module_definition, allocator);
- defer module_instance.deinit();
+ const module_instance = try bytebox.createModuleInstance(.Stack, module_def, allocator);
+ defer module_instance.destroy();
try module_instance.instantiate(.{});
}
```
Inter-language FFI is also supported. See `src/bytebox.h` for an overview in C. To use bytebox as a static library, link with the built library in `zig-out/lib/`. Note that Zig assumes a default stack size of 8MB, so you'll need to ensure the same in your program.
-## Status
+# Status
This project is still in the alpha stage.
@@ -81,7 +88,7 @@ This project is still in the alpha stage.
|❌|TODO|
|💀|Not planned/Removed from spec|
-### [WebAssembly](https://webassembly.github.io/spec/core/index.html) support:
+## [WebAssembly](https://webassembly.github.io/spec/core/index.html) support:
| Status | Feature |
| --- | --- |
@@ -95,7 +102,7 @@ This project is still in the alpha stage.
|✅|Bulk memory and table instructions|
|✅|Vector instructions|
-### [WASI Preview 1](https://github.com/WebAssembly/WASI/tree/main) support:
+## [WASI Preview 1](https://github.com/WebAssembly/WASI/tree/main) support:
| Status | Feature |
| --- | --- |
diff --git a/build.zig b/build.zig
index c382863..ad40f16 100644
--- a/build.zig
+++ b/build.zig
@@ -17,14 +17,15 @@ const ExeOpts = struct {
pub fn build(b: *Build) void {
const should_emit_asm = b.option(bool, "asm", "Emit asm for the bytebox binaries") orelse false;
+ const no_clang = b.option(bool, "noclang", "Pass this if clang isn't in the PATH") orelse false;
+
+ var bench_add_one_step: *CompileStep = buildWasmLib(b, "bench/samples/add-one.zig");
+ var bench_fibonacci_step: *CompileStep = buildWasmLib(b, "bench/samples/fibonacci.zig");
+ var bench_mandelbrot_step: *CompileStep = buildWasmLib(b, "bench/samples/mandelbrot.zig");
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});
- var bench_add_one_step: *CompileStep = buildWasmLib(b, "bench/samples/add-one.zig", optimize);
- var bench_fibonacci_step: *CompileStep = buildWasmLib(b, "bench/samples/fibonacci.zig", optimize);
- var bench_mandelbrot_step: *CompileStep = buildWasmLib(b, "bench/samples/mandelbrot.zig", optimize);
-
const bytebox_module: *Build.Module = b.addModule("bytebox", .{
.source_file = Build.LazyPath.relative("src/core.zig"),
});
@@ -43,7 +44,7 @@ pub fn build(b: *Build) void {
&bench_mandelbrot_step.step,
};
_ = buildExeWithRunStep(b, target, optimize, bytebox_module, .{
- .exe_name = "benchmark",
+ .exe_name = "bench",
.root_src = "bench/main.zig",
.step_name = "bench",
.description = "Run the benchmark suite",
@@ -71,8 +72,8 @@ pub fn build(b: *Build) void {
// wasm tests
var wasm_testsuite_step = buildExeWithRunStep(b, target, optimize, bytebox_module, .{
- .exe_name = "testsuite",
- .root_src = "test/main.zig",
+ .exe_name = "test-wasm",
+ .root_src = "test/wasm/main.zig",
.step_name = "test-wasm",
.description = "Run the wasm testsuite",
});
@@ -83,11 +84,43 @@ pub fn build(b: *Build) void {
const wasi_testsuite_step = b.step("test-wasi", "Run wasi testsuite");
wasi_testsuite_step.dependOn(&wasi_testsuite.step);
+ // mem64 step
+ var mem64_test_step: ?*Build.Step = null;
+ if (!no_clang) {
+ // need to use clang to compile the C test due to https://github.com/ziglang/zig/issues/19942
+ // eventually we will ziggify this test
+ // ideally this test would go away, but the existing spec tests don't provide very good coverage
+ // of the instructions
+ const compile_memtest = b.addSystemCommand(&.{"clang"});
+ compile_memtest.addArg("--target=wasm64-freestanding");
+ compile_memtest.addArg("-mbulk-memory");
+ compile_memtest.addArg("-nostdlib");
+ compile_memtest.addArg("-O2");
+ compile_memtest.addArg("-Wl,--no-entry");
+ compile_memtest.addArg("-Wl,--export-dynamic");
+ compile_memtest.addArg("-o");
+ compile_memtest.addArg("test/mem64/memtest.wasm");
+ compile_memtest.addFileArg(.{ .path = "test/mem64/memtest.c" });
+ compile_memtest.has_side_effects = true;
+
+ b.getInstallStep().dependOn(&compile_memtest.step);
+
+ mem64_test_step = buildExeWithRunStep(b, target, optimize, bytebox_module, .{
+ .exe_name = "test-mem64",
+ .root_src = "test/mem64/main.zig",
+ .step_name = "test-mem64",
+ .description = "Run the mem64 test",
+ });
+ }
+
// All tests
const all_tests_step = b.step("test", "Run unit, wasm, and wasi tests");
all_tests_step.dependOn(unit_test_step);
all_tests_step.dependOn(wasm_testsuite_step);
all_tests_step.dependOn(wasi_testsuite_step);
+ if (mem64_test_step) |step| {
+ all_tests_step.dependOn(step);
+ }
}
fn buildExeWithRunStep(b: *Build, target: CrossTarget, optimize: std.builtin.Mode, bytebox_module: *Build.Module, opts: ExeOpts) *Build.Step {
@@ -100,10 +133,6 @@ fn buildExeWithRunStep(b: *Build, target: CrossTarget, optimize: std.builtin.Mod
exe.addModule("bytebox", bytebox_module);
- // exe.addModule("bytebox", .{
- // .source_file = Build.LazyPath.relative("src/core.zig"),
- // });
-
// exe.emit_asm = if (opts.should_emit_asm) .emit else .default;
b.installArtifact(exe);
@@ -125,7 +154,7 @@ fn buildExeWithRunStep(b: *Build, target: CrossTarget, optimize: std.builtin.Mod
return step;
}
-fn buildWasmLib(b: *Build, filepath: []const u8, optimize: std.builtin.Mode) *CompileStep {
+fn buildWasmLib(b: *Build, filepath: []const u8) *CompileStep {
var filename: []const u8 = std.fs.path.basename(filepath);
var filename_no_extension: []const u8 = filename[0 .. filename.len - 4];
@@ -136,11 +165,9 @@ fn buildWasmLib(b: *Build, filepath: []const u8, optimize: std.builtin.Mode) *Co
.cpu_arch = .wasm32,
.os_tag = .freestanding,
},
- .optimize = optimize,
+ .optimize = .ReleaseSmall,
});
- // const mode = b.standardOptimizeOption();
- // lib.setBuildMode(mode);
b.installArtifact(lib);
return lib;
diff --git a/run/main.zig b/run/main.zig
index 7d9159e..735cd91 100644
--- a/run/main.zig
+++ b/run/main.zig
@@ -6,6 +6,8 @@ const Val = bytebox.Val;
const ValType = bytebox.ValType;
const TraceMode = bytebox.DebugTrace.Mode;
+const log = bytebox.Logger.default();
+
const RunErrors = error{
IoError,
MissingFunction,
@@ -102,12 +104,8 @@ fn parseCmdOpts(args: [][]const u8, env_buffer: *std.ArrayList([]const u8), dir_
} else if (std.mem.eql(u8, arg, "-t") or std.mem.eql(u8, arg, "--trace")) {
arg_index += 1;
if (getArgSafe(arg_index, args)) |mode_str| {
- if (std.ascii.eqlIgnoreCase(mode_str, "function") or std.ascii.eqlIgnoreCase(mode_str, "func")) {
- opts.trace = TraceMode.Function;
- } else if (std.ascii.eqlIgnoreCase(mode_str, "instruction") or std.ascii.eqlIgnoreCase(mode_str, "instr")) {
- opts.trace = TraceMode.Instruction;
- } else if (std.ascii.eqlIgnoreCase(mode_str, "none")) {
- opts.trace = TraceMode.None;
+ if (bytebox.DebugTrace.parseMode(mode_str)) |mode| {
+ opts.trace = mode;
} else {
opts.invalid_arg = mode_str;
}
@@ -135,7 +133,7 @@ fn parseCmdOpts(args: [][]const u8, env_buffer: *std.ArrayList([]const u8), dir_
const version_string = "bytebox v0.0.1";
-fn printHelp(args: [][]const u8) !void {
+fn printHelp(args: [][]const u8) void {
const usage_string: []const u8 =
\\Usage: {s} [WASM_ARGS]... [OPTION]...
\\
@@ -175,8 +173,7 @@ fn printHelp(args: [][]const u8) !void {
\\
;
- const stdout = std.io.getStdOut().writer();
- try stdout.print(usage_string, .{args[0]});
+ log.info(usage_string, .{args[0]});
}
pub fn main() !void {
@@ -196,33 +193,28 @@ pub fn main() !void {
const opts: CmdOpts = parseCmdOpts(args, &env_buffer, &dir_buffer);
- const stdout = std.io.getStdOut().writer();
- const stderr = std.io.getStdErr().writer();
-
if (opts.print_help) {
- try printHelp(args);
+ printHelp(args);
return;
} else if (opts.print_version) {
- try stdout.print("{s}", .{version_string});
+ log.info("{s}", .{version_string});
return;
} else if (opts.invalid_arg) |invalid_arg| {
- try stderr.print("Invalid argument '{s}'.\n", .{invalid_arg});
- try printHelp(args);
+ log.err("Invalid argument '{s}'.\n", .{invalid_arg});
+ printHelp(args);
return;
} else if (opts.missing_options) |missing_options| {
- try stderr.print("Argument {s} is missing required options.\n", .{missing_options});
- try printHelp(args);
+ log.err("Argument {s} is missing required options.\n", .{missing_options});
+ printHelp(args);
return;
} else if (opts.invoke != null and opts.filename == null) {
- try stderr.print("Cannot invoke {s} without a file to load.", .{opts.invoke.?.funcname});
- try printHelp(args);
+ log.err("Cannot invoke {s} without a file to load.", .{opts.invoke.?.funcname});
+ printHelp(args);
return;
}
if (opts.trace != .None) {
- if (bytebox.DebugTrace.setMode(opts.trace) == false) {
- try stderr.print("Failed to set trace mode to {}. Option unavailable in non-debug builds.\n", .{opts.trace});
- }
+ bytebox.DebugTrace.setMode(opts.trace);
}
std.debug.assert(opts.filename != null);
@@ -236,6 +228,7 @@ pub fn main() !void {
const module_def_opts = bytebox.ModuleDefinitionOpts{
.debug_name = std.fs.path.basename(opts.filename.?),
+ .log = log,
};
var module_def = try bytebox.createModuleDefinition(allocator, module_def_opts);
defer module_def.destroy();
@@ -249,7 +242,7 @@ pub fn main() !void {
var strbuf = std.ArrayList(u8).init(allocator);
try strbuf.ensureTotalCapacity(1024 * 16);
try module_def.dump(strbuf.writer());
- try stdout.print("{s}", .{strbuf.items});
+ log.info("{s}", .{strbuf.items});
return;
}
@@ -265,10 +258,11 @@ pub fn main() !void {
var instantiate_opts = bytebox.ModuleInstantiateOpts{
.imports = &[_]bytebox.ModuleImportPackage{imports_wasi},
+ .log = log,
};
module_instance.instantiate(instantiate_opts) catch |e| {
- std.log.err("Caught {} instantiating module.", .{e});
+ std.log.err("Caught error instantiating module {}.", .{e});
return e;
};
@@ -382,7 +376,7 @@ pub fn main() !void {
try std.fmt.format(writer, "\n", .{});
}
if (strbuf.items.len > 0) {
- try stdout.print("{s}\n", .{strbuf.items});
+ log.info("{s}\n", .{strbuf.items});
}
}
}
diff --git a/src/cffi.zig b/src/cffi.zig
index 7b717ff..b2d0f4c 100644
--- a/src/cffi.zig
+++ b/src/cffi.zig
@@ -293,6 +293,7 @@ export fn bb_import_package_add_memory(package: ?*ModuleImportPackage, config: ?
const limits = core.Limits{
.min = min_pages,
.max = max_pages,
+ .limit_type = 1,
};
var allocator: *std.mem.Allocator = &package.?.allocator;
diff --git a/src/common.zig b/src/common.zig
index cd6410f..1c97f8f 100644
--- a/src/common.zig
+++ b/src/common.zig
@@ -24,6 +24,59 @@ pub fn decodeLEB128(comptime T: type, reader: anytype) !T {
}
}
+pub const LogLevel = enum(c_int) {
+ Info,
+ Error,
+};
+
+pub const Logger = struct {
+ const LogFn = *const fn (level: LogLevel, text: [:0]const u8) void;
+
+ log_fn: ?LogFn,
+
+ pub fn default() Logger {
+ return .{
+ .log_fn = &defaultLog,
+ };
+ }
+
+ pub fn empty() Logger {
+ return .{
+ .log_fn = null,
+ };
+ }
+
+ fn defaultLog(level: LogLevel, text: [:0]const u8) void {
+ var fd = switch (level) {
+ .Info => std.io.getStdOut(),
+ .Error => std.io.getStdErr(),
+ };
+ var writer = fd.writer();
+ nosuspend writer.writeAll(text) catch |e| {
+ std.debug.print("Failed logging due to error: {}\n", .{e});
+ };
+ }
+
+ pub fn info(self: Logger, comptime format: []const u8, args: anytype) void {
+ self.log(.Info, format, args);
+ }
+
+ pub fn err(self: Logger, comptime format: []const u8, args: anytype) void {
+ self.log(.Error, format, args);
+ }
+
+ pub fn log(self: Logger, level: LogLevel, comptime format: []const u8, args: anytype) void {
+ if (self.log_fn) |logger| {
+ var buf: [2048]u8 = undefined;
+ const formatted = std.fmt.bufPrintZ(&buf, format ++ "\n", args) catch |e| {
+ std.debug.print("Failed logging due to error: {}\n", .{e});
+ return;
+ };
+ logger(level, formatted);
+ }
+ }
+};
+
pub const ScratchAllocator = struct {
buffer: StableArray(u8),
diff --git a/src/core.zig b/src/core.zig
index 14b19e4..3366e04 100644
--- a/src/core.zig
+++ b/src/core.zig
@@ -1,10 +1,14 @@
const std = @import("std");
+const common = @import("common.zig");
const def = @import("definition.zig");
const inst = @import("instance.zig");
const vm_stack = @import("vm_stack.zig");
const vm_register = @import("vm_register.zig");
pub const wasi = @import("wasi.zig");
+pub const LogLevel = common.LogLevel;
+pub const Logger = common.Logger;
+
pub const i8x16 = def.i8x16;
pub const u8x16 = def.u8x16;
pub const i16x8 = def.i16x8;
diff --git a/src/definition.zig b/src/definition.zig
index f80ed0e..7272808 100644
--- a/src/definition.zig
+++ b/src/definition.zig
@@ -4,6 +4,7 @@ const std = @import("std");
const AllocError = std.mem.Allocator.Error;
const common = @import("common.zig");
+const Logger = common.Logger;
const StableArray = common.StableArray;
const opcodes = @import("opcode.zig");
@@ -269,21 +270,50 @@ pub const TaggedVal = struct {
};
pub const Limits = struct {
- min: u32,
- max: ?u32,
+ min: u64,
+ max: ?u64,
+ limit_type: u8,
+
+ // limit_type table:
+ // 0x00 n:u32 ⇒ i32, {min n, max ?}, 0
+ // 0x01 n:u32 m:u32 ⇒ i32, {min n, max m}, 0
+ // 0x02 n:u32 ⇒ i32, {min n, max ?}, 1 ;; from threads proposal
+ // 0x03 n:u32 m:u32 ⇒ i32, {min n, max m}, 1 ;; from threads proposal
+ // 0x04 n:u64 ⇒ i64, {min n, max ?}, 0
+ // 0x05 n:u64 m:u64 ⇒ i64, {min n, max m}, 0
+ // 0x06 n:u64 ⇒ i64, {min n, max ?}, 1 ;; from threads proposal
+ // 0x07 n:u64 m:u64 ⇒ i64, {min n, max m}, 1 ;; from threads proposal
+
+ pub const k_max_bytes_i32 = k_max_pages_i32 * MemoryDefinition.k_page_size;
+ pub const k_max_pages_i32 = std.math.powi(usize, 2, 16) catch unreachable;
+
+ // Technically the max bytes should be maxInt(u64), but that is wayyy more memory than PCs have available and
+ // is just a waste of virtual address space in the implementation. Instead we'll set the upper limit to 128GB.
+ pub const k_max_bytes_i64 = (1024 * 1024 * 1024 * 128);
+ pub const k_max_pages_i64 = k_max_bytes_i64 / MemoryDefinition.k_page_size;
fn decode(reader: anytype) !Limits {
- const has_max = try reader.readByte();
- if (has_max > 1) {
+ const limit_type: u8 = try reader.readByte();
+
+ if (limit_type > 7) {
+ return error.MalformedLimits;
+ }
+
+ const is_u32 = limit_type < 4;
+
+ const min = common.decodeLEB128(u64, reader) catch return error.MalformedLimits;
+ if (is_u32 and min > std.math.maxInt(u32)) {
return error.MalformedLimits;
}
- const min = try common.decodeLEB128(u32, reader);
- var max: ?u32 = null;
+ var max: ?u64 = null;
- switch (has_max) {
+ switch (std.math.rem(u8, limit_type, 2) catch unreachable) {
0 => {},
1 => {
- max = try common.decodeLEB128(u32, reader);
+ max = common.decodeLEB128(u64, reader) catch return error.MalformedLimits;
+ if (is_u32 and max.? > std.math.maxInt(u32)) {
+ return error.MalformedLimits;
+ }
if (max.? < min) {
return error.ValidationLimitsMinMustNotBeLargerThanMax;
}
@@ -294,8 +324,29 @@ pub const Limits = struct {
return Limits{
.min = min,
.max = max,
+ .limit_type = limit_type,
};
}
+
+ pub fn isIndex32(self: Limits) bool {
+ return self.limit_type < 4;
+ }
+
+ pub fn indexType(self: Limits) ValType {
+ return if (self.limit_type < 4) .I32 else .I64;
+ }
+
+ pub fn maxPages(self: Limits) usize {
+ if (self.max) |max| {
+ return @max(1, max);
+ }
+
+ return self.indexTypeMaxPages();
+ }
+
+ pub fn indexTypeMaxPages(self: Limits) usize {
+ return if (self.limit_type < 4) k_max_pages_i32 else k_max_pages_i64;
+ }
};
const BlockType = enum {
@@ -433,12 +484,17 @@ pub const ConstantExpression = union(ConstantExpressionType) {
return expr;
}
- pub fn resolve(self: *const ConstantExpression, store: *Store) Val {
+ pub fn resolve(self: *ConstantExpression, module_instance: *ModuleInstance) Val {
switch (self.*) {
.Value => |val| {
- return val.val;
+ var inner_val: Val = val.val;
+ if (val.type == .FuncRef) {
+ inner_val.FuncRef.module_instance = module_instance;
+ }
+ return inner_val;
},
.Global => |global_index| {
+ const store: *Store = &module_instance.store;
std.debug.assert(global_index < store.imports.globals.items.len + store.globals.items.len);
const global: *GlobalInstance = store.getGlobal(global_index);
return global.value;
@@ -446,8 +502,8 @@ pub const ConstantExpression = union(ConstantExpressionType) {
}
}
- pub fn resolveTo(self: *const ConstantExpression, store: *Store, comptime T: type) T {
- const val: Val = self.resolve(store);
+ pub fn resolveTo(self: *ConstantExpression, module_instance: *ModuleInstance, comptime T: type) T {
+ const val: Val = self.resolve(module_instance);
switch (T) {
i32 => return val.I32,
u32 => return @as(u32, @bitCast(val.I32)),
@@ -621,7 +677,6 @@ pub const MemoryDefinition = struct {
limits: Limits,
pub const k_page_size: usize = 64 * 1024;
- pub const k_max_pages: usize = std.math.powi(usize, 2, 16) catch unreachable;
};
pub const ElementMode = enum {
@@ -722,13 +777,13 @@ const GlobalImportDefinition = struct {
const MemArg = struct {
alignment: u32,
- offset: u32,
+ offset: u64,
fn decode(reader: anytype, comptime bitwidth: u32) !MemArg {
std.debug.assert(bitwidth % 8 == 0);
var memarg = MemArg{
.alignment = try common.decodeLEB128(u32, reader),
- .offset = try common.decodeLEB128(u32, reader),
+ .offset = try common.decodeLEB128(u64, reader),
};
const bit_alignment = std.math.powi(u32, 2, memarg.alignment) catch return error.ValidationBadAlignment;
if (bit_alignment > bitwidth / 8) {
@@ -739,7 +794,7 @@ const MemArg = struct {
};
pub const MemoryOffsetAndLaneImmediates = struct {
- offset: u32,
+ offset: u64,
laneidx: u8,
};
@@ -800,7 +855,7 @@ pub const InstructionImmediates = union(InstructionImmediatesTypes) {
ValueVec: v128,
Index: u32,
LabelId: u32,
- MemoryOffset: u32,
+ MemoryOffset: u64,
MemoryOffsetAndLane: MemoryOffsetAndLaneImmediates,
Block: BlockImmediates,
CallIndirect: CallIndirectImmediates,
@@ -1393,12 +1448,14 @@ const ModuleValidator = struct {
type_stack: std.ArrayList(?ValType),
control_stack: std.ArrayList(ControlFrame),
control_types: StableArray(ValType),
+ log: Logger,
- fn init(allocator: std.mem.Allocator) ModuleValidator {
+ fn init(allocator: std.mem.Allocator, log: Logger) ModuleValidator {
return ModuleValidator{
.type_stack = std.ArrayList(?ValType).init(allocator),
.control_stack = std.ArrayList(ControlFrame).init(allocator),
.control_types = StableArray(ValType).init(1 * 1024 * 1024),
+ .log = log,
};
}
@@ -1451,6 +1508,18 @@ const ModuleValidator = struct {
}
}
+ fn getMemoryLimits(module: *const ModuleDefinition) Limits {
+ if (module.imports.memories.items.len > 0) {
+ return module.imports.memories.items[0].limits;
+ }
+
+ if (module.memories.items.len > 0) {
+ return module.memories.items[0].limits;
+ }
+
+ unreachable;
+ }
+
fn validateElementIndex(index: u64, module: *const ModuleDefinition) !void {
if (module.elements.items.len <= index) {
return error.ValidationUnknownElement;
@@ -1566,15 +1635,17 @@ const ModuleValidator = struct {
}
fn validateLoadOp(validator: *ModuleValidator, module_: *const ModuleDefinition, load_type: ValType) !void {
- try validator.popType(.I32);
try validateMemoryIndex(module_);
+ const index_type: ValType = getMemoryLimits(module_).indexType();
+ try validator.popType(index_type);
try validator.pushType(load_type);
}
fn validateStoreOp(validator: *ModuleValidator, module_: *const ModuleDefinition, store_type: ValType) !void {
try validateMemoryIndex(module_);
+ const index_type: ValType = getMemoryLimits(module_).indexType();
try validator.popType(store_type);
- try validator.popType(.I32);
+ try validator.popType(index_type);
}
fn validateVectorLane(comptime T: type, laneidx: u32) !void {
@@ -1837,12 +1908,14 @@ const ModuleValidator = struct {
},
.Memory_Size => {
try validateMemoryIndex(module);
- try self.pushType(.I32);
+ const index_type: ValType = getMemoryLimits(module).indexType();
+ try self.pushType(index_type);
},
.Memory_Grow => {
try validateMemoryIndex(module);
- try self.popType(.I32);
- try self.pushType(.I32);
+ const index_type: ValType = getMemoryLimits(module).indexType();
+ try self.popType(index_type);
+ try self.pushType(index_type);
},
.I32_Const => {
try self.pushType(.I32);
@@ -2052,9 +2125,10 @@ const ModuleValidator = struct {
.Memory_Init => {
try validateMemoryIndex(module);
try validateDataIndex(instruction.immediate.Index, module);
- try self.popType(.I32);
- try self.popType(.I32);
- try self.popType(.I32);
+ const index_type: ValType = getMemoryLimits(module).indexType();
+ try self.popType(index_type);
+ try self.popType(index_type);
+ try self.popType(index_type);
},
.Data_Drop => {
if (module.data_count != null) {
@@ -2063,11 +2137,19 @@ const ModuleValidator = struct {
return error.MalformedMissingDataCountSection;
}
},
- .Memory_Copy, .Memory_Fill => {
+ .Memory_Fill => {
try validateMemoryIndex(module);
+ const index_type: ValType = getMemoryLimits(module).indexType();
+ try self.popType(index_type);
try self.popType(.I32);
- try self.popType(.I32);
- try self.popType(.I32);
+ try self.popType(index_type);
+ },
+ .Memory_Copy => {
+ try validateMemoryIndex(module);
+ const index_type: ValType = getMemoryLimits(module).indexType();
+ try self.popType(index_type);
+ try self.popType(index_type);
+ try self.popType(index_type);
},
.Table_Init => {
const pair: TablePairImmediates = instruction.immediate.TablePair;
@@ -2488,6 +2570,7 @@ const ModuleValidator = struct {
fn popType(self: *ModuleValidator, expected_or_null: ?ValType) !void {
const valtype_or_null = try self.popAnyType();
if (valtype_or_null != expected_or_null and valtype_or_null != null and expected_or_null != null) {
+ self.log.err("Validation failed: Expected type {?} but got {?}", .{ expected_or_null, valtype_or_null });
return error.ValidationTypeMismatch;
}
}
@@ -2545,6 +2628,7 @@ const ModuleValidator = struct {
pub const ModuleDefinitionOpts = struct {
debug_name: []const u8 = "",
+ log: ?Logger = null, // if null, uses default logger
};
pub const ModuleDefinition = struct {
@@ -2588,6 +2672,7 @@ pub const ModuleDefinition = struct {
name_section: NameCustomSection,
+ log: Logger,
debug_name: []const u8,
start_func_index: ?u32 = null,
data_count: ?u32 = null,
@@ -2624,6 +2709,7 @@ pub const ModuleDefinition = struct {
.datas = std.ArrayList(DataDefinition).init(allocator),
.custom_sections = std.ArrayList(CustomSection).init(allocator),
.name_section = NameCustomSection.init(allocator),
+ .log = if (opts.log) |log| log else Logger.empty(),
.debug_name = try allocator.dupe(u8, opts.debug_name),
};
return def;
@@ -2676,7 +2762,7 @@ pub const ModuleDefinition = struct {
};
var allocator = self.allocator;
- var validator = ModuleValidator.init(allocator);
+ var validator = ModuleValidator.init(allocator, self.log);
defer validator.deinit();
var stream = std.io.fixedBufferStream(wasm);
@@ -2880,7 +2966,11 @@ pub const ModuleDefinition = struct {
while (memory_index < num_memories) : (memory_index += 1) {
var limits = try Limits.decode(reader);
- if (limits.min > MemoryDefinition.k_max_pages) {
+ if (limits.min > limits.maxPages()) {
+ self.log.err(
+ "Validation error: max memory pages exceeded. Got {} but max is {}",
+ .{ limits.min, limits.indexTypeMaxPages() },
+ );
return error.ValidationMemoryMaxPagesExceeded;
}
@@ -2888,7 +2978,11 @@ pub const ModuleDefinition = struct {
if (max < limits.min) {
return error.ValidationMemoryInvalidMaxLimit;
}
- if (max > MemoryDefinition.k_max_pages) {
+ if (max > limits.indexTypeMaxPages()) {
+ self.log.err(
+ "Validation error: max memory pages exceeded. Got {} but max is {}",
+ .{ max, limits.indexTypeMaxPages() },
+ );
return error.ValidationMemoryMaxPagesExceeded;
}
}
diff --git a/src/instance.zig b/src/instance.zig
index 50417f4..c6a4e1e 100644
--- a/src/instance.zig
+++ b/src/instance.zig
@@ -5,6 +5,7 @@ const builtin = @import("builtin");
const common = @import("common.zig");
const StableArray = common.StableArray;
+const Logger = common.Logger;
const opcodes = @import("opcode.zig");
const Opcode = opcodes.Opcode;
@@ -65,21 +66,28 @@ pub const DebugTrace = struct {
Instruction,
};
- pub fn setMode(new_mode: Mode) bool {
- if (builtin.mode == .Debug) {
- mode = new_mode;
- return true;
- }
+ pub fn setMode(new_mode: Mode) void {
+ mode = new_mode;
+ }
- return false;
+ pub fn parseMode(mode_str: []const u8) ?Mode {
+ if (std.ascii.eqlIgnoreCase(mode_str, "function") or std.ascii.eqlIgnoreCase(mode_str, "func")) {
+ return .Function;
+ } else if (std.ascii.eqlIgnoreCase(mode_str, "instruction") or std.ascii.eqlIgnoreCase(mode_str, "instr")) {
+ return .Instruction;
+ } else if (std.ascii.eqlIgnoreCase(mode_str, "none")) {
+ return .None;
+ } else {
+ return null;
+ }
}
pub fn shouldTraceFunctions() bool {
- return builtin.mode == .Debug and mode == .Function;
+ return mode == .Function;
}
pub fn shouldTraceInstructions() bool {
- return builtin.mode == .Debug and mode == .Instruction;
+ return mode == .Instruction;
}
pub fn printIndent(indent: u32) void {
@@ -183,7 +191,7 @@ pub const TableInstance = struct {
}
}
- fn init_range_expr(table: *TableInstance, module: *ModuleInstance, elems: []const ConstantExpression, init_length: u32, start_elem_index: u32, start_table_index: u32, store: *Store) !void {
+ fn init_range_expr(table: *TableInstance, module: *ModuleInstance, elems: []ConstantExpression, init_length: u32, start_elem_index: u32, start_table_index: u32) !void {
if (start_table_index < 0 or table.refs.items.len < start_table_index + init_length) {
return error.TrapOutOfBoundsTableAccess;
}
@@ -197,10 +205,11 @@ pub const TableInstance = struct {
var index: u32 = 0;
while (index < elem_range.len) : (index += 1) {
- var val: Val = elem_range[index].resolve(store);
+ var val: Val = elem_range[index].resolve(module);
if (table.reftype == .FuncRef) {
- val.FuncRef.module_instance = module;
+ // should be set in resolve() or global initialization
+ std.debug.assert(val.FuncRef.module_instance != null);
}
table_range[index] = val;
@@ -232,23 +241,27 @@ pub const MemoryInstance = struct {
};
pub const k_page_size: usize = MemoryDefinition.k_page_size;
- pub const k_max_pages: usize = MemoryDefinition.k_max_pages;
limits: Limits,
mem: BackingMemory,
pub fn init(limits: Limits, params: ?WasmMemoryExternal) MemoryInstance {
- const max_pages = if (limits.max) |max| @max(1, max) else k_max_pages;
+ const max_pages = limits.maxPages();
+ const max_bytes: u64 = max_pages * k_page_size;
var mem = if (params == null) BackingMemory{
- .Internal = StableArray(u8).init(max_pages * k_page_size),
+ .Internal = StableArray(u8).init(@intCast(max_bytes)),
} else BackingMemory{ .External = .{
.buffer = &[0]u8{},
.params = params.?,
} };
var instance = MemoryInstance{
- .limits = Limits{ .min = 0, .max = @as(u32, @intCast(max_pages)) },
+ .limits = Limits{
+ .min = 0,
+ .max = max_pages,
+ .limit_type = limits.limit_type,
+ },
.mem = mem,
};
@@ -269,13 +282,13 @@ pub const MemoryInstance = struct {
};
}
- pub fn grow(self: *MemoryInstance, num_pages: usize) bool {
+ pub fn grow(self: *MemoryInstance, num_pages: u64) bool {
if (num_pages == 0) {
return true;
}
const total_pages = self.limits.min + num_pages;
- const max_pages = if (self.limits.max) |max| max else k_max_pages;
+ const max_pages = self.limits.maxPages();
if (total_pages > max_pages) {
return false;
@@ -294,7 +307,7 @@ pub const MemoryInstance = struct {
},
}
- self.limits.min = @as(u32, @intCast(total_pages));
+ self.limits.min = total_pages;
return true;
}
@@ -598,6 +611,7 @@ pub const ModuleInstantiateOpts = struct {
wasm_memory_external: ?WasmMemoryExternal = null,
stack_size: usize = 0,
enable_debug: bool = false,
+ log: ?Logger = null,
};
pub const InvokeOpts = struct {
@@ -713,6 +727,7 @@ pub const ModuleInstance = struct {
userdata: ?*anyopaque = null, // any host data associated with this module
is_instantiated: bool = false,
vm: *VM,
+ log: Logger,
pub fn create(module_def: *const ModuleDefinition, vm: *VM, allocator: std.mem.Allocator) AllocError!*ModuleInstance {
var inst = try allocator.create(ModuleInstance);
@@ -721,6 +736,7 @@ pub const ModuleInstance = struct {
.store = Store.init(allocator),
.module_def = module_def,
.vm = vm,
+ .log = Logger.empty(),
};
return inst;
}
@@ -736,18 +752,21 @@ pub const ModuleInstance = struct {
pub fn instantiate(self: *ModuleInstance, opts: ModuleInstantiateOpts) !void {
const Helpers = struct {
fn areLimitsCompatible(def_limits: *const Limits, instance_limits: *const Limits) bool {
+ // if (def_limits.limit_type != instance_limits.limit_type) {
+ // return false;
+ // }
if (def_limits.max != null and instance_limits.max == null) {
return false;
}
- var def_max: u32 = if (def_limits.max) |max| max else std.math.maxInt(u32);
- var instance_max: u32 = if (instance_limits.max) |max| max else 0;
+ var def_max: u64 = if (def_limits.max) |max| max else std.math.maxInt(u64);
+ var instance_max: u64 = if (instance_limits.max) |max| max else 0;
return def_limits.min <= instance_limits.min and def_max >= instance_max;
}
// TODO probably should change the imports search to a hashed lookup of module_name+item_name -> array of items to make this faster
- fn findImportInMultiple(comptime T: type, names: *const ImportNames, imports_or_null: ?[]const ModuleImportPackage) UnlinkableError!*const T {
+ fn findImportInMultiple(comptime T: type, names: *const ImportNames, imports_or_null: ?[]const ModuleImportPackage, log: *Logger) UnlinkableError!*const T {
if (imports_or_null) |_imports| {
for (_imports) |*module_imports| {
const wildcard_name = std.mem.eql(u8, module_imports.name, "*");
@@ -816,6 +835,16 @@ pub const ModuleInstance = struct {
}
}
+ const import_type_str = switch (T) {
+ FunctionImport => "function",
+ TableImport => "table",
+ MemoryImport => "memory",
+ GlobalImport => "global",
+ else => unreachable,
+ };
+
+ log.err("Unable to find {s} import '{s}.{s}'", .{ import_type_str, names.module_name, names.import_name });
+
return error.UnlinkableUnknownImport;
}
@@ -840,17 +869,22 @@ pub const ModuleInstance = struct {
std.debug.assert(self.is_instantiated == false);
+ if (opts.log) |log| {
+ self.log = log;
+ }
+
var store: *Store = &self.store;
var module_def: *const ModuleDefinition = self.module_def;
var allocator = self.allocator;
for (module_def.imports.functions.items) |*func_import_def| {
- var import_func: *const FunctionImport = try Helpers.findImportInMultiple(FunctionImport, &func_import_def.names, opts.imports);
+ var import_func: *const FunctionImport = try Helpers.findImportInMultiple(FunctionImport, &func_import_def.names, opts.imports, &self.log);
const type_def: *const FunctionTypeDefinition = &module_def.types.items[func_import_def.type_index];
const is_type_signature_eql: bool = import_func.isTypeSignatureEql(type_def);
if (is_type_signature_eql == false) {
+ self.log.err("Incompatible function import '{s}.{s}'", .{ func_import_def.names.module_name, func_import_def.names.import_name });
return error.UnlinkableIncompatibleImportType;
}
@@ -858,7 +892,7 @@ pub const ModuleInstance = struct {
}
for (module_def.imports.tables.items) |*table_import_def| {
- var import_table: *const TableImport = try Helpers.findImportInMultiple(TableImport, &table_import_def.names, opts.imports);
+ var import_table: *const TableImport = try Helpers.findImportInMultiple(TableImport, &table_import_def.names, opts.imports, &self.log);
var is_eql: bool = undefined;
switch (import_table.data) {
@@ -874,6 +908,7 @@ pub const ModuleInstance = struct {
}
if (is_eql == false) {
+ self.log.err("Incompatible table import '{s}.{s}'", .{ table_import_def.names.module_name, table_import_def.names.import_name });
return error.UnlinkableIncompatibleImportType;
}
@@ -881,7 +916,7 @@ pub const ModuleInstance = struct {
}
for (module_def.imports.memories.items) |*memory_import_def| {
- var import_memory: *const MemoryImport = try Helpers.findImportInMultiple(MemoryImport, &memory_import_def.names, opts.imports);
+ var import_memory: *const MemoryImport = try Helpers.findImportInMultiple(MemoryImport, &memory_import_def.names, opts.imports, &self.log);
var is_eql: bool = undefined;
switch (import_memory.data) {
@@ -895,6 +930,7 @@ pub const ModuleInstance = struct {
}
if (is_eql == false) {
+ self.log.err("Incompatible memory import '{s}.{s}'", .{ memory_import_def.names.module_name, memory_import_def.names.import_name });
return error.UnlinkableIncompatibleImportType;
}
@@ -902,7 +938,7 @@ pub const ModuleInstance = struct {
}
for (module_def.imports.globals.items) |*global_import_def| {
- var import_global: *const GlobalImport = try Helpers.findImportInMultiple(GlobalImport, &global_import_def.names, opts.imports);
+ var import_global: *const GlobalImport = try Helpers.findImportInMultiple(GlobalImport, &global_import_def.names, opts.imports, &self.log);
var is_eql: bool = undefined;
switch (import_global.data) {
@@ -918,6 +954,7 @@ pub const ModuleInstance = struct {
}
if (is_eql == false) {
+ self.log.err("Incompatible global import '{s}.{s}'", .{ global_import_def.names.module_name, global_import_def.names.import_name });
return error.UnlinkableIncompatibleImportType;
}
@@ -949,7 +986,7 @@ pub const ModuleInstance = struct {
for (module_def.globals.items) |*def_global| {
var global = GlobalInstance{
.def = def_global,
- .value = def_global.expr.resolve(store),
+ .value = def_global.expr.resolve(self),
};
if (def_global.valtype == .FuncRef) {
global.value.FuncRef.module_instance = self;
@@ -971,7 +1008,7 @@ pub const ModuleInstance = struct {
var table: *TableInstance = store.getTable(def_elem.table_index);
- var start_table_index_i32: i32 = if (def_elem.offset) |offset| offset.resolveTo(store, i32) else 0;
+ var start_table_index_i32: i32 = if (def_elem.offset) |*offset| offset.resolveTo(self, i32) else 0;
if (start_table_index_i32 < 0) {
return error.UninstantiableOutOfBoundsTableAccess;
}
@@ -983,7 +1020,7 @@ pub const ModuleInstance = struct {
try table.init_range_val(self, elems, @as(u32, @intCast(elems.len)), 0, start_table_index);
} else {
var elems = def_elem.elems_expr.items;
- try table.init_range_expr(self, elems, @as(u32, @intCast(elems.len)), 0, start_table_index, store);
+ try table.init_range_expr(self, elems, @as(u32, @intCast(elems.len)), 0, start_table_index);
}
} else if (def_elem.mode == .Passive) {
if (def_elem.elems_value.items.len > 0) {
@@ -999,7 +1036,7 @@ pub const ModuleInstance = struct {
try elem.refs.resize(def_elem.elems_expr.items.len);
var index: usize = 0;
while (index < elem.refs.items.len) : (index += 1) {
- elem.refs.items[index] = def_elem.elems_expr.items[index].resolve(store);
+ elem.refs.items[index] = def_elem.elems_expr.items[index].resolve(self);
if (elem.reftype == .FuncRef) {
elem.refs.items[index].FuncRef.module_instance = self;
}
@@ -1017,7 +1054,7 @@ pub const ModuleInstance = struct {
var memory: *MemoryInstance = store.getMemory(memory_index);
const num_bytes: usize = def_data.bytes.items.len;
- const offset_begin: usize = (def_data.offset.?).resolveTo(store, u32);
+ const offset_begin: usize = (def_data.offset.?).resolveTo(self, u32);
const offset_end: usize = offset_begin + num_bytes;
const mem_buffer: []u8 = memory.buffer();
@@ -1118,6 +1155,8 @@ pub const ModuleInstance = struct {
}
}
+ self.log.err("Failed to find function {s}", .{func_name});
+
return error.ExportUnknownFunction;
}
@@ -1137,6 +1176,8 @@ pub const ModuleInstance = struct {
}
}
+ self.log.err("Failed to find global export {s}", .{global_name});
+
return error.ExportUnknownGlobal;
}
diff --git a/src/tests.zig b/src/tests.zig
index 2258a3e..1901663 100644
--- a/src/tests.zig
+++ b/src/tests.zig
@@ -32,11 +32,26 @@ test "MemoryInstance.init" {
const limits = Limits{
.min = 0,
.max = null,
+ .limit_type = 0, // i32 index type
};
var memory = MemoryInstance.init(limits, null);
defer memory.deinit();
try expectEqual(memory.limits.min, 0);
- try expectEqual(memory.limits.max, MemoryInstance.k_max_pages);
+ try expectEqual(memory.limits.max, Limits.k_max_pages_i32);
+ try expectEqual(memory.size(), 0);
+ try expectEqual(memory.mem.Internal.items.len, 0);
+ }
+
+ {
+ const limits = Limits{
+ .min = 0,
+ .max = null,
+ .limit_type = 4, // i64 index type
+ };
+ var memory = MemoryInstance.init(limits, null);
+ defer memory.deinit();
+ try expectEqual(memory.limits.min, 0);
+ try expectEqual(memory.limits.max, Limits.k_max_pages_i64);
try expectEqual(memory.size(), 0);
try expectEqual(memory.mem.Internal.items.len, 0);
}
@@ -45,6 +60,7 @@ test "MemoryInstance.init" {
const limits = Limits{
.min = 25,
.max = 25,
+ .limit_type = 1,
};
var memory = MemoryInstance.init(limits, null);
defer memory.deinit();
@@ -59,6 +75,7 @@ test "MemoryInstance.Internal.grow" {
const limits = Limits{
.min = 0,
.max = null,
+ .limit_type = 0,
};
var memory = MemoryInstance.init(limits, null);
defer memory.deinit();
@@ -67,14 +84,15 @@ test "MemoryInstance.Internal.grow" {
try expectEqual(memory.size(), 1);
try expectEqual(memory.grow(1), true);
try expectEqual(memory.size(), 2);
- try expectEqual(memory.grow(MemoryInstance.k_max_pages - memory.size()), true);
- try expectEqual(memory.size(), MemoryInstance.k_max_pages);
+ try expectEqual(memory.grow(Limits.k_max_pages_i32 - memory.size()), true);
+ try expectEqual(memory.size(), Limits.k_max_pages_i32);
}
{
const limits = Limits{
.min = 0,
.max = 25,
+ .limit_type = 1,
};
var memory = MemoryInstance.init(limits, null);
defer memory.deinit();
@@ -90,6 +108,7 @@ test "MemoryInstance.Internal.growAbsolute" {
const limits = Limits{
.min = 0,
.max = null,
+ .limit_type = 0,
};
var memory = MemoryInstance.init(limits, null);
defer memory.deinit();
@@ -99,14 +118,15 @@ test "MemoryInstance.Internal.growAbsolute" {
try expectEqual(memory.size(), 1);
try expectEqual(memory.growAbsolute(5), true);
try expectEqual(memory.size(), 5);
- try expectEqual(memory.growAbsolute(MemoryInstance.k_max_pages), true);
- try expectEqual(memory.size(), MemoryInstance.k_max_pages);
+ try expectEqual(memory.growAbsolute(Limits.k_max_pages_i32), true);
+ try expectEqual(memory.size(), Limits.k_max_pages_i32);
}
{
const limits = Limits{
.min = 0,
.max = 25,
+ .limit_type = 1,
};
var memory = MemoryInstance.init(limits, null);
defer memory.deinit();
diff --git a/src/vm_stack.zig b/src/vm_stack.zig
index ddeb8e6..5f840c5 100644
--- a/src/vm_stack.zig
+++ b/src/vm_stack.zig
@@ -236,6 +236,15 @@ const Stack = struct {
return stack.values[stack.num_values].V128;
}
+ fn popIndexType(stack: *Stack) i64 {
+ const index_type: ValType = stack.topFrame().module_instance.store.getMemory(0).limits.indexType();
+ return switch (index_type) {
+ .I32 => stack.popI32(),
+ .I64 => stack.popI64(),
+ else => unreachable,
+ };
+ }
+
fn pushLabel(stack: *Stack, num_returns: u32, continuation: u32) !void {
if (stack.num_labels < stack.labels.len) {
stack.labels[stack.num_labels] = Label{
@@ -253,16 +262,16 @@ const Stack = struct {
stack.num_labels -= 1;
}
- fn findLabel(stack: *const Stack, id: u32) *const Label {
+ fn findLabel(stack: Stack, id: u32) *const Label {
const index: usize = (stack.num_labels - 1) - id;
return &stack.labels[index];
}
- fn topLabel(stack: *const Stack) *const Label {
+ fn topLabel(stack: Stack) *const Label {
return &stack.labels[stack.num_labels - 1];
}
- fn frameLabel(stack: *const Stack) *const Label {
+ fn frameLabel(stack: Stack) *const Label {
var frame: *const CallFrame = stack.topFrame();
var frame_label: *const Label = &stack.labels[frame.start_offset_labels];
return frame_label;
@@ -350,7 +359,7 @@ const Stack = struct {
return null;
}
- fn topFrame(stack: *const Stack) *CallFrame {
+ fn topFrame(stack: Stack) *CallFrame {
return &stack.frames[stack.num_frames - 1];
}
@@ -359,6 +368,14 @@ const Stack = struct {
stack.num_labels = 0;
stack.num_frames = 0;
}
+
+ fn debugDump(stack: Stack) void {
+ std.debug.print("===== stack dump =====\n", .{});
+ for (stack.values[0..stack.num_values]) |val| {
+ std.debug.print("I32: {}, I64: {}, F32: {}, F64: {}\n", .{ val.I32, val.I64, val.F32, val.F64 });
+ }
+ std.debug.print("======================\n", .{});
+ }
};
// TODO move all definition stuff into definition.zig and vm stuff into vm_stack.zig
@@ -931,11 +948,13 @@ const InstructionFuncs = struct {
return @as(T, @intFromFloat(truncated));
}
- fn loadFromMem(comptime T: type, store: *Store, offset_from_memarg: usize, offset_from_stack: i32) !T {
+ fn loadFromMem(comptime T: type, stack: *Stack, offset_from_memarg: usize) TrapError!T {
+ var offset_from_stack: i64 = stack.popIndexType();
if (offset_from_stack < 0) {
return error.TrapOutOfBoundsMemoryAccess;
}
+ const store: *Store = &stack.topFrame().module_instance.store;
const memory: *const MemoryInstance = store.getMemory(0);
const offset: usize = offset_from_memarg + @as(usize, @intCast(offset_from_stack));
@@ -988,13 +1007,15 @@ const InstructionFuncs = struct {
return ret;
}
- fn storeInMem(value: anytype, store: *Store, offset_from_memarg: usize, offset_from_stack: i32) !void {
+ fn storeInMem(value: anytype, stack: *Stack, offset_from_memarg: usize) !void {
+ const offset_from_stack: i64 = stack.popIndexType();
if (offset_from_stack < 0) {
return error.TrapOutOfBoundsMemoryAccess;
}
+ const store: *Store = &stack.topFrame().module_instance.store;
const memory: *MemoryInstance = store.getMemory(0);
- const offset: usize = offset_from_memarg + @as(u32, @intCast(offset_from_stack));
+ const offset: usize = offset_from_memarg + @as(usize, @intCast(offset_from_stack));
const bit_count = @bitSizeOf(@TypeOf(value));
const write_type = switch (bit_count) {
@@ -1359,8 +1380,7 @@ const InstructionFuncs = struct {
var vec = @as(T, @bitCast(stack.popV128()));
const immediate = instruction.immediate.MemoryOffsetAndLane;
- const offset_from_stack: i32 = stack.popI32();
- const scalar = try loadFromMem(vec_type_info.child, &stack.topFrame().module_instance.store, immediate.offset, offset_from_stack);
+ const scalar = try loadFromMem(vec_type_info.child, stack, immediate.offset);
vec[immediate.laneidx] = scalar;
stack.pushV128(@as(v128, @bitCast(vec)));
}
@@ -1376,8 +1396,7 @@ const InstructionFuncs = struct {
const vec_type_info = @typeInfo(T).Vector;
const mem_offset = instruction.immediate.MemoryOffset;
- const offset_from_stack: i32 = stack.popI32();
- const scalar = try loadFromMem(vec_type_info.child, &stack.topFrame().module_instance.store, mem_offset, offset_from_stack);
+ const scalar = try loadFromMem(vec_type_info.child, stack, mem_offset);
var vec: T = @splat(0);
vec[0] = scalar;
stack.pushV128(@as(v128, @bitCast(vec)));
@@ -1386,9 +1405,8 @@ const InstructionFuncs = struct {
fn vectorStoreLane(comptime T: type, instruction: Instruction, stack: *Stack) !void {
var vec = @as(T, @bitCast(stack.popV128()));
const immediate = instruction.immediate.MemoryOffsetAndLane;
- const offset_from_stack: i32 = stack.popI32();
const scalar = vec[immediate.laneidx];
- try storeInMem(scalar, &stack.topFrame().module_instance.store, immediate.offset, offset_from_stack);
+ try storeInMem(scalar, stack, immediate.offset);
stack.pushV128(@as(v128, @bitCast(vec)));
}
@@ -1895,112 +1913,98 @@ const InstructionFuncs = struct {
fn op_I32_Load(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I32_Load", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value = try OpHelpers.loadFromMem(i32, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value = try OpHelpers.loadFromMem(i32, stack, code[pc].immediate.MemoryOffset);
stack.pushI32(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Load(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Load", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value = try OpHelpers.loadFromMem(i64, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value = try OpHelpers.loadFromMem(i64, stack, code[pc].immediate.MemoryOffset);
stack.pushI64(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_F32_Load(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("F32_Load", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value = try OpHelpers.loadFromMem(f32, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value = try OpHelpers.loadFromMem(f32, stack, code[pc].immediate.MemoryOffset);
stack.pushF32(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_F64_Load(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("F64_Load", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value = try OpHelpers.loadFromMem(f64, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value = try OpHelpers.loadFromMem(f64, stack, code[pc].immediate.MemoryOffset);
stack.pushF64(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I32_Load8_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I32_Load8_S", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: i32 = try OpHelpers.loadFromMem(i8, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: i32 = try OpHelpers.loadFromMem(i8, stack, code[pc].immediate.MemoryOffset);
stack.pushI32(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I32_Load8_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I32_Load8_U", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: u32 = try OpHelpers.loadFromMem(u8, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: u32 = try OpHelpers.loadFromMem(u8, stack, code[pc].immediate.MemoryOffset);
stack.pushI32(@as(i32, @bitCast(value)));
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I32_Load16_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I32_Load16_S", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: i32 = try OpHelpers.loadFromMem(i16, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: i32 = try OpHelpers.loadFromMem(i16, stack, code[pc].immediate.MemoryOffset);
stack.pushI32(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I32_Load16_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I32_Load16_U", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: u32 = try OpHelpers.loadFromMem(u16, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: u32 = try OpHelpers.loadFromMem(u16, stack, code[pc].immediate.MemoryOffset);
stack.pushI32(@as(i32, @bitCast(value)));
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Load8_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Load8_S", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: i64 = try OpHelpers.loadFromMem(i8, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: i64 = try OpHelpers.loadFromMem(i8, stack, code[pc].immediate.MemoryOffset);
stack.pushI64(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Load8_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Load8_U", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: u64 = try OpHelpers.loadFromMem(u8, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: u64 = try OpHelpers.loadFromMem(u8, stack, code[pc].immediate.MemoryOffset);
stack.pushI64(@as(i64, @bitCast(value)));
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Load16_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Load16_S", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: i64 = try OpHelpers.loadFromMem(i16, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: i64 = try OpHelpers.loadFromMem(i16, stack, code[pc].immediate.MemoryOffset);
stack.pushI64(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Load16_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Load16_U", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: u64 = try OpHelpers.loadFromMem(u16, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: u64 = try OpHelpers.loadFromMem(u16, stack, code[pc].immediate.MemoryOffset);
stack.pushI64(@as(i64, @bitCast(value)));
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Load32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Load32_S", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: i64 = try OpHelpers.loadFromMem(i32, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: i64 = try OpHelpers.loadFromMem(i32, stack, code[pc].immediate.MemoryOffset);
stack.pushI64(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Load32_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Load32_U", pc, code, stack);
- var offset_from_stack: i32 = stack.popI32();
- var value: u64 = try OpHelpers.loadFromMem(u32, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ var value: u64 = try OpHelpers.loadFromMem(u32, stack, code[pc].immediate.MemoryOffset);
stack.pushI64(@as(i64, @bitCast(value)));
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
@@ -2008,72 +2012,63 @@ const InstructionFuncs = struct {
fn op_I32_Store(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I32_Store", pc, code, stack);
const value: i32 = stack.popI32();
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Store(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Store", pc, code, stack);
const value: i64 = stack.popI64();
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_F32_Store(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("F32_Store", pc, code, stack);
const value: f32 = stack.popF32();
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_F64_Store(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("F64_Store", pc, code, stack);
const value: f64 = stack.popF64();
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I32_Store8(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I32_Store8", pc, code, stack);
const value: i8 = @as(i8, @truncate(stack.popI32()));
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I32_Store16(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I32_Store16", pc, code, stack);
const value: i16 = @as(i16, @truncate(stack.popI32()));
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Store8(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Store8", pc, code, stack);
const value: i8 = @as(i8, @truncate(stack.popI64()));
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Store16(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Store16", pc, code, stack);
const value: i16 = @as(i16, @truncate(stack.popI64()));
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
fn op_I64_Store32(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("I64_Store32", pc, code, stack);
const value: i32 = @as(i32, @truncate(stack.popI64()));
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
@@ -2083,8 +2078,12 @@ const InstructionFuncs = struct {
const memory_index: usize = 0;
var memory_instance: *const MemoryInstance = stack.topFrame().module_instance.store.getMemory(memory_index);
- const num_pages: i32 = @as(i32, @intCast(memory_instance.size()));
- stack.pushI32(num_pages);
+ switch (memory_instance.limits.indexType()) {
+ .I32 => stack.pushI32(@intCast(memory_instance.size())),
+ .I64 => stack.pushI64(@intCast(memory_instance.size())),
+ else => unreachable,
+ }
+
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
@@ -2094,13 +2093,21 @@ const InstructionFuncs = struct {
var memory_instance: *MemoryInstance = stack.topFrame().module_instance.store.getMemory(memory_index);
const old_num_pages: i32 = @as(i32, @intCast(memory_instance.limits.min));
- const num_pages: i32 = stack.popI32();
+ const num_pages: i64 = switch (memory_instance.limits.indexType()) {
+ .I32 => stack.popI32(),
+ .I64 => stack.popI64(),
+ else => unreachable,
+ };
if (num_pages >= 0 and memory_instance.grow(@as(usize, @intCast(num_pages)))) {
stack.pushI32(old_num_pages);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
} else {
- stack.pushI32(-1);
+ switch (memory_instance.limits.indexType()) {
+ .I32 => stack.pushI32(-1),
+ .I64 => stack.pushI64(-1),
+ else => unreachable,
+ }
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
}
@@ -3622,8 +3629,7 @@ const InstructionFuncs = struct {
fn op_V128_Load(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("V128_Load", pc, code, stack);
- const offset_from_stack: i32 = stack.popI32();
- const value = try OpHelpers.loadFromMem(v128, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ const value = try OpHelpers.loadFromMem(v128, stack, code[pc].immediate.MemoryOffset);
stack.pushV128(value);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
@@ -3666,8 +3672,7 @@ const InstructionFuncs = struct {
fn op_V128_Load8_Splat(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("V128_Load8_Splat", pc, code, stack);
- const offset_from_stack: i32 = stack.popI32();
- const scalar = try OpHelpers.loadFromMem(u8, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ const scalar = try OpHelpers.loadFromMem(u8, stack, code[pc].immediate.MemoryOffset);
const vec: u8x16 = @splat(scalar);
stack.pushV128(@as(v128, @bitCast(vec)));
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
@@ -3675,8 +3680,7 @@ const InstructionFuncs = struct {
fn op_V128_Load16_Splat(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("V128_Load16_Splat", pc, code, stack);
- const offset_from_stack: i32 = stack.popI32();
- const scalar = try OpHelpers.loadFromMem(u16, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ const scalar = try OpHelpers.loadFromMem(u16, stack, code[pc].immediate.MemoryOffset);
const vec: u16x8 = @splat(scalar);
stack.pushV128(@as(v128, @bitCast(vec)));
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
@@ -3684,8 +3688,7 @@ const InstructionFuncs = struct {
fn op_V128_Load32_Splat(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("V128_Load32_Splat", pc, code, stack);
- const offset_from_stack: i32 = stack.popI32();
- const scalar = try OpHelpers.loadFromMem(u32, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ const scalar = try OpHelpers.loadFromMem(u32, stack, code[pc].immediate.MemoryOffset);
const vec: u32x4 = @splat(scalar);
stack.pushV128(@as(v128, @bitCast(vec)));
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
@@ -3693,8 +3696,7 @@ const InstructionFuncs = struct {
fn op_V128_Load64_Splat(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void {
try debugPreamble("V128_Load64_Splat", pc, code, stack);
- const offset_from_stack: i32 = stack.popI32();
- const scalar = try OpHelpers.loadFromMem(u64, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ const scalar = try OpHelpers.loadFromMem(u64, stack, code[pc].immediate.MemoryOffset);
const vec: u64x2 = @splat(scalar);
stack.pushV128(@as(v128, @bitCast(vec)));
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
@@ -4088,8 +4090,7 @@ const InstructionFuncs = struct {
try debugPreamble("V128_Store", pc, code, stack);
const value: v128 = stack.popV128();
- const offset_from_stack: i32 = stack.popI32();
- try OpHelpers.storeInMem(value, &stack.topFrame().module_instance.store, code[pc].immediate.MemoryOffset, offset_from_stack);
+ try OpHelpers.storeInMem(value, stack, code[pc].immediate.MemoryOffset);
try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack });
}
diff --git a/src/zig-stable-array/stable_array.zig b/src/zig-stable-array/stable_array.zig
index e6d39a7..626459e 100644
--- a/src/zig-stable-array/stable_array.zig
+++ b/src/zig-stable-array/stable_array.zig
@@ -263,11 +263,14 @@ pub fn StableArrayAligned(comptime T: type, comptime alignment: u29) type {
self.items.ptr = @alignCast(@ptrCast(addr));
self.items.len = 0;
} else {
- const prot: u32 = std.c.PROT.READ | std.c.PROT.WRITE;
+ const prot: u32 = std.c.PROT.NONE;
const map: u32 = std.c.MAP.PRIVATE | std.c.MAP.ANONYMOUS;
const fd: os.fd_t = -1;
const offset: usize = 0;
- var slice = try os.mmap(null, self.max_virtual_alloc_bytes, prot, map, fd, offset);
+ var slice = os.mmap(null, self.max_virtual_alloc_bytes, prot, map, fd, offset) catch |e| {
+ std.debug.print("caught initial sizing error {}, total bytes: {}\n", .{ e, self.max_virtual_alloc_bytes });
+ return e;
+ };
self.items.ptr = @alignCast(@ptrCast(slice.ptr));
self.items.len = 0;
}
@@ -279,6 +282,20 @@ pub fn StableArrayAligned(comptime T: type, comptime alignment: u29) type {
if (builtin.os.tag == .windows) {
const w = std.os.windows;
_ = try w.VirtualAlloc(@as(w.PVOID, @ptrCast(self.items.ptr)), new_capacity_bytes, w.MEM_COMMIT, w.PAGE_READWRITE);
+ } else {
+ const resize_capacity = new_capacity_bytes - current_capacity_bytes;
+ const region_begin: [*]u8 = @ptrCast(self.items.ptr);
+ const remap_region_begin: [*]u8 = region_begin + current_capacity_bytes;
+
+ const prot: u32 = std.c.PROT.READ | std.c.PROT.WRITE;
+ const map: u32 = std.c.MAP.PRIVATE | std.c.MAP.ANONYMOUS | std.c.MAP.FIXED;
+ const fd: os.fd_t = -1;
+ const offset: usize = 0;
+
+ _ = os.mmap(@alignCast(remap_region_begin), resize_capacity, prot, map, fd, offset) catch |e| {
+ std.debug.print("caught error {}\n", .{e});
+ return e;
+ };
}
}
@@ -395,6 +412,7 @@ test "shrinkAndFree" {
test "resize" {
const max: usize = 1024 * 1024 * 1;
var a = StableArray(u8).init(max);
+ defer a.deinit();
var size: usize = 512;
while (size <= max) {
@@ -405,6 +423,8 @@ test "resize" {
test "out of memory" {
var a = StableArrayAligned(u8, mem.page_size).init(TEST_VIRTUAL_ALLOC_SIZE);
+ defer a.deinit();
+
const max_capacity: usize = TEST_VIRTUAL_ALLOC_SIZE / mem.page_size;
try a.appendNTimes(0xFF, max_capacity);
for (a.items) |v| {
@@ -420,5 +440,28 @@ test "out of memory" {
assert(err == error.OutOfMemory);
};
assert(didCatchError == true);
- a.deinit();
+}
+
+test "huge max size" {
+ const KB = 1024;
+ const MB = KB * 1024;
+ const GB = MB * 1024;
+
+ var a = StableArray(u8).init(GB * 128);
+ defer a.deinit();
+
+ try a.resize(MB * 4);
+ try a.resize(MB * 8);
+ try a.resize(MB * 16);
+ a.items[MB * 16 - 1] = 0xFF;
+}
+
+test "growing retains values" {
+ var a = StableArray(u8).init(TEST_VIRTUAL_ALLOC_SIZE);
+ defer a.deinit();
+
+ try a.resize(mem.page_size);
+ a.items[0] = 0xFF;
+ try a.resize(mem.page_size * 2);
+ assert(a.items[0] == 0xFF);
}
diff --git a/test/mem64/main.zig b/test/mem64/main.zig
new file mode 100644
index 0000000..6a7d29e
--- /dev/null
+++ b/test/mem64/main.zig
@@ -0,0 +1,32 @@
+const std = @import("std");
+const bytebox = @import("bytebox");
+const Val = bytebox.Val;
+
+pub fn main() !void {
+ std.debug.print("\nRunning mem64 test...\n", .{});
+
+ var gpa = std.heap.GeneralPurposeAllocator(.{}){};
+ var allocator: std.mem.Allocator = gpa.allocator();
+
+ const wasm_data: []u8 = try std.fs.cwd().readFileAlloc(allocator, "test/mem64/memtest.wasm", 1024 * 128);
+ defer allocator.free(wasm_data);
+
+ const module_def = try bytebox.createModuleDefinition(allocator, .{});
+ defer module_def.destroy();
+ try module_def.decode(wasm_data);
+
+ const module_instance = try bytebox.createModuleInstance(.Stack, module_def, allocator);
+ defer module_instance.destroy();
+ try module_instance.instantiate(.{});
+
+ const handle = try module_instance.getFunctionHandle("memtest");
+ const input = [4]Val{ .{ .I32 = 27368 }, .{ .I64 = 34255 }, .{ .F32 = 34234.8690 }, .{ .F64 = 989343.2849 } };
+ var output = [1]Val{.{ .I32 = 0 }};
+ try module_instance.invoke(handle, &input, &output, .{});
+
+ if (output[0].I32 != 0) {
+ return error.TestFailed;
+ }
+
+ std.debug.print("success\n", .{});
+}
diff --git a/test/mem64/memtest.c b/test/mem64/memtest.c
new file mode 100644
index 0000000..1043d55
--- /dev/null
+++ b/test/mem64/memtest.c
@@ -0,0 +1,66 @@
+// clang --target=wasm64-freestanding -mbulk-memory -nostdlib -O2 -Wl,--no-entry -Wl,--export-dynamic -o memtest.wasm memtest.c
+// zig cc --target=wasm64-freestanding -mbulk-memory -nostdlib -O2 -Wl,--no-entry -Wl,--export-dynamic -o memtest.wasm memtest.c
+
+#include <stdint.h>
+#include <stddef.h>
+
+#define KB ((size_t)1024)
+#define MB (1024 * KB)
+#define GB (1024 * MB)
+
+#define PAGE_SIZE (64 * KB)
+#define PAGES_PER_GB (GB / PAGE_SIZE)
+
+#define assert(value) if (value == 0) __builtin_trap()
+
+__attribute__((visibility("default"))) int64_t memtest(int32_t val_i32, int64_t val_i64, float val_f32, double val_f64)
+{
+ int64_t start_page = __builtin_wasm_memory_grow(0, PAGES_PER_GB * 6); // memory.grow
+ assert(start_page != -1);
+
+ char* mem = (char*)(start_page) + GB * 4;
+ volatile char* mem_stores = mem + MB * 1;
+ volatile char* mem_loads = mem + MB * 2;
+
+ int64_t num_pages = __builtin_wasm_memory_size(0); // memory.size
+ assert(num_pages >= (PAGES_PER_GB * 6));
+
+ *(int32_t*)(mem_loads + 0) = val_i32; // i32.store
+ *(int64_t*)(mem_loads + 8) = val_i64; // i64.store
+ *(float*)(mem_loads + 16) = val_f32; // f32.store
+ *(double*)(mem_loads + 24) = val_f64; // f64.store
+
+ *(int32_t*)(mem_stores + 0) = *(int32_t*)(mem_loads + 0); // i32.load -> i32.store
+ *(int64_t*)(mem_stores + 8) = *(int64_t*)(mem_loads + 8); // i64.load -> i64.store
+ *(float*)(mem_stores + 16) = *(float*)(mem_loads + 16); // f32.load -> f32.store
+ *(double*)(mem_stores + 24) = *(double*)(mem_loads + 24); // f64.load -> f64.store
+
+ assert(*(int32_t*)(mem_stores + 0) == val_i32);
+ assert(*(int64_t*)(mem_stores + 8) == val_i64);
+ assert(*(float*)(mem_stores + 16) == val_f32);
+ assert(*(double*)(mem_stores + 24) == val_f64);
+
+ int32_t load32 = 0;
+ load32 += (int32_t)*(int8_t*)(mem_loads + 32); // i32.load8_s
+ load32 += (int32_t)*(uint8_t*)(mem_loads + 40); // i32.load8_u
+ load32 += (int32_t)*(int16_t*)(mem_loads + 48); // i32.load16_s
+ load32 += (int32_t)*(uint16_t*)(mem_loads + 56); // i32.load16_u
+
+ int64_t load64 = 0;
+ load64 += (int64_t)*(int8_t*)(mem_loads + 64); // i64.load8_s
+ load64 += (int64_t)*(uint8_t*)(mem_loads + 72); // i64.load8_u
+ load64 += (int64_t)*(int16_t*)(mem_loads + 80); // i64.load16_s
+ load64 += (int64_t)*(uint16_t*)(mem_loads + 88); // i64.load16_u
+ load64 += (int64_t)*(int32_t*)(mem_loads + 96); // i64.load32_s
+ load64 += (int64_t)*(uint32_t*)(mem_loads + 104); // i64.load32_u
+
+ // forces the compiler to not elide or condense the loads
+ *(int32_t*)(mem_stores + 0) = load32;
+ *(int64_t*)(mem_stores + 8) = load64;
+
+ __builtin_memset(mem + KB, 0xFF, KB); // memory.fill
+ __builtin_memcpy(mem + KB * 4, mem + KB * 3, KB); // memory.copy
+
+ return 0;
+}
+
diff --git a/test/mem64/memtest.zig b/test/mem64/memtest.zig
new file mode 100644
index 0000000..d04b212
--- /dev/null
+++ b/test/mem64/memtest.zig
@@ -0,0 +1,25 @@
+// 0.12.0: zig build-exe memtest.zig -target wasm64-freestanding -fno-entry --export=memtest -O ReleaseSmall
+// 0.11.0: zig build-lib memtest.zig -target wasm64-freestanding -dynamic -rdynamic -O ReleaseSmall
+
+const KB = 1024;
+const MB = 1024 * KB;
+const GB = 1024 * MB;
+
+const PAGE_SIZE = 64 * KB;
+const PAGES_PER_GB = GB / PAGE_SIZE;
+
+export fn memtest() i32 {
+ _ = @wasmMemoryGrow(0, PAGES_PER_GB * 4);
+
+ var mem: [*]u8 = @ptrFromInt(4 * GB);
+
+ for (0..MB) |i| {
+ mem[i] = 0xFF;
+ mem[(4 * GB) - MB + i] = 0xFF;
+ }
+ return 0;
+}
+
+// export fn memtest() void {
+// _ = @wasmMemoryGrow(0, PAGES_PER_GB * 8);
+// }
diff --git a/test/testsuite b/test/testsuite
deleted file mode 160000
index 3a04b2c..0000000
--- a/test/testsuite
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 3a04b2cf93bd8fce277458d419eea8d9c326345c
diff --git a/test/wasi/run.py b/test/wasi/run.py
index 8ff84f7..227147a 100644
--- a/test/wasi/run.py
+++ b/test/wasi/run.py
@@ -1,6 +1,6 @@
import subprocess
-subprocess.run([
+completedProcess = subprocess.run([
"python3",
"test/wasi/wasi-testsuite/test-runner/wasi_test_runner.py",
"-r",
@@ -12,3 +12,7 @@
# the wasi tests leave a bunch of untracked files around after a test run
subprocess.run(["git", "clean", "-f"], cwd="test/wasi/wasi-testsuite")
+
+# propagate the test suite return code if there was an error
+if completedProcess.returncode != 0:
+ exit(completedProcess.returncode)
diff --git a/test/main.zig b/test/wasm/main.zig
similarity index 91%
rename from test/main.zig
rename to test/wasm/main.zig
index 65a5743..6407704 100644
--- a/test/main.zig
+++ b/test/wasm/main.zig
@@ -363,12 +363,13 @@ fn isSameError(err: anyerror, err_string: []const u8) bool {
bytebox.MalformedError.MalformedMagicSignature => strcmp(err_string, "magic header not detected"),
bytebox.MalformedError.MalformedUnexpectedEnd => strcmp(err_string, "unexpected end") or
strcmp(err_string, "unexpected end of section or function") or
- strcmp(err_string, "length out of bounds"),
+ strcmp(err_string, "length out of bounds") or
+ strcmp(err_string, "malformed section id"),
bytebox.MalformedError.MalformedUnsupportedWasmVersion => strcmp(err_string, "unknown binary version"),
bytebox.MalformedError.MalformedSectionId => strcmp(err_string, "malformed section id"),
bytebox.MalformedError.MalformedTypeSentinel => strcmp(err_string, "integer representation too long") or strcmp(err_string, "integer too large"),
bytebox.MalformedError.MalformedLEB128 => strcmp(err_string, "integer representation too long") or strcmp(err_string, "integer too large"),
- bytebox.MalformedError.MalformedMissingZeroByte => strcmp(err_string, "zero byte expected"),
+ bytebox.MalformedError.MalformedMissingZeroByte => strcmp(err_string, "zero flag expected"),
bytebox.MalformedError.MalformedTooManyLocals => strcmp(err_string, "too many locals"),
bytebox.MalformedError.MalformedFunctionCodeSectionMismatch => strcmp(err_string, "function and code section have inconsistent lengths"),
bytebox.MalformedError.MalformedMissingDataCountSection => strcmp(err_string, "data count section required") or strcmp(err_string, "unknown data segment"),
@@ -378,15 +379,15 @@ fn isSameError(err: anyerror, err_string: []const u8) bool {
bytebox.MalformedError.MalformedReferenceType => strcmp(err_string, "malformed reference type"),
bytebox.MalformedError.MalformedSectionSizeMismatch => strcmp(err_string, "section size mismatch") or
strcmp(err_string, "malformed section id") or
- strcmp(err_string, "function and code section have inconsistent lengths"), // this one is a bit of a hack to resolve custom.8.wasm
+ strcmp(err_string, "function and code section have inconsistent lengths") or // this one is a bit of a hack to resolve custom.8.wasm
+ strcmp(err_string, "zero flag expected"), // the memory64 binary tests don't seem to be up to date with the reference types spec
bytebox.MalformedError.MalformedInvalidImport => strcmp(err_string, "malformed import kind"),
- bytebox.MalformedError.MalformedLimits => strcmp(err_string, "integer too large") or strcmp(err_string, "integer representation too long"),
+ bytebox.MalformedError.MalformedLimits => strcmp(err_string, "integer too large") or strcmp(err_string, "integer representation too long") or strcmp(err_string, "malformed limits flags"),
bytebox.MalformedError.MalformedMultipleStartSections => strcmp(err_string, "multiple start sections") or
- strcmp(err_string, "unexpected content after last section"),
+ strcmp(err_string, "junk after last section"),
bytebox.MalformedError.MalformedElementType => strcmp(err_string, "integer representation too long") or strcmp(err_string, "integer too large"),
bytebox.MalformedError.MalformedUTF8Encoding => strcmp(err_string, "malformed UTF-8 encoding"),
bytebox.MalformedError.MalformedMutability => strcmp(err_string, "malformed mutability"),
-
// ValidationTypeMismatch: result arity handles select.2.wasm which is the exact same binary as select.1.wasm but the test expects a different error :/
bytebox.ValidationError.ValidationTypeMismatch => strcmp(err_string, "type mismatch") or strcmp(err_string, "invalid result arity"),
bytebox.ValidationError.ValidationTypeMustBeNumeric => strcmp(err_string, "type mismatch"),
@@ -394,7 +395,8 @@ fn isSameError(err: anyerror, err_string: []const u8) bool {
bytebox.ValidationError.ValidationUnknownFunction => std.mem.startsWith(u8, err_string, "unknown function"),
bytebox.ValidationError.ValidationUnknownGlobal => std.mem.startsWith(u8, err_string, "unknown global"),
bytebox.ValidationError.ValidationUnknownLocal => std.mem.startsWith(u8, err_string, "unknown local"),
- bytebox.ValidationError.ValidationUnknownTable => std.mem.startsWith(u8, err_string, "unknown table"),
+ bytebox.ValidationError.ValidationUnknownTable => std.mem.startsWith(u8, err_string, "unknown table") or
+ strcmp(err_string, "zero flag expected"), // the memory64 binary tests don't seem to be up to date with the reference types spec
bytebox.ValidationError.ValidationUnknownMemory => std.mem.startsWith(u8, err_string, "unknown memory"),
bytebox.ValidationError.ValidationUnknownElement => strcmp(err_string, "unknown element") or std.mem.startsWith(u8, err_string, "unknown elem segment"),
bytebox.ValidationError.ValidationUnknownData => strcmp(err_string, "unknown data") or std.mem.startsWith(u8, err_string, "unknown data segment"),
@@ -589,7 +591,9 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array
.err = try Helpers.parseBadModuleError(&json_command, allocator),
},
};
- try commands.append(command);
+ if (std.mem.endsWith(u8, command.AssertInvalid.err.module, ".wasm")) {
+ try commands.append(command);
+ }
} else if (strcmp("assert_unlinkable", json_command_type.string)) {
var command = Command{
.AssertUnlinkable = CommandAssertUnlinkable{
@@ -625,8 +629,10 @@ const TestOpts = struct {
test_filter_or_null: ?[]const u8 = null,
command_filter_or_null: ?[]const u8 = null,
module_filter_or_null: ?[]const u8 = null,
+ trace_mode: bytebox.DebugTrace.Mode = .None,
force_wasm_regen_only: bool = false,
log_suite: bool = false,
+ log: bytebox.Logger = bytebox.Logger.empty(),
};
fn makeSpectestImports(allocator: std.mem.Allocator) !bytebox.ModuleImportPackage {
@@ -709,7 +715,7 @@ fn makeSpectestImports(allocator: std.mem.Allocator) !bytebox.ModuleImportPackag
const TableInstance = bytebox.TableInstance;
var table = try allocator.create(TableInstance);
- table.* = try TableInstance.init(ValType.FuncRef, bytebox.Limits{ .min = 10, .max = 20 }, allocator);
+ table.* = try TableInstance.init(ValType.FuncRef, bytebox.Limits{ .min = 10, .max = 20, .limit_type = 1 }, allocator);
try imports.tables.append(bytebox.TableImport{
.name = try allocator.dupe(u8, "table"),
.data = .{ .Host = table },
@@ -721,6 +727,7 @@ fn makeSpectestImports(allocator: std.mem.Allocator) !bytebox.ModuleImportPackag
memory.* = MemoryInstance.init(bytebox.Limits{
.min = 1,
.max = 2,
+ .limit_type = 1,
}, null);
_ = memory.grow(1);
try imports.memories.append(bytebox.MemoryImport{
@@ -868,7 +875,11 @@ fn run(allocator: std.mem.Allocator, suite_path: []const u8, opts: *const TestOp
module.filename = try allocator.dupe(u8, module_filename);
- module.def = try bytebox.createModuleDefinition(allocator, .{ .debug_name = std.fs.path.basename(module_filename) });
+ const module_def_opts = bytebox.ModuleDefinitionOpts{
+ .debug_name = std.fs.path.basename(module_filename),
+ .log = opts.log,
+ };
+ module.def = try bytebox.createModuleDefinition(allocator, module_def_opts);
(module.def.?).decode(module_data) catch |e| {
var expected_str_or_null: ?[]const u8 = null;
if (decode_expected_error) |unwrapped_expected| {
@@ -931,7 +942,12 @@ fn run(allocator: std.mem.Allocator, suite_path: []const u8, opts: *const TestOp
}
module.inst = try bytebox.createModuleInstance(opts.vm_type, module.def.?, allocator);
- (module.inst.?).instantiate(.{ .imports = imports.items }) catch |e| {
+
+ const instantiate_opts = bytebox.ModuleInstantiateOpts{
+ .imports = imports.items,
+ .log = opts.log,
+ };
+ (module.inst.?).instantiate(instantiate_opts) catch |e| {
if (instantiate_expected_error) |expected_str| {
if (isSameError(e, expected_str)) {
logVerbose("\tSuccess!\n", .{});
@@ -1077,7 +1093,14 @@ fn run(allocator: std.mem.Allocator, suite_path: []const u8, opts: *const TestOp
print("assert_return: {s}:{s}({any})\n", .{ module.filename, c.action.field, c.action.args.items });
}
- print("\tFail on return {}/{}. Expected: {}, Actual: {}\n", .{ i + 1, returns.len, expected_value, r });
+ const format_str = "\tFail on return {}/{}. Expected: {}, Actual: {}\n";
+ switch (expected_value.type) {
+ .I32 => print(format_str, .{ i + 1, returns.len, expected_value.val.I32, r.I32 }),
+ .I64 => print(format_str, .{ i + 1, returns.len, expected_value.val.I64, r.I64 }),
+ .F32 => print(format_str, .{ i + 1, returns.len, expected_value.val.F32, r.F32 }),
+ .F64 => print(format_str, .{ i + 1, returns.len, expected_value.val.F64, r.F64 }),
+ else => unreachable,
+ }
action_succeeded = false;
}
} else {
@@ -1140,12 +1163,12 @@ fn run(allocator: std.mem.Allocator, suite_path: []const u8, opts: *const TestOp
}
}
}
+ }
- if (action_succeeded) {
- logVerbose("\tSuccess!\n", .{});
- } else {
- did_fail_any_test = true;
- }
+ if (action_succeeded) {
+ logVerbose("\tSuccess!\n", .{});
+ } else {
+ did_fail_any_test = true;
}
},
.AssertTrap => |c| {
@@ -1222,7 +1245,7 @@ fn run(allocator: std.mem.Allocator, suite_path: []const u8, opts: *const TestOp
return !did_fail_any_test;
}
-pub fn parse_vm_type(backend_str: []const u8) VmType {
+pub fn parseVmType(backend_str: []const u8) VmType {
if (strcmp("stack", backend_str)) {
return .Stack;
} else if (strcmp("register", backend_str)) {
@@ -1233,6 +1256,31 @@ pub fn parse_vm_type(backend_str: []const u8) VmType {
}
}
+fn pathExists(path: []const u8) bool {
+ std.fs.cwd().access(path, .{ .mode = .read_only }) catch |e| {
+ return switch (e) {
+ error.PermissionDenied,
+ error.FileBusy,
+ error.ReadOnlyFileSystem,
+ => true,
+
+ error.FileNotFound => false,
+
+ // unknown status, but we'll count it as a fail
+ error.NameTooLong,
+ error.InputOutput,
+ error.SystemResources,
+ error.BadPathName,
+ error.SymLinkLoop,
+ error.InvalidUtf8,
+ => false,
+ else => false,
+ };
+ };
+
+ return true;
+}
+
pub fn main() !void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var allocator: std.mem.Allocator = gpa.allocator();
@@ -1267,6 +1315,10 @@ pub fn main() !void {
\\ --test
\\ Run all tests where the 'field' in the json driver matches this filter.
\\
+ \\ --trace
+ \\ Print debug traces while executing the test at the given level. can
+ \\ be: none (default), function, instruction
+ \\
\\ --force-wasm-regen-only
\\ By default, if a given testsuite can't find its' .json file driver, it will
\\ regenerate the wasm files and json driver, then run the test. This command
@@ -1275,6 +1327,9 @@ pub fn main() !void {
\\ --log-suite
\\ Log the name of each suite and aggregate test result.
\\
+ \\ --module-logging
+ \\ Enables logging from inside the module when reporting errors.
+ \\
\\ --verbose
\\ Turn on verbose logging for each step of the test suite run.
\\
@@ -1284,7 +1339,7 @@ pub fn main() !void {
return;
} else if (strcmp("--backend", arg)) {
args_index += 1;
- opts.vm_type = parse_vm_type(args[args_index]);
+ opts.vm_type = parseVmType(args[args_index]);
} else if (strcmp("--suite", arg)) {
args_index += 1;
opts.suite_filter_or_null = args[args_index];
@@ -1301,11 +1356,21 @@ pub fn main() !void {
args_index += 1;
opts.test_filter_or_null = args[args_index];
print("found test filter: {s}\n", .{opts.test_filter_or_null.?});
+ } else if (strcmp("--trace", arg)) {
+ args_index += 1;
+ if (bytebox.DebugTrace.parseMode(args[args_index])) |mode| {
+ bytebox.DebugTrace.setMode(mode);
+ } else {
+ print("got invalid trace mode '{s}', check help for allowed options", .{args[args_index]});
+ return;
+ }
} else if (strcmp("--force-wasm-regen-only", arg)) {
opts.force_wasm_regen_only = true;
print("Force-regenerating wasm files and driver .json, skipping test run\n", .{});
} else if (strcmp("--log-suite", arg)) {
opts.log_suite = true;
+ } else if (strcmp("--module-logging", arg)) {
+ opts.log = bytebox.Logger.default();
} else if (strcmp("--verbose", arg) or strcmp("-v", arg)) {
g_verbose_logging = true;
print("verbose logging: on\n", .{});
@@ -1324,7 +1389,7 @@ pub fn main() !void {
"bulk",
"call",
"call_indirect",
- "comments",
+ // "comments", // wabt seems to error on this
"const",
"conversions",
"custom",
@@ -1469,37 +1534,56 @@ pub fn main() !void {
}
}
- var suite_path_no_extension: []const u8 = try std.fs.path.join(allocator, &[_][]const u8{ "test", "wasm", suite, suite });
+ // determine if there is a memory64 version of the test - if so, run that one
+ const suite_wast_base_path_no_extension: []const u8 = try std.fs.path.join(allocator, &[_][]const u8{ "test", "wasm", "wasm-testsuite", suite });
+ defer allocator.free(suite_wast_base_path_no_extension);
+ const suite_wast_base_path: []u8 = try std.mem.join(allocator, "", &[_][]const u8{ suite_wast_base_path_no_extension, ".wast" });
+ defer allocator.free(suite_wast_base_path);
+
+ const suite_wast_mem64_path_no_extension: []const u8 = try std.fs.path.join(allocator, &[_][]const u8{ "test", "wasm", "wasm-testsuite", "proposals", "memory64", suite });
+ defer allocator.free(suite_wast_mem64_path_no_extension);
+ const suite_wast_mem64_path: []u8 = try std.mem.join(allocator, "", &[_][]const u8{ suite_wast_mem64_path_no_extension, ".wast" });
+ defer allocator.free(suite_wast_mem64_path);
+
+ const suite_wast_path = blk: {
+ if (pathExists(suite_wast_mem64_path)) {
+ if (opts.log_suite) {
+ print("Using memory64 for suite {s}\n", .{suite});
+ }
+ break :blk suite_wast_mem64_path;
+ } else {
+ break :blk suite_wast_base_path;
+ }
+ };
+
+ // wasm path
+ const suite_path_no_extension: []const u8 = try std.fs.path.join(allocator, &[_][]const u8{ "test", "wasm", "wasm-generated", suite, suite });
defer allocator.free(suite_path_no_extension);
- var suite_path = try std.mem.join(allocator, "", &[_][]const u8{ suite_path_no_extension, ".json" });
+ const suite_path = try std.mem.join(allocator, "", &[_][]const u8{ suite_path_no_extension, ".json" });
defer allocator.free(suite_path);
var needs_regen: bool = false;
if (opts.force_wasm_regen_only) {
needs_regen = true;
} else {
- std.fs.cwd().access(suite_path, .{ .mode = .read_only }) catch |e| {
- if (e == std.os.AccessError.FileNotFound) {
- needs_regen = true;
- }
- };
+ needs_regen = pathExists(suite_path) == false;
}
if (needs_regen) {
logVerbose("Regenerating wasm and json driver for suite {s}\n", .{suite});
- // var suite_wast_path_no_extension = try std.fs.path.join(allocator, &[_][]const u8{ "test", "testsuite", suite });
- var suite_wast_path_no_extension = try std.fs.path.join(allocator, &[_][]const u8{ "../../testsuite", suite });
- defer allocator.free(suite_wast_path_no_extension);
+ // need to navigate back to repo root because the wast2json process will be running in a subdir
+ var suite_wast_path_relative = try std.fs.path.join(allocator, &[_][]const u8{ "../../../../", suite_wast_path });
+ defer allocator.free(suite_wast_path_relative);
- var suite_wast_path = try std.mem.join(allocator, "", &[_][]const u8{ suite_wast_path_no_extension, ".wast" });
- defer allocator.free(suite_wast_path);
+ const suite_json_filename: []const u8 = try std.mem.join(allocator, "", &[_][]const u8{ suite, ".json" });
+ defer allocator.free(suite_json_filename);
- var suite_wasm_folder: []const u8 = try std.fs.path.join(allocator, &[_][]const u8{ "test", "wasm", suite });
+ var suite_wasm_folder: []const u8 = try std.fs.path.join(allocator, &[_][]const u8{ "test", "wasm", "wasm-generated", suite });
defer allocator.free(suite_wasm_folder);
- std.fs.cwd().makeDir("test/wasm") catch |e| {
+ std.fs.cwd().makeDir("test/wasm/wasm-generated") catch |e| {
if (e != error.PathAlreadyExists) {
return e;
}
@@ -1511,7 +1595,7 @@ pub fn main() !void {
}
};
- var process = std.ChildProcess.init(&[_][]const u8{ "wast2json", suite_wast_path }, allocator);
+ var process = std.ChildProcess.init(&[_][]const u8{ "wasm-tools", "json-from-wast", "--pretty", "-o", suite_json_filename, suite_wast_path_relative }, allocator);
process.cwd = suite_wasm_folder;
diff --git a/test/wasm/wasm-testsuite b/test/wasm/wasm-testsuite
new file mode 160000
index 0000000..dc27dad
--- /dev/null
+++ b/test/wasm/wasm-testsuite
@@ -0,0 +1 @@
+Subproject commit dc27dad3e34e466bdbfea32fe3c73f5e31f88560