diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d73f003..92d014d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,6 +1,6 @@
 name: CI
 
-on: 
+on:
   push:
     branches:
       - main
@@ -20,7 +20,7 @@ jobs:
 
       - uses: goto-bus-stop/setup-zig@v2
         with:
-          version: 0.11.0
+          version: 0.12.0
 
       # The current default version of clang on macos runners is 14, which doesn't support the wasm64-freestanding target.
      - name: Install LLVM and Clang
@@ -40,7 +40,7 @@
        run: python3 -m pip install -r requirements.txt
 
      # Ideally we would use this but it seems to be broken
-     # - name: Setup wasm-tools 
+     # - name: Setup wasm-tools
      #   uses: jcbhmr/setup-wasm-tools@v2
      #   with:
      #     wasm-tools-version: 1.207
diff --git a/README.md b/README.md
index 51e298b..30d6064 100644
--- a/README.md
+++ b/README.md
@@ -8,8 +8,8 @@ Bytebox is a WebAssembly VM.
 
 # Getting started
 
-## Requirements
-Bytebox currently builds with [Zig 0.11.x](https://ziglang.org/download) to avoid churn on zig master.
+### Requirements
+Bytebox currently builds with [Zig 0.12.x](https://ziglang.org/download) to avoid churn on zig master.
 
 To run the tests:
 * `wasm-tools` is required to run the wasm testsuite. You can install it via the rust toolchain `cargo install wasm-tools` or directly from the [release page](https://github.com/bytecodealliance/wasm-tools/releases).
diff --git a/bench/main.zig b/bench/main.zig
index 533f354..6b3911c 100644
--- a/bench/main.zig
+++ b/bench/main.zig
@@ -3,6 +3,10 @@ const bytebox = @import("bytebox");
 const Val = bytebox.Val;
 const Timer = std.time.Timer;
 
+pub const std_options: std.Options = .{
+    .log_level = .info,
+};
+
 const Benchmark = struct {
     name: []const u8,
     filename: []const u8,
@@ -10,14 +14,14 @@ const Benchmark = struct {
 };
 
 fn elapsedMilliseconds(timer: *std.time.Timer) f64 {
-    var ns_elapsed: f64 = @as(f64, @floatFromInt(timer.read()));
+    const ns_elapsed: f64 = @as(f64, @floatFromInt(timer.read()));
     const ms_elapsed = ns_elapsed / 1000000.0;
     return ms_elapsed;
 }
 
 fn run(allocator: std.mem.Allocator, benchmark: Benchmark) !void {
     var cwd = std.fs.cwd();
-    var wasm_data: []u8 = try cwd.readFileAlloc(allocator, benchmark.filename, 1024 * 64); // Our wasm programs aren't very large
+    const wasm_data: []u8 = try cwd.readFileAlloc(allocator, benchmark.filename, 1024 * 64); // Our wasm programs aren't very large
 
     var timer = try Timer.start();
 
@@ -40,19 +44,19 @@ fn run(allocator: std.mem.Allocator, benchmark: Benchmark) !void {
 
 pub fn main() !void {
     var gpa = std.heap.GeneralPurposeAllocator(.{}){};
-    var allocator: std.mem.Allocator = gpa.allocator();
+    const allocator: std.mem.Allocator = gpa.allocator();
 
     const benchmarks = [_]Benchmark{ .{
         .name = "add-one",
-        .filename = "zig-out/lib/add-one.wasm",
+        .filename = "zig-out/bin/add-one.wasm",
         .param = 123456789,
     }, .{
         .name = "fibonacci",
-        .filename = "zig-out/lib/fibonacci.wasm",
+        .filename = "zig-out/bin/fibonacci.wasm",
         .param = 20,
     }, .{
         .name = "mandelbrot",
-        .filename = "zig-out/lib/mandelbrot.wasm",
+        .filename = "zig-out/bin/mandelbrot.wasm",
         .param = 20,
     } };
diff --git a/bench/samples/fibonacci.zig b/bench/samples/fibonacci.zig
index e0c2a3c..06fede2 100644
--- a/bench/samples/fibonacci.zig
+++ b/bench/samples/fibonacci.zig
@@ -2,8 +2,8 @@ export fn run(n: i32) i32 {
     if (n < 2) {
         return 1;
     } else {
-        var a = run(n - 1);
-        var b = run(n - 2);
+        const a = run(n - 1);
+        const b = run(n - 2);
         return a + b;
     }
 }
diff --git a/build.zig b/build.zig
index ad40f16..9fcd83f 100644
--- a/build.zig
+++ b/build.zig
@@ -2,9 +2,7 @@
const std = @import("std"); const Build = std.Build; const CrossTarget = std.zig.CrossTarget; -const Builder = std.build.Builder; -const CompileStep = std.build.CompileStep; -const InstallFileStep = std.build.InstallFileStep; +const CompileStep = std.Build.Step.Compile; const ExeOpts = struct { exe_name: []const u8, @@ -19,15 +17,15 @@ pub fn build(b: *Build) void { const should_emit_asm = b.option(bool, "asm", "Emit asm for the bytebox binaries") orelse false; const no_clang = b.option(bool, "noclang", "Pass this if clang isn't in the PATH") orelse false; - var bench_add_one_step: *CompileStep = buildWasmLib(b, "bench/samples/add-one.zig"); - var bench_fibonacci_step: *CompileStep = buildWasmLib(b, "bench/samples/fibonacci.zig"); - var bench_mandelbrot_step: *CompileStep = buildWasmLib(b, "bench/samples/mandelbrot.zig"); - const target = b.standardTargetOptions(.{}); const optimize = b.standardOptimizeOption(.{}); + var bench_add_one_step: *CompileStep = buildWasmExe(b, "bench/samples/add-one.zig"); + var bench_fibonacci_step: *CompileStep = buildWasmExe(b, "bench/samples/fibonacci.zig"); + var bench_mandelbrot_step: *CompileStep = buildWasmExe(b, "bench/samples/mandelbrot.zig"); + const bytebox_module: *Build.Module = b.addModule("bytebox", .{ - .source_file = Build.LazyPath.relative("src/core.zig"), + .root_source_file = b.path("src/core.zig"), }); _ = buildExeWithRunStep(b, target, optimize, bytebox_module, .{ @@ -53,16 +51,16 @@ pub fn build(b: *Build) void { const lib_bytebox = b.addStaticLibrary(.{ .name = "bytebox", - .root_source_file = .{ .path = "src/cffi.zig" }, + .root_source_file = b.path("src/cffi.zig"), .target = target, .optimize = optimize, }); - lib_bytebox.installHeader("src/bytebox.h", "bytebox.h"); + lib_bytebox.installHeader(b.path("src/bytebox.h"), "bytebox.h"); b.installArtifact(lib_bytebox); // Unit tests const unit_tests = b.addTest(.{ - .root_source_file = .{ .path = "src/tests.zig" }, + .root_source_file = b.path("src/tests.zig"), .target = target, .optimize = optimize, }); @@ -71,7 +69,7 @@ pub fn build(b: *Build) void { unit_test_step.dependOn(&run_unit_tests.step); // wasm tests - var wasm_testsuite_step = buildExeWithRunStep(b, target, optimize, bytebox_module, .{ + const wasm_testsuite_step = buildExeWithRunStep(b, target, optimize, bytebox_module, .{ .exe_name = "test-wasm", .root_src = "test/wasm/main.zig", .step_name = "test-wasm", @@ -100,7 +98,7 @@ pub fn build(b: *Build) void { compile_memtest.addArg("-Wl,--export-dynamic"); compile_memtest.addArg("-o"); compile_memtest.addArg("test/mem64/memtest.wasm"); - compile_memtest.addFileArg(.{ .path = "test/mem64/memtest.c" }); + compile_memtest.addFileArg(b.path("test/mem64/memtest.c")); compile_memtest.has_side_effects = true; b.getInstallStep().dependOn(&compile_memtest.step); @@ -123,15 +121,15 @@ pub fn build(b: *Build) void { } } -fn buildExeWithRunStep(b: *Build, target: CrossTarget, optimize: std.builtin.Mode, bytebox_module: *Build.Module, opts: ExeOpts) *Build.Step { +fn buildExeWithRunStep(b: *Build, target: Build.ResolvedTarget, optimize: std.builtin.Mode, bytebox_module: *Build.Module, opts: ExeOpts) *Build.Step { const exe = b.addExecutable(.{ .name = opts.exe_name, - .root_source_file = Build.LazyPath.relative(opts.root_src), + .root_source_file = b.path(opts.root_src), .target = target, .optimize = optimize, }); - exe.addModule("bytebox", bytebox_module); + exe.root_module.addImport("bytebox", bytebox_module); // exe.emit_asm = if (opts.should_emit_asm) .emit else .default; 
b.installArtifact(exe); @@ -154,21 +152,23 @@ fn buildExeWithRunStep(b: *Build, target: CrossTarget, optimize: std.builtin.Mod return step; } -fn buildWasmLib(b: *Build, filepath: []const u8) *CompileStep { +fn buildWasmExe(b: *Build, filepath: []const u8) *CompileStep { var filename: []const u8 = std.fs.path.basename(filepath); - var filename_no_extension: []const u8 = filename[0 .. filename.len - 4]; + const filename_no_extension: []const u8 = filename[0 .. filename.len - 4]; - const lib = b.addSharedLibrary(.{ + var exe = b.addExecutable(.{ .name = filename_no_extension, - .root_source_file = Build.LazyPath.relative(filepath), - .target = CrossTarget{ + .root_source_file = b.path(filepath), + .target = b.resolveTargetQuery(.{ .cpu_arch = .wasm32, .os_tag = .freestanding, - }, + }), .optimize = .ReleaseSmall, }); + exe.rdynamic = true; + exe.entry = .disabled; - b.installArtifact(lib); + b.installArtifact(exe); - return lib; + return exe; } diff --git a/build.zig.zon b/build.zig.zon index cb7761b..2b85168 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -1,5 +1,19 @@ .{ - .name = "bytebox", - .version = "0.0.1", - .dependencies = .{}, + .name = "bytebox", + .version = "0.0.1", + .minimum_zig_version = "0.12.0", + .paths = .{ + "src", + "test/mem64", + "test/wasi/run.py", + "test/wasi/bytebox_adapter.py", + "test/wasm/main.zig", + "bench", + "run", + "build.zig", + "build.zig.zon", + "LICENSE", + "README.md", + }, + .dependencies = .{}, } diff --git a/run/main.zig b/run/main.zig index 735cd91..3ed525f 100644 --- a/run/main.zig +++ b/run/main.zig @@ -53,13 +53,13 @@ fn parseCmdOpts(args: [][]const u8, env_buffer: *std.ArrayList([]const u8), dir_ var arg_index: usize = 1; while (arg_index < args.len) { - var arg = args[arg_index]; + const arg = args[arg_index]; if (arg_index == 1 and !isArgvOption(arg)) { opts.filename = arg; opts.wasm_argv = args[1..2]; } else if (arg_index == 2 and !isArgvOption(arg)) { - var wasm_argv_begin: usize = arg_index - 1; // include wasm filename + const wasm_argv_begin: usize = arg_index - 1; // include wasm filename var wasm_argv_end: usize = arg_index; while (wasm_argv_end + 1 < args.len and !isArgvOption(args[wasm_argv_end + 1])) { wasm_argv_end += 1; @@ -136,7 +136,7 @@ const version_string = "bytebox v0.0.1"; fn printHelp(args: [][]const u8) void { const usage_string: []const u8 = \\Usage: {s} [WASM_ARGS]... [OPTION]... - \\ + \\ \\ Options: \\ \\ -h, --help @@ -148,7 +148,7 @@ fn printHelp(args: [][]const u8) void { \\ --dump \\ Prints the given module definition's imports and exports. Imports are qualified \\ with the import module name. - \\ + \\ \\ -i, --invoke [ARGS]... \\ Call an exported, named function with arguments. The arguments are automatically \\ translated from string inputs to the function's native types. 
If the conversion @@ -180,7 +180,7 @@ pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; var allocator: std.mem.Allocator = gpa.allocator(); - var args = try std.process.argsAlloc(allocator); + const args = try std.process.argsAlloc(allocator); defer std.process.argsFree(allocator, args); var env_buffer = std.ArrayList([]const u8).init(allocator); @@ -220,7 +220,7 @@ pub fn main() !void { std.debug.assert(opts.filename != null); var cwd = std.fs.cwd(); - var wasm_data: []u8 = cwd.readFileAlloc(allocator, opts.filename.?, 1024 * 1024 * 128) catch |e| { + const wasm_data: []u8 = cwd.readFileAlloc(allocator, opts.filename.?, 1024 * 1024 * 128) catch |e| { std.log.err("Failed to read file '{s}' into memory: {}", .{ opts.filename.?, e }); return RunErrors.IoError; }; @@ -256,7 +256,7 @@ pub fn main() !void { }, allocator); defer wasi.deinitImports(&imports_wasi); - var instantiate_opts = bytebox.ModuleInstantiateOpts{ + const instantiate_opts = bytebox.ModuleInstantiateOpts{ .imports = &[_]bytebox.ModuleImportPackage{imports_wasi}, .log = log, }; @@ -302,28 +302,28 @@ pub fn main() !void { const arg: []const u8 = invoke_args[i]; switch (valtype) { .I32 => { - var parsed: i32 = std.fmt.parseInt(i32, arg, 0) catch |e| { + const parsed: i32 = std.fmt.parseInt(i32, arg, 0) catch |e| { std.log.err("Failed to parse arg at index {} ('{s}') as an i32: {}", .{ i, arg, e }); return RunErrors.BadFunctionParam; }; params.items[i] = Val{ .I32 = parsed }; }, .I64 => { - var parsed: i64 = std.fmt.parseInt(i64, arg, 0) catch |e| { + const parsed: i64 = std.fmt.parseInt(i64, arg, 0) catch |e| { std.log.err("Failed to parse arg at index {} ('{s}') as an i64: {}", .{ i, arg, e }); return RunErrors.BadFunctionParam; }; params.items[i] = Val{ .I64 = parsed }; }, .F32 => { - var parsed: f32 = std.fmt.parseFloat(f32, arg) catch |e| { + const parsed: f32 = std.fmt.parseFloat(f32, arg) catch |e| { std.log.err("Failed to parse arg at index {} ('{s}') as a f32: {}", .{ i, arg, e }); return RunErrors.BadFunctionParam; }; params.items[i] = Val{ .F32 = parsed }; }, .F64 => { - var parsed: f64 = std.fmt.parseFloat(f64, arg) catch |e| { + const parsed: f64 = std.fmt.parseFloat(f64, arg) catch |e| { std.log.err("Failed to parse arg at index {} ('{s}') as a f64: {}", .{ i, arg, e }); return RunErrors.BadFunctionParam; }; @@ -357,7 +357,7 @@ pub fn main() !void { { var strbuf = std.ArrayList(u8).init(allocator); defer strbuf.deinit(); - var writer = strbuf.writer(); + const writer = strbuf.writer(); if (returns.items.len > 0) { const return_types = func_export.returns; @@ -382,13 +382,13 @@ pub fn main() !void { } fn writeSignature(strbuf: *std.ArrayList(u8), info: *const bytebox.FunctionExport) !void { - var writer = strbuf.writer(); + const writer = strbuf.writer(); if (info.params.len == 0) { try std.fmt.format(writer, " params: none\n", .{}); } else { try std.fmt.format(writer, " params:\n", .{}); for (info.params) |valtype| { - var name: []const u8 = valtypeToString(valtype); + const name: []const u8 = valtypeToString(valtype); try std.fmt.format(writer, " {s}\n", .{name}); } } @@ -398,7 +398,7 @@ fn writeSignature(strbuf: *std.ArrayList(u8), info: *const bytebox.FunctionExpor } else { try std.fmt.format(writer, " returns:\n", .{}); for (info.returns) |valtype| { - var name: []const u8 = valtypeToString(valtype); + const name: []const u8 = valtypeToString(valtype); try std.fmt.format(writer, " {s}\n", .{name}); } } diff --git a/src/cffi.zig b/src/cffi.zig index b2d0f4c..95401b6 100644 --- 
a/src/cffi.zig +++ b/src/cffi.zig @@ -184,7 +184,7 @@ export fn bb_error_str(c_error: CError) [*:0]const u8 { } export fn bb_module_definition_create(c_opts: CModuleDefinitionInitOpts) ?*core.ModuleDefinition { - var allocator = cffi_gpa.allocator(); + const allocator = cffi_gpa.allocator(); const debug_name: []const u8 = if (c_opts.debug_name == null) "" else std.mem.sliceTo(c_opts.debug_name.?, 0); const opts_translated = core.ModuleDefinitionOpts{ @@ -311,7 +311,7 @@ export fn bb_import_package_add_memory(package: ?*ModuleImportPackage, config: ? unreachable; } - var mem_import = core.MemoryImport{ + const mem_import = core.MemoryImport{ .name = name, .data = .{ .Host = mem_instance }, }; @@ -336,7 +336,7 @@ export fn bb_set_debug_trace_mode(c_mode: CDebugTraceMode) void { } export fn bb_module_instance_create(module_definition: ?*ModuleDefinition) ?*ModuleInstance { - var allocator = cffi_gpa.allocator(); + const allocator = cffi_gpa.allocator(); var module: ?*core.ModuleInstance = null; @@ -362,7 +362,7 @@ export fn bb_module_instance_instantiate(module: ?*ModuleInstance, c_opts: CModu if (module != null and c_opts.packages != null and num_wasm_memory_callbacks != 1) { const packages: []?*const ModuleImportPackage = c_opts.packages.?[0..c_opts.num_packages]; - var allocator = cffi_gpa.allocator(); + const allocator = cffi_gpa.allocator(); var flat_packages = std.ArrayList(ModuleImportPackage).init(allocator); defer flat_packages.deinit(); @@ -455,8 +455,8 @@ export fn bb_module_instance_invoke(module: ?*ModuleInstance, c_handle: CFuncHan .trap_on_start = opts.trap_on_start, }; - var params_slice: []const Val = if (params != null) params.?[0..num_params] else &[_]Val{}; - var returns_slice: []Val = if (returns != null) returns.?[0..num_returns] else &[_]Val{}; + const params_slice: []const Val = if (params != null) params.?[0..num_params] else &[_]Val{}; + const returns_slice: []Val = if (returns != null) returns.?[0..num_returns] else &[_]Val{}; if (module.?.invoke(handle, params_slice.ptr, returns_slice.ptr, invoke_opts)) { return CError.Ok; @@ -491,7 +491,7 @@ export fn bb_module_instance_debug_set_trap(module: ?*ModuleInstance, address: u export fn bb_module_instance_mem(module: ?*ModuleInstance, offset: usize, length: usize) ?*anyopaque { if (module != null and length > 0) { - var mem = module.?.memorySlice(offset, length); + const mem = module.?.memorySlice(offset, length); return if (mem.len > 0) mem.ptr else null; } @@ -500,7 +500,7 @@ export fn bb_module_instance_mem(module: ?*ModuleInstance, offset: usize, length export fn bb_module_instance_mem_all(module: ?*ModuleInstance) CSlice { if (module != null) { - var mem = module.?.memoryAll(); + const mem = module.?.memoryAll(); return CSlice{ .data = mem.ptr, .length = mem.len, @@ -600,16 +600,16 @@ comptime { // Default stack-probe functions emitted by LLVM if (is_mingw) { - @export(_chkstk, .{ .name = "_alloca", .linkage = .Weak }); - @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = .Weak }); + @export(_chkstk, .{ .name = "_alloca", .linkage = .weak }); + @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = .weak }); if (builtin.cpu.arch.isAARCH64()) { - @export(__chkstk, .{ .name = "__chkstk", .linkage = .Weak }); + @export(__chkstk, .{ .name = "__chkstk", .linkage = .weak }); } } else if (!builtin.link_libc) { // This symbols are otherwise exported by MSVCRT.lib - @export(_chkstk, .{ .name = "_chkstk", .linkage = .Weak }); - @export(__chkstk, .{ .name = "__chkstk", .linkage = .Weak }); + @export(_chkstk, .{ 
.name = "_chkstk", .linkage = .weak }); + @export(__chkstk, .{ .name = "__chkstk", .linkage = .weak }); } } @@ -617,7 +617,7 @@ comptime { .x86, .x86_64, => { - @export(zig_probe_stack, .{ .name = "__zig_probe_stack", .linkage = .Weak }); + @export(zig_probe_stack, .{ .name = "__zig_probe_stack", .linkage = .weak }); }, else => {}, } diff --git a/src/core.zig b/src/core.zig index 3366e04..d9d32e1 100644 --- a/src/core.zig +++ b/src/core.zig @@ -68,7 +68,7 @@ pub const VmType = enum { }; pub fn createModuleInstance(vm_type: VmType, module_def: *const ModuleDefinition, allocator: std.mem.Allocator) AllocError!*ModuleInstance { - var vm: *inst.VM = switch (vm_type) { + const vm: *inst.VM = switch (vm_type) { .Stack => try inst.VM.create(vm_stack.StackVM, allocator), .Register => try inst.VM.create(vm_register.RegisterVM, allocator), }; diff --git a/src/definition.zig b/src/definition.zig index 7272808..d77d48e 100644 --- a/src/definition.zig +++ b/src/definition.zig @@ -98,8 +98,8 @@ const k_block_type_void_sentinel_byte: u8 = 0x40; fn decodeFloat(comptime T: type, reader: anytype) !T { return switch (T) { - f32 => @as(f32, @bitCast(try reader.readIntLittle(u32))), - f64 => @as(f64, @bitCast(try reader.readIntLittle(u64))), + f32 => @as(f32, @bitCast(try reader.readInt(u32, .little))), + f64 => @as(f64, @bitCast(try reader.readInt(u64, .little))), else => unreachable, }; } @@ -139,7 +139,7 @@ pub const ValType = enum(c_int) { } fn decodeReftype(reader: anytype) !ValType { - var valtype = try decode(reader); + const valtype = try decode(reader); if (isRefType(valtype) == false) { return error.MalformedReferenceType; } @@ -565,7 +565,7 @@ pub const FunctionTypeDefinition = struct { } for (a.types.items, 0..) |typeA, i| { - var typeB = b.types.items[i]; + const typeB = b.types.items[i]; if (typeA != typeB) { return false; } @@ -575,13 +575,13 @@ pub const FunctionTypeDefinition = struct { } fn less(context: Self, a: *FunctionTypeDefinition, b: *FunctionTypeDefinition) bool { - var ord = Self.order(context, a, b); + const ord = Self.order(context, a, b); return ord == std.math.Order.lt; } fn order(context: Self, a: *FunctionTypeDefinition, b: *FunctionTypeDefinition) std.math.Order { - var hashA = Self.hash(context, a); - var hashB = Self.hash(context, b); + const hashA = Self.hash(context, a); + const hashB = Self.hash(context, b); if (hashA < hashB) { return std.math.Order.lt; @@ -706,7 +706,7 @@ pub const DataDefinition = struct { mode: DataMode, fn decode(reader: anytype, module_def: *const ModuleDefinition, allocator: std.mem.Allocator) !DataDefinition { - var data_type: u32 = try common.decodeLEB128(u32, reader); + const data_type: u32 = try common.decodeLEB128(u32, reader); if (data_type > 2) { return error.MalformedDataType; } @@ -725,10 +725,10 @@ pub const DataDefinition = struct { offset = try ConstantExpression.decode(reader, module_def, .Immutable, .I32); } - var num_bytes = try common.decodeLEB128(u32, reader); + const num_bytes = try common.decodeLEB128(u32, reader); var bytes = std.ArrayList(u8).init(allocator); try bytes.resize(num_bytes); - var num_read = try reader.read(bytes.items); + const num_read = try reader.read(bytes.items); if (num_read != num_bytes) { return error.MalformedUnexpectedEnd; } @@ -781,7 +781,7 @@ const MemArg = struct { fn decode(reader: anytype, comptime bitwidth: u32) !MemArg { std.debug.assert(bitwidth % 8 == 0); - var memarg = MemArg{ + const memarg = MemArg{ .alignment = try common.decodeLEB128(u32, reader), .offset = try common.decodeLEB128(u64, 
reader), }; @@ -880,11 +880,11 @@ pub const Instruction = struct { blocktype = BlockTypeValue{ .Void = {} }; } else { _reader.context.pos -= 1; // move the stream backwards 1 byte to reconstruct the integer - var index_33bit = try common.decodeLEB128(i33, _reader); + const index_33bit = try common.decodeLEB128(i33, _reader); if (index_33bit < 0) { return error.MalformedBytecode; } - var index: u32 = @as(u32, @intCast(index_33bit)); + const index: u32 = @as(u32, @intCast(index_33bit)); if (index < _module.types.items.len) { blocktype = BlockTypeValue{ .TypeIndex = index }; } else { @@ -892,7 +892,7 @@ pub const Instruction = struct { } } } else { - var valtype: ValType = valtype_or_err catch unreachable; + const valtype: ValType = valtype_or_err catch unreachable; blocktype = BlockTypeValue{ .ValType = valtype }; } @@ -930,7 +930,7 @@ pub const Instruction = struct { }; const wasm_op: WasmOpcode = try WasmOpcode.decode(reader); - var opcode: Opcode = wasm_op.toOpcode(); + const opcode: Opcode = wasm_op.toOpcode(); var immediate = InstructionImmediates{ .Void = {} }; switch (opcode) { @@ -1006,10 +1006,10 @@ pub const Instruction = struct { var index: u32 = 0; while (index < table_length) : (index += 1) { - var id = try common.decodeLEB128(u32, reader); + const id = try common.decodeLEB128(u32, reader); label_ids.addOneAssumeCapacity().* = id; } - var fallback_id = try common.decodeLEB128(u32, reader); + const fallback_id = try common.decodeLEB128(u32, reader); var branch_table = BranchTableImmediates{ .label_ids = label_ids, @@ -1043,105 +1043,105 @@ pub const Instruction = struct { } }; }, .I32_Load => { - var memarg = try MemArg.decode(reader, 32); + const memarg = try MemArg.decode(reader, 32); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Load => { - var memarg = try MemArg.decode(reader, 64); + const memarg = try MemArg.decode(reader, 64); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .F32_Load => { - var memarg = try MemArg.decode(reader, 32); + const memarg = try MemArg.decode(reader, 32); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .F64_Load => { - var memarg = try MemArg.decode(reader, 64); + const memarg = try MemArg.decode(reader, 64); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I32_Load8_S => { - var memarg = try MemArg.decode(reader, 8); + const memarg = try MemArg.decode(reader, 8); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I32_Load8_U => { - var memarg = try MemArg.decode(reader, 8); + const memarg = try MemArg.decode(reader, 8); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I32_Load16_S => { - var memarg = try MemArg.decode(reader, 16); + const memarg = try MemArg.decode(reader, 16); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I32_Load16_U => { - var memarg = try MemArg.decode(reader, 16); + const memarg = try MemArg.decode(reader, 16); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Load8_S => { - var memarg = try MemArg.decode(reader, 8); + const memarg = try MemArg.decode(reader, 8); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Load8_U => { - var memarg = try MemArg.decode(reader, 8); + const memarg = try MemArg.decode(reader, 8); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Load16_S => { - var memarg = try MemArg.decode(reader, 16); + const memarg = try MemArg.decode(reader, 16); immediate = 
InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Load16_U => { - var memarg = try MemArg.decode(reader, 16); + const memarg = try MemArg.decode(reader, 16); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Load32_S => { - var memarg = try MemArg.decode(reader, 32); + const memarg = try MemArg.decode(reader, 32); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Load32_U => { - var memarg = try MemArg.decode(reader, 32); + const memarg = try MemArg.decode(reader, 32); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I32_Store => { - var memarg = try MemArg.decode(reader, 32); + const memarg = try MemArg.decode(reader, 32); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Store => { - var memarg = try MemArg.decode(reader, 64); + const memarg = try MemArg.decode(reader, 64); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .F32_Store => { - var memarg = try MemArg.decode(reader, 32); + const memarg = try MemArg.decode(reader, 32); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .F64_Store => { - var memarg = try MemArg.decode(reader, 64); + const memarg = try MemArg.decode(reader, 64); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I32_Store8 => { - var memarg = try MemArg.decode(reader, 8); + const memarg = try MemArg.decode(reader, 8); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I32_Store16 => { - var memarg = try MemArg.decode(reader, 16); + const memarg = try MemArg.decode(reader, 16); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Store8 => { - var memarg = try MemArg.decode(reader, 8); + const memarg = try MemArg.decode(reader, 8); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Store16 => { - var memarg = try MemArg.decode(reader, 16); + const memarg = try MemArg.decode(reader, 16); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I64_Store32 => { - var memarg = try MemArg.decode(reader, 32); + const memarg = try MemArg.decode(reader, 32); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .Memory_Size => { - var reserved = try reader.readByte(); + const reserved = try reader.readByte(); if (reserved != 0x00) { return error.MalformedMissingZeroByte; } }, .Memory_Grow => { - var reserved = try reader.readByte(); + const reserved = try reader.readByte(); if (reserved != 0x00) { return error.MalformedMissingZeroByte; } @@ -1155,13 +1155,13 @@ pub const Instruction = struct { immediate = InstructionImmediates{ .Index = try common.decodeLEB128(u32, reader) }; // dataidx - var reserved = try reader.readByte(); + const reserved = try reader.readByte(); if (reserved != 0x00) { return error.MalformedMissingZeroByte; } }, .Ref_Null => { - var valtype = try ValType.decode(reader); + const valtype = try ValType.decode(reader); if (valtype.isRefType() == false) { return error.MalformedBytecode; } @@ -1185,7 +1185,7 @@ pub const Instruction = struct { } }, .Memory_Fill => { - var reserved = try reader.readByte(); + const reserved = try reader.readByte(); if (reserved != 0x00) { return error.MalformedMissingZeroByte; } @@ -1209,42 +1209,42 @@ pub const Instruction = struct { immediate = InstructionImmediates{ .Index = try common.decodeLEB128(u32, reader) }; // elemidx }, .V128_Load => { - var memarg = try MemArg.decode(reader, 128); + const memarg = try MemArg.decode(reader, 128); 
immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .V128_Load8x8_S, .V128_Load8x8_U => { - var memarg = try MemArg.decode(reader, 8 * 8); + const memarg = try MemArg.decode(reader, 8 * 8); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .V128_Load16x4_S, .V128_Load16x4_U => { - var memarg = try MemArg.decode(reader, 16 * 4); + const memarg = try MemArg.decode(reader, 16 * 4); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .V128_Load32x2_S, .V128_Load32x2_U => { - var memarg = try MemArg.decode(reader, 32 * 2); + const memarg = try MemArg.decode(reader, 32 * 2); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .V128_Load8_Splat => { - var memarg = try MemArg.decode(reader, 8); + const memarg = try MemArg.decode(reader, 8); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .V128_Load16_Splat => { - var memarg = try MemArg.decode(reader, 16); + const memarg = try MemArg.decode(reader, 16); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .V128_Load32_Splat => { - var memarg = try MemArg.decode(reader, 32); + const memarg = try MemArg.decode(reader, 32); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .V128_Load64_Splat => { - var memarg = try MemArg.decode(reader, 64); + const memarg = try MemArg.decode(reader, 64); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .I8x16_Extract_Lane_S, .I8x16_Extract_Lane_U, .I8x16_Replace_Lane, .I16x8_Extract_Lane_S, .I16x8_Extract_Lane_U, .I16x8_Replace_Lane, .I32x4_Extract_Lane, .I32x4_Replace_Lane, .I64x2_Extract_Lane, .I64x2_Replace_Lane, .F32x4_Extract_Lane, .F32x4_Replace_Lane, .F64x2_Extract_Lane, .F64x2_Replace_Lane => { immediate = InstructionImmediates{ .Index = try reader.readByte() }; // laneidx }, .V128_Store => { - var memarg = try MemArg.decode(reader, 128); + const memarg = try MemArg.decode(reader, 128); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .V128_Const => { @@ -1283,11 +1283,11 @@ pub const Instruction = struct { immediate = try Helpers.decodeMemoryOffsetAndLane(reader, 64); }, .V128_Load32_Zero => { - var memarg = try MemArg.decode(reader, 128); + const memarg = try MemArg.decode(reader, 128); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, .V128_Load64_Zero => { - var memarg = try MemArg.decode(reader, 128); + const memarg = try MemArg.decode(reader, 128); immediate = InstructionImmediates{ .MemoryOffset = memarg.offset }; }, else => {}, @@ -1353,7 +1353,7 @@ pub const NameCustomSection = struct { fn decodeInternal(self: *NameCustomSection, module_definition: *const ModuleDefinition, bytes: []const u8) !void { const DecodeHelpers = struct { fn readName(stream: anytype) ![]const u8 { - var reader = stream.reader(); + const reader = stream.reader(); const name_length = try common.decodeLEB128(u32, reader); const name: []const u8 = stream.buffer[stream.pos .. 
stream.pos + name_length]; try stream.seekBy(name_length); @@ -1557,8 +1557,8 @@ const ModuleValidator = struct { else => unreachable, }; - var start_types: []const ValType = blocktype.getBlocktypeParamTypes(module_); - var end_types: []const ValType = blocktype.getBlocktypeReturnTypes(module_); + const start_types: []const ValType = blocktype.getBlocktypeParamTypes(module_); + const end_types: []const ValType = blocktype.getBlocktypeReturnTypes(module_); try popReturnTypes(validator, start_types); try validator.pushControl(instruction_.opcode, start_types, end_types); @@ -1690,7 +1690,7 @@ const ModuleValidator = struct { return error.ValidationUnknownLabel; } const stack_index = validator.control_stack.items.len - control_index - 1; - var frame: *ControlFrame = &validator.control_stack.items[stack_index]; + const frame: *ControlFrame = &validator.control_stack.items[stack_index]; return if (frame.opcode != .Loop) frame.end_types else frame.start_types; } @@ -1768,7 +1768,7 @@ const ModuleValidator = struct { } }, .Branch_Table => { - var immediates: *const BranchTableImmediates = &module.code.branch_table.items[instruction.immediate.Index]; + const immediates: *const BranchTableImmediates = &module.code.branch_table.items[instruction.immediate.Index]; const fallback_block_return_types: []const ValType = try Helpers.getControlTypes(self, immediates.fallback_id); @@ -1811,7 +1811,7 @@ const ModuleValidator = struct { return error.ValidationUnknownFunction; } - var type_index: u32 = module.getFuncTypeIndex(func_index); + const type_index: u32 = module.getFuncTypeIndex(func_index); try Helpers.popPushFuncTypes(self, type_index, module); }, .Call_Indirect => { @@ -2063,7 +2063,7 @@ const ModuleValidator = struct { try self.pushType(instruction.immediate.ValType); }, .Ref_Is_Null => { - var valtype_or_null: ?ValType = try self.popAnyType(); + const valtype_or_null: ?ValType = try self.popAnyType(); if (valtype_or_null) |valtype| { if (valtype.isRefType() == false) { return error.ValidationTypeMismatch; @@ -2195,7 +2195,7 @@ const ModuleValidator = struct { try self.popType(.I32); if (try self.popAnyType()) |init_type| { - var table_reftype: ValType = try getTableReftype(module, instruction.immediate.Index); + const table_reftype: ValType = try getTableReftype(module, instruction.immediate.Index); if (init_type != table_reftype) { return error.ValidationTypeMismatch; } @@ -2211,7 +2211,7 @@ const ModuleValidator = struct { try validateTableIndex(instruction.immediate.Index, module); try self.popType(.I32); if (try self.popAnyType()) |valtype| { - var table_reftype: ValType = try getTableReftype(module, instruction.immediate.Index); + const table_reftype: ValType = try getTableReftype(module, instruction.immediate.Index); if (valtype != table_reftype) { return error.ValidationTypeMismatch; } @@ -2578,11 +2578,11 @@ const ModuleValidator = struct { fn pushControl(self: *ModuleValidator, opcode: Opcode, start_types: []const ValType, end_types: []const ValType) !void { const control_types_start_index: usize = self.control_types.items.len; try self.control_types.appendSlice(start_types); - var control_start_types: []const ValType = self.control_types.items[control_types_start_index..self.control_types.items.len]; + const control_start_types: []const ValType = self.control_types.items[control_types_start_index..self.control_types.items.len]; const control_types_end_index: usize = self.control_types.items.len; try self.control_types.appendSlice(end_types); - var control_end_types: []const ValType = 
self.control_types.items[control_types_end_index..self.control_types.items.len]; + const control_end_types: []const ValType = self.control_types.items[control_types_end_index..self.control_types.items.len]; try self.control_stack.append(ControlFrame{ .opcode = opcode, @@ -2621,7 +2621,7 @@ const ModuleValidator = struct { } fn freeControlTypes(self: *ModuleValidator, frame: *const ControlFrame) !void { - var num_used_types: usize = frame.start_types.len + frame.end_types.len; + const num_used_types: usize = frame.start_types.len + frame.end_types.len; try self.control_types.resize(self.control_types.items.len - num_used_types); } }; @@ -2680,7 +2680,7 @@ pub const ModuleDefinition = struct { is_decoded: bool = false, pub fn create(allocator: std.mem.Allocator, opts: ModuleDefinitionOpts) AllocError!*ModuleDefinition { - var def = try allocator.create(ModuleDefinition); + const def = try allocator.create(ModuleDefinition); def.* = ModuleDefinition{ .allocator = allocator, .code = Code{ @@ -2719,7 +2719,7 @@ pub const ModuleDefinition = struct { std.debug.assert(self.is_decoded == false); self.decode_internal(wasm) catch |e| { - var wrapped_error: anyerror = switch (e) { + const wrapped_error: anyerror = switch (e) { error.EndOfStream => error.MalformedUnexpectedEnd, else => e, }; @@ -2746,9 +2746,9 @@ pub const ModuleDefinition = struct { fn readName(reader: anytype, _allocator: std.mem.Allocator) ![]const u8 { const name_length = try common.decodeLEB128(u32, reader); - var name: []u8 = try _allocator.alloc(u8, name_length); + const name: []u8 = try _allocator.alloc(u8, name_length); errdefer _allocator.free(name); - var read_length = try reader.read(name); + const read_length = try reader.read(name); if (read_length != name_length) { return error.MalformedUnexpectedEnd; } @@ -2770,11 +2770,11 @@ pub const ModuleDefinition = struct { // wasm header { - const magic = try reader.readIntBig(u32); + const magic = try reader.readInt(u32, .big); if (magic != 0x0061736D) { return error.MalformedMagicSignature; } - const version = try reader.readIntLittle(u32); + const version = try reader.readInt(u32, .little); if (version != 1) { return error.MalformedUnsupportedWasmVersion; } @@ -2795,7 +2795,7 @@ pub const ModuleDefinition = struct { return error.MalformedUnexpectedEnd; } - var name = try DecodeHelpers.readName(reader, allocator); + const name = try DecodeHelpers.readName(reader, allocator); errdefer allocator.free(name); var section = CustomSection{ @@ -2838,7 +2838,7 @@ pub const ModuleDefinition = struct { while (params_left > 0) { params_left -= 1; - var param_type = try ValType.decode(reader); + const param_type = try ValType.decode(reader); try func.types.append(param_type); } @@ -2847,7 +2847,7 @@ pub const ModuleDefinition = struct { while (returns_left > 0) { returns_left -= 1; - var return_type = try ValType.decode(reader); + const return_type = try ValType.decode(reader); try func.types.append(return_type); } @@ -2859,10 +2859,10 @@ pub const ModuleDefinition = struct { var import_index: u32 = 0; while (import_index < num_imports) : (import_index += 1) { - var module_name: []const u8 = try DecodeHelpers.readName(reader, allocator); + const module_name: []const u8 = try DecodeHelpers.readName(reader, allocator); errdefer allocator.free(module_name); - var import_name: []const u8 = try DecodeHelpers.readName(reader, allocator); + const import_name: []const u8 = try DecodeHelpers.readName(reader, allocator); errdefer allocator.free(import_name); const names = ImportNames{ @@ -2920,7 
+2920,7 @@ pub const ModuleDefinition = struct { var func_index: u32 = 0; while (func_index < num_funcs) : (func_index += 1) { - var func = FunctionDefinition{ + const func = FunctionDefinition{ .type_index = try common.decodeLEB128(u32, reader), .locals = std.ArrayList(ValType).init(allocator), @@ -2964,7 +2964,7 @@ pub const ModuleDefinition = struct { var memory_index: u32 = 0; while (memory_index < num_memories) : (memory_index += 1) { - var limits = try Limits.decode(reader); + const limits = try Limits.decode(reader); if (limits.min > limits.maxPages()) { self.log.err( @@ -2987,7 +2987,7 @@ pub const ModuleDefinition = struct { } } - var def = MemoryDefinition{ + const def = MemoryDefinition{ .limits = limits, }; try self.memories.append(def); @@ -3000,8 +3000,8 @@ pub const ModuleDefinition = struct { var global_index: u32 = 0; while (global_index < num_globals) : (global_index += 1) { - var valtype = try ValType.decode(reader); - var mut = try GlobalMut.decode(reader); + const valtype = try ValType.decode(reader); + const mut = try GlobalMut.decode(reader); const expr = try ConstantExpression.decode(reader, self, .Immutable, valtype); @@ -3029,11 +3029,11 @@ pub const ModuleDefinition = struct { var export_index: u32 = 0; while (export_index < num_exports) : (export_index += 1) { - var name: []const u8 = try DecodeHelpers.readName(reader, allocator); + const name: []const u8 = try DecodeHelpers.readName(reader, allocator); errdefer allocator.free(name); { - var getOrPutResult = try export_names.getOrPut(name); + const getOrPutResult = try export_names.getOrPut(name); if (getOrPutResult.found_existing == true) { return error.ValidationDuplicateExportName; } @@ -3080,7 +3080,7 @@ pub const ModuleDefinition = struct { if (self.start_func_index.? < self.imports.functions.items.len) { func_type_index = self.imports.functions.items[self.start_func_index.?].type_index; } else { - var local_func_index = self.start_func_index.? - self.imports.functions.items.len; + const local_func_index = self.start_func_index.? 
- self.imports.functions.items.len; func_type_index = self.functions.items[local_func_index].type_index; } @@ -3092,7 +3092,7 @@ pub const ModuleDefinition = struct { .Element => { const ElementHelpers = struct { fn readOffsetExpr(_reader: anytype, _module: *const ModuleDefinition) !ConstantExpression { - var expr = try ConstantExpression.decode(_reader, _module, .Immutable, .I32); + const expr = try ConstantExpression.decode(_reader, _module, .Immutable, .I32); return expr; } @@ -3116,13 +3116,13 @@ pub const ModuleDefinition = struct { var elem_index: u32 = 0; while (elem_index < num_elems) : (elem_index += 1) { - var expr = try ConstantExpression.decode(_reader, _module, .Any, expected_reftype); + const expr = try ConstantExpression.decode(_reader, _module, .Any, expected_reftype); try elems.append(expr); } } fn readNullElemkind(_reader: anytype) !void { - var null_elemkind = try _reader.readByte(); + const null_elemkind = try _reader.readByte(); if (null_elemkind != 0x00) { return error.MalformedBytecode; } @@ -3135,7 +3135,7 @@ pub const ModuleDefinition = struct { var segment_index: u32 = 0; while (segment_index < num_segments) : (segment_index += 1) { - var flags = try common.decodeLEB128(u32, reader); + const flags = try common.decodeLEB128(u32, reader); var def = ElementDefinition{ .mode = ElementMode.Active, @@ -3311,7 +3311,7 @@ pub const ModuleDefinition = struct { else => unreachable, } - var else_index_or_null = if_to_else_offsets.get(block.begin_index); + const else_index_or_null = if_to_else_offsets.get(block.begin_index); if (else_index_or_null) |index| { var else_instruction: *Instruction = &instructions.items[index]; else_instruction.immediate = block_instruction.immediate; @@ -3358,7 +3358,7 @@ pub const ModuleDefinition = struct { var data_index: u32 = 0; while (data_index < num_datas) : (data_index += 1) { - var data = try DataDefinition.decode(reader, self, allocator); + const data = try DataDefinition.decode(reader, self, allocator); try self.datas.append(data); } }, @@ -3368,7 +3368,7 @@ pub const ModuleDefinition = struct { }, } - var consumed_bytes = stream.pos - section_start_pos; + const consumed_bytes = stream.pos - section_start_pos; if (section_size_bytes != consumed_bytes) { return error.MalformedSectionSizeMismatch; } @@ -3441,6 +3441,10 @@ pub const ModuleDefinition = struct { item.elems_expr.deinit(); } + for (self.datas.items) |*data| { + data.bytes.deinit(); + } + self.types.deinit(); self.imports.functions.deinit(); self.imports.tables.deinit(); @@ -3487,8 +3491,8 @@ pub const ModuleDefinition = struct { }; const type_def: *const FunctionTypeDefinition = &self.types.items[type_index]; - var params: []const ValType = type_def.getParams(); - var returns: []const ValType = type_def.getReturns(); + const params: []const ValType = type_def.getParams(); + const returns: []const ValType = type_def.getReturns(); return FunctionExport{ .params = params, diff --git a/src/instance.zig b/src/instance.zig index c6a4e1e..4186fda 100644 --- a/src/instance.zig +++ b/src/instance.zig @@ -156,7 +156,7 @@ pub const TableInstance = struct { const max = if (table.limits.max) |m| m else std.math.maxInt(i32); std.debug.assert(table.refs.items.len == table.limits.min); - var old_length: usize = table.limits.min; + const old_length: usize = table.limits.min; if (old_length + length > max) { return false; } @@ -176,7 +176,7 @@ pub const TableInstance = struct { return error.TrapOutOfBoundsTableAccess; } - var elem_range = elems[start_elem_index .. 
start_elem_index + init_length]; + const elem_range = elems[start_elem_index .. start_elem_index + init_length]; var table_range = table.refs.items[start_table_index .. start_table_index + init_length]; var index: u32 = 0; @@ -205,7 +205,7 @@ pub const TableInstance = struct { var index: u32 = 0; while (index < elem_range.len) : (index += 1) { - var val: Val = elem_range[index].resolve(module); + const val: Val = elem_range[index].resolve(module); if (table.reftype == .FuncRef) { // should be set in resolve() or global initialization @@ -249,14 +249,14 @@ pub const MemoryInstance = struct { const max_pages = limits.maxPages(); const max_bytes: u64 = max_pages * k_page_size; - var mem = if (params == null) BackingMemory{ + const mem = if (params == null) BackingMemory{ .Internal = StableArray(u8).init(@intCast(max_bytes)), } else BackingMemory{ .External = .{ .buffer = &[0]u8{}, .params = params.?, } }; - var instance = MemoryInstance{ + const instance = MemoryInstance{ .limits = Limits{ .min = 0, .max = max_pages, @@ -330,12 +330,12 @@ pub const MemoryInstance = struct { fn ensureMinSize(self: *MemoryInstance, size_bytes: usize) !void { if (self.limits.min * k_page_size < size_bytes) { - var num_min_pages = std.math.divCeil(usize, size_bytes, k_page_size) catch unreachable; + const num_min_pages = std.math.divCeil(usize, size_bytes, k_page_size) catch unreachable; if (num_min_pages > self.limits.max.?) { return error.TrapOutOfBoundsMemoryAccess; } - var needed_pages = num_min_pages - self.limits.min; + const needed_pages = num_min_pages - self.limits.min; if (self.resize(needed_pages) == false) { unreachable; } @@ -398,7 +398,7 @@ pub const FunctionImport = struct { return type_comparer.eql(&data.func_def, type_signature); }, .Wasm => |data| { - var func_type_def: *const FunctionTypeDefinition = data.module_instance.findFuncTypeDef(data.index); + const func_type_def: *const FunctionTypeDefinition = data.module_instance.findFuncTypeDef(data.index); return type_comparer.eql(func_type_def, type_signature); }, } @@ -534,7 +534,7 @@ pub const Store = struct { }, fn init(allocator: std.mem.Allocator) Store { - var store = Store{ + const store = Store{ .imports = .{ .functions = std.ArrayList(FunctionImport).init(allocator), .tables = std.ArrayList(TableImport).init(allocator), @@ -567,10 +567,10 @@ pub const Store = struct { pub fn getTable(self: *Store, index: usize) *TableInstance { if (self.imports.tables.items.len <= index) { - var instance_index = index - self.imports.tables.items.len; + const instance_index = index - self.imports.tables.items.len; return &self.tables.items[instance_index]; } else { - var import: *TableImport = &self.imports.tables.items[index]; + const import: *TableImport = &self.imports.tables.items[index]; return switch (import.data) { .Host => |data| data, .Wasm => |data| data.module_instance.store.getTable(data.index), @@ -580,10 +580,10 @@ pub const Store = struct { pub fn getMemory(self: *Store, index: usize) *MemoryInstance { if (self.imports.memories.items.len <= index) { - var instance_index = index - self.imports.memories.items.len; + const instance_index = index - self.imports.memories.items.len; return &self.memories.items[instance_index]; } else { - var import: *MemoryImport = &self.imports.memories.items[index]; + const import: *MemoryImport = &self.imports.memories.items[index]; return switch (import.data) { .Host => |data| data, .Wasm => |data| data.module_instance.store.getMemory(data.index), @@ -593,10 +593,10 @@ pub const Store = struct { pub fn 
getGlobal(self: *Store, index: usize) *GlobalInstance { // TODO make private if (self.imports.globals.items.len <= index) { - var instance_index = index - self.imports.globals.items.len; + const instance_index = index - self.imports.globals.items.len; return &self.globals.items[instance_index]; } else { - var import: *GlobalImport = &self.imports.globals.items[index]; + const import: *GlobalImport = &self.imports.globals.items[index]; return switch (import.data) { .Host => |data| data, .Wasm => |data| data.module_instance.store.getGlobal(data.index), @@ -659,7 +659,7 @@ pub const VM = struct { var mem = try allocator.alloc(u8, total_alloc_size); var vm: *VM = @as(*VM, @alignCast(@ptrCast(mem.ptr))); - var impl: *T = @as(*T, @alignCast(@ptrCast(mem[vm_alloc_size..].ptr))); + const impl: *T = @as(*T, @alignCast(@ptrCast(mem[vm_alloc_size..].ptr))); vm.deinit_fn = T.deinit; vm.instantiate_fn = T.instantiate; @@ -683,7 +683,7 @@ pub const VM = struct { vm.deinit_fn(vm); var allocator = vm.allocator; - var mem = vm.mem; + const mem = vm.mem; allocator.free(mem); } @@ -730,7 +730,7 @@ pub const ModuleInstance = struct { log: Logger, pub fn create(module_def: *const ModuleDefinition, vm: *VM, allocator: std.mem.Allocator) AllocError!*ModuleInstance { - var inst = try allocator.create(ModuleInstance); + const inst = try allocator.create(ModuleInstance); inst.* = ModuleInstance{ .allocator = allocator, .store = Store.init(allocator), @@ -759,8 +759,8 @@ pub const ModuleInstance = struct { return false; } - var def_max: u64 = if (def_limits.max) |max| max else std.math.maxInt(u64); - var instance_max: u64 = if (instance_limits.max) |max| max else 0; + const def_max: u64 = if (def_limits.max) |max| max else std.math.maxInt(u64); + const instance_max: u64 = if (instance_limits.max) |max| max else 0; return def_limits.min <= instance_limits.min and def_max >= instance_max; } @@ -849,7 +849,7 @@ pub const ModuleInstance = struct { } fn findImportInSingle(comptime T: type, names: *const ImportNames, module_imports: *const ModuleImportPackage) ?*const T { - var items: []const T = switch (T) { + const items: []const T = switch (T) { FunctionImport => module_imports.functions.items, TableImport => module_imports.tables.items, MemoryImport => module_imports.memories.items, @@ -875,7 +875,7 @@ pub const ModuleInstance = struct { var store: *Store = &self.store; var module_def: *const ModuleDefinition = self.module_def; - var allocator = self.allocator; + const allocator = self.allocator; for (module_def.imports.functions.items) |*func_import_def| { var import_func: *const FunctionImport = try Helpers.findImportInMultiple(FunctionImport, &func_import_def.names, opts.imports, &self.log); @@ -967,7 +967,7 @@ pub const ModuleInstance = struct { try store.tables.ensureTotalCapacity(module_def.imports.tables.items.len + module_def.tables.items.len); for (module_def.tables.items) |*def_table| { - var t = try TableInstance.init(def_table.reftype, def_table.limits, allocator); + const t = try TableInstance.init(def_table.reftype, def_table.limits, allocator); try store.tables.append(t); } @@ -1008,18 +1008,18 @@ pub const ModuleInstance = struct { var table: *TableInstance = store.getTable(def_elem.table_index); - var start_table_index_i32: i32 = if (def_elem.offset) |*offset| offset.resolveTo(self, i32) else 0; + const start_table_index_i32: i32 = if (def_elem.offset) |*offset| offset.resolveTo(self, i32) else 0; if (start_table_index_i32 < 0) { return error.UninstantiableOutOfBoundsTableAccess; } - var 
start_table_index = @as(u32, @intCast(start_table_index_i32)); + const start_table_index = @as(u32, @intCast(start_table_index_i32)); if (def_elem.elems_value.items.len > 0) { - var elems = def_elem.elems_value.items; + const elems = def_elem.elems_value.items; try table.init_range_val(self, elems, @as(u32, @intCast(elems.len)), 0, start_table_index); } else { - var elems = def_elem.elems_expr.items; + const elems = def_elem.elems_expr.items; try table.init_range_expr(self, elems, @as(u32, @intCast(elems.len)), 0, start_table_index); } } else if (def_elem.mode == .Passive) { @@ -1050,7 +1050,7 @@ pub const ModuleInstance = struct { for (module_def.datas.items) |*def_data| { // instructions using passive elements just use the module definition's data to avoid an extra copy if (def_data.mode == .Active) { - var memory_index: u32 = def_data.memory_index.?; + const memory_index: u32 = def_data.memory_index.?; var memory: *MemoryInstance = store.getMemory(memory_index); const num_bytes: usize = def_data.bytes.items.len; @@ -1063,13 +1063,13 @@ pub const ModuleInstance = struct { return error.UninstantiableOutOfBoundsMemoryAccess; } - var destination = mem_buffer[offset_begin..offset_end]; - std.mem.copy(u8, destination, def_data.bytes.items); + const destination = mem_buffer[offset_begin..offset_end]; + @memcpy(destination, def_data.bytes.items); } } if (module_def.start_func_index) |func_index| { - var no_vals: []Val = &[0]Val{}; + const no_vals: []Val = &[0]Val{}; try self.vm.invokeWithIndex(self, func_index, no_vals.ptr, no_vals.ptr); } } @@ -1132,7 +1132,7 @@ pub const ModuleInstance = struct { for (self.module_def.exports.functions.items) |func_export| { if (std.mem.eql(u8, func_name, func_export.name)) { if (func_export.index >= self.module_def.imports.functions.items.len) { - var func_index: usize = func_export.index - self.module_def.imports.functions.items.len; + const func_index: usize = func_export.index - self.module_def.imports.functions.items.len; return FunctionHandle{ .index = @as(u32, @intCast(func_index)), .type = .Export, @@ -1203,7 +1203,7 @@ pub const ModuleInstance = struct { const buffer = memory.buffer(); if (offset + length < buffer.len) { - var data: []u8 = buffer[offset .. offset + length]; + const data: []u8 = buffer[offset .. 
offset + length]; return data; } @@ -1228,11 +1228,11 @@ pub const ModuleInstance = struct { pub fn memoryWriteInt(self: *ModuleInstance, comptime T: type, value: T, offset: usize) bool { var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined; - std.mem.writeIntLittle(T, &bytes, value); + std.mem.writeInt(T, &bytes, value, .little); - var destination = self.memorySlice(offset, bytes.len); + const destination = self.memorySlice(offset, bytes.len); if (destination.len == bytes.len) { - std.mem.copy(u8, destination, &bytes); + @memcpy(destination, &bytes); return true; } @@ -1247,11 +1247,11 @@ pub const ModuleInstance = struct { fn findFuncTypeDef(self: *ModuleInstance, index: usize) *const FunctionTypeDefinition { const num_imports: usize = self.store.imports.functions.items.len; if (index >= num_imports) { - var local_func_index: usize = index - num_imports; + const local_func_index: usize = index - num_imports; return self.vm.findFuncTypeDef(self, local_func_index); } else { - var import: *const FunctionImport = &self.store.imports.functions.items[index]; - var func_type_def: *const FunctionTypeDefinition = switch (import.data) { + const import: *const FunctionImport = &self.store.imports.functions.items[index]; + const func_type_def: *const FunctionTypeDefinition = switch (import.data) { .Host => |data| &data.func_def, .Wasm => |data| data.module_instance.findFuncTypeDef(data.index), }; @@ -1262,10 +1262,10 @@ pub const ModuleInstance = struct { fn getGlobalWithIndex(self: *ModuleInstance, index: usize) *GlobalInstance { const num_imports: usize = self.module_def.imports.globals.items.len; if (index >= num_imports) { - var local_global_index: usize = index - self.module_def.imports.globals.items.len; + const local_global_index: usize = index - self.module_def.imports.globals.items.len; return &self.store.globals.items[local_global_index]; } else { - var import: *const GlobalImport = &self.store.imports.globals.items[index]; + const import: *const GlobalImport = &self.store.imports.globals.items[index]; return switch (import.data) { .Host => |data| data, .Wasm => |data| data.module_instance.getGlobalWithIndex(data.index), diff --git a/src/opcode.zig b/src/opcode.zig index 9a29ad4..7582bde 100644 --- a/src/opcode.zig +++ b/src/opcode.zig @@ -915,14 +915,14 @@ pub const WasmOpcode = enum(u16) { } pub fn decode(reader: anytype) !WasmOpcode { - var byte = try reader.readByte(); + const byte = try reader.readByte(); var wasm_op: WasmOpcode = undefined; if (byte == 0xFC or byte == 0xFD) { - var type_opcode = try common.decodeLEB128(u32, reader); + const type_opcode = try common.decodeLEB128(u32, reader); if (type_opcode > std.math.maxInt(u8)) { return error.MalformedIllegalOpcode; } - var byte2 = @as(u8, @intCast(type_opcode)); + const byte2 = @as(u8, @intCast(type_opcode)); var extended: u16 = byte; extended = extended << 8; extended |= byte2; diff --git a/src/stringpool.zig b/src/stringpool.zig index d2566be..1b354c4 100644 --- a/src/stringpool.zig +++ b/src/stringpool.zig @@ -44,10 +44,10 @@ pub fn put(self: *StringPool, str: []const u8) ![]const u8 { try self.lookup.put(hash, str_offset_begin); var bytes: []u8 = self.buffer.items[str_offset_begin..str_offset_end]; - var str_len: *StringLenType = @alignCast(@ptrCast(bytes.ptr)); + const str_len: *StringLenType = @alignCast(@ptrCast(bytes.ptr)); str_len.* = @as(StringLenType, @intCast(str.len)); - var str_bytes: []u8 = bytes[@sizeOf(StringLenType)..]; - std.mem.copy(u8, str_bytes, str); + const str_bytes: []u8 = 
bytes[@sizeOf(StringLenType)..]; + @memcpy(str_bytes, str); return str_bytes; } @@ -57,8 +57,8 @@ pub fn find(self: *StringPool, str: []const u8) ?[]const u8 { if (self.lookup.get(hash)) |string_bytes_begin| { var str_bytes: [*]u8 = self.buffer.items[string_bytes_begin..].ptr; - var str_len: *StringLenType = @alignCast(@ptrCast(str_bytes)); - var pooled_str: []u8 = str_bytes[@sizeOf(StringLenType) .. @sizeOf(StringLenType) + str_len.*]; + const str_len: *StringLenType = @alignCast(@ptrCast(str_bytes)); + const pooled_str: []u8 = str_bytes[@sizeOf(StringLenType) .. @sizeOf(StringLenType) + str_len.*]; return pooled_str; } @@ -114,7 +114,7 @@ test "basic" { try std.testing.expect(std.mem.eql(u8, test2_str_found.?, test2_str)); try std.testing.expect(std.mem.eql(u8, long_str_found.?, long_str)); - var lazyadd_str1 = try pool.findOrPut("lazy put"); - var lazyadd_str2 = try pool.findOrPut("lazy put"); + const lazyadd_str1 = try pool.findOrPut("lazy put"); + const lazyadd_str2 = try pool.findOrPut("lazy put"); try std.testing.expect(lazyadd_str1.ptr == lazyadd_str2.ptr); } diff --git a/src/tests.zig b/src/tests.zig index 1901663..3d861ba 100644 --- a/src/tests.zig +++ b/src/tests.zig @@ -7,12 +7,12 @@ const Limits = core.Limits; const MemoryInstance = core.MemoryInstance; test "StackVM.Integration" { - const wasm_filepath = "zig-out/lib/mandelbrot.wasm"; + const wasm_filepath = "zig-out/bin/mandelbrot.wasm"; var allocator = std.testing.allocator; var cwd = std.fs.cwd(); - var wasm_data: []u8 = try cwd.readFileAlloc(allocator, wasm_filepath, 1024 * 1024 * 128); + const wasm_data: []u8 = try cwd.readFileAlloc(allocator, wasm_filepath, 1024 * 1024 * 128); defer allocator.free(wasm_data); const module_def_opts = core.ModuleDefinitionOpts{ diff --git a/src/vm_register.zig b/src/vm_register.zig index 6647cfa..52244c4 100644 --- a/src/vm_register.zig +++ b/src/vm_register.zig @@ -78,7 +78,7 @@ const IRNode = struct { edges_out_count: u32, fn createWithInstruction(mir: *ModuleIR, instruction_index: u32) AllocError!*IRNode { - var node: *IRNode = mir.ir.addOne() catch return AllocError.OutOfMemory; + const node: *IRNode = mir.ir.addOne() catch return AllocError.OutOfMemory; node.* = IRNode{ .opcode = mir.module_def.code.instructions.items[instruction_index].opcode, .is_phi = false, @@ -92,7 +92,7 @@ const IRNode = struct { } fn createStandalone(mir: *ModuleIR, opcode: Opcode) AllocError!*IRNode { - var node: *IRNode = mir.ir.addOne() catch return AllocError.OutOfMemory; + const node: *IRNode = mir.ir.addOne() catch return AllocError.OutOfMemory; node.* = IRNode{ .opcode = opcode, .is_phi = false, @@ -106,7 +106,7 @@ const IRNode = struct { } fn createPhi(mir: *ModuleIR) AllocError!*IRNode { - var node: *IRNode = mir.ir.addOne() catch return AllocError.OutOfMemory; + const node: *IRNode = mir.ir.addOne() catch return AllocError.OutOfMemory; node.* = IRNode{ .opcode = .Invalid, .is_phi = true, @@ -277,7 +277,7 @@ const RegisterSlots = struct { }); } - var index = self.last_free.?; + const index = self.last_free.?; var slot: *Slot = &self.slots.items[index]; self.last_free = slot.prev; slot.node = node; @@ -411,7 +411,7 @@ const IRFunction = struct { } const end_instruction_offset = instructions.items.len; - var emitted_instructions = instructions.items[start_instruction_offset..end_instruction_offset]; + const emitted_instructions = instructions.items[start_instruction_offset..end_instruction_offset]; std.mem.reverse(RegInstruction, emitted_instructions); } @@ -653,7 +653,7 @@ const ModuleIR = struct { var 
edges_buffer: [8]*IRNode = undefined; // 8 should be more stack slots than any one instruction can pop std.debug.assert(num_consumed <= edges_buffer.len); - var edges = edges_buffer[0..num_consumed]; + const edges = edges_buffer[0..num_consumed]; for (edges) |*e| { e.* = self.value_stack.pop(); } @@ -677,9 +677,9 @@ const ModuleIR = struct { else => @compileError("Unsupported const instruction"), }; - var res = try self.unique_constants.getOrPut(val); + const res = try self.unique_constants.getOrPut(val); if (res.found_existing == false) { - var node = try IRNode.createWithInstruction(mir, instruction_index); + const node = try IRNode.createWithInstruction(mir, instruction_index); res.value_ptr.* = node; } if (self.is_unreachable == false) { @@ -690,7 +690,7 @@ const ModuleIR = struct { fn addPendingEdgeLabel(self: *IntermediateCompileData, node: *IRNode, label_id: u32) !void { const last_block_index = self.blocks.blocks.items.len - 1; - var continuation: u32 = self.blocks.blocks.items[last_block_index - label_id].continuation; + const continuation: u32 = self.blocks.blocks.items[last_block_index - label_id].continuation; try self.pending_continuation_edges.append(PendingContinuationEdge{ .node = node, .continuation = continuation, @@ -870,7 +870,7 @@ const ModuleIR = struct { var nodes_with_side_effects: *std.ArrayList(*IRNode) = &compile_data.scratch_node_list_1; defer nodes_with_side_effects.clearRetainingCapacity(); - var current_block_nodes: []*IRNode = compile_data.blocks.currentBlockNodes(); + const current_block_nodes: []*IRNode = compile_data.blocks.currentBlockNodes(); for (current_block_nodes) |block_node| { if (block_node.hasSideEffects() or block_node.isFlowControl()) { @@ -881,7 +881,7 @@ const ModuleIR = struct { if (nodes_with_side_effects.items.len >= 2) { var i: i32 = @intCast(nodes_with_side_effects.items.len - 2); while (i >= 0) : (i -= 1) { - var ii: u32 = @intCast(i); + const ii: u32 = @intCast(i); var node_a: *IRNode = nodes_with_side_effects.items[ii]; if (try node_a.isIsland(&compile_data.scratch_node_list_2)) { var node_b: *IRNode = nodes_with_side_effects.items[ii + 1]; @@ -1021,14 +1021,14 @@ const ModuleIR = struct { assert(node == null); if (compile_data.is_unreachable == false) { - var n: *IRNode = compile_data.value_stack.pop(); + const n: *IRNode = compile_data.value_stack.pop(); locals[instruction.immediate.Index] = n; } }, .Local_Tee => { assert(node == null); if (compile_data.is_unreachable == false) { - var n: *IRNode = compile_data.value_stack.items[compile_data.value_stack.items.len - 1]; + const n: *IRNode = compile_data.value_stack.items[compile_data.value_stack.items.len - 1]; locals[instruction.immediate.Index] = n; } }, @@ -1203,7 +1203,7 @@ fn runTestWithViz(wasm_filepath: []const u8, viz_dir: []const u8) !void { var allocator = std.testing.allocator; var cwd = std.fs.cwd(); - var wasm_data: []u8 = try cwd.readFileAlloc(allocator, wasm_filepath, 1024 * 1024 * 128); + const wasm_data: []u8 = try cwd.readFileAlloc(allocator, wasm_filepath, 1024 * 1024 * 128); defer allocator.free(wasm_data); const module_def_opts = def.ModuleDefinitionOpts{ diff --git a/src/vm_stack.zig b/src/vm_stack.zig index 5f840c5..ad1715b 100644 --- a/src/vm_stack.zig +++ b/src/vm_stack.zig @@ -1,5 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); +const assert = std.debug.assert; const common = @import("common.zig"); const StableArray = common.StableArray; @@ -126,7 +127,7 @@ const Stack = struct { }; fn init(allocator: std.mem.Allocator) Stack { - var 
stack = Stack{ + const stack = Stack{ .values = &[_]Val{}, .labels = &[_]Label{}, .frames = &[_]CallFrame{}, @@ -203,7 +204,7 @@ const Stack = struct { fn popValue(stack: *Stack) Val { stack.num_values -= 1; - var value: Val = stack.values[stack.num_values]; + const value: Val = stack.values[stack.num_values]; return value; } @@ -272,14 +273,14 @@ const Stack = struct { } fn frameLabel(stack: Stack) *const Label { - var frame: *const CallFrame = stack.topFrame(); - var frame_label: *const Label = &stack.labels[frame.start_offset_labels]; + const frame: *const CallFrame = stack.topFrame(); + const frame_label: *const Label = &stack.labels[frame.start_offset_labels]; return frame_label; } fn popAllUntilLabelId(stack: *Stack, label_id: u64, pop_final_label: bool, num_returns: usize) void { - var label_index: u16 = @as(u16, @intCast((stack.num_labels - label_id) - 1)); - var label: *const Label = &stack.labels[label_index]; + const label_index: u16 = @as(u16, @intCast((stack.num_labels - label_id) - 1)); + const label: *const Label = &stack.labels[label_index]; if (pop_final_label) { const source_begin: usize = stack.num_values - num_returns; @@ -289,7 +290,11 @@ const Stack = struct { const returns_source: []const Val = stack.values[source_begin..source_end]; const returns_dest: []Val = stack.values[dest_begin..dest_end]; - std.mem.copy(Val, returns_dest, returns_source); + if (dest_begin <= source_begin) { + std.mem.copyForwards(Val, returns_dest, returns_source); + } else { + std.mem.copyBackwards(Val, returns_dest, returns_source); + } stack.num_values = @as(u32, @intCast(dest_end)); stack.num_labels = label_index; @@ -304,11 +309,11 @@ const Stack = struct { // the stack should already be populated with the params to the function, so all that's // left to do is initialize the locals to their default values - var values_index_begin: u32 = stack.num_values - @as(u32, @intCast(param_types.len)); - var values_index_end: u32 = stack.num_values + @as(u32, @intCast(non_param_types.len)); + const values_index_begin: u32 = stack.num_values - @as(u32, @intCast(param_types.len)); + const values_index_end: u32 = stack.num_values + @as(u32, @intCast(non_param_types.len)); if (stack.num_frames < stack.frames.len and values_index_end < stack.values.len) { - var locals_and_params: []Val = stack.values[values_index_begin..values_index_end]; + const locals_and_params: []Val = stack.values[values_index_begin..values_index_end]; var locals = stack.values[stack.num_values..values_index_end]; stack.num_values = values_index_end; @@ -332,8 +337,8 @@ const Stack = struct { } fn popFrame(stack: *Stack) ?FuncCallData { - var frame: *CallFrame = stack.topFrame(); - var frame_label: Label = stack.labels[frame.start_offset_labels]; + const frame: *CallFrame = stack.topFrame(); + const frame_label: Label = stack.labels[frame.start_offset_labels]; const num_returns: usize = frame.num_returns; const source_begin: usize = stack.num_values - num_returns; @@ -343,7 +348,8 @@ const Stack = struct { const returns_source: []const Val = stack.values[source_begin..source_end]; const returns_dest: []Val = stack.values[dest_begin..dest_end]; - std.mem.copy(Val, returns_dest, returns_source); + assert(dest_begin <= source_begin); + std.mem.copyForwards(Val, returns_dest, returns_source); stack.num_values = @as(u32, @intCast(dest_end)); stack.num_labels = frame.start_offset_labels; @@ -894,7 +900,7 @@ const InstructionFuncs = struct { else => @compileError("Only f32 and f64 are supported inputs."), } - var truncated = 
@trunc(value); + const truncated = @trunc(value); if (std.math.isNan(truncated)) { return error.TrapInvalidIntegerConversion; @@ -928,7 +934,7 @@ const InstructionFuncs = struct { else => @compileError("Only f32 and f64 are supported inputs."), } - var truncated = @trunc(value); + const truncated = @trunc(value); if (std.math.isNan(truncated)) { return 0; @@ -949,7 +955,7 @@ const InstructionFuncs = struct { } fn loadFromMem(comptime T: type, stack: *Stack, offset_from_memarg: usize) TrapError!T { - var offset_from_stack: i64 = stack.popIndexType(); + const offset_from_stack: i64 = stack.popIndexType(); if (offset_from_stack < 0) { return error.TrapOutOfBoundsMemoryAccess; } @@ -976,7 +982,8 @@ const InstructionFuncs = struct { } const mem = buffer[offset..end]; - const value = std.mem.readIntSliceLittle(read_type, mem); + const byte_count = bit_count / 8; + const value = std.mem.readInt(read_type, mem[0..byte_count], .little); return @as(T, @bitCast(value)); } @@ -1002,7 +1009,7 @@ const InstructionFuncs = struct { while (i < array_len) : (i += 1) { const value_start = i * byte_count; const value_end = value_start + byte_count; - ret[i] = std.mem.readIntSliceLittle(read_type, mem[value_start..value_end]); + ret[i] = std.mem.readInt(read_type, mem[value_start..value_end][0..byte_count], .little); } return ret; } @@ -1037,7 +1044,8 @@ const InstructionFuncs = struct { const write_value = @as(write_type, @bitCast(value)); const mem = buffer[offset..end]; - std.mem.writeIntSliceLittle(write_type, mem, write_value); + const byte_count = bit_count / 8; + std.mem.writeInt(write_type, mem[0..byte_count], write_value, .little); } fn call(pc: u32, stack: *Stack, module_instance: *ModuleInstance, func: *const FunctionInstance) !FuncCallData { @@ -1064,18 +1072,19 @@ const InstructionFuncs = struct { const returns_len: u32 = @as(u32, @intCast(data.func_def.calcNumReturns())); if (stack.num_values + returns_len < stack.values.len) { - var module: *ModuleInstance = stack.topFrame().module_instance; - var params = stack.values[stack.num_values - params_len .. stack.num_values]; - var returns_temp = stack.values[stack.num_values .. stack.num_values + returns_len]; + const module: *ModuleInstance = stack.topFrame().module_instance; + const params = stack.values[stack.num_values - params_len .. stack.num_values]; + const returns_temp = stack.values[stack.num_values .. stack.num_values + returns_len]; DebugTrace.traceHostFunction(module, stack.num_frames + 1, func.name); data.callback(data.userdata, module, params.ptr, returns_temp.ptr); stack.num_values = (stack.num_values - params_len) + returns_len; - var returns_dest = stack.values[stack.num_values - returns_len .. stack.num_values]; + const returns_dest = stack.values[stack.num_values - returns_len .. stack.num_values]; - std.mem.copy(Val, returns_dest, returns_temp); + assert(@intFromPtr(returns_dest.ptr) < @intFromPtr(returns_temp.ptr)); + std.mem.copyForwards(Val, returns_dest, returns_temp); return FuncCallData{ .code = stack.topFrame().module_instance.module_def.code.instructions.items.ptr, @@ -1243,7 +1252,7 @@ const InstructionFuncs = struct { const vec = @as(T, @bitCast(stack.popV128())); var arr: [type_info.len]child_type = undefined; for (&arr, 0..) 
|*v, i| { - v.* = @as(child_type, @bitCast(std.math.absCast(vec[i]))); + v.* = @as(child_type, @bitCast(@abs(vec[i]))); } const abs: T = arr; stack.pushV128(@as(v128, @bitCast(abs))); @@ -1403,7 +1412,7 @@ const InstructionFuncs = struct { } fn vectorStoreLane(comptime T: type, instruction: Instruction, stack: *Stack) !void { - var vec = @as(T, @bitCast(stack.popV128())); + const vec = @as(T, @bitCast(stack.popV128())); const immediate = instruction.immediate.MemoryOffsetAndLane; const scalar = vec[immediate.laneidx]; try storeInMem(scalar, stack, immediate.offset); @@ -1594,7 +1603,7 @@ const InstructionFuncs = struct { fn op_DebugTrap(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("DebugTrap", pc, code, stack); - var root_module_instance: *ModuleInstance = stack.frames[0].module_instance; + const root_module_instance: *ModuleInstance = stack.frames[0].module_instance; const stack_vm = StackVM.fromVM(root_module_instance.vm); std.debug.assert(stack_vm.debug_state != null); @@ -1654,7 +1663,7 @@ const InstructionFuncs = struct { fn op_Else(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Else", pc, code, stack); // getting here means we reached the end of the if opcode chain, so skip to the true end opcode - var next_pc: u32 = code[pc].immediate.If.end_continuation; + const next_pc: u32 = code[pc].immediate.If.end_continuation; try @call(.always_tail, InstructionFuncs.lookup(code[next_pc].opcode), .{ next_pc, code, stack }); } @@ -1724,7 +1733,7 @@ const InstructionFuncs = struct { fn op_Return(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Return", pc, code, stack); - var next: FuncCallData = stack.popFrame() orelse return; + const next: FuncCallData = stack.popFrame() orelse return; try @call(.always_tail, InstructionFuncs.lookup(next.code[next.continuation].opcode), .{ next.continuation, next.code, stack }); } @@ -1742,7 +1751,7 @@ const InstructionFuncs = struct { const func: *const FunctionInstance = &stack_vm.functions.items[@as(usize, @intCast(func_instance_index))]; next = try OpHelpers.call(pc, stack, module_instance, func); } else { - var func_import = &store.imports.functions.items[func_index]; + const func_import = &store.imports.functions.items[func_index]; next = try OpHelpers.callImport(pc, stack, func_import); } @@ -1791,7 +1800,7 @@ const InstructionFuncs = struct { next = try OpHelpers.call(pc, stack, call_module, func); } else { var func_import: *const FunctionImport = &call_store.imports.functions.items[func_index]; - var func_type_def: *const FunctionTypeDefinition = &call_module.module_def.types.items[immediates.type_index]; + const func_type_def: *const FunctionTypeDefinition = &call_module.module_def.types.items[immediates.type_index]; if (func_import.isTypeSignatureEql(func_type_def) == false) { return error.TrapIndirectCallTypeMismatch; } @@ -1810,9 +1819,9 @@ const InstructionFuncs = struct { fn op_Select(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Select", pc, code, stack); - var boolean: i32 = stack.popI32(); - var v2: Val = stack.popValue(); - var v1: Val = stack.popValue(); + const boolean: i32 = stack.popI32(); + const v2: Val = stack.popValue(); + const v1: Val = stack.popValue(); if (boolean != 0) { stack.pushValue(v1); @@ -1826,9 +1835,9 @@ const InstructionFuncs = struct { fn op_Select_T(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Select_T", pc, code, 
stack); - var boolean: i32 = stack.popI32(); - var v2: Val = stack.popValue(); - var v1: Val = stack.popValue(); + const boolean: i32 = stack.popI32(); + const v2: Val = stack.popValue(); + const v1: Val = stack.popValue(); if (boolean != 0) { stack.pushValue(v1); @@ -1842,9 +1851,9 @@ const InstructionFuncs = struct { fn op_Local_Get(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Local_Get", pc, code, stack); try stack.checkExhausted(1); - var locals_index: u32 = code[pc].immediate.Index; - var frame: *const CallFrame = stack.topFrame(); - var v: Val = frame.locals[locals_index]; + const locals_index: u32 = code[pc].immediate.Index; + const frame: *const CallFrame = stack.topFrame(); + const v: Val = frame.locals[locals_index]; stack.pushValue(v); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -1852,18 +1861,18 @@ const InstructionFuncs = struct { fn op_Local_Set(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Local_Set", pc, code, stack); - var locals_index: u32 = code[pc].immediate.Index; + const locals_index: u32 = code[pc].immediate.Index; var frame: *CallFrame = stack.topFrame(); - var v: Val = stack.popValue(); + const v: Val = stack.popValue(); frame.locals[locals_index] = v; try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_Local_Tee(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Local_Tee", pc, code, stack); - var locals_index: u32 = code[pc].immediate.Index; + const locals_index: u32 = code[pc].immediate.Index; var frame: *CallFrame = stack.topFrame(); - var v: Val = stack.topValue(); + const v: Val = stack.topValue(); frame.locals[locals_index] = v; try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -1871,16 +1880,16 @@ const InstructionFuncs = struct { fn op_Global_Get(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Global_Get", pc, code, stack); try stack.checkExhausted(1); - var global_index: u32 = code[pc].immediate.Index; - var global: *GlobalInstance = stack.topFrame().module_instance.store.getGlobal(global_index); + const global_index: u32 = code[pc].immediate.Index; + const global: *GlobalInstance = stack.topFrame().module_instance.store.getGlobal(global_index); stack.pushValue(global.value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_Global_Set(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Global_Set", pc, code, stack); - var global_index: u32 = code[pc].immediate.Index; - var global: *GlobalInstance = stack.topFrame().module_instance.store.getGlobal(global_index); + const global_index: u32 = code[pc].immediate.Index; + const global: *GlobalInstance = stack.topFrame().module_instance.store.getGlobal(global_index); global.value = stack.popValue(); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -1913,98 +1922,98 @@ const InstructionFuncs = struct { fn op_I32_Load(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Load", pc, code, stack); - var value = try OpHelpers.loadFromMem(i32, stack, code[pc].immediate.MemoryOffset); + const value = try OpHelpers.loadFromMem(i32, stack, code[pc].immediate.MemoryOffset); stack.pushI32(value); try @call(.always_tail, 
InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Load(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Load", pc, code, stack); - var value = try OpHelpers.loadFromMem(i64, stack, code[pc].immediate.MemoryOffset); + const value = try OpHelpers.loadFromMem(i64, stack, code[pc].immediate.MemoryOffset); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Load(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Load", pc, code, stack); - var value = try OpHelpers.loadFromMem(f32, stack, code[pc].immediate.MemoryOffset); + const value = try OpHelpers.loadFromMem(f32, stack, code[pc].immediate.MemoryOffset); stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Load(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Load", pc, code, stack); - var value = try OpHelpers.loadFromMem(f64, stack, code[pc].immediate.MemoryOffset); + const value = try OpHelpers.loadFromMem(f64, stack, code[pc].immediate.MemoryOffset); stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Load8_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Load8_S", pc, code, stack); - var value: i32 = try OpHelpers.loadFromMem(i8, stack, code[pc].immediate.MemoryOffset); + const value: i32 = try OpHelpers.loadFromMem(i8, stack, code[pc].immediate.MemoryOffset); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Load8_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Load8_U", pc, code, stack); - var value: u32 = try OpHelpers.loadFromMem(u8, stack, code[pc].immediate.MemoryOffset); + const value: u32 = try OpHelpers.loadFromMem(u8, stack, code[pc].immediate.MemoryOffset); stack.pushI32(@as(i32, @bitCast(value))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Load16_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Load16_S", pc, code, stack); - var value: i32 = try OpHelpers.loadFromMem(i16, stack, code[pc].immediate.MemoryOffset); + const value: i32 = try OpHelpers.loadFromMem(i16, stack, code[pc].immediate.MemoryOffset); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Load16_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Load16_U", pc, code, stack); - var value: u32 = try OpHelpers.loadFromMem(u16, stack, code[pc].immediate.MemoryOffset); + const value: u32 = try OpHelpers.loadFromMem(u16, stack, code[pc].immediate.MemoryOffset); stack.pushI32(@as(i32, @bitCast(value))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Load8_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Load8_S", pc, code, stack); - var value: i64 = try OpHelpers.loadFromMem(i8, stack, code[pc].immediate.MemoryOffset); + const value: i64 = try OpHelpers.loadFromMem(i8, stack, code[pc].immediate.MemoryOffset); stack.pushI64(value); try @call(.always_tail, 
InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Load8_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Load8_U", pc, code, stack); - var value: u64 = try OpHelpers.loadFromMem(u8, stack, code[pc].immediate.MemoryOffset); + const value: u64 = try OpHelpers.loadFromMem(u8, stack, code[pc].immediate.MemoryOffset); stack.pushI64(@as(i64, @bitCast(value))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Load16_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Load16_S", pc, code, stack); - var value: i64 = try OpHelpers.loadFromMem(i16, stack, code[pc].immediate.MemoryOffset); + const value: i64 = try OpHelpers.loadFromMem(i16, stack, code[pc].immediate.MemoryOffset); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Load16_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Load16_U", pc, code, stack); - var value: u64 = try OpHelpers.loadFromMem(u16, stack, code[pc].immediate.MemoryOffset); + const value: u64 = try OpHelpers.loadFromMem(u16, stack, code[pc].immediate.MemoryOffset); stack.pushI64(@as(i64, @bitCast(value))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Load32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Load32_S", pc, code, stack); - var value: i64 = try OpHelpers.loadFromMem(i32, stack, code[pc].immediate.MemoryOffset); + const value: i64 = try OpHelpers.loadFromMem(i32, stack, code[pc].immediate.MemoryOffset); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Load32_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Load32_U", pc, code, stack); - var value: u64 = try OpHelpers.loadFromMem(u32, stack, code[pc].immediate.MemoryOffset); + const value: u64 = try OpHelpers.loadFromMem(u32, stack, code[pc].immediate.MemoryOffset); stack.pushI64(@as(i64, @bitCast(value))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -2100,7 +2109,11 @@ const InstructionFuncs = struct { }; if (num_pages >= 0 and memory_instance.grow(@as(usize, @intCast(num_pages)))) { - stack.pushI32(old_num_pages); + switch (memory_instance.limits.indexType()) { + .I32 => stack.pushI32(old_num_pages), + .I64 => stack.pushI64(old_num_pages), + else => unreachable, + } try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } else { switch (memory_instance.limits.indexType()) { @@ -2115,7 +2128,7 @@ const InstructionFuncs = struct { fn op_I32_Const(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Const", pc, code, stack); try stack.checkExhausted(1); - var v: i32 = code[pc].immediate.ValueI32; + const v: i32 = code[pc].immediate.ValueI32; stack.pushI32(v); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -2123,7 +2136,7 @@ const InstructionFuncs = struct { fn op_I64_Const(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Const", pc, code, stack); try stack.checkExhausted(1); - var v: i64 = code[pc].immediate.ValueI64; + const v: i64 = 
code[pc].immediate.ValueI64; stack.pushI64(v); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -2131,7 +2144,7 @@ const InstructionFuncs = struct { fn op_F32_Const(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Const", pc, code, stack); try stack.checkExhausted(1); - var v: f32 = code[pc].immediate.ValueF32; + const v: f32 = code[pc].immediate.ValueF32; stack.pushF32(v); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -2139,371 +2152,371 @@ const InstructionFuncs = struct { fn op_F64_Const(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Const", pc, code, stack); try stack.checkExhausted(1); - var v: f64 = code[pc].immediate.ValueF64; + const v: f64 = code[pc].immediate.ValueF64; stack.pushF64(v); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Eqz(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Eqz", pc, code, stack); - var v1: i32 = stack.popI32(); - var result: i32 = if (v1 == 0) 1 else 0; + const v1: i32 = stack.popI32(); + const result: i32 = if (v1 == 0) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Eq(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Eq", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var result: i32 = if (v1 == v2) 1 else 0; + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const result: i32 = if (v1 == v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_NE(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_NE", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var result: i32 = if (v1 != v2) 1 else 0; + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const result: i32 = if (v1 != v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_LT_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_LT_S", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var result: i32 = if (v1 < v2) 1 else 0; + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const result: i32 = if (v1 < v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_LT_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_LT_U", pc, code, stack); - var v2: u32 = @as(u32, @bitCast(stack.popI32())); - var v1: u32 = @as(u32, @bitCast(stack.popI32())); - var result: i32 = if (v1 < v2) 1 else 0; + const v2: u32 = @as(u32, @bitCast(stack.popI32())); + const v1: u32 = @as(u32, @bitCast(stack.popI32())); + const result: i32 = if (v1 < v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_GT_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_GT_S", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = 
stack.popI32(); - var result: i32 = if (v1 > v2) 1 else 0; + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const result: i32 = if (v1 > v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_GT_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_GT_U", pc, code, stack); - var v2: u32 = @as(u32, @bitCast(stack.popI32())); - var v1: u32 = @as(u32, @bitCast(stack.popI32())); - var result: i32 = if (v1 > v2) 1 else 0; + const v2: u32 = @as(u32, @bitCast(stack.popI32())); + const v1: u32 = @as(u32, @bitCast(stack.popI32())); + const result: i32 = if (v1 > v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_LE_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_LE_S", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var result: i32 = if (v1 <= v2) 1 else 0; + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const result: i32 = if (v1 <= v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_LE_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_LE_U", pc, code, stack); - var v2: u32 = @as(u32, @bitCast(stack.popI32())); - var v1: u32 = @as(u32, @bitCast(stack.popI32())); - var result: i32 = if (v1 <= v2) 1 else 0; + const v2: u32 = @as(u32, @bitCast(stack.popI32())); + const v1: u32 = @as(u32, @bitCast(stack.popI32())); + const result: i32 = if (v1 <= v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_GE_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_GE_S", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var result: i32 = if (v1 >= v2) 1 else 0; + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const result: i32 = if (v1 >= v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_GE_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_GE_U", pc, code, stack); - var v2: u32 = @as(u32, @bitCast(stack.popI32())); - var v1: u32 = @as(u32, @bitCast(stack.popI32())); - var result: i32 = if (v1 >= v2) 1 else 0; + const v2: u32 = @as(u32, @bitCast(stack.popI32())); + const v1: u32 = @as(u32, @bitCast(stack.popI32())); + const result: i32 = if (v1 >= v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Eqz(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Eqz", pc, code, stack); - var v1: i64 = stack.popI64(); - var result: i32 = if (v1 == 0) 1 else 0; + const v1: i64 = stack.popI64(); + const result: i32 = if (v1 == 0) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Eq(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Eq", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var result: i32 = if (v1 == v2) 1 else 0; + const 
v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const result: i32 = if (v1 == v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_NE(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_NE", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var result: i32 = if (v1 != v2) 1 else 0; + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const result: i32 = if (v1 != v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_LT_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_LT_S", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var result: i32 = if (v1 < v2) 1 else 0; + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const result: i32 = if (v1 < v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_LT_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_LT_U", pc, code, stack); - var v2: u64 = @as(u64, @bitCast(stack.popI64())); - var v1: u64 = @as(u64, @bitCast(stack.popI64())); - var result: i32 = if (v1 < v2) 1 else 0; + const v2: u64 = @as(u64, @bitCast(stack.popI64())); + const v1: u64 = @as(u64, @bitCast(stack.popI64())); + const result: i32 = if (v1 < v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_GT_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_GT_S", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var result: i32 = if (v1 > v2) 1 else 0; + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const result: i32 = if (v1 > v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_GT_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_GT_U", pc, code, stack); - var v2: u64 = @as(u64, @bitCast(stack.popI64())); - var v1: u64 = @as(u64, @bitCast(stack.popI64())); - var result: i32 = if (v1 > v2) 1 else 0; + const v2: u64 = @as(u64, @bitCast(stack.popI64())); + const v1: u64 = @as(u64, @bitCast(stack.popI64())); + const result: i32 = if (v1 > v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_LE_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_LE_S", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var result: i32 = if (v1 <= v2) 1 else 0; + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const result: i32 = if (v1 <= v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_LE_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_LE_U", pc, code, stack); - var v2: u64 = @as(u64, @bitCast(stack.popI64())); - var v1: u64 = @as(u64, @bitCast(stack.popI64())); - var result: i32 = if (v1 <= v2) 1 else 0; + const v2: u64 = @as(u64, 
@bitCast(stack.popI64())); + const v1: u64 = @as(u64, @bitCast(stack.popI64())); + const result: i32 = if (v1 <= v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_GE_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_GE_S", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var result: i32 = if (v1 >= v2) 1 else 0; + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const result: i32 = if (v1 >= v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_GE_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_GE_U", pc, code, stack); - var v2: u64 = @as(u64, @bitCast(stack.popI64())); - var v1: u64 = @as(u64, @bitCast(stack.popI64())); - var result: i32 = if (v1 >= v2) 1 else 0; + const v2: u64 = @as(u64, @bitCast(stack.popI64())); + const v1: u64 = @as(u64, @bitCast(stack.popI64())); + const result: i32 = if (v1 >= v2) 1 else 0; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_EQ(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_EQ", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value: i32 = if (v1 == v2) 1 else 0; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value: i32 = if (v1 == v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_NE(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_NE", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value: i32 = if (v1 != v2) 1 else 0; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value: i32 = if (v1 != v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_LT(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_LT", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value: i32 = if (v1 < v2) 1 else 0; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value: i32 = if (v1 < v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_GT(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_GT", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value: i32 = if (v1 > v2) 1 else 0; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value: i32 = if (v1 > v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_LE(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_LE", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value: i32 = if (v1 <= v2) 1 else 0; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value: i32 = if (v1 <= v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_GE(pc: u32, code: [*]const 
Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_GE", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value: i32 = if (v1 >= v2) 1 else 0; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value: i32 = if (v1 >= v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_EQ(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_EQ", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value: i32 = if (v1 == v2) 1 else 0; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value: i32 = if (v1 == v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_NE(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_NE", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value: i32 = if (v1 != v2) 1 else 0; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value: i32 = if (v1 != v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_LT(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_LT", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value: i32 = if (v1 < v2) 1 else 0; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value: i32 = if (v1 < v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_GT(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_GT", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value: i32 = if (v1 > v2) 1 else 0; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value: i32 = if (v1 > v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_LE(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_LE", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value: i32 = if (v1 <= v2) 1 else 0; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value: i32 = if (v1 <= v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_GE(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_GE", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value: i32 = if (v1 >= v2) 1 else 0; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value: i32 = if (v1 >= v2) 1 else 0; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Clz(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Clz", pc, code, stack); - var v: i32 = stack.popI32(); - var num_zeroes = @clz(v); + const v: i32 = stack.popI32(); + const num_zeroes = @clz(v); stack.pushI32(num_zeroes); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Ctz(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { 
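// Illustrative sketch, not part of the patch: most hunks in this file only swap
// `var` for `const`, because Zig 0.12 rejects a local declared `var` that is
// never mutated ("local variable is never mutated" error). A self-contained
// example of the rule, using hypothetical names:
const std = @import("std");

fn sumWrapping(values: []const i32) i32 {
    var total: i32 = 0; // mutated in the loop, so `var` is still correct here
    for (values) |v| {
        const next = total +% v; // never reassigned, so 0.12 requires `const`
        total = next;
    }
    return total;
}

test "var vs const sketch" {
    const values = [_]i32{ 2147483647, 1, 2 };
    try std.testing.expectEqual(@as(i32, -2147483646), sumWrapping(&values));
}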
try debugPreamble("I32_Ctz", pc, code, stack); - var v: i32 = stack.popI32(); - var num_zeroes = @ctz(v); + const v: i32 = stack.popI32(); + const num_zeroes = @ctz(v); stack.pushI32(num_zeroes); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Popcnt(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Popcnt", pc, code, stack); - var v: i32 = stack.popI32(); - var num_bits_set = @popCount(v); + const v: i32 = stack.popI32(); + const num_bits_set = @popCount(v); stack.pushI32(num_bits_set); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Add(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Add", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var result = v1 +% v2; + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const result = v1 +% v2; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Sub(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Sub", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var result = v1 -% v2; + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const result = v1 -% v2; stack.pushI32(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Mul(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Mul", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var value = v1 *% v2; + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const value = v1 *% v2; stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Div_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Div_S", pc, code, stack); - var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var value = std.math.divTrunc(i32, v1, v2) catch |e| { + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const value = std.math.divTrunc(i32, v1, v2) catch |e| { if (e == error.DivisionByZero) { return error.TrapIntegerDivisionByZero; } else if (e == error.Overflow) { @@ -2518,9 +2531,9 @@ const InstructionFuncs = struct { fn op_I32_Div_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Div_U", pc, code, stack); - var v2: u32 = @as(u32, @bitCast(stack.popI32())); - var v1: u32 = @as(u32, @bitCast(stack.popI32())); - var value_unsigned = std.math.divFloor(u32, v1, v2) catch |e| { + const v2: u32 = @as(u32, @bitCast(stack.popI32())); + const v1: u32 = @as(u32, @bitCast(stack.popI32())); + const value_unsigned = std.math.divFloor(u32, v1, v2) catch |e| { if (e == error.DivisionByZero) { return error.TrapIntegerDivisionByZero; } else if (e == error.Overflow) { @@ -2529,17 +2542,17 @@ const InstructionFuncs = struct { return e; } }; - var value = @as(i32, @bitCast(value_unsigned)); + const value = @as(i32, @bitCast(value_unsigned)); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Rem_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Rem_S", pc, code, stack); 
- var v2: i32 = stack.popI32(); - var v1: i32 = stack.popI32(); - var denom = try std.math.absInt(v2); - var value = std.math.rem(i32, v1, denom) catch |e| { + const v2: i32 = stack.popI32(); + const v1: i32 = stack.popI32(); + const denom: i32 = @intCast(@abs(v2)); + const value = std.math.rem(i32, v1, denom) catch |e| { if (e == error.DivisionByZero) { return error.TrapIntegerDivisionByZero; } else { @@ -2552,151 +2565,151 @@ const InstructionFuncs = struct { fn op_I32_Rem_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Rem_U", pc, code, stack); - var v2: u32 = @as(u32, @bitCast(stack.popI32())); - var v1: u32 = @as(u32, @bitCast(stack.popI32())); - var value_unsigned = std.math.rem(u32, v1, v2) catch |e| { + const v2: u32 = @as(u32, @bitCast(stack.popI32())); + const v1: u32 = @as(u32, @bitCast(stack.popI32())); + const value_unsigned = std.math.rem(u32, v1, v2) catch |e| { if (e == error.DivisionByZero) { return error.TrapIntegerDivisionByZero; } else { return e; } }; - var value = @as(i32, @bitCast(value_unsigned)); + const value = @as(i32, @bitCast(value_unsigned)); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_And(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_And", pc, code, stack); - var v2: u32 = @as(u32, @bitCast(stack.popI32())); - var v1: u32 = @as(u32, @bitCast(stack.popI32())); - var value = @as(i32, @bitCast(v1 & v2)); + const v2: u32 = @as(u32, @bitCast(stack.popI32())); + const v1: u32 = @as(u32, @bitCast(stack.popI32())); + const value = @as(i32, @bitCast(v1 & v2)); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Or(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Or", pc, code, stack); - var v2: u32 = @as(u32, @bitCast(stack.popI32())); - var v1: u32 = @as(u32, @bitCast(stack.popI32())); - var value = @as(i32, @bitCast(v1 | v2)); + const v2: u32 = @as(u32, @bitCast(stack.popI32())); + const v1: u32 = @as(u32, @bitCast(stack.popI32())); + const value = @as(i32, @bitCast(v1 | v2)); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Xor(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Xor", pc, code, stack); - var v2: u32 = @as(u32, @bitCast(stack.popI32())); - var v1: u32 = @as(u32, @bitCast(stack.popI32())); - var value = @as(i32, @bitCast(v1 ^ v2)); + const v2: u32 = @as(u32, @bitCast(stack.popI32())); + const v1: u32 = @as(u32, @bitCast(stack.popI32())); + const value = @as(i32, @bitCast(v1 ^ v2)); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Shl(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Shl", pc, code, stack); - var shift_unsafe: i32 = stack.popI32(); - var int: i32 = stack.popI32(); - var shift: i32 = try std.math.mod(i32, shift_unsafe, 32); - var value = std.math.shl(i32, int, shift); + const shift_unsafe: i32 = stack.popI32(); + const int: i32 = stack.popI32(); + const shift: i32 = try std.math.mod(i32, shift_unsafe, 32); + const value = std.math.shl(i32, int, shift); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Shr_S(pc: u32, code: 
[*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Shr_S", pc, code, stack); - var shift_unsafe: i32 = stack.popI32(); - var int: i32 = stack.popI32(); - var shift = try std.math.mod(i32, shift_unsafe, 32); - var value = std.math.shr(i32, int, shift); + const shift_unsafe: i32 = stack.popI32(); + const int: i32 = stack.popI32(); + const shift = try std.math.mod(i32, shift_unsafe, 32); + const value = std.math.shr(i32, int, shift); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Shr_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Shr_U", pc, code, stack); - var shift_unsafe: u32 = @as(u32, @bitCast(stack.popI32())); - var int: u32 = @as(u32, @bitCast(stack.popI32())); - var shift = try std.math.mod(u32, shift_unsafe, 32); - var value = @as(i32, @bitCast(std.math.shr(u32, int, shift))); + const shift_unsafe: u32 = @as(u32, @bitCast(stack.popI32())); + const int: u32 = @as(u32, @bitCast(stack.popI32())); + const shift = try std.math.mod(u32, shift_unsafe, 32); + const value = @as(i32, @bitCast(std.math.shr(u32, int, shift))); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Rotl(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Rotl", pc, code, stack); - var rot: u32 = @as(u32, @bitCast(stack.popI32())); - var int: u32 = @as(u32, @bitCast(stack.popI32())); - var value = @as(i32, @bitCast(std.math.rotl(u32, int, rot))); + const rot: u32 = @as(u32, @bitCast(stack.popI32())); + const int: u32 = @as(u32, @bitCast(stack.popI32())); + const value = @as(i32, @bitCast(std.math.rotl(u32, int, rot))); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Rotr(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Rotr", pc, code, stack); - var rot: u32 = @as(u32, @bitCast(stack.popI32())); - var int: u32 = @as(u32, @bitCast(stack.popI32())); - var value = @as(i32, @bitCast(std.math.rotr(u32, int, rot))); + const rot: u32 = @as(u32, @bitCast(stack.popI32())); + const int: u32 = @as(u32, @bitCast(stack.popI32())); + const value = @as(i32, @bitCast(std.math.rotr(u32, int, rot))); stack.pushI32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Clz(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Clz", pc, code, stack); - var v: i64 = stack.popI64(); - var num_zeroes = @clz(v); + const v: i64 = stack.popI64(); + const num_zeroes = @clz(v); stack.pushI64(num_zeroes); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Ctz(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Ctz", pc, code, stack); - var v: i64 = stack.popI64(); - var num_zeroes = @ctz(v); + const v: i64 = stack.popI64(); + const num_zeroes = @ctz(v); stack.pushI64(num_zeroes); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Popcnt(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Popcnt", pc, code, stack); - var v: i64 = stack.popI64(); - var num_bits_set = @popCount(v); + const v: i64 = stack.popI64(); + const num_bits_set = @popCount(v); 
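// Illustrative sketch, not part of the patch: the Div_S / Div_U / Rem_S / Rem_U
// handlers in this file keep their existing error handling through the const
// migration. std.math.divTrunc reports error.DivisionByZero, plus error.Overflow
// for minInt / -1, and the interpreter maps both onto wasm traps. Reduced to a
// standalone helper (the name wasmDivTruncS is hypothetical):
const std = @import("std");

fn wasmDivTruncS(comptime T: type, v1: T, v2: T) error{ TrapIntegerDivisionByZero, TrapIntegerOverflow }!T {
    const value = std.math.divTrunc(T, v1, v2) catch |e| {
        if (e == error.DivisionByZero) {
            return error.TrapIntegerDivisionByZero;
        } else {
            return error.TrapIntegerOverflow;
        }
    };
    return value;
}

test "wasm div trap mapping sketch" {
    try std.testing.expectEqual(@as(i32, -2), try wasmDivTruncS(i32, 7, -3));
    try std.testing.expectError(error.TrapIntegerDivisionByZero, wasmDivTruncS(i32, 1, 0));
    try std.testing.expectError(error.TrapIntegerOverflow, wasmDivTruncS(i32, std.math.minInt(i32), -1));
}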
stack.pushI64(num_bits_set); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Add(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Add", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var result = v1 +% v2; + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const result = v1 +% v2; stack.pushI64(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Sub(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Sub", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var result = v1 -% v2; + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const result = v1 -% v2; stack.pushI64(result); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Mul(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Mul", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var value = v1 *% v2; + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const value = v1 *% v2; stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Div_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Div_S", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var value = std.math.divTrunc(i64, v1, v2) catch |e| { + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const value = std.math.divTrunc(i64, v1, v2) catch |e| { if (e == error.DivisionByZero) { return error.TrapIntegerDivisionByZero; } else if (e == error.Overflow) { @@ -2711,9 +2724,9 @@ const InstructionFuncs = struct { fn op_I64_Div_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Div_U", pc, code, stack); - var v2: u64 = @as(u64, @bitCast(stack.popI64())); - var v1: u64 = @as(u64, @bitCast(stack.popI64())); - var value_unsigned = std.math.divFloor(u64, v1, v2) catch |e| { + const v2: u64 = @as(u64, @bitCast(stack.popI64())); + const v1: u64 = @as(u64, @bitCast(stack.popI64())); + const value_unsigned = std.math.divFloor(u64, v1, v2) catch |e| { if (e == error.DivisionByZero) { return error.TrapIntegerDivisionByZero; } else if (e == error.Overflow) { @@ -2722,17 +2735,17 @@ const InstructionFuncs = struct { return e; } }; - var value = @as(i64, @bitCast(value_unsigned)); + const value = @as(i64, @bitCast(value_unsigned)); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Rem_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Rem_S", pc, code, stack); - var v2: i64 = stack.popI64(); - var v1: i64 = stack.popI64(); - var denom = try std.math.absInt(v2); - var value = std.math.rem(i64, v1, denom) catch |e| { + const v2: i64 = stack.popI64(); + const v1: i64 = stack.popI64(); + const denom: i64 = @intCast(@abs(v2)); + const value = std.math.rem(i64, v1, denom) catch |e| { if (e == error.DivisionByZero) { return error.TrapIntegerDivisionByZero; } else { @@ -2745,140 +2758,140 @@ const InstructionFuncs = struct { fn op_I64_Rem_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try 
debugPreamble("I64_Rem_U", pc, code, stack); - var v2: u64 = @as(u64, @bitCast(stack.popI64())); - var v1: u64 = @as(u64, @bitCast(stack.popI64())); - var value_unsigned = std.math.rem(u64, v1, v2) catch |e| { + const v2: u64 = @as(u64, @bitCast(stack.popI64())); + const v1: u64 = @as(u64, @bitCast(stack.popI64())); + const value_unsigned = std.math.rem(u64, v1, v2) catch |e| { if (e == error.DivisionByZero) { return error.TrapIntegerDivisionByZero; } else { return e; } }; - var value = @as(i64, @bitCast(value_unsigned)); + const value = @as(i64, @bitCast(value_unsigned)); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_And(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_And", pc, code, stack); - var v2: u64 = @as(u64, @bitCast(stack.popI64())); - var v1: u64 = @as(u64, @bitCast(stack.popI64())); - var value = @as(i64, @bitCast(v1 & v2)); + const v2: u64 = @as(u64, @bitCast(stack.popI64())); + const v1: u64 = @as(u64, @bitCast(stack.popI64())); + const value = @as(i64, @bitCast(v1 & v2)); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Or(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Or", pc, code, stack); - var v2: u64 = @as(u64, @bitCast(stack.popI64())); - var v1: u64 = @as(u64, @bitCast(stack.popI64())); - var value = @as(i64, @bitCast(v1 | v2)); + const v2: u64 = @as(u64, @bitCast(stack.popI64())); + const v1: u64 = @as(u64, @bitCast(stack.popI64())); + const value = @as(i64, @bitCast(v1 | v2)); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Xor(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Xor", pc, code, stack); - var v2: u64 = @as(u64, @bitCast(stack.popI64())); - var v1: u64 = @as(u64, @bitCast(stack.popI64())); - var value = @as(i64, @bitCast(v1 ^ v2)); + const v2: u64 = @as(u64, @bitCast(stack.popI64())); + const v1: u64 = @as(u64, @bitCast(stack.popI64())); + const value = @as(i64, @bitCast(v1 ^ v2)); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Shl(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Shl", pc, code, stack); - var shift_unsafe: i64 = stack.popI64(); - var int: i64 = stack.popI64(); - var shift: i64 = try std.math.mod(i64, shift_unsafe, 64); - var value = std.math.shl(i64, int, shift); + const shift_unsafe: i64 = stack.popI64(); + const int: i64 = stack.popI64(); + const shift: i64 = try std.math.mod(i64, shift_unsafe, 64); + const value = std.math.shl(i64, int, shift); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Shr_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Shr_S", pc, code, stack); - var shift_unsafe: i64 = stack.popI64(); - var int: i64 = stack.popI64(); - var shift = try std.math.mod(i64, shift_unsafe, 64); - var value = std.math.shr(i64, int, shift); + const shift_unsafe: i64 = stack.popI64(); + const int: i64 = stack.popI64(); + const shift = try std.math.mod(i64, shift_unsafe, 64); + const value = std.math.shr(i64, int, shift); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ 
pc + 1, code, stack }); } fn op_I64_Shr_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Shr_U", pc, code, stack); - var shift_unsafe: u64 = @as(u64, @bitCast(stack.popI64())); - var int: u64 = @as(u64, @bitCast(stack.popI64())); - var shift = try std.math.mod(u64, shift_unsafe, 64); - var value = @as(i64, @bitCast(std.math.shr(u64, int, shift))); + const shift_unsafe: u64 = @as(u64, @bitCast(stack.popI64())); + const int: u64 = @as(u64, @bitCast(stack.popI64())); + const shift = try std.math.mod(u64, shift_unsafe, 64); + const value = @as(i64, @bitCast(std.math.shr(u64, int, shift))); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Rotl(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Rotl", pc, code, stack); - var rot: u64 = @as(u64, @bitCast(stack.popI64())); - var int: u64 = @as(u64, @bitCast(stack.popI64())); - var value = @as(i64, @bitCast(std.math.rotl(u64, int, rot))); + const rot: u64 = @as(u64, @bitCast(stack.popI64())); + const int: u64 = @as(u64, @bitCast(stack.popI64())); + const value = @as(i64, @bitCast(std.math.rotl(u64, int, rot))); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Rotr(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Rotr", pc, code, stack); - var rot: u64 = @as(u64, @bitCast(stack.popI64())); - var int: u64 = @as(u64, @bitCast(stack.popI64())); - var value = @as(i64, @bitCast(std.math.rotr(u64, int, rot))); + const rot: u64 = @as(u64, @bitCast(stack.popI64())); + const int: u64 = @as(u64, @bitCast(stack.popI64())); + const value = @as(i64, @bitCast(std.math.rotr(u64, int, rot))); stack.pushI64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Abs(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Abs", pc, code, stack); - var f = stack.popF32(); - var value = std.math.fabs(f); + const f = stack.popF32(); + const value = @abs(f); stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Neg(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Neg", pc, code, stack); - var f = stack.popF32(); + const f = stack.popF32(); stack.pushF32(-f); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Ceil(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Ceil", pc, code, stack); - var f = stack.popF32(); - var value = @ceil(f); + const f = stack.popF32(); + const value = @ceil(f); stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Floor(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Floor", pc, code, stack); - var f = stack.popF32(); - var value = @floor(f); + const f = stack.popF32(); + const value = @floor(f); stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Trunc(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Trunc", pc, code, stack); - var f = stack.popF32(); - var value = std.math.trunc(f); + const f = stack.popF32(); + 
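// Illustrative sketch (not part of this patch, helper name hypothetical): the rotate
// handlers above reinterpret the popped i64 operands as u64 with @bitCast so that
// std.math.rotl/rotr can do the unsigned rotation, then bitcast the result back.
const std = @import("std");

fn i64Rotl(int_signed: i64, rot_signed: i64) i64 {
    const int: u64 = @bitCast(int_signed);
    const rot: u64 = @bitCast(rot_signed);
    return @bitCast(std.math.rotl(u64, int, rot));
}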
const value = std.math.trunc(f); stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Nearest(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Nearest", pc, code, stack); - var f = stack.popF32(); + const f = stack.popF32(); var value: f32 = undefined; - var ceil = @ceil(f); - var floor = @floor(f); + const ceil = @ceil(f); + const floor = @floor(f); if (ceil - f == f - floor) { value = if (@mod(ceil, 2) == 0) ceil else floor; } else { @@ -2890,120 +2903,120 @@ const InstructionFuncs = struct { fn op_F32_Sqrt(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Sqrt", pc, code, stack); - var f = stack.popF32(); - var value = std.math.sqrt(f); + const f = stack.popF32(); + const value = std.math.sqrt(f); stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Add(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Add", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value = v1 + v2; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value = v1 + v2; stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Sub(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Sub", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value = v1 - v2; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value = v1 - v2; stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Mul(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Mul", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value = v1 * v2; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value = v1 * v2; stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Div(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Div", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value = v1 / v2; + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value = v1 / v2; stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Min(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Min", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value = OpHelpers.propagateNanWithOp(.Min, v1, v2); + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value = OpHelpers.propagateNanWithOp(.Min, v1, v2); stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Max(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Max", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value = OpHelpers.propagateNanWithOp(.Max, v1, v2); + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value = OpHelpers.propagateNanWithOp(.Max, v1, v2); stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 
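// Illustrative sketch (not part of this patch): the F32_Nearest handler above rounds
// halfway cases to the even neighbor. Its non-tie branch falls outside the hunk shown
// here, so the final return below is an assumption that the closer neighbor is chosen.
fn nearestF32(f: f32) f32 {
    const c = @ceil(f);
    const fl = @floor(f);
    if (c - f == f - fl) {
        // exact tie: pick whichever neighbor is even
        return if (@mod(c, 2) == 0) c else fl;
    }
    return if (c - f < f - fl) c else fl;
}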
1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Copysign(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Copysign", pc, code, stack); - var v2 = stack.popF32(); - var v1 = stack.popF32(); - var value = std.math.copysign(v1, v2); + const v2 = stack.popF32(); + const v1 = stack.popF32(); + const value = std.math.copysign(v1, v2); stack.pushF32(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Abs(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Abs", pc, code, stack); - var f = stack.popF64(); - var value = std.math.fabs(f); + const f = stack.popF64(); + const value = @abs(f); stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Neg(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Neg", pc, code, stack); - var f = stack.popF64(); + const f = stack.popF64(); stack.pushF64(-f); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Ceil(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Ceil", pc, code, stack); - var f = stack.popF64(); - var value = @ceil(f); + const f = stack.popF64(); + const value = @ceil(f); stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Floor(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Floor", pc, code, stack); - var f = stack.popF64(); - var value = @floor(f); + const f = stack.popF64(); + const value = @floor(f); stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Trunc(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Trunc", pc, code, stack); - var f = stack.popF64(); - var value = @trunc(f); + const f = stack.popF64(); + const value = @trunc(f); stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Nearest(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Nearest", pc, code, stack); - var f = stack.popF64(); + const f = stack.popF64(); var value: f64 = undefined; - var ceil = @ceil(f); - var floor = @floor(f); + const ceil = @ceil(f); + const floor = @floor(f); if (ceil - f == f - floor) { value = if (@mod(ceil, 2) == 0) ceil else floor; } else { @@ -3015,302 +3028,302 @@ const InstructionFuncs = struct { fn op_F64_Sqrt(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Sqrt", pc, code, stack); - var f = stack.popF64(); - var value = std.math.sqrt(f); + const f = stack.popF64(); + const value = std.math.sqrt(f); stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Add(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Add", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value = v1 + v2; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value = v1 + v2; stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Sub(pc: u32, code: [*]const Instruction, stack: *Stack) 
anyerror!void { try debugPreamble("F64_Sub", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value = v1 - v2; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value = v1 - v2; stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Mul(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Mul", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value = v1 * v2; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value = v1 * v2; stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Div(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Div", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value = v1 / v2; + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value = v1 / v2; stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Min(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Min", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value = OpHelpers.propagateNanWithOp(.Min, v1, v2); + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value = OpHelpers.propagateNanWithOp(.Min, v1, v2); stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Max(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Max", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value = OpHelpers.propagateNanWithOp(.Max, v1, v2); + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value = OpHelpers.propagateNanWithOp(.Max, v1, v2); stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Copysign(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Copysign", pc, code, stack); - var v2 = stack.popF64(); - var v1 = stack.popF64(); - var value = std.math.copysign(v1, v2); + const v2 = stack.popF64(); + const v1 = stack.popF64(); + const value = std.math.copysign(v1, v2); stack.pushF64(value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Wrap_I64(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Wrap_I64", pc, code, stack); - var v = stack.popI64(); - var mod = @as(i32, @truncate(v)); + const v = stack.popI64(); + const mod = @as(i32, @truncate(v)); stack.pushI32(mod); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Trunc_F32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Trunc_F32_S", pc, code, stack); - var v = stack.popF32(); - var int = try OpHelpers.truncateTo(i32, v); + const v = stack.popF32(); + const int = try OpHelpers.truncateTo(i32, v); stack.pushI32(int); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Trunc_F32_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Trunc_F32_U", pc, code, stack); - var v = stack.popF32(); - var 
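// Illustrative sketch (not part of this patch, helper name hypothetical): i32.wrap_i64
// keeps only the low 32 bits, which @truncate expresses directly on signed integers.
fn i32WrapI64(v: i64) i32 {
    return @as(i32, @truncate(v));
}
// e.g. i32WrapI64(0x1_0000_0005) evaluates to 5.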
int = try OpHelpers.truncateTo(u32, v); + const v = stack.popF32(); + const int = try OpHelpers.truncateTo(u32, v); stack.pushI32(@as(i32, @bitCast(int))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Trunc_F64_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Trunc_F64_S", pc, code, stack); - var v = stack.popF64(); - var int = try OpHelpers.truncateTo(i32, v); + const v = stack.popF64(); + const int = try OpHelpers.truncateTo(i32, v); stack.pushI32(int); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Trunc_F64_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Trunc_F64_U", pc, code, stack); - var v = stack.popF64(); - var int = try OpHelpers.truncateTo(u32, v); + const v = stack.popF64(); + const int = try OpHelpers.truncateTo(u32, v); stack.pushI32(@as(i32, @bitCast(int))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Extend_I32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Extend_I32_S", pc, code, stack); - var v32 = stack.popI32(); - var v64: i64 = v32; + const v32 = stack.popI32(); + const v64: i64 = v32; stack.pushI64(v64); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Extend_I32_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Extend_I32_U", pc, code, stack); - var v32 = stack.popI32(); - var v64: u64 = @as(u32, @bitCast(v32)); + const v32 = stack.popI32(); + const v64: u64 = @as(u32, @bitCast(v32)); stack.pushI64(@as(i64, @bitCast(v64))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Trunc_F32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Trunc_F32_S", pc, code, stack); - var v = stack.popF32(); - var int = try OpHelpers.truncateTo(i64, v); + const v = stack.popF32(); + const int = try OpHelpers.truncateTo(i64, v); stack.pushI64(int); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Trunc_F32_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Trunc_F32_U", pc, code, stack); - var v = stack.popF32(); - var int = try OpHelpers.truncateTo(u64, v); + const v = stack.popF32(); + const int = try OpHelpers.truncateTo(u64, v); stack.pushI64(@as(i64, @bitCast(int))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Trunc_F64_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Trunc_F64_S", pc, code, stack); - var v = stack.popF64(); - var int = try OpHelpers.truncateTo(i64, v); + const v = stack.popF64(); + const int = try OpHelpers.truncateTo(i64, v); stack.pushI64(int); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Trunc_F64_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Trunc_F64_U", pc, code, stack); - var v = stack.popF64(); - var int = try OpHelpers.truncateTo(u64, v); + const v = stack.popF64(); + const int = try OpHelpers.truncateTo(u64, v); stack.pushI64(@as(i64, @bitCast(int))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 
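// Hypothetical sketch, not the OpHelpers.truncateTo used above (that helper is defined
// elsewhere in this file and may differ): trapping float-to-int truncation rejects NaN
// and out-of-range values instead of handing them to @intFromFloat. The trap error
// names below are assumptions.
const std = @import("std");

fn truncateToI32(v: f64) !i32 {
    if (std.math.isNan(v)) return error.TrapInvalidIntegerConversion;
    const t = @trunc(v);
    if (t < -2147483648.0 or t > 2147483647.0) return error.TrapIntegerOverflow;
    const result: i32 = @intFromFloat(t);
    return result;
}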
1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Convert_I32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Convert_I32_S", pc, code, stack); - var v = stack.popI32(); + const v = stack.popI32(); stack.pushF32(@as(f32, @floatFromInt(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Convert_I32_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Convert_I32_U", pc, code, stack); - var v = @as(u32, @bitCast(stack.popI32())); + const v = @as(u32, @bitCast(stack.popI32())); stack.pushF32(@as(f32, @floatFromInt(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Convert_I64_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Convert_I64_S", pc, code, stack); - var v = stack.popI64(); + const v = stack.popI64(); stack.pushF32(@as(f32, @floatFromInt(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Convert_I64_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Convert_I64_U", pc, code, stack); - var v = @as(u64, @bitCast(stack.popI64())); + const v = @as(u64, @bitCast(stack.popI64())); stack.pushF32(@as(f32, @floatFromInt(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Demote_F64(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Demote_F64", pc, code, stack); - var v = stack.popF64(); + const v = stack.popF64(); stack.pushF32(@as(f32, @floatCast(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Convert_I32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Convert_I32_S", pc, code, stack); - var v = stack.popI32(); + const v = stack.popI32(); stack.pushF64(@as(f64, @floatFromInt(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Convert_I32_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Convert_I32_U", pc, code, stack); - var v = @as(u32, @bitCast(stack.popI32())); + const v = @as(u32, @bitCast(stack.popI32())); stack.pushF64(@as(f64, @floatFromInt(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Convert_I64_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Convert_I64_S", pc, code, stack); - var v = stack.popI64(); + const v = stack.popI64(); stack.pushF64(@as(f64, @floatFromInt(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Convert_I64_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Convert_I64_U", pc, code, stack); - var v = @as(u64, @bitCast(stack.popI64())); + const v = @as(u64, @bitCast(stack.popI64())); stack.pushF64(@as(f64, @floatFromInt(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Promote_F32(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Promote_F32", pc, code, stack); - var v = stack.popF32(); + const v = stack.popF32(); stack.pushF64(@as(f64, @floatCast(v))); try 
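// Illustrative sketch (not part of this patch, helper name hypothetical): the unsigned
// convert handlers above reinterpret the popped i64 as u64 with @bitCast so that
// @floatFromInt treats it as an unsigned quantity.
fn f64ConvertI64U(v_signed: i64) f64 {
    const v: u64 = @bitCast(v_signed);
    return @floatFromInt(v);
}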
@call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Reinterpret_F32(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Reinterpret_F32", pc, code, stack); - var v = stack.popF32(); + const v = stack.popF32(); stack.pushI32(@as(i32, @bitCast(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Reinterpret_F64(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Reinterpret_F64", pc, code, stack); - var v = stack.popF64(); + const v = stack.popF64(); stack.pushI64(@as(i64, @bitCast(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F32_Reinterpret_I32(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32_Reinterpret_I32", pc, code, stack); - var v = stack.popI32(); + const v = stack.popI32(); stack.pushF32(@as(f32, @bitCast(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_F64_Reinterpret_I64(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64_Reinterpret_I64", pc, code, stack); - var v = stack.popI64(); + const v = stack.popI64(); stack.pushF64(@as(f64, @bitCast(v))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Extend8_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Extend8_S", pc, code, stack); - var v = stack.popI32(); - var v_truncated = @as(i8, @truncate(v)); - var v_extended: i32 = v_truncated; + const v = stack.popI32(); + const v_truncated = @as(i8, @truncate(v)); + const v_extended: i32 = v_truncated; stack.pushI32(v_extended); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Extend16_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Extend16_S", pc, code, stack); - var v = stack.popI32(); - var v_truncated = @as(i16, @truncate(v)); - var v_extended: i32 = v_truncated; + const v = stack.popI32(); + const v_truncated = @as(i16, @truncate(v)); + const v_extended: i32 = v_truncated; stack.pushI32(v_extended); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Extend8_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Extend8_S", pc, code, stack); - var v = stack.popI64(); - var v_truncated = @as(i8, @truncate(v)); - var v_extended: i64 = v_truncated; + const v = stack.popI64(); + const v_truncated = @as(i8, @truncate(v)); + const v_extended: i64 = v_truncated; stack.pushI64(v_extended); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Extend16_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Extend16_S", pc, code, stack); - var v = stack.popI64(); - var v_truncated = @as(i16, @truncate(v)); - var v_extended: i64 = v_truncated; + const v = stack.popI64(); + const v_truncated = @as(i16, @truncate(v)); + const v_extended: i64 = v_truncated; stack.pushI64(v_extended); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Extend32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Extend32_S", pc, code, 
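// Illustrative sketch (not part of this patch, helper name hypothetical): the sign
// extension handlers above truncate to the narrow type and let integer widening perform
// the sign extension on the way back.
fn i32Extend8S(v: i32) i32 {
    const v_truncated: i8 = @truncate(v);
    const v_extended: i32 = v_truncated;
    return v_extended;
}
// e.g. i32Extend8S(0xFF) evaluates to -1.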
stack); - var v = stack.popI64(); - var v_truncated = @as(i32, @truncate(v)); - var v_extended: i64 = v_truncated; + const v = stack.popI64(); + const v_truncated = @as(i32, @truncate(v)); + const v_extended: i64 = v_truncated; stack.pushI64(v_extended); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -3318,8 +3331,8 @@ const InstructionFuncs = struct { fn op_Ref_Null(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Ref_Null", pc, code, stack); try stack.checkExhausted(1); - var valtype = code[pc].immediate.ValType; - var val = try Val.nullRef(valtype); + const valtype = code[pc].immediate.ValType; + const val = try Val.nullRef(valtype); stack.pushValue(val); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -3343,64 +3356,64 @@ const InstructionFuncs = struct { fn op_I32_Trunc_Sat_F32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Trunc_Sat_F32_S", pc, code, stack); - var v = stack.popF32(); - var int = OpHelpers.saturatedTruncateTo(i32, v); + const v = stack.popF32(); + const int = OpHelpers.saturatedTruncateTo(i32, v); stack.pushI32(int); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Trunc_Sat_F32_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Trunc_Sat_F32_U", pc, code, stack); - var v = stack.popF32(); - var int = OpHelpers.saturatedTruncateTo(u32, v); + const v = stack.popF32(); + const int = OpHelpers.saturatedTruncateTo(u32, v); stack.pushI32(@as(i32, @bitCast(int))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Trunc_Sat_F64_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Trunc_Sat_F64_S", pc, code, stack); - var v = stack.popF64(); - var int = OpHelpers.saturatedTruncateTo(i32, v); + const v = stack.popF64(); + const int = OpHelpers.saturatedTruncateTo(i32, v); stack.pushI32(int); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I32_Trunc_Sat_F64_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I32_Trunc_Sat_F64_U", pc, code, stack); - var v = stack.popF64(); - var int = OpHelpers.saturatedTruncateTo(u32, v); + const v = stack.popF64(); + const int = OpHelpers.saturatedTruncateTo(u32, v); stack.pushI32(@as(i32, @bitCast(int))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Trunc_Sat_F32_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Trunc_Sat_F32_S", pc, code, stack); - var v = stack.popF32(); - var int = OpHelpers.saturatedTruncateTo(i64, v); + const v = stack.popF32(); + const int = OpHelpers.saturatedTruncateTo(i64, v); stack.pushI64(int); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Trunc_Sat_F32_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Trunc_Sat_F32_U", pc, code, stack); - var v = stack.popF32(); - var int = OpHelpers.saturatedTruncateTo(u64, v); + const v = stack.popF32(); + const int = OpHelpers.saturatedTruncateTo(u64, v); stack.pushI64(@as(i64, @bitCast(int))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); 
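// Hypothetical sketch, not the OpHelpers.saturatedTruncateTo used above (that helper is
// defined elsewhere in this file and may differ): the trunc_sat_* family maps NaN to 0
// and clamps out-of-range values instead of trapping.
const std = @import("std");

fn saturateToI32(v: f64) i32 {
    if (std.math.isNan(v)) return 0;
    if (v <= -2147483649.0) return std.math.minInt(i32);
    if (v >= 2147483648.0) return std.math.maxInt(i32);
    return @intFromFloat(@trunc(v));
}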
} fn op_I64_Trunc_Sat_F64_S(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Trunc_Sat_F64_S", pc, code, stack); - var v = stack.popF64(); - var int = OpHelpers.saturatedTruncateTo(i64, v); + const v = stack.popF64(); + const int = OpHelpers.saturatedTruncateTo(i64, v); stack.pushI64(int); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_I64_Trunc_Sat_F64_U(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I64_Trunc_Sat_F64_U", pc, code, stack); - var v = stack.popF64(); - var int = OpHelpers.saturatedTruncateTo(u64, v); + const v = stack.popF64(); + const int = OpHelpers.saturatedTruncateTo(u64, v); stack.pushI64(@as(i64, @bitCast(int))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -3431,16 +3444,16 @@ const InstructionFuncs = struct { const memory_offset_u32 = @as(u32, @intCast(memory_offset)); const length_u32 = @as(u32, @intCast(length)); - var source = data.bytes.items[data_offset_u32 .. data_offset_u32 + length_u32]; - var destination = buffer[memory_offset_u32 .. memory_offset_u32 + length_u32]; - std.mem.copy(u8, destination, source); + const source = data.bytes.items[data_offset_u32 .. data_offset_u32 + length_u32]; + const destination = buffer[memory_offset_u32 .. memory_offset_u32 + length_u32]; + @memcpy(destination, source); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } fn op_Data_Drop(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("Data_Drop", pc, code, stack); const data_index: u32 = code[pc].immediate.Index; - var data: *DataDefinition = &stack.topFrame().module_instance.module_def.datas.items[data_index]; + const data: *DataDefinition = &stack.topFrame().module_instance.module_def.datas.items[data_index]; data.bytes.clearAndFree(); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -3469,11 +3482,11 @@ const InstructionFuncs = struct { const dest_offset_u32 = @as(u32, @intCast(dest_offset)); const length_u32 = @as(u32, @intCast(length)); - var source = buffer[source_offset_u32 .. source_offset_u32 + length_u32]; - var destination = buffer[dest_offset_u32 .. dest_offset_u32 + length_u32]; + const source = buffer[source_offset_u32 .. source_offset_u32 + length_u32]; + const destination = buffer[dest_offset_u32 .. dest_offset_u32 + length_u32]; if (@intFromPtr(destination.ptr) < @intFromPtr(source.ptr)) { - std.mem.copy(u8, destination, source); + std.mem.copyForwards(u8, destination, source); } else { std.mem.copyBackwards(u8, destination, source); } @@ -3500,7 +3513,7 @@ const InstructionFuncs = struct { const offset_u32 = @as(u32, @intCast(offset)); const length_u32 = @as(u32, @intCast(length)); - var destination = buffer[offset_u32 .. offset_u32 + length_u32]; + const destination = buffer[offset_u32 .. offset_u32 + length_u32]; @memset(destination, value); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); @@ -3533,9 +3546,10 @@ const InstructionFuncs = struct { const table_begin = @as(usize, @intCast(table_start_index)); const length = @as(usize, @intCast(length_i32)); - var dest: []Val = table.refs.items[table_begin .. table_begin + length]; - var src: []const Val = elem.refs.items[elem_begin .. 
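// Illustrative sketch (not part of this patch, helper name hypothetical): Zig 0.12
// removed std.mem.copy, and @memcpy asserts that its arguments do not alias, so the
// memory.copy handler above picks copyForwards or copyBackwards depending on which
// direction is safe for overlapping regions of the same buffer.
const std = @import("std");

fn copyWithinBuffer(buffer: []u8, dest_off: usize, src_off: usize, len: usize) void {
    const destination = buffer[dest_off .. dest_off + len];
    const source = buffer[src_off .. src_off + len];
    if (@intFromPtr(destination.ptr) < @intFromPtr(source.ptr)) {
        std.mem.copyForwards(u8, destination, source);
    } else {
        std.mem.copyBackwards(u8, destination, source);
    }
}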
elem_begin + length]; - std.mem.copy(Val, dest, src); + const dest: []Val = table.refs.items[table_begin .. table_begin + length]; + const src: []const Val = elem.refs.items[elem_begin .. elem_begin + length]; + + @memcpy(dest, src); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -3574,10 +3588,10 @@ const InstructionFuncs = struct { const src_begin = @as(usize, @intCast(src_start_index)); const length = @as(usize, @intCast(length_i32)); - var dest: []Val = dest_table.refs.items[dest_begin .. dest_begin + length]; - var src: []const Val = src_table.refs.items[src_begin .. src_begin + length]; + const dest: []Val = dest_table.refs.items[dest_begin .. dest_begin + length]; + const src: []const Val = src_table.refs.items[src_begin .. src_begin + length]; if (dest_start_index <= src_start_index) { - std.mem.copy(Val, dest, src); + std.mem.copyForwards(Val, dest, src); } else { std.mem.copyBackwards(Val, dest, src); } @@ -3621,7 +3635,7 @@ const InstructionFuncs = struct { const dest_begin = @as(usize, @intCast(dest_table_index)); const length = @as(usize, @intCast(length_i32)); - var dest: []Val = table.refs.items[dest_begin .. dest_begin + length]; + const dest: []Val = table.refs.items[dest_begin .. dest_begin + length]; @memset(dest, funcref); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); @@ -4130,7 +4144,7 @@ const InstructionFuncs = struct { fn op_I8x16_Swizzle(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("I8x16_Swizzle", pc, code, stack); const indices: i8x16 = @as(i8x16, @bitCast(stack.popV128())); - var vec: i8x16 = @as(i8x16, @bitCast(stack.popV128())); + const vec: i8x16 = @as(i8x16, @bitCast(stack.popV128())); var swizzled: i8x16 = undefined; var i: usize = 0; while (i < 16) : (i += 1) { @@ -4977,7 +4991,7 @@ const InstructionFuncs = struct { fn op_F32x4_Abs(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F32x4_Abs", pc, code, stack); const vec = @as(f32x4, @bitCast(stack.popV128())); - const abs = @fabs(vec); + const abs = @abs(vec); stack.pushV128(@as(v128, @bitCast(abs))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -5049,7 +5063,7 @@ const InstructionFuncs = struct { fn op_F64x2_Abs(pc: u32, code: [*]const Instruction, stack: *Stack) anyerror!void { try debugPreamble("F64x2_Abs", pc, code, stack); const vec = @as(f64x2, @bitCast(stack.popV128())); - const abs = @fabs(vec); + const abs = @abs(vec); stack.pushV128(@as(v128, @bitCast(abs))); try @call(.always_tail, InstructionFuncs.lookup(code[pc + 1].opcode), .{ pc + 1, code, stack }); } @@ -5245,7 +5259,7 @@ pub const StackVM = struct { local_types.appendSliceAssumeCapacity(param_types); local_types.appendSliceAssumeCapacity(def_func.locals.items); - var f = FunctionInstance{ + const f = FunctionInstance{ .type_def_index = def_func.type_index, .def_index = @as(u32, @intCast(i)), .instructions_begin = def_func.instructions_begin, @@ -5278,7 +5292,7 @@ pub const StackVM = struct { const num_imports = module.module_def.imports.functions.items.len; if (func_index >= num_imports) { - var instance_index = func_index - num_imports; + const instance_index = func_index - num_imports; try self.invokeInternal(module, instance_index, params, returns); } else { try invokeImportInternal(module, func_index, params, returns, .{}); @@ -5397,8 +5411,8 @@ pub const StackVM = struct { pub fn 
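// Illustrative sketch (not part of this patch, helper name hypothetical): @fabs was
// removed in Zig 0.12; the @abs builtin accepts vector operands, so the SIMD abs
// handlers above need no per-lane loop. The type alias mirrors the f32x4 used in this file.
const f32x4 = @Vector(4, f32);

fn absF32x4(vec: f32x4) f32x4 {
    return @abs(vec);
}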
findFuncTypeDef(vm: *VM, module: *ModuleInstance, local_func_index: usize) *const FunctionTypeDefinition { var self: *StackVM = fromVM(vm); - var func_instance: *const FunctionInstance = &self.functions.items[local_func_index]; - var func_type_def: *const FunctionTypeDefinition = &module.module_def.types.items[func_instance.type_def_index]; + const func_instance: *const FunctionInstance = &self.functions.items[local_func_index]; + const func_type_def: *const FunctionTypeDefinition = &module.module_def.types.items[func_instance.type_def_index]; return func_type_def; } @@ -5409,7 +5423,7 @@ pub const StackVM = struct { const param_types: []const ValType = func_type.getParams(); const return_types: []const ValType = func_type.getReturns(); - var params_slice = params[0..param_types.len]; + const params_slice = params[0..param_types.len]; var returns_slice = returns[0..return_types.len]; // Ensure any leftover stack state doesn't pollute this invoke. Can happen if the previous invoke returned an error. diff --git a/src/wasi.zig b/src/wasi.zig index 76e2b89..5f10c8b 100644 --- a/src/wasi.zig +++ b/src/wasi.zig @@ -11,7 +11,7 @@ const ModuleImportPackage = core.ModuleImportPackage; const WasiContext = struct { const FdInfo = struct { - fd: std.os.fd_t, + fd: std.posix.fd_t, path_absolute: []const u8, rights: WasiRights, is_preopen: bool, @@ -48,7 +48,7 @@ const WasiContext = struct { { var cwd_buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined; - const cwd: []const u8 = try std.os.getcwd(&cwd_buffer); + const cwd: []const u8 = try std.process.getCwd(&cwd_buffer); context.cwd = try context.strings.put(cwd); } @@ -77,7 +77,7 @@ const WasiContext = struct { const path_stdout = try context.strings.put("stdout"); const path_stderr = try context.strings.put("stderr"); - var empty_dir_entries = std.ArrayList(WasiDirEntry).init(allocator); + const empty_dir_entries = std.ArrayList(WasiDirEntry).init(allocator); try context.fd_table.ensureTotalCapacity(3 + context.dirs.len); context.fd_table.appendAssumeCapacity(FdInfo{ .fd = std.io.getStdIn().handle, .path_absolute = path_stdin, .rights = .{}, .is_preopen = true, .dir_entries = empty_dir_entries }); @@ -197,7 +197,7 @@ const WasiContext = struct { if (self.resolveAndCache(fd_info_dir, path)) |resolved_path| { // Found an entry for this path, just reuse it while creating a new wasi fd if (self.fd_path_lookup.get(resolved_path)) |fd_table_index| { - var fd_wasi: u32 = self.next_fd_id; + const fd_wasi: u32 = self.next_fd_id; self.next_fd_id += 1; self.fd_wasi_table.put(fd_wasi, fd_table_index) catch |err| { errno.* = Errno.translateError(err); @@ -217,7 +217,7 @@ const WasiContext = struct { } if (open_func(resolved_path, lookupflags, openflags2, fdflags, rights, errno)) |fd_os| { - var fd_wasi: u32 = self.next_fd_id; + const fd_wasi: u32 = self.next_fd_id; self.next_fd_id += 1; var info: *FdInfo = undefined; @@ -263,7 +263,7 @@ const WasiContext = struct { return null; } - fn fdUpdate(self: *WasiContext, fd_wasi: u32, new_fd: std.os.fd_t) void { + fn fdUpdate(self: *WasiContext, fd_wasi: u32, new_fd: std.posix.fd_t) void { if (self.fd_wasi_table.get(fd_wasi)) |fd_table_index| { self.fd_table.items[fd_table_index].fd = new_fd; } else { @@ -314,7 +314,7 @@ const WasiContext = struct { fd_info.open_handles -= 1; if (fd_info.open_handles == 0) { - std.os.close(fd_info.fd); + std.posix.close(fd_info.fd); self.fd_table_freelist.appendAssumeCapacity(fd_table_index); // capacity was allocated when the associated fd_table slot was allocated } } else { @@ -335,7 +335,7 
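// Illustrative sketch (not part of this patch, function name hypothetical): Zig 0.12
// splits much of std.os into std.posix and std.process, which is what the wasi.zig
// hunks below track (fd_t, close, getCwd, and exit all moved namespaces).
const std = @import("std");

fn currentDir(buffer: []u8) ![]const u8 {
    return std.process.getCwd(buffer);
}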
@@ const WasiContext = struct { _ = self.fd_path_lookup.remove(path_absolute); var fd_info: *FdInfo = &self.fd_table.items[fd_table_index]; - std.os.close(fd_info.fd); + std.posix.close(fd_info.fd); fd_info.open_handles = 0; self.fd_table_freelist.appendAssumeCapacity(fd_table_index); // capacity was allocated when the associated fd_table slot was allocated } @@ -719,7 +719,7 @@ const Helpers = struct { writeIntToMemory(u32, dest_string_strings, dest_string_ptrs, module, &errno); if (getMemorySlice(module, dest_string_strings, string.len + 1, &errno)) |mem| { - std.mem.copy(u8, mem[0..string.len], string); + @memcpy(mem[0..string.len], string); mem[string.len] = 0; // null terminator dest_string_ptrs += @sizeOf(u32); @@ -731,20 +731,17 @@ const Helpers = struct { returns[0] = Val{ .I32 = @intFromEnum(errno) }; } - fn convertClockId(wasi_clockid: i32, errno: *Errno) i32 { - return switch (wasi_clockid) { - std.os.wasi.CLOCK.REALTIME => if (builtin.os.tag != .windows) std.os.system.CLOCK.REALTIME else WindowsApi.CLOCK.REALTIME, - std.os.wasi.CLOCK.MONOTONIC => if (builtin.os.tag != .windows) std.os.system.CLOCK.MONOTONIC else WindowsApi.CLOCK.MONOTONIC, - std.os.wasi.CLOCK.PROCESS_CPUTIME_ID => if (builtin.os.tag != .windows) std.os.system.CLOCK.PROCESS_CPUTIME_ID else WindowsApi.CLOCK.PROCESS_CPUTIME_ID, - std.os.wasi.CLOCK.THREAD_CPUTIME_ID => if (builtin.os.tag != .windows) std.os.system.CLOCK.THREAD_CPUTIME_ID else WindowsApi.CLOCK.THREAD_CPUTIME_ID, - else => { - errno.* = Errno.INVAL; - return 0; - }, + fn convertClockId(wasi_clockid: i32) i32 { + const clockid_t: std.os.wasi.clockid_t = @enumFromInt(wasi_clockid); + return switch (clockid_t) { + std.os.wasi.clockid_t.REALTIME => if (builtin.os.tag != .windows) std.posix.CLOCK.REALTIME else WindowsApi.CLOCK.REALTIME, + std.os.wasi.clockid_t.MONOTONIC => if (builtin.os.tag != .windows) std.posix.CLOCK.MONOTONIC else WindowsApi.CLOCK.MONOTONIC, + std.os.wasi.clockid_t.PROCESS_CPUTIME_ID => if (builtin.os.tag != .windows) std.posix.CLOCK.PROCESS_CPUTIME_ID else WindowsApi.CLOCK.PROCESS_CPUTIME_ID, + std.os.wasi.clockid_t.THREAD_CPUTIME_ID => if (builtin.os.tag != .windows) std.posix.CLOCK.THREAD_CPUTIME_ID else WindowsApi.CLOCK.THREAD_CPUTIME_ID, }; } - fn posixTimespecToWasi(ts: std.os.system.timespec) std.os.wasi.timestamp_t { + fn posixTimespecToWasi(ts: std.posix.timespec) std.os.wasi.timestamp_t { const ns_per_second = 1000000000; const sec_part = @as(u64, @intCast(ts.tv_sec)); const nsec_part = @as(u64, @intCast(ts.tv_nsec)); @@ -834,23 +831,23 @@ const Helpers = struct { }; } - fn fdflagsToFlagsPosix(fdflags: WasiFdFlags) u32 { - var flags: u32 = 0; + fn fdflagsToFlagsPosix(fdflags: WasiFdFlags) std.posix.O { + var flags: std.posix.O = .{}; if (fdflags.append) { - flags |= std.os.O.APPEND; + flags.APPEND = true; } if (fdflags.dsync) { - flags |= std.os.O.DSYNC; + flags.DSYNC = true; } if (fdflags.nonblock) { - flags |= std.os.O.NONBLOCK; + flags.NONBLOCK = true; } - if (builtin.os.tag != .macos and fdflags.rsync) { - flags |= std.os.O.RSYNC; + if (builtin.os.tag != .macos and builtin.os.tag != .linux and fdflags.rsync) { + flags.RSYNC = true; } if (fdflags.sync) { - flags |= std.os.O.SYNC; + flags.SYNC = true; } return flags; @@ -866,16 +863,16 @@ const Helpers = struct { } } - fn posixModeToWasiFiletype(mode: std.os.mode_t) std.os.wasi.filetype_t { - if (std.os.S.ISREG(mode)) { + fn posixModeToWasiFiletype(mode: std.posix.mode_t) std.os.wasi.filetype_t { + if (std.posix.S.ISREG(mode)) { return .REGULAR_FILE; - } else if 
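// Illustrative sketch (not part of this patch, helper name hypothetical; POSIX targets
// only): std.posix.O is a packed struct in Zig 0.12, so fdflagsToFlagsPosix above sets
// boolean fields instead of OR-ing std.os.O.* integer constants.
const std = @import("std");

fn appendNonblocking() std.posix.O {
    var flags: std.posix.O = .{};
    flags.APPEND = true;
    flags.NONBLOCK = true;
    return flags;
}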
(std.os.S.ISDIR(mode)) { + } else if (std.posix.S.ISDIR(mode)) { return .DIRECTORY; - } else if (std.os.S.ISCHR(mode)) { + } else if (std.posix.S.ISCHR(mode)) { return .CHARACTER_DEVICE; - } else if (std.os.S.ISBLK(mode)) { + } else if (std.posix.S.ISBLK(mode)) { return .BLOCK_DEVICE; - } else if (std.os.S.ISLNK(mode)) { + } else if (std.posix.S.ISLNK(mode)) { return .SYMBOLIC_LINK; // } else if (std.os.S.ISSOCK(mode)) { // stat_wasi.fs_filetype = std.os.wasi.filetype_t.SOCKET_STREAM; // not sure if this is SOCKET_STREAM or SOCKET_DGRAM @@ -885,16 +882,49 @@ const Helpers = struct { } } - fn fdstatGetWindows(fd: std.os.fd_t, errno: *Errno) std.os.wasi.fdstat_t { + const WASI_RIGHTS_ALL: std.os.wasi.rights_t = .{ + .FD_DATASYNC = true, + .FD_READ = true, + .FD_SEEK = true, + .FD_FDSTAT_SET_FLAGS = true, + .FD_SYNC = true, + .FD_TELL = true, + .FD_WRITE = true, + .FD_ADVISE = true, + .FD_ALLOCATE = true, + .PATH_CREATE_DIRECTORY = true, + .PATH_CREATE_FILE = true, + .PATH_LINK_SOURCE = true, + .PATH_LINK_TARGET = true, + .PATH_OPEN = true, + .FD_READDIR = true, + .PATH_READLINK = true, + .PATH_RENAME_SOURCE = true, + .PATH_RENAME_TARGET = true, + .PATH_FILESTAT_GET = true, + .PATH_FILESTAT_SET_SIZE = true, + .PATH_FILESTAT_SET_TIMES = true, + .FD_FILESTAT_GET = true, + .FD_FILESTAT_SET_SIZE = true, + .FD_FILESTAT_SET_TIMES = true, + .PATH_SYMLINK = true, + .PATH_REMOVE_DIRECTORY = true, + .PATH_UNLINK_FILE = true, + .POLL_FD_READWRITE = true, + .SOCK_SHUTDOWN = true, + .SOCK_ACCEPT = true, + }; + + fn fdstatGetWindows(fd: std.posix.fd_t, errno: *Errno) std.os.wasi.fdstat_t { if (builtin.os.tag != .windows) { @compileError("This function should only be called on the Windows OS."); } var stat_wasi = std.os.wasi.fdstat_t{ .fs_filetype = std.os.wasi.filetype_t.REGULAR_FILE, - .fs_flags = 0, - .fs_rights_base = std.os.wasi.RIGHT.ALL, - .fs_rights_inheriting = std.os.wasi.RIGHT.ALL, + .fs_flags = .{}, + .fs_rights_base = WASI_RIGHTS_ALL, + .fs_rights_inheriting = WASI_RIGHTS_ALL, }; var info: WindowsApi.BY_HANDLE_FILE_INFORMATION = undefined; @@ -902,11 +932,11 @@ const Helpers = struct { stat_wasi.fs_filetype = windowsFileAttributeToWasiFiletype(info.dwFileAttributes); if (stat_wasi.fs_filetype == .DIRECTORY) { - stat_wasi.fs_rights_base &= ~std.os.wasi.RIGHT.FD_SEEK; + stat_wasi.fs_rights_base.FD_SEEK = false; } if (info.dwFileAttributes & std.os.windows.FILE_ATTRIBUTE_READONLY != 0) { - stat_wasi.fs_rights_base &= ~std.os.wasi.RIGHT.FD_WRITE; + stat_wasi.fs_rights_base.FD_WRITE = false; } } else { errno.* = Errno.getLastWin32Error(); @@ -917,52 +947,53 @@ const Helpers = struct { return stat_wasi; } - fn fdstatGetPosix(fd: std.os.fd_t, errno: *Errno) std.os.wasi.fdstat_t { + fn fdstatGetPosix(fd: std.posix.fd_t, errno: *Errno) std.os.wasi.fdstat_t { if (builtin.os.tag == .windows) { @compileError("This function should only be called on an OS that supports posix APIs."); } var stat_wasi = std.os.wasi.fdstat_t{ .fs_filetype = std.os.wasi.filetype_t.UNKNOWN, - .fs_flags = 0, - .fs_rights_base = std.os.wasi.RIGHT.ALL, - .fs_rights_inheriting = std.os.wasi.RIGHT.ALL, + .fs_flags = .{}, + .fs_rights_base = WASI_RIGHTS_ALL, + .fs_rights_inheriting = WASI_RIGHTS_ALL, }; - if (std.os.fcntl(fd, std.os.F.GETFL, 0)) |fd_flags| { - if (std.os.fstat(fd)) |fd_stat| { + if (std.posix.fcntl(fd, std.posix.F.GETFL, 0)) |fd_flags| { + if (std.posix.fstat(fd)) |fd_stat| { + const flags: std.posix.O = @bitCast(@as(u32, @intCast(fd_flags))); // filetype stat_wasi.fs_filetype = posixModeToWasiFiletype(fd_stat.mode); 
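// Illustrative sketch (not part of this patch, helper name hypothetical): with
// std.os.wasi.rights_t now a packed struct of bools and RIGHT.ALL gone, the code above
// builds WASI_RIGHTS_ALL by hand and clears individual rights by field assignment
// rather than with a bitwise AND-NOT.
const std = @import("std");

fn withoutSeek(rights: std.os.wasi.rights_t) std.os.wasi.rights_t {
    var r = rights;
    r.FD_SEEK = false;
    return r;
}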
// flags - if (fd_flags & std.os.O.APPEND != 0) { - stat_wasi.fs_flags |= std.os.wasi.FDFLAG.APPEND; + if (flags.APPEND) { + stat_wasi.fs_flags.APPEND = true; } - if (fd_flags & std.os.O.DSYNC != 0) { - stat_wasi.fs_flags |= std.os.wasi.FDFLAG.DSYNC; + if (flags.DSYNC) { + stat_wasi.fs_flags.DSYNC = true; } - if (fd_flags & std.os.O.NONBLOCK != 0) { - stat_wasi.fs_flags |= std.os.wasi.FDFLAG.NONBLOCK; + if (flags.NONBLOCK) { + stat_wasi.fs_flags.NONBLOCK = true; } - if (builtin.os.tag != .macos and fd_flags & std.os.O.RSYNC != 0) { - stat_wasi.fs_flags |= std.os.wasi.FDFLAG.RSYNC; + if (builtin.os.tag != .macos and builtin.os.tag != .linux and flags.RSYNC) { + stat_wasi.fs_flags.RSYNC = true; } - if (fd_flags & std.os.O.SYNC != 0) { - stat_wasi.fs_flags |= std.os.wasi.FDFLAG.SYNC; + if (flags.SYNC) { + stat_wasi.fs_flags.SYNC = true; } // rights - if (fd_flags & std.os.O.RDWR != 0) { + if (flags.ACCMODE == .RDWR) { // noop since all rights includes this by default - } else if (fd_flags & std.os.O.RDONLY != 0) { - stat_wasi.fs_rights_base &= ~std.os.wasi.RIGHT.FD_WRITE; - } else if (fd_flags & std.os.O.WRONLY != 0) { - stat_wasi.fs_rights_base &= ~std.os.wasi.RIGHT.FD_READ; + } else if (flags.ACCMODE == .RDONLY) { + stat_wasi.fs_rights_base.FD_WRITE = false; + } else if (flags.ACCMODE == .WRONLY) { + stat_wasi.fs_rights_base.FD_READ = false; } if (stat_wasi.fs_filetype == .DIRECTORY) { - stat_wasi.fs_rights_base &= ~std.os.wasi.RIGHT.FD_SEEK; + stat_wasi.fs_rights_base.FD_SEEK = false; } } else |err| { errno.* = Errno.translateError(err); @@ -974,7 +1005,7 @@ const Helpers = struct { return stat_wasi; } - fn fdstatSetFlagsWindows(fd_info: *const WasiContext.FdInfo, fdflags: WasiFdFlags, errno: *Errno) ?std.os.fd_t { + fn fdstatSetFlagsWindows(fd_info: *const WasiContext.FdInfo, fdflags: WasiFdFlags, errno: *Errno) ?std.posix.fd_t { const w = std.os.windows; const file_pos = w.SetFilePointerEx_CURRENT_get(fd_info.fd) catch |err| { @@ -984,7 +1015,7 @@ const Helpers = struct { w.CloseHandle(fd_info.fd); - const pathspace_w: w.PathSpace = w.sliceToPrefixedFileW(fd_info.path_absolute) catch |err| { + const pathspace_w: w.PathSpace = w.sliceToPrefixedFileW(fd_info.fd, fd_info.path_absolute) catch |err| { errno.* = Errno.translateError(err); return null; }; @@ -1062,10 +1093,11 @@ const Helpers = struct { return fd_new; } - fn fdstatSetFlagsPosix(fd_info: *const WasiContext.FdInfo, fdflags: WasiFdFlags, errno: *Errno) ?std.os.fd_t { - const flags: u32 = fdflagsToFlagsPosix(fdflags); + fn fdstatSetFlagsPosix(fd_info: *const WasiContext.FdInfo, fdflags: WasiFdFlags, errno: *Errno) ?std.posix.fd_t { + const flags = fdflagsToFlagsPosix(fdflags); + const flags_int = @as(u32, @bitCast(flags)); - if (std.os.fcntl(fd_info.fd, std.os.F.SETFL, flags)) |_| {} else |err| { + if (std.posix.fcntl(fd_info.fd, std.posix.F.SETFL, flags_int)) |_| {} else |err| { errno.* = Errno.translateError(err); } @@ -1073,17 +1105,18 @@ const Helpers = struct { return null; } - fn fdFilestatSetTimesWindows(fd: std.os.fd_t, timestamp_wasi_access: u64, timestamp_wasi_modified: u64, fstflags: u32, errno: *Errno) void { + fn fdFilestatSetTimesWindows(fd: std.posix.fd_t, timestamp_wasi_access: u64, timestamp_wasi_modified: u64, fstflags: u32, errno: *Errno) void { var filetime_now: WindowsApi.FILETIME = undefined; // helps avoid 2 calls to GetSystemTimeAsFiletime var filetime_now_needs_set: bool = true; var access_time: std.os.windows.FILETIME = undefined; var access_time_was_set: bool = false; - if (fstflags & 
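// Illustrative sketch (not part of this patch, helper name hypothetical; POSIX targets
// only): F_GETFL still returns an integer, so the code above @bitCasts it into
// std.posix.O and reads fields, using the ACCMODE enum instead of O_RDONLY/O_WRONLY
// bit tests. The u32 backing width mirrors the cast used in the hunk above.
const std = @import("std");

fn isReadOnly(fd: std.posix.fd_t) !bool {
    const raw = try std.posix.fcntl(fd, std.posix.F.GETFL, 0);
    const flags: std.posix.O = @bitCast(@as(u32, @intCast(raw)));
    return flags.ACCMODE == .RDONLY;
}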
std.os.wasi.FILESTAT_SET_ATIM != 0) { + const flags: std.os.wasi.fstflags_t = @bitCast(@as(u16, @intCast(fstflags))); + if (flags.ATIM) { access_time = std.os.windows.nanoSecondsToFileTime(timestamp_wasi_access); access_time_was_set = true; } - if (fstflags & std.os.wasi.FILESTAT_SET_ATIM_NOW != 0) { + if (flags.ATIM_NOW) { std.os.windows.kernel32.GetSystemTimeAsFileTime(&filetime_now); filetime_now_needs_set = false; access_time = filetime_now; @@ -1092,11 +1125,11 @@ const Helpers = struct { var modify_time: std.os.windows.FILETIME = undefined; var modify_time_was_set: bool = false; - if (fstflags & std.os.wasi.FILESTAT_SET_MTIM != 0) { + if (flags.MTIM) { modify_time = std.os.windows.nanoSecondsToFileTime(timestamp_wasi_modified); modify_time_was_set = true; } - if (fstflags & std.os.wasi.FILESTAT_SET_MTIM_NOW != 0) { + if (flags.MTIM_NOW) { if (filetime_now_needs_set) { std.os.windows.kernel32.GetSystemTimeAsFileTime(&filetime_now); } @@ -1112,12 +1145,21 @@ const Helpers = struct { }; } - fn fdFilestatSetTimesPosix(fd: std.os.fd_t, timestamp_wasi_access: u64, timestamp_wasi_modified: u64, fstflags: u32, errno: *Errno) void { + fn timespecFromTimestamp(timestamp: u64) std.posix.timespec { + const tv_sec = timestamp / 1_000_000_000; + const tv_nsec = timestamp - tv_sec * 1_000_000_000; + return .{ + .tv_sec = @as(isize, @intCast(tv_sec)), + .tv_nsec = @as(isize, @intCast(tv_nsec)), + }; + } + + fn fdFilestatSetTimesPosix(fd: std.posix.fd_t, timestamp_wasi_access: u64, timestamp_wasi_modified: u64, fstflags: u32, errno: *Errno) void { const is_darwin = builtin.os.tag.isDarwin(); const UTIME_NOW: i64 = if (is_darwin) @as(i32, -1) else (1 << 30) - 1; const UTIME_OMIT: i64 = if (is_darwin) @as(i32, -2) else (1 << 30) - 2; - var times = [2]std.os.timespec{ + var times = [2]std.posix.timespec{ .{ // access time .tv_sec = 0, .tv_nsec = UTIME_OMIT, @@ -1128,24 +1170,25 @@ const Helpers = struct { }, }; - if (fstflags & std.os.wasi.FILESTAT_SET_ATIM != 0) { - var ts: std.os.wasi.timespec = std.os.wasi.timespec.fromTimestamp(timestamp_wasi_access); + const flags: std.os.wasi.fstflags_t = @bitCast(@as(u16, @intCast(fstflags))); + if (flags.ATIM) { + const ts: std.posix.timespec = timespecFromTimestamp(timestamp_wasi_access); times[0].tv_sec = ts.tv_sec; times[0].tv_nsec = ts.tv_nsec; } - if (fstflags & std.os.wasi.FILESTAT_SET_ATIM_NOW != 0) { + if (flags.ATIM_NOW) { times[0].tv_nsec = UTIME_NOW; } - if (fstflags & std.os.wasi.FILESTAT_SET_MTIM != 0) { - var ts: std.os.wasi.timespec = std.os.wasi.timespec.fromTimestamp(timestamp_wasi_modified); + if (flags.MTIM) { + const ts: std.posix.timespec = timespecFromTimestamp(timestamp_wasi_modified); times[1].tv_sec = ts.tv_sec; times[1].tv_nsec = ts.tv_nsec; } - if (fstflags & std.os.wasi.FILESTAT_SET_MTIM_NOW != 0) { + if (flags.MTIM_NOW) { times[1].tv_nsec = UTIME_NOW; } - std.os.futimens(fd, ×) catch |err| { + std.posix.futimens(fd, ×) catch |err| { errno.* = Errno.translateError(err); }; } @@ -1154,7 +1197,7 @@ const Helpers = struct { return (high << 32) | low; } - fn filestatGetWindows(fd: std.os.fd_t, errno: *Errno) std.os.wasi.filestat_t { + fn filestatGetWindows(fd: std.posix.fd_t, errno: *Errno) std.os.wasi.filestat_t { if (builtin.os.tag != .windows) { @compileError("This function should only be called on an OS that supports posix APIs."); } @@ -1178,14 +1221,14 @@ const Helpers = struct { return stat_wasi; } - fn filestatGetPosix(fd: std.os.fd_t, errno: *Errno) std.os.wasi.filestat_t { + fn filestatGetPosix(fd: std.posix.fd_t, errno: *Errno) 
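// Illustrative sketch (not part of this patch, helper name hypothetical): the
// timespecFromTimestamp helper above splits a WASI nanosecond timestamp into whole
// seconds plus a nanosecond remainder; the arithmetic is just div/mod by 1e9.
fn splitTimestampNs(timestamp_ns: u64) struct { sec: u64, nsec: u64 } {
    const sec = timestamp_ns / 1_000_000_000;
    return .{ .sec = sec, .nsec = timestamp_ns - sec * 1_000_000_000 };
}
// e.g. splitTimestampNs(1_500_000_000) yields .{ .sec = 1, .nsec = 500_000_000 }.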
std.os.wasi.filestat_t { if (builtin.os.tag == .windows) { @compileError("This function should only be called on an OS that supports posix APIs."); } var stat_wasi: std.os.wasi.filestat_t = undefined; - if (std.os.fstat(fd)) |stat| { + if (std.posix.fstat(fd)) |stat| { stat_wasi.dev = if (builtin.os.tag == .macos) @as(u32, @bitCast(stat.dev)) else stat.dev; stat_wasi.ino = stat.ino; stat_wasi.filetype = posixModeToWasiFiletype(stat.mode); @@ -1209,14 +1252,14 @@ const Helpers = struct { // As of this 0.10.1, the zig stdlib has a bug in std.os.open() that doesn't respect the append flag properly. // To get this working, we'll just use NtCreateFile directly. - fn openPathWindows(path: []const u8, lookupflags: WasiLookupFlags, openflags: WasiOpenFlags, fdflags: WasiFdFlags, rights: WasiRights, errno: *Errno) ?std.os.fd_t { + fn openPathWindows(path: []const u8, lookupflags: WasiLookupFlags, openflags: WasiOpenFlags, fdflags: WasiFdFlags, rights: WasiRights, errno: *Errno) ?std.posix.fd_t { if (builtin.os.tag != .windows) { @compileError("This function should only be called on an OS that supports windows APIs."); } const w = std.os.windows; - const pathspace_w: w.PathSpace = w.sliceToPrefixedFileW(path) catch |err| { + const pathspace_w: w.PathSpace = w.sliceToPrefixedFileW(null, path) catch |err| { errno.* = Errno.translateError(err); return null; }; @@ -1286,7 +1329,7 @@ const Helpers = struct { errno.* = Errno.LOOP; } if (rc == .SUCCESS) { - std.os.close(fd); + std.posix.close(fd); } return null; } @@ -1317,47 +1360,48 @@ const Helpers = struct { return null; } - fn openPathPosix(path: []const u8, lookupflags: WasiLookupFlags, openflags: WasiOpenFlags, fdflags: WasiFdFlags, rights: WasiRights, errno: *Errno) ?std.os.fd_t { + fn openPathPosix(path: []const u8, lookupflags: WasiLookupFlags, openflags: WasiOpenFlags, fdflags: WasiFdFlags, rights: WasiRights, errno: *Errno) ?std.posix.fd_t { if (builtin.os.tag == .windows) { @compileError("This function should only be called on an OS that supports posix APIs."); } - var flags: u32 = 0; + var flags: std.posix.O = .{}; if (openflags.creat) { - flags |= std.os.O.CREAT; + flags.CREAT = true; } if (openflags.directory) { - flags |= std.os.O.DIRECTORY; + flags.DIRECTORY = true; } if (openflags.excl) { - flags |= std.os.O.EXCL; + flags.EXCL = true; } if (openflags.trunc) { - flags |= std.os.O.TRUNC; + flags.TRUNC = true; } if (lookupflags.symlink_follow == false) { - flags |= std.os.O.NOFOLLOW; + flags.NOFOLLOW = true; } const fdflags_os = fdflagsToFlagsPosix(fdflags); - flags |= fdflags_os; + const combined = @as(u32, @bitCast(flags)) | @as(u32, @bitCast(fdflags_os)); + flags = @bitCast(combined); if (rights.fd_read and rights.fd_write) { if (openflags.directory) { - flags |= std.os.O.RDONLY; + flags.ACCMODE = .RDONLY; } else { - flags |= std.os.O.RDWR; + flags.ACCMODE = .RDWR; } } else if (rights.fd_read) { - flags |= std.os.O.RDONLY; + flags.ACCMODE = .RDONLY; } else if (rights.fd_write) { - flags |= std.os.O.WRONLY; + flags.ACCMODE = .WRONLY; } - const S = std.os.linux.S; - const mode: std.os.mode_t = S.IRUSR | S.IWUSR | S.IRGRP | S.IWGRP | S.IROTH; - if (std.os.open(path, flags, mode)) |fd| { + const S = std.posix.S; + const mode: std.posix.mode_t = S.IRUSR | S.IWUSR | S.IRGRP | S.IWGRP | S.IROTH; + if (std.posix.open(path, flags, mode)) |fd| { return fd; } else |err| { errno.* = Errno.translateError(err); @@ -1393,10 +1437,10 @@ const Helpers = struct { if (file_index < fd_info.dir_entries.items.len) { for 
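// Illustrative sketch (not part of this patch, helper name hypothetical; POSIX targets
// only): openPathPosix above merges two std.posix.O packed structs by round-tripping
// through the u32 backing integer, mirroring the cast used in that hunk.
const std = @import("std");

fn mergeOpenFlags(a: std.posix.O, b: std.posix.O) std.posix.O {
    const combined = @as(u32, @bitCast(a)) | @as(u32, @bitCast(b));
    return @bitCast(combined);
}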
(fd_info.dir_entries.items[file_index..]) |entry| { const cookie = file_index + 1; - writer.writeIntLittle(u64, cookie) catch break; - writer.writeIntLittle(u64, entry.inode) catch break; - writer.writeIntLittle(u32, signedCast(u32, entry.filename.len, errno)) catch break; - writer.writeIntLittle(u32, @intFromEnum(entry.filetype)) catch break; + writer.writeInt(u64, cookie, .little) catch break; + writer.writeInt(u64, entry.inode, .little) catch break; + writer.writeInt(u32, signedCast(u32, entry.filename.len, errno), .little) catch break; + writer.writeInt(u32, @intFromEnum(entry.filetype), .little) catch break; _ = writer.write(entry.filename) catch break; file_index += 1; @@ -1413,7 +1457,7 @@ const Helpers = struct { restart_scan = false; } - var bytes_written = signedCast(u32, fbs.pos, errno); + const bytes_written = signedCast(u32, fbs.pos, errno); return bytes_written; } @@ -1424,7 +1468,7 @@ const Helpers = struct { var file_info_buffer: [1024]u8 align(@alignOf(WindowsApi.FILE_ID_FULL_DIR_INFORMATION)) = undefined; var io: std.os.windows.IO_STATUS_BLOCK = undefined; - var rc: std.os.windows.NTSTATUS = std.os.windows.ntdll.NtQueryDirectoryFile( + const rc: std.os.windows.NTSTATUS = std.os.windows.ntdll.NtQueryDirectoryFile( fd_info.fd, null, null, @@ -1459,7 +1503,7 @@ const Helpers = struct { var static_path_buffer: [std.fs.MAX_PATH_BYTES * 2]u8 = undefined; var fba = std.heap.FixedBufferAllocator.init(&static_path_buffer); - var allocator = fba.allocator(); + const allocator = fba.allocator(); const filename: []u8 = std.unicode.utf16leToUtf8Alloc(allocator, filename_utf16le) catch unreachable; var filetype: std.os.wasi.filetype_t = .REGULAR_FILE; @@ -1469,7 +1513,7 @@ const Helpers = struct { filetype = .SYMBOLIC_LINK; } - var filename_duped = fd_info.dir_entries.allocator.dupe(u8, filename) catch |err| { + const filename_duped = fd_info.dir_entries.allocator.dupe(u8, filename) catch |err| { errno.* = Errno.translateError(err); return false; }; @@ -1489,7 +1533,7 @@ const Helpers = struct { fn enumerateDirEntriesDarwin(fd_info: *WasiContext.FdInfo, restart_scan: bool, errno: *Errno) bool { if (restart_scan) { - std.os.lseek_SET(fd_info.fd, 0) catch |err| { + std.posix.lseek_SET(fd_info.fd, 0) catch |err| { errno.* = Errno.translateError(err); return false; }; @@ -1499,8 +1543,8 @@ const Helpers = struct { var dirent_buffer: [1024]u8 align(@alignOf(dirent_t)) = undefined; var unused_seek: i64 = 0; - const rc = std.os.system.__getdirentries64(fd_info.fd, &dirent_buffer, dirent_buffer.len, &unused_seek); - errno.* = switch (std.c.getErrno(rc)) { + const rc = std.c.__getdirentries64(fd_info.fd, &dirent_buffer, dirent_buffer.len, &unused_seek); + errno.* = switch (std.posix.errno(rc)) { .SUCCESS => .SUCCESS, .BADF => .BADF, .FAULT => .FAULT, @@ -1520,13 +1564,13 @@ const Helpers = struct { var buffer_offset: usize = 0; while (buffer_offset < rc) { const dirent_entry = @as(*align(1) dirent_t, @ptrCast(dirent_buffer[buffer_offset..])); - buffer_offset += dirent_entry.d_reclen; + buffer_offset += dirent_entry.reclen; // TODO length should be (d_reclen - 2 - offsetof(dirent64, d_name)) // const filename: []u8 = std.mem.sliceTo(@ptrCast([*:0]u8, &dirent_entry.d_name), 0); - const filename: []u8 = @as([*]u8, @ptrCast(&dirent_entry.d_name))[0..dirent_entry.d_namlen]; + const filename: []u8 = @as([*]u8, @ptrCast(&dirent_entry.name))[0..dirent_entry.namlen]; - const filetype: std.os.wasi.filetype_t = switch (dirent_entry.d_type) { + const filetype: std.os.wasi.filetype_t = switch 
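// Illustrative sketch (not part of this patch, helper name hypothetical): Zig 0.12
// folds writeIntLittle/readIntLittle into writeInt/readInt with an explicit endianness
// argument, which is the pattern the dirent serialization above now uses.
fn writeDirentHeader(writer: anytype, cookie: u64, inode: u64) !void {
    try writer.writeInt(u64, cookie, .little);
    try writer.writeInt(u64, inode, .little);
}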
(dirent_entry.type) { std.c.DT.UNKNOWN => .UNKNOWN, std.c.DT.FIFO => .UNKNOWN, std.c.DT.CHR => .CHARACTER_DEVICE, @@ -1539,13 +1583,13 @@ const Helpers = struct { else => .UNKNOWN, }; - var filename_duped = fd_info.dir_entries.allocator.dupe(u8, filename) catch |err| { + const filename_duped = fd_info.dir_entries.allocator.dupe(u8, filename) catch |err| { errno.* = Errno.translateError(err); break; }; fd_info.dir_entries.append(WasiDirEntry{ - .inode = dirent_entry.d_ino, + .inode = dirent_entry.ino, .filetype = filetype, .filename = filename_duped, }) catch |err| { @@ -1559,7 +1603,7 @@ const Helpers = struct { fn enumerateDirEntriesLinux(fd_info: *WasiContext.FdInfo, restart_scan: bool, errno: *Errno) bool { if (restart_scan) { - std.os.lseek_SET(fd_info.fd, 0) catch |err| { + std.posix.lseek_SET(fd_info.fd, 0) catch |err| { errno.* = Errno.translateError(err); return false; }; @@ -1567,7 +1611,7 @@ const Helpers = struct { var dirent_buffer: [1024]u8 align(@alignOf(std.os.linux.dirent64)) = undefined; const rc = std.os.linux.getdents64(fd_info.fd, &dirent_buffer, dirent_buffer.len); - errno.* = switch (std.os.linux.getErrno(rc)) { + errno.* = switch (std.posix.errno(rc)) { .SUCCESS => Errno.SUCCESS, .BADF => unreachable, // should never happen since this call is wrapped by fdLookup .FAULT => Errno.FAULT, @@ -1587,12 +1631,12 @@ const Helpers = struct { var buffer_offset: usize = 0; while (buffer_offset < rc) { const dirent_entry = @as(*align(1) std.os.linux.dirent64, @ptrCast(dirent_buffer[buffer_offset..])); - buffer_offset += dirent_entry.d_reclen; + buffer_offset += dirent_entry.reclen; // TODO length should be (d_reclen - 2 - offsetof(dirent64, d_name)) - const filename: []u8 = std.mem.sliceTo(@as([*:0]u8, @ptrCast(&dirent_entry.d_name)), 0); + const filename: []u8 = std.mem.sliceTo(@as([*:0]u8, @ptrCast(&dirent_entry.name)), 0); - const filetype: std.os.wasi.filetype_t = switch (dirent_entry.d_type) { + const filetype: std.os.wasi.filetype_t = switch (dirent_entry.type) { std.os.linux.DT.BLK => .BLOCK_DEVICE, std.os.linux.DT.CHR => .CHARACTER_DEVICE, std.os.linux.DT.DIR => .DIRECTORY, @@ -1603,13 +1647,13 @@ const Helpers = struct { else => .UNKNOWN, }; - var filename_duped = fd_info.dir_entries.allocator.dupe(u8, filename) catch |err| { + const filename_duped = fd_info.dir_entries.allocator.dupe(u8, filename) catch |err| { errno.* = Errno.translateError(err); break; }; fd_info.dir_entries.append(WasiDirEntry{ - .inode = @as(u64, @bitCast(dirent_entry.d_ino)), + .inode = @as(u64, @bitCast(dirent_entry.ino)), .filetype = filetype, .filename = filename_duped, }) catch |err| { @@ -1630,12 +1674,12 @@ const Helpers = struct { var reader = stream.reader(); for (iov) |*iovec| { - const iov_base: u32 = reader.readIntLittle(u32) catch { + const iov_base: u32 = reader.readInt(u32, .little) catch { errno.* = Errno.INVAL; return null; }; - const iov_len: u32 = reader.readIntLittle(u32) catch { + const iov_len: u32 = reader.readInt(u32, .little) catch { errno.* = Errno.INVAL; return null; }; @@ -1661,43 +1705,44 @@ fn wasi_proc_exit(_: ?*anyopaque, _: *ModuleInstance, params: [*]const Val, _: [ if (raw_exit_code >= 0 and raw_exit_code < std.math.maxInt(u8)) { const exit_code = @as(u8, @intCast(raw_exit_code)); - std.os.exit(exit_code); + std.process.exit(exit_code); } else { - std.os.exit(1); + std.process.exit(1); } } fn wasi_args_sizes_get(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]const Val, returns: [*]Val) void { - var context = WasiContext.fromUserdata(userdata); + const 
context = WasiContext.fromUserdata(userdata); Helpers.stringsSizesGet(module, context.argv, params, returns); } fn wasi_args_get(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]const Val, returns: [*]Val) void { - var context = WasiContext.fromUserdata(userdata); + const context = WasiContext.fromUserdata(userdata); Helpers.stringsGet(module, context.argv, params, returns); } fn wasi_environ_sizes_get(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]const Val, returns: [*]Val) void { - var context = WasiContext.fromUserdata(userdata); + const context = WasiContext.fromUserdata(userdata); Helpers.stringsSizesGet(module, context.env, params, returns); } fn wasi_environ_get(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]const Val, returns: [*]Val) void { - var context = WasiContext.fromUserdata(userdata); + const context = WasiContext.fromUserdata(userdata); Helpers.stringsGet(module, context.env, params, returns); } fn wasi_clock_res_get(_: ?*anyopaque, module: *ModuleInstance, params: [*]const Val, returns: [*]Val) void { var errno = Errno.SUCCESS; - const system_clockid: i32 = Helpers.convertClockId(params[0].I32, &errno); + const system_clockid: i32 = Helpers.convertClockId(params[0].I32); const timestamp_mem_begin = Helpers.signedCast(u32, params[1].I32, &errno); if (errno == .SUCCESS) { var freqency_ns: u64 = 0; if (builtin.os.tag == .windows) { + const clockid: std.os.wasi.clockid_t = @enumFromInt(system_clockid); // Follow the mingw pattern since clock_getres() isn't linked in libc for windows - if (system_clockid == std.os.wasi.CLOCK.REALTIME or system_clockid == std.os.wasi.CLOCK.MONOTONIC) { + if (clockid == std.os.wasi.clockid_t.REALTIME or clockid == std.os.wasi.clockid_t.MONOTONIC) { const ns_per_second: u64 = 1000000000; const tick_frequency: u64 = std.os.windows.QueryPerformanceFrequency(); freqency_ns = (ns_per_second + (tick_frequency >> 1)) / tick_frequency; @@ -1715,8 +1760,8 @@ fn wasi_clock_res_get(_: ?*anyopaque, module: *ModuleInstance, params: [*]const } } } else { - var ts: std.os.system.timespec = undefined; - if (std.os.clock_getres(system_clockid, &ts)) { + var ts: std.posix.timespec = undefined; + if (std.posix.clock_getres(system_clockid, &ts)) { freqency_ns = @as(u64, @intCast(ts.tv_nsec)); } else |_| { errno = Errno.INVAL; @@ -1732,7 +1777,7 @@ fn wasi_clock_res_get(_: ?*anyopaque, module: *ModuleInstance, params: [*]const fn wasi_clock_time_get(_: ?*anyopaque, module: *ModuleInstance, params: [*]const Val, returns: [*]Val) void { var errno = Errno.SUCCESS; - const system_clockid: i32 = Helpers.convertClockId(params[0].I32, &errno); + const system_clockid: i32 = Helpers.convertClockId(params[0].I32); //const precision = params[1].I64; // unused const timestamp_mem_begin = Helpers.signedCast(u32, params[2].I32, &errno); @@ -1741,14 +1786,15 @@ fn wasi_clock_time_get(_: ?*anyopaque, module: *ModuleInstance, params: [*]const var timestamp_ns: u64 = 0; if (builtin.os.tag == .windows) { - switch (system_clockid) { - std.os.wasi.CLOCK.REALTIME => { + const clockid: std.os.wasi.clockid_t = @enumFromInt(system_clockid); + switch (clockid) { + std.os.wasi.clockid_t.REALTIME => { var ft: WindowsApi.FILETIME = undefined; std.os.windows.kernel32.GetSystemTimeAsFileTime(&ft); timestamp_ns = Helpers.windowsFiletimeToWasi(ft); }, - std.os.wasi.CLOCK.MONOTONIC => { + std.os.wasi.clockid_t.MONOTONIC => { const ticks: u64 = std.os.windows.QueryPerformanceCounter(); const ticks_per_second: u64 = std.os.windows.QueryPerformanceFrequency(); @@ 
-1758,7 +1804,7 @@ fn wasi_clock_time_get(_: ?*anyopaque, module: *ModuleInstance, params: [*]const timestamp_ns = timestamp_secs_part + timestamp_ns_part; }, - std.os.wasi.CLOCK.PROCESS_CPUTIME_ID => { + std.os.wasi.clockid_t.PROCESS_CPUTIME_ID => { var createTime: WindowsApi.FILETIME = undefined; var exitTime: WindowsApi.FILETIME = undefined; var kernelTime: WindowsApi.FILETIME = undefined; @@ -1770,7 +1816,7 @@ fn wasi_clock_time_get(_: ?*anyopaque, module: *ModuleInstance, params: [*]const errno = Errno.INVAL; } }, - std.os.wasi.CLOCK.THREAD_CPUTIME_ID => { + std.os.wasi.clockid_t.THREAD_CPUTIME_ID => { var createTime: WindowsApi.FILETIME = undefined; var exitTime: WindowsApi.FILETIME = undefined; var kernelTime: WindowsApi.FILETIME = undefined; @@ -1782,11 +1828,10 @@ fn wasi_clock_time_get(_: ?*anyopaque, module: *ModuleInstance, params: [*]const errno = Errno.INVAL; } }, - else => unreachable, } } else { - var ts: std.os.system.timespec = undefined; - if (std.os.clock_gettime(system_clockid, &ts)) { + var ts: std.posix.timespec = undefined; + if (std.posix.clock_gettime(system_clockid, &ts)) { timestamp_ns = Helpers.posixTimespecToWasi(ts); } else |_| { errno = Errno.INVAL; @@ -1806,7 +1851,7 @@ fn fd_wasi_datasync(userdata: ?*anyopaque, _: *ModuleInstance, params: [*]const var errno = Errno.SUCCESS; if (context.fdLookup(fd_wasi, &errno)) |fd_info| { - std.os.fdatasync(fd_info.fd) catch |err| { + std.posix.fdatasync(fd_info.fd) catch |err| { errno = Errno.translateError(err); }; } @@ -1823,14 +1868,14 @@ fn fd_wasi_fdstat_get(userdata: ?*anyopaque, module: *ModuleInstance, params: [* if (errno == .SUCCESS) { if (context.fdLookup(fd_wasi, &errno)) |fd_info| { - const fd_os: std.os.fd_t = fd_info.fd; + const fd_os = fd_info.fd; const stat: std.os.wasi.fdstat_t = if (builtin.os.tag == .windows) Helpers.fdstatGetWindows(fd_os, &errno) else Helpers.fdstatGetPosix(fd_os, &errno); if (errno == .SUCCESS) { Helpers.writeIntToMemory(u8, @intFromEnum(stat.fs_filetype), fdstat_mem_offset + 0, module, &errno); - Helpers.writeIntToMemory(u16, stat.fs_flags, fdstat_mem_offset + 2, module, &errno); - Helpers.writeIntToMemory(u64, stat.fs_rights_base, fdstat_mem_offset + 8, module, &errno); - Helpers.writeIntToMemory(u64, stat.fs_rights_inheriting, fdstat_mem_offset + 16, module, &errno); + Helpers.writeIntToMemory(u16, @bitCast(stat.fs_flags), fdstat_mem_offset + 2, module, &errno); + Helpers.writeIntToMemory(u64, @bitCast(stat.fs_rights_base), fdstat_mem_offset + 8, module, &errno); + Helpers.writeIntToMemory(u64, @bitCast(stat.fs_rights_inheriting), fdstat_mem_offset + 16, module, &errno); } } } @@ -1886,7 +1931,7 @@ fn fd_wasi_prestat_dir_name(userdata: ?*anyopaque, module: *ModuleInstance, para if (context.fdDirPath(fd_dir_wasi, &errno)) |path_source| { if (Helpers.getMemorySlice(module, path_mem_offset, path_mem_length, &errno)) |path_dest| { if (path_source.len <= path_dest.len) { - std.mem.copy(u8, path_dest, path_source); + @memcpy(path_dest, path_source); // add null terminator if there's room if (path_dest.len > path_source.len) { @@ -1913,9 +1958,9 @@ fn fd_wasi_read(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]const if (errno == .SUCCESS) { if (context.fdLookup(fd_wasi, &errno)) |fd_info| { - var stack_iov = [_]std.os.iovec{undefined} ** 1024; - if (Helpers.initIovecs(std.os.iovec, &stack_iov, &errno, module, iovec_array_begin, iovec_array_count)) |iov| { - if (std.os.readv(fd_info.fd, iov)) |read_bytes| { + var stack_iov = [_]std.posix.iovec{undefined} ** 1024; + if 
(Helpers.initIovecs(std.posix.iovec, &stack_iov, &errno, module, iovec_array_begin, iovec_array_count)) |iov| { + if (std.posix.readv(fd_info.fd, iov)) |read_bytes| { if (read_bytes <= std.math.maxInt(u32)) { Helpers.writeIntToMemory(u32, @as(u32, @intCast(read_bytes)), bytes_read_out_offset, module, &errno); } else { @@ -1944,7 +1989,7 @@ fn fd_wasi_readdir(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]co if (errno == .SUCCESS) { if (context.fdLookup(fd_wasi, &errno)) |fd_info| { if (Helpers.getMemorySlice(module, dirent_mem_offset, dirent_mem_length, &errno)) |dirent_buffer| { - var bytes_written = Helpers.enumerateDirEntries(fd_info, cookie, dirent_buffer, &errno); + const bytes_written = Helpers.enumerateDirEntries(fd_info, cookie, dirent_buffer, &errno); Helpers.writeIntToMemory(u32, bytes_written, bytes_written_out_offset, module, &errno); } } @@ -1977,9 +2022,9 @@ fn fd_wasi_pread(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]cons if (errno == .SUCCESS) { if (context.fdLookup(fd_wasi, &errno)) |fd_info| { - var stack_iov = [_]std.os.iovec{undefined} ** 1024; - if (Helpers.initIovecs(std.os.iovec, &stack_iov, &errno, module, iovec_array_begin, iovec_array_count)) |iov| { - if (std.os.preadv(fd_info.fd, iov, read_offset)) |read_bytes| { + var stack_iov = [_]std.posix.iovec{undefined} ** 1024; + if (Helpers.initIovecs(std.posix.iovec, &stack_iov, &errno, module, iovec_array_begin, iovec_array_count)) |iov| { + if (std.posix.preadv(fd_info.fd, iov, read_offset)) |read_bytes| { if (read_bytes <= std.math.maxInt(u32)) { Helpers.writeIntToMemory(u32, @as(u32, @intCast(read_bytes)), bytes_read_out_offset, module, &errno); } else { @@ -2010,21 +2055,18 @@ fn fd_wasi_advise(userdata: ?*anyopaque, _: *ModuleInstance, params: [*]const Va // fadvise isn't available on windows or macos, but fadvise is just an optimization hint, so don't // return a bad error code if (builtin.os.tag == .linux) { - const advice: usize = switch (advice_wasi) { - std.os.wasi.ADVICE_NORMAL => std.os.POSIX_FADV.NORMAL, - std.os.wasi.ADVICE_SEQUENTIAL => std.os.POSIX_FADV.SEQUENTIAL, - std.os.wasi.ADVICE_RANDOM => std.os.POSIX_FADV.RANDOM, - std.os.wasi.ADVICE_WILLNEED => std.os.POSIX_FADV.WILLNEED, - std.os.wasi.ADVICE_DONTNEED => std.os.POSIX_FADV.DONTNEED, - std.os.wasi.ADVICE_NOREUSE => std.os.POSIX_FADV.NOREUSE, - else => blk: { - errno = Errno.INVAL; - break :blk 0; - }, + const wasi_advice: std.os.wasi.advice_t = @enumFromInt(advice_wasi); + const advice: usize = switch (wasi_advice) { + std.os.wasi.advice_t.NORMAL => std.os.linux.POSIX_FADV.NORMAL, + std.os.wasi.advice_t.SEQUENTIAL => std.os.linux.POSIX_FADV.SEQUENTIAL, + std.os.wasi.advice_t.RANDOM => std.os.linux.POSIX_FADV.RANDOM, + std.os.wasi.advice_t.WILLNEED => std.os.linux.POSIX_FADV.WILLNEED, + std.os.wasi.advice_t.DONTNEED => std.os.linux.POSIX_FADV.DONTNEED, + std.os.wasi.advice_t.NOREUSE => std.os.linux.POSIX_FADV.NOREUSE, }; if (errno == .SUCCESS) { - const ret = @as(std.os.linux.E, @enumFromInt(std.os.system.fadvise(fd_info.fd, offset, length, advice))); + const ret = @as(std.os.linux.E, @enumFromInt(std.os.linux.fadvise(fd_info.fd, offset, length, advice))); errno = switch (ret) { .SUCCESS => Errno.SUCCESS, .SPIPE => Errno.SPIPE, @@ -2071,7 +2113,7 @@ fn fd_wasi_allocate(userdata: ?*anyopaque, _: *ModuleInstance, params: [*]const } else if (builtin.os.tag == .linux) { const mode = 0; const rc = std.os.linux.fallocate(fd_info.fd, mode, offset, length_relative); - errno = switch (std.os.linux.getErrno(rc)) { + errno = 
switch (std.posix.errno(rc)) { .SUCCESS => Errno.SUCCESS, .BADF => unreachable, // should never happen since this call is wrapped by fdLookup .FBIG => Errno.FBIG, @@ -2093,7 +2135,7 @@ fn fd_wasi_allocate(userdata: ?*anyopaque, _: *ModuleInstance, params: [*]const // so we need to emulate that behavior here const length_total = @as(u64, @intCast(@as(i128, offset) + length_relative)); if (stat.size < length_total) { - std.os.ftruncate(fd_info.fd, length_total) catch |err| { + std.posix.ftruncate(fd_info.fd, length_total) catch |err| { errno = Errno.translateError(err); }; } @@ -2155,7 +2197,7 @@ fn fd_wasi_filestat_set_size(userdata: ?*anyopaque, _: *ModuleInstance, params: if (Helpers.isStdioHandle(fd_wasi)) { errno = Errno.BADF; } else if (context.fdLookup(fd_wasi, &errno)) |fd_info| { - std.os.ftruncate(fd_info.fd, size) catch |err| { + std.posix.ftruncate(fd_info.fd, size) catch |err| { errno = Errno.translateError(err); }; } @@ -2171,14 +2213,14 @@ fn fd_wasi_filestat_set_times(userdata: ?*anyopaque, _: *ModuleInstance, params: const fd_wasi = @as(u32, @bitCast(params[0].I32)); const timestamp_wasi_access = Helpers.signedCast(u64, params[1].I64, &errno); const timestamp_wasi_modified = Helpers.signedCast(u64, params[2].I64, &errno); - const fstflags = @as(u32, @bitCast(params[3].I32)); + const fstflags: std.os.wasi.fstflags_t = @bitCast(@as(u16, @intCast(params[3].I32))); if (errno == .SUCCESS) { - if (fstflags & std.os.wasi.FILESTAT_SET_ATIM != 0 and fstflags & std.os.wasi.FILESTAT_SET_ATIM_NOW != 0) { + if (fstflags.ATIM and fstflags.ATIM_NOW) { errno = Errno.INVAL; } - if (fstflags & std.os.wasi.FILESTAT_SET_MTIM != 0 and fstflags & std.os.wasi.FILESTAT_SET_MTIM_NOW != 0) { + if (fstflags.MTIM and fstflags.MTIM_NOW) { errno = Errno.INVAL; } } @@ -2188,7 +2230,8 @@ fn fd_wasi_filestat_set_times(userdata: ?*anyopaque, _: *ModuleInstance, params: errno = Errno.BADF; } else if (context.fdLookup(fd_wasi, &errno)) |fd_info| { const fd_filestat_set_times_func = if (builtin.os.tag == .windows) Helpers.fdFilestatSetTimesWindows else Helpers.fdFilestatSetTimesPosix; - fd_filestat_set_times_func(fd_info.fd, timestamp_wasi_access, timestamp_wasi_modified, fstflags, &errno); + const flags_int: u32 = @as(u16, @bitCast(fstflags)); + fd_filestat_set_times_func(fd_info.fd, timestamp_wasi_access, timestamp_wasi_modified, flags_int, &errno); } } @@ -2207,30 +2250,30 @@ fn fd_wasi_seek(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]const if (errno == .SUCCESS) { if (context.fdLookup(fd_wasi, &errno)) |fd_info| { if (fd_info.rights.fd_seek) { - const fd_os: std.os.fd_t = fd_info.fd; + const fd_os: std.posix.fd_t = fd_info.fd; if (Whence.fromInt(whence_raw)) |whence| { switch (whence) { .Set => { if (offset >= 0) { const offset_unsigned = @as(u64, @intCast(offset)); - std.os.lseek_SET(fd_os, offset_unsigned) catch |err| { + std.posix.lseek_SET(fd_os, offset_unsigned) catch |err| { errno = Errno.translateError(err); }; } }, .Cur => { - std.os.lseek_CUR(fd_os, offset) catch |err| { + std.posix.lseek_CUR(fd_os, offset) catch |err| { errno = Errno.translateError(err); }; }, .End => { - std.os.lseek_END(fd_os, offset) catch |err| { + std.posix.lseek_END(fd_os, offset) catch |err| { errno = Errno.translateError(err); }; }, } - if (std.os.lseek_CUR_get(fd_os)) |filepos| { + if (std.posix.lseek_CUR_get(fd_os)) |filepos| { Helpers.writeIntToMemory(u64, filepos, filepos_out_offset, module, &errno); } else |err| { errno = Errno.translateError(err); @@ -2257,7 +2300,7 @@ fn fd_wasi_tell(userdata: 
?*anyopaque, module: *ModuleInstance, params: [*]const if (errno == .SUCCESS) { if (context.fdLookup(fd_wasi, &errno)) |fd_info| { - if (std.os.lseek_CUR_get(fd_info.fd)) |filepos| { + if (std.posix.lseek_CUR_get(fd_info.fd)) |filepos| { Helpers.writeIntToMemory(u64, filepos, filepos_out_offset, module, &errno); } else |err| { errno = Errno.translateError(err); @@ -2279,9 +2322,9 @@ fn fd_wasi_write(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]cons if (errno == .SUCCESS) { if (context.fdLookup(fd_wasi, &errno)) |fd_info| { - var stack_iov = [_]std.os.iovec_const{undefined} ** 1024; - if (Helpers.initIovecs(std.os.iovec_const, &stack_iov, &errno, module, iovec_array_begin, iovec_array_count)) |iov| { - if (std.os.writev(fd_info.fd, iov)) |written_bytes| { + var stack_iov = [_]std.posix.iovec_const{undefined} ** 1024; + if (Helpers.initIovecs(std.posix.iovec_const, &stack_iov, &errno, module, iovec_array_begin, iovec_array_count)) |iov| { + if (std.posix.writev(fd_info.fd, iov)) |written_bytes| { Helpers.writeIntToMemory(u32, @as(u32, @intCast(written_bytes)), bytes_written_out_offset, module, &errno); } else |err| { errno = Errno.translateError(err); @@ -2305,9 +2348,9 @@ fn fd_wasi_pwrite(userdata: ?*anyopaque, module: *ModuleInstance, params: [*]con if (errno == .SUCCESS) { if (context.fdLookup(fd_wasi, &errno)) |fd_info| { - var stack_iov = [_]std.os.iovec_const{undefined} ** 1024; - if (Helpers.initIovecs(std.os.iovec_const, &stack_iov, &errno, module, iovec_array_begin, iovec_array_count)) |iov| { - if (std.os.pwritev(fd_info.fd, iov, write_offset)) |written_bytes| { + var stack_iov = [_]std.posix.iovec_const{undefined} ** 1024; + if (Helpers.initIovecs(std.posix.iovec_const, &stack_iov, &errno, module, iovec_array_begin, iovec_array_count)) |iov| { + if (std.posix.pwritev(fd_info.fd, iov, write_offset)) |written_bytes| { Helpers.writeIntToMemory(u32, @as(u32, @intCast(written_bytes)), bytes_written_out_offset, module, &errno); } else |err| { errno = Errno.translateError(err); @@ -2331,9 +2374,8 @@ fn wasi_path_create_directory(userdata: ?*anyopaque, module: *ModuleInstance, pa if (context.fdLookup(fd_dir_wasi, &errno)) |fd_info| { if (Helpers.getMemorySlice(module, path_mem_offset, path_mem_length, &errno)) |path| { if (context.hasPathAccess(fd_info, path, &errno)) { - const S = std.os.linux.S; - const mode: std.os.mode_t = if (builtin.os.tag == .windows) undefined else S.IRWXU | S.IRWXG | S.IROTH; - std.os.mkdirat(fd_info.fd, path, mode) catch |err| { + const mode: std.posix.mode_t = if (builtin.os.tag == .windows) undefined else std.posix.S.IRWXU | std.posix.S.IRWXG | std.posix.S.IROTH; + std.posix.mkdirat(fd_info.fd, path, mode) catch |err| { errno = Errno.translateError(err); }; } @@ -2358,22 +2400,42 @@ fn wasi_path_filestat_get(userdata: ?*anyopaque, module: *ModuleInstance, params if (context.fdLookup(fd_dir_wasi, &errno)) |fd_info| { if (Helpers.getMemorySlice(module, path_mem_offset, path_mem_length, &errno)) |path| { if (context.hasPathAccess(fd_info, path, &errno)) { - var flags: u32 = std.os.O.RDONLY; - if (lookup_flags.symlink_follow == false) { - flags |= std.os.O.NOFOLLOW; - } - - const mode: std.os.mode_t = if (builtin.os.tag != .windows) 644 else undefined; - if (std.os.openat(fd_info.fd, path, flags, mode)) |fd_opened| { - defer std.os.close(fd_opened); + if (builtin.os.tag == .windows) { + const dir = std.fs.Dir{ + .fd = fd_info.fd, + }; + if (dir.openFile(path, .{})) |file| { + defer file.close(); - const stat: std.os.wasi.filestat_t = if 
(builtin.os.tag == .windows) Helpers.filestatGetWindows(fd_opened, &errno) else Helpers.filestatGetPosix(fd_opened, &errno); + const stat: std.os.wasi.filestat_t = Helpers.filestatGetWindows(file.handle, &errno); if (errno == .SUCCESS) { Helpers.writeFilestatToMemory(&stat, filestat_out_mem_offset, module, &errno); } - } else |err| { - errno = Errno.translateError(err); + } else |err| { + errno = Errno.translateError(err); + } + } else { + + var flags: std.posix.O = .{ + .ACCMODE = .RDONLY, + }; + if (lookup_flags.symlink_follow == false) { + flags.NOFOLLOW = true; + } + + const mode: std.posix.mode_t = 644; + + if (std.posix.openat(fd_info.fd, path, flags, mode)) |fd_opened| { + defer std.posix.close(fd_opened); + + const stat: std.os.wasi.filestat_t = Helpers.filestatGetPosix(fd_opened, &errno); + if (errno == .SUCCESS) { + Helpers.writeFilestatToMemory(&stat, filestat_out_mem_offset, module, &errno); + } + } else |err| { + errno = Errno.translateError(err); + } } } } @@ -2436,7 +2498,7 @@ fn wasi_path_remove_directory(userdata: ?*anyopaque, module: *ModuleInstance, pa if (context.hasPathAccess(fd_info, path, &errno)) { var static_path_buffer: [std.fs.MAX_PATH_BYTES * 2]u8 = undefined; if (Helpers.resolvePath(fd_info, path, &static_path_buffer, &errno)) |resolved_path| { - std.os.unlinkat(FD_OS_INVALID, resolved_path, std.os.AT.REMOVEDIR) catch |err| { + std.posix.unlinkat(FD_OS_INVALID, resolved_path, std.posix.AT.REMOVEDIR) catch |err| { errno = Errno.translateError(err); }; @@ -2474,12 +2536,12 @@ fn wasi_path_symlink(userdata: ?*anyopaque, module: *ModuleInstance, params: [*] if (Helpers.resolvePath(fd_info, link_path, &static_path_buffer, &errno)) |resolved_link_path| { const w = std.os.windows; - const link_contents_w: w.PathSpace = w.sliceToPrefixedFileW(link_contents) catch |err| blk: { + const link_contents_w: w.PathSpace = w.sliceToPrefixedFileW(fd_info.fd, link_contents) catch |err| blk: { errno = Errno.translateError(err); break :blk undefined; }; - const resolved_link_path_w: w.PathSpace = w.sliceToPrefixedFileW(resolved_link_path) catch |err| blk: { + const resolved_link_path_w: w.PathSpace = w.sliceToPrefixedFileW(fd_info.fd, resolved_link_path) catch |err| blk: { errno = Errno.translateError(err); break :blk undefined; }; @@ -2492,7 +2554,7 @@ fn wasi_path_symlink(userdata: ?*anyopaque, module: *ModuleInstance, params: [*] } } } else { - std.os.symlinkat(link_contents, fd_info.fd, link_path) catch |err| { + std.posix.symlinkat(link_contents, fd_info.fd, link_path) catch |err| { errno = Errno.translateError(err); }; } @@ -2521,7 +2583,7 @@ fn wasi_path_unlink_file(userdata: ?*anyopaque, module: *ModuleInstance, params: if (context.hasPathAccess(fd_info, path, &errno)) { var static_path_buffer: [std.fs.MAX_PATH_BYTES * 2]u8 = undefined; if (Helpers.resolvePath(fd_info, path, &static_path_buffer, &errno)) |resolved_path| { - std.os.unlinkat(FD_OS_INVALID, resolved_path, 0) catch |err| { + std.posix.unlinkat(FD_OS_INVALID, resolved_path, 0) catch |err| { errno = Errno.translateError(err); }; diff --git a/src/zig-stable-array/stable_array.zig b/src/zig-stable-array/stable_array.zig index 626459e..ace780d 100644 --- a/src/zig-stable-array/stable_array.zig +++ b/src/zig-stable-array/stable_array.zig @@ -61,7 +61,7 @@ pub fn StableArrayAligned(comptime T: type, comptime alignment: u29) type { self.items.len += items.len; mem.copyBackwards(T, self.items[i + items.len .. self.items.len], self.items[i .. self.items.len - items.len]); - mem.copy(T, self.items[i .. 
i + items.len], items); + @memcpy(self.items[i .. i + items.len], items); } pub fn replaceRange(self: *Self, start: usize, len: usize, new_items: []const T) !void { @@ -69,15 +69,15 @@ pub fn StableArrayAligned(comptime T: type, comptime alignment: u29) type { const range = self.items[start..after_range]; if (range.len == new_items.len) - mem.copy(T, range, new_items) + @memcpy(range, new_items) else if (range.len < new_items.len) { const first = new_items[0..range.len]; const rest = new_items[range.len..]; - mem.copy(T, range, first); + @memcpy(range, first); try self.insertSlice(after_range, rest); } else { - mem.copy(T, range, new_items); + @memcpy(range, new_items); const after_subrange = start + new_items.len; for (self.items[after_range..], 0..) |item, i| { @@ -108,7 +108,7 @@ pub fn StableArrayAligned(comptime T: type, comptime alignment: u29) type { const new_len = old_len + items.len; assert(new_len <= self.capacity); self.items.len = new_len; - mem.copy(T, self.items[old_len..], items); + @memcpy(self.items[old_len..], items); } pub fn appendNTimes(self: *Self, value: T, n: usize) !void { @@ -203,19 +203,19 @@ pub fn StableArrayAligned(comptime T: type, comptime alignment: u29) type { const addr: usize = @intFromPtr(self.items.ptr) + new_capacity_bytes; w.VirtualFree(@as(w.PVOID, @ptrFromInt(addr)), bytes_to_free, w.MEM_DECOMMIT); } else { - var base_addr: usize = @intFromPtr(self.items.ptr); - var offset_addr: usize = base_addr + new_capacity_bytes; - var addr: [*]align(mem.page_size) u8 = @ptrFromInt(offset_addr); + const base_addr: usize = @intFromPtr(self.items.ptr); + const offset_addr: usize = base_addr + new_capacity_bytes; + const addr: [*]align(mem.page_size) u8 = @ptrFromInt(offset_addr); if (comptime builtin.target.isDarwin()) { const MADV_DONTNEED = 4; const err: c_int = darwin.madvise(addr, bytes_to_free, MADV_DONTNEED); - switch (@as(os.darwin.E, @enumFromInt(err))) { - os.E.INVAL => unreachable, - os.E.NOMEM => unreachable, + switch (@as(std.posix.E, @enumFromInt(err))) { + std.posix.E.INVAL => unreachable, + std.posix.E.NOMEM => unreachable, else => {}, } } else { - os.madvise(addr, bytes_to_free, std.c.MADV.DONTNEED) catch unreachable; + std.posix.madvise(addr, bytes_to_free, std.c.MADV.DONTNEED) catch unreachable; } } @@ -243,7 +243,7 @@ pub fn StableArrayAligned(comptime T: type, comptime alignment: u29) type { var slice: []align(mem.page_size) const u8 = undefined; slice.ptr = @alignCast(@as([*]u8, @ptrCast(self.items.ptr))); slice.len = self.max_virtual_alloc_bytes; - os.munmap(slice); + std.posix.munmap(slice); } } @@ -264,10 +264,13 @@ pub fn StableArrayAligned(comptime T: type, comptime alignment: u29) type { self.items.len = 0; } else { const prot: u32 = std.c.PROT.NONE; - const map: u32 = std.c.MAP.PRIVATE | std.c.MAP.ANONYMOUS; - const fd: os.fd_t = -1; + const map: std.c.MAP = .{ + .ANONYMOUS = true, + .TYPE = .PRIVATE, + }; + const fd: std.posix.fd_t = -1; const offset: usize = 0; - var slice = os.mmap(null, self.max_virtual_alloc_bytes, prot, map, fd, offset) catch |e| { + const slice = std.posix.mmap(null, self.max_virtual_alloc_bytes, prot, map, fd, offset) catch |e| { std.debug.print("caught initial sizing error {}, total bytes: {}\n", .{ e, self.max_virtual_alloc_bytes }); return e; }; @@ -288,11 +291,15 @@ pub fn StableArrayAligned(comptime T: type, comptime alignment: u29) type { const remap_region_begin: [*]u8 = region_begin + current_capacity_bytes; const prot: u32 = std.c.PROT.READ | std.c.PROT.WRITE; - const map: u32 = std.c.MAP.PRIVATE | 
std.c.MAP.ANONYMOUS | std.c.MAP.FIXED; - const fd: os.fd_t = -1; + const map: std.c.MAP = .{ + .ANONYMOUS = true, + .TYPE = .PRIVATE, + .FIXED = true, + }; + const fd: std.posix.fd_t = -1; const offset: usize = 0; - _ = os.mmap(@alignCast(remap_region_begin), resize_capacity, prot, map, fd, offset) catch |e| { + _ = std.posix.mmap(@alignCast(remap_region_begin), resize_capacity, prot, map, fd, offset) catch |e| { std.debug.print("caught error {}\n", .{e}); return e; }; diff --git a/test/wasm/main.zig b/test/wasm/main.zig index 6407704..870ce96 100644 --- a/test/wasm/main.zig +++ b/test/wasm/main.zig @@ -207,7 +207,7 @@ fn parseVal(obj: std.json.ObjectMap) !TaggedVal { fn parseF32(str: []const u8) !f32 { if (std.mem.startsWith(u8, str, "nan:")) { - return std.math.nan_f32; // don't differentiate between arithmetic/canonical nan + return std.math.nan(f32); // don't differentiate between arithmetic/canonical nan } else { const int = try std.fmt.parseInt(u32, str, 10); return @as(f32, @bitCast(int)); @@ -216,7 +216,7 @@ fn parseVal(obj: std.json.ObjectMap) !TaggedVal { fn parseF64(str: []const u8) !f64 { if (std.mem.startsWith(u8, str, "nan:")) { - return std.math.nan_f64; // don't differentiate between arithmetic/canonical nan + return std.math.nan(f64); // don't differentiate between arithmetic/canonical nan } else { const int = try std.fmt.parseInt(u64, str, 10); return @as(f64, @bitCast(int)); @@ -242,9 +242,9 @@ fn parseVal(obj: std.json.ObjectMap) !TaggedVal { v.* = try parse_func(json_strings[i].string); } - var parsed_bytes = std.mem.sliceAsBytes(&parsed_values); + const parsed_bytes = std.mem.sliceAsBytes(&parsed_values); var bytes: [16]u8 = undefined; - std.mem.copy(u8, &bytes, parsed_bytes); + @memcpy(&bytes, parsed_bytes); return std.mem.bytesToValue(v128, &bytes); } }; @@ -259,10 +259,10 @@ fn parseVal(obj: std.json.ObjectMap) !TaggedVal { const int = try Helpers.parseI64(json_value.string); return TaggedVal{ .type = .I64, .val = Val{ .I64 = int } }; } else if (strcmp("f32", json_type.string)) { - var float: f32 = try Helpers.parseF32(json_value.string); + const float: f32 = try Helpers.parseF32(json_value.string); return TaggedVal{ .type = .F32, .val = Val{ .F32 = float } }; } else if (strcmp("f64", json_type.string)) { - var float: f64 = try Helpers.parseF64(json_value.string); + const float: f64 = try Helpers.parseF64(json_value.string); return TaggedVal{ .type = .F64, .val = Val{ .F64 = float } }; } else if (strcmp("v128", json_type.string)) { const json_lane_type = obj.get("lane_type").?; @@ -345,7 +345,7 @@ const LaneTypedVal = struct { }; fn parseLaneTypedVal(obj: std.json.ObjectMap) !LaneTypedVal { - var v: TaggedVal = try parseVal(obj); + const v: TaggedVal = try parseVal(obj); var lane_type = V128LaneType.I8x16; if (v.type == .V128) { const json_lane_type = obj.get("lane_type").?; @@ -462,7 +462,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array var args = std.ArrayList(LaneTypedVal).init(_allocator); if (json_args_or_null) |json_args| { for (json_args.array.items) |item| { - var val: LaneTypedVal = try parseLaneTypedVal(item.object); + const val: LaneTypedVal = try parseLaneTypedVal(item.object); try args.append(val); } } @@ -493,7 +493,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array }; // print("json_path: {s}\n", .{json_path}); - var json_data = try std.fs.cwd().readFileAlloc(allocator, json_path, 1024 * 1024 * 8); + const json_data = try std.fs.cwd().readFileAlloc(allocator, json_path, 1024 * 
1024 * 8); var parsed = try std.json.parseFromSlice(std.json.Value, allocator, json_data, .{}); var fallback_module: []const u8 = ""; @@ -507,7 +507,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array if (strcmp("module", json_command_type.string)) { const json_filename = json_command.object.getPtr("filename").?; - var filename: []const u8 = try allocator.dupe(u8, json_filename.string); + const filename: []const u8 = try allocator.dupe(u8, json_filename.string); fallback_module = filename; var name = try allocator.dupe(u8, filename); @@ -515,7 +515,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array name = try allocator.dupe(u8, json_module_name.string); } - var command = Command{ + const command = Command{ .DecodeModule = CommandDecodeModule{ .module_filename = try allocator.dupe(u8, filename), .module_name = name, @@ -524,7 +524,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array try commands.append(command); } else if (strcmp("register", json_command_type.string)) { const json_as = json_command.object.getPtr("as").?; - var json_import_name: []const u8 = json_as.string; + const json_import_name: []const u8 = json_as.string; var json_module_name: []const u8 = fallback_module; if (json_command.object.getPtr("name")) |json_name| { json_module_name = json_name.string; @@ -532,7 +532,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array // print("json_module_name: {s}, json_import_name: {s}\n", .{ json_module_name, json_import_name }); - var command = Command{ + const command = Command{ .Register = CommandRegister{ .module_filename = try allocator.dupe(u8, fallback_module), .module_name = try allocator.dupe(u8, json_module_name), @@ -543,7 +543,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array } else if (strcmp("assert_return", json_command_type.string) or strcmp("action", json_command_type.string)) { const json_action = json_command.object.getPtr("action").?; - var action = try Helpers.parseAction(json_action, fallback_module, allocator); + const action = try Helpers.parseAction(json_action, fallback_module, allocator); var expected_returns_or_null: ?std.ArrayList(LaneTypedVal) = null; const json_expected_or_null = json_command.object.getPtr("expected"); @@ -555,7 +555,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array expected_returns_or_null = expected_returns; } - var command = Command{ + const command = Command{ .AssertReturn = CommandAssertReturn{ .action = action, .expected_returns = expected_returns_or_null, @@ -565,11 +565,11 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array } else if (strcmp("assert_trap", json_command_type.string) or strcmp("assert_exhaustion", json_command_type.string)) { const json_action = json_command.object.getPtr("action").?; - var action = try Helpers.parseAction(json_action, fallback_module, allocator); + const action = try Helpers.parseAction(json_action, fallback_module, allocator); const json_text = json_command.object.getPtr("text").?; - var command = Command{ + const command = Command{ .AssertTrap = CommandAssertTrap{ .action = action, .expected_error = try allocator.dupe(u8, json_text.string), @@ -577,7 +577,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array }; try commands.append(command); } else if (strcmp("assert_malformed", json_command_type.string)) { - var command = Command{ + 
const command = Command{ .AssertMalformed = CommandAssertMalformed{ .err = try Helpers.parseBadModuleError(&json_command, allocator), }, @@ -586,7 +586,7 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array try commands.append(command); } } else if (strcmp("assert_invalid", json_command_type.string)) { - var command = Command{ + const command = Command{ .AssertInvalid = CommandAssertInvalid{ .err = try Helpers.parseBadModuleError(&json_command, allocator), }, @@ -595,14 +595,14 @@ fn parseCommands(json_path: []const u8, allocator: std.mem.Allocator) !std.Array try commands.append(command); } } else if (strcmp("assert_unlinkable", json_command_type.string)) { - var command = Command{ + const command = Command{ .AssertUnlinkable = CommandAssertUnlinkable{ .err = try Helpers.parseBadModuleError(&json_command, allocator), }, }; try commands.append(command); } else if (strcmp("assert_uninstantiable", json_command_type.string)) { - var command = Command{ + const command = Command{ .AssertUninstantiable = CommandAssertUninstantiable{ .err = try Helpers.parseBadModuleError(&json_command, allocator), }, @@ -682,13 +682,13 @@ fn makeSpectestImports(allocator: std.mem.Allocator) !bytebox.ModuleImportPackag f64 => Val{ .F64 = value }, else => unreachable, }; - var global_definition = try _allocator.create(bytebox.GlobalDefinition); + const global_definition = try _allocator.create(bytebox.GlobalDefinition); global_definition.* = bytebox.GlobalDefinition{ .valtype = valtype, .mut = mut, .expr = undefined, // unused }; - var global_instance = try _allocator.create(bytebox.GlobalInstance); + const global_instance = try _allocator.create(bytebox.GlobalInstance); global_instance.* = bytebox.GlobalInstance{ .def = global_definition, .value = val, @@ -714,7 +714,7 @@ fn makeSpectestImports(allocator: std.mem.Allocator) !bytebox.ModuleImportPackag const TableInstance = bytebox.TableInstance; - var table = try allocator.create(TableInstance); + const table = try allocator.create(TableInstance); table.* = try TableInstance.init(ValType.FuncRef, bytebox.Limits{ .min = 10, .max = 20, .limit_type = 1 }, allocator); try imports.tables.append(bytebox.TableImport{ .name = try allocator.dupe(u8, "table"), @@ -784,7 +784,7 @@ fn run(allocator: std.mem.Allocator, suite_path: []const u8, opts: *const TestOp // NOTE this shares the same copies of the import arrays, since the modules must share instances var imports = std.ArrayList(bytebox.ModuleImportPackage).init(allocator); defer { - var spectest_imports = imports.items[0]; + const spectest_imports = imports.items[0]; for (spectest_imports.tables.items) |*item| { allocator.free(item.name); item.data.Host.deinit(); @@ -819,7 +819,7 @@ fn run(allocator: std.mem.Allocator, suite_path: []const u8, opts: *const TestOp } // std.debug.print("looking for (name/filename) {s}:{s}\n", .{ module_name, module_filename }); - var entry = name_to_module.getOrPutAssumeCapacity(module_name); + const entry = name_to_module.getOrPutAssumeCapacity(module_name); var module: *Module = entry.value_ptr; if (entry.found_existing == false) { module.* = Module{}; @@ -844,7 +844,7 @@ fn run(allocator: std.mem.Allocator, suite_path: []const u8, opts: *const TestOp logVerbose("\tSetting export module name to {s}\n", .{c.import_name}); - var module_imports: bytebox.ModuleImportPackage = try (module.inst.?).exports(c.import_name); + const module_imports: bytebox.ModuleImportPackage = try (module.inst.?).exports(c.import_name); try imports.append(module_imports); continue; 
}, @@ -852,10 +852,10 @@ fn run(allocator: std.mem.Allocator, suite_path: []const u8, opts: *const TestOp } if (module.inst == null) { - var module_path = try std.fs.path.join(allocator, &[_][]const u8{ suite_dir, module_filename }); + const module_path = try std.fs.path.join(allocator, &[_][]const u8{ suite_dir, module_filename }); var cwd = std.fs.cwd(); - var module_data = try cwd.readFileAlloc(allocator, module_path, 1024 * 1024 * 8); + const module_data = try cwd.readFileAlloc(allocator, module_path, 1024 * 1024 * 8); var decode_expected_error: ?[]const u8 = null; switch (command.*) { @@ -1285,14 +1285,14 @@ pub fn main() !void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; var allocator: std.mem.Allocator = gpa.allocator(); - var args = try std.process.argsAlloc(allocator); + const args = try std.process.argsAlloc(allocator); defer std.process.argsFree(allocator, args); var opts = TestOpts{}; var args_index: u32 = 1; // skip program name while (args_index < args.len) : (args_index += 1) { - var arg = args[args_index]; + const arg = args[args_index]; if (strcmp("--help", arg) or strcmp("-h", arg) or strcmp("help", arg)) { const help_text = \\ @@ -1574,13 +1574,13 @@ pub fn main() !void { logVerbose("Regenerating wasm and json driver for suite {s}\n", .{suite}); // need to navigate back to repo root because the wast2json process will be running in a subdir - var suite_wast_path_relative = try std.fs.path.join(allocator, &[_][]const u8{ "../../../../", suite_wast_path }); + const suite_wast_path_relative = try std.fs.path.join(allocator, &[_][]const u8{ "../../../../", suite_wast_path }); defer allocator.free(suite_wast_path_relative); const suite_json_filename: []const u8 = try std.mem.join(allocator, "", &[_][]const u8{ suite, ".json" }); defer allocator.free(suite_json_filename); - var suite_wasm_folder: []const u8 = try std.fs.path.join(allocator, &[_][]const u8{ "test", "wasm", "wasm-generated", suite }); + const suite_wasm_folder: []const u8 = try std.fs.path.join(allocator, &[_][]const u8{ "test", "wasm", "wasm-generated", suite }); defer allocator.free(suite_wasm_folder); std.fs.cwd().makeDir("test/wasm/wasm-generated") catch |e| { @@ -1607,7 +1607,7 @@ pub fn main() !void { print("Running test suite: {s}\n", .{suite}); } - var success: bool = try run(allocator, suite_path, &opts); + const success: bool = try run(allocator, suite_path, &opts); did_all_succeed = did_all_succeed and success; if (success and opts.log_suite and !g_verbose_logging) { @@ -1617,6 +1617,6 @@ pub fn main() !void { } if (did_all_succeed == false) { - std.os.exit(1); + std.process.exit(1); } }
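
Editorial note (not part of the patch): the hunks above repeatedly apply the same handful of Zig 0.11 -> 0.12 stdlib migrations: `std.os.*` syscall wrappers move to `std.posix.*`, integer flag bitmasks become packed structs, `writeIntLittle`/`readIntLittle` gain an explicit endianness argument, `std.mem.copy` becomes the `@memcpy` builtin, and never-mutated `var` locals become `const`. The sketches below are illustrations of those patterns under the assumption of a Zig 0.12 toolchain; names such as `openForReadWrite` are hypothetical, and the exact field set of `std.posix.O` depends on the target OS.

// Zig 0.12: open() flags are a packed struct instead of a u32 bitmask (POSIX targets only).
const std = @import("std");

fn openForReadWrite(path: []const u8) !std.posix.fd_t {
    const flags: std.posix.O = .{
        .ACCMODE = .RDWR, // replaces `flags |= std.os.O.RDWR`
        .CREAT = true, // replaces `flags |= std.os.O.CREAT`
    };
    const mode: std.posix.mode_t = 0o644;
    return std.posix.open(path, flags, mode);
}

The readdir serialization above switches from `writeIntLittle(T, v)` to `writeInt(T, v, .little)`, and the iovec parsing makes the matching change on the reader side. Continuing the sketch above, a minimal round trip with a fixed buffer stream looks roughly like this:

test "explicit-endianness integer round trip" {
    var buffer: [16]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buffer);

    // Old: writer.writeIntLittle(u64, x); New: writer.writeInt(u64, x, .little)
    try fbs.writer().writeInt(u64, 0x1122334455667788, .little);
    try fbs.writer().writeInt(u32, 42, .little);

    fbs.reset(); // rewind and read back what was written
    try std.testing.expectEqual(@as(u64, 0x1122334455667788), try fbs.reader().readInt(u64, .little));
    try std.testing.expectEqual(@as(u32, 42), try fbs.reader().readInt(u32, .little));
}

One behavioral difference worth double-checking in the `std.mem.copy` -> `@memcpy` conversions (for example `fd_wasi_prestat_dir_name` and `replaceRange` in stable_array.zig): `@memcpy` requires the destination and source lengths to match exactly, whereas `std.mem.copy` only required the destination to be at least as long. Where the destination may legitimately be longer than the source, the destination should be sliced first, roughly:

// Hypothetical fix-up sketch, only needed where dest.len can exceed source.len:
@memcpy(path_dest[0..path_source.len], path_source);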