Commit
Added benchmarking script for Metatheory interpreter
ReubenJ committed Oct 7, 2024
1 parent e86c8c9 commit 9d88ceb
Showing 4 changed files with 56 additions and 2 deletions.
1 change: 1 addition & 0 deletions benchmark/.gitignore
@@ -0,0 +1 @@
output/
1 change: 1 addition & 0 deletions benchmark/Project.toml
@@ -1,3 +1,4 @@
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
HerbInterpret = "5bbddadd-02c5-4713-84b8-97364418cca7"
Metatheory = "e9d8d322-4543-424a-9be4-0cc815abe26c"
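
The following is not part of the commit: a minimal sketch of how the benchmark environment above might be instantiated and the script run from the repository root. The activate/instantiate calls may be redundant with the script's own setup shown further down.

using Pkg
Pkg.activate("benchmark")                         # the Project.toml shown above
Pkg.instantiate()                                 # fetches BenchmarkTools, HerbInterpret, Metatheory
include(joinpath("benchmark", "benchmarks.jl"))   # runs the full benchmark suite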
55 changes: 53 additions & 2 deletions benchmark/benchmarks.jl
@@ -4,6 +4,8 @@ Pkg.instantiate()

using BenchmarkTools
using HerbInterpret
@warn "About to load Metatheory. Currently, the 3.0 development branch claims to be much faster. Consider installing it."
using Metatheory

const SUITE = BenchmarkGroup()

@@ -31,5 +33,54 @@ tab = Dict{Symbol, Any}(
:example_function => example_function
)

SUITE["interpret"]["compiled"] = @benchmarkable example_function(15)
SUITE["interpret"]["interpreted"] = @benchmarkable interpret(tab, :(example_function(input1)))
Mem = Dict{Symbol,Union{Bool,Int}}
read_mem = @theory v σ begin
(v::Symbol, σ::Mem) => σ[v]
end

σ₁ = Mem(:x => 2)
program = :(x, $σ₁)
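# Illustrative note (not part of the committed file): the single read_mem rule
# above matches a `(variable, memory)` pair where the variable is a Symbol and
# the memory is a `Mem` dictionary, and replaces the pair with the stored
# value, so rewriting `program` with `read_mem` should look up :x in σ₁ and
# yield 2.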

benchmark_outputs = joinpath(@__DIR__, "output")

if !isdir(benchmark_outputs) mkdir(benchmark_outputs) end

@info "Benchmarking Compiled Version"
SUITE["interpret"] = @benchmarkable example_function(15)
tune!(SUITE)
results = run(SUITE; verbose=true)
BenchmarkTools.save(joinpath(benchmark_outputs, "bench-compiled.json"), results)

println("Sleeping for 5s to relax...")
sleep(5)

@info "Benchmarking with interpreter from `HerbInterpret`"
SUITE["interpret"] = @benchmarkable interpret(tab, :(example_function(input1)))
tune!(SUITE)
results = run(SUITE; verbose=true)
BenchmarkTools.save(joinpath(benchmark_outputs, "bench-interpret.json"), results)

println("Sleeping for 5s to relax...")
sleep(5)

@info "Benchmarking With Metatheory"
SUITE["interpret"] = @benchmarkable rewrite(program, read_mem)
tune!(SUITE)
results = run(SUITE; verbose=true)
BenchmarkTools.save(joinpath(benchmark_outputs, "bench-metatheory.json"), results)

results = Dict([basename(name)[7:end-5] => BenchmarkTools.load(name) for name in readdir(benchmark_outputs, join=true)])

interpret_vs_compiled = judge(
mean(results["interpret"][1]),
mean(results["compiled"][1]),
)["interpret"]

@show interpret_vs_compiled

metatheory_vs_compiled = judge(
mean(results["metatheory"][1]),
mean(results["compiled"][1]),
)["interpret"]

@show metatheory_vs_compiled
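
After the script finishes, the three JSON files written to benchmark/output/ can be reloaded later for further comparison. A minimal sketch, not part of the commit, assuming it is run from the repository root:

using BenchmarkTools

# BenchmarkTools.load returns a vector of saved objects; the first element is
# the BenchmarkGroup that `run(SUITE)` produced for the Metatheory benchmark.
metatheory_group = BenchmarkTools.load(joinpath("benchmark", "output", "bench-metatheory.json"))[1]
display(metatheory_group["interpret"])  # the recorded Trial under the "interpret" key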
1 change: 1 addition & 0 deletions benchresults.json

Large diffs are not rendered by default.
