Move log consolidation into run_tests #23

Merged 1 commit on Apr 15, 2024

README.md (20 changes: 2 additions & 18 deletions)
@@ -44,7 +44,7 @@ Optionally, instruction context messages may also be left in the original Protob
To run the test suite, use the following command:

```sh
solana-test-suite run-tests --input-dir <input_dir> --solana-target <solana_target.so> --target <firedancer> [--target <target_2> ...] --output-dir <log_output_dir> --num-processes <num_processes> [--randomize-output-buffer]
solana-test-suite run-tests --input-dir <input_dir> --solana-target <solana_target.so> --target <firedancer> [--target <target_2> ...] --output-dir <log_output_dir> --num-processes <num_processes> --chunk-size <chunk_size> [--randomize-output-buffer]
```

| Argument | Description |
@@ -55,6 +55,7 @@ solana-test-suite run-tests --input-dir <input_dir> --solana-target <solana_targ
| `--output-dir` | Log output directory for test results |
| `--num-processes` | Number of processes to use |
| `--randomize-output-buffer`| Randomizes bytes in output buffer before shared library execution |
| `--chunk-size` | Number of test results per log file |

**Note:** Each `.so` target file name should be unique.
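
As an illustrative invocation (the corpus directory and process count below are placeholders, not values mandated by this repository), testing the Solana target against Firedancer might look like:

```sh
solana-test-suite run-tests \
    --input-dir corpus/instr \
    --solana-target impl/lib/libsolfuzz_agave_v2.0.so \
    --target impl/lib/libsolfuzz_firedancer.so \
    --output-dir test_results \
    --num-processes 4 \
    --chunk-size 10000
```

Logs for each target are written under `--output-dir`, split into files of `--chunk-size` results each.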

@@ -90,23 +91,6 @@ solana-test-suite debug-instruction --input-dir <input_dir> --target <shared_lib
Recommended usage is to open two terminals side by side and run the above command in both, one with `--executable-path` set to the Solana library (`impl/lib/libsolfuzz_agave_v2.0.so`) and the other to the Firedancer library (`impl/lib/libsolfuzz_firedancer.so`), then step through the debugger for each corresponding test case.
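
A rough sketch of that side-by-side setup (the full `debug-instruction` option list is elided in this diff view, so the flags below are assumptions based on the surrounding text):

```sh
# Terminal 1: step through the Solana implementation
solana-test-suite debug-instruction --input-dir <input_dir> \
    --executable-path impl/lib/libsolfuzz_agave_v2.0.so

# Terminal 2: step through the Firedancer implementation
solana-test-suite debug-instruction --input-dir <input_dir> \
    --executable-path impl/lib/libsolfuzz_firedancer.so
```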


### Analysis

After running tests, it may be helpful to squash log files together to compare multiple outputs side-by-side via `vimdiff`. To do so:

```sh
solana-test-suite consolidate-logs --input-dir <input_dir> --output-dir <output_dir> --chunk-size <chunk_size>
```

| Argument | Description |
|-----------------|-----------------------------------------------------------------------------------------------------|
| `--input-dir` | Input directory containing test results |
| `--output-dir` | Output directory for consolidated logs |
| `--chunk-size` | Number of test results per file |

By default, log files are arranged lexicographically based on the test case file name. Each chunked log file is named based on the first test case in that file.
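
For example, with `--chunk-size 2` and three hypothetical test cases named `0001_transfer`, `0002_transfer`, and `0003_create`, a target's consolidated directory would contain roughly:

```sh
ls <output_dir>/libsolfuzz_firedancer/
# 0001_transfer.txt   <- logs for 0001_transfer and 0002_transfer
# 0003_create.txt     <- logs for 0003_create
```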


### Validation

Used to detect potential memory corruption issues / inconsistent outputs. The program will run each supplied library `num-iteration` times on the entire test suite. Use the following:
src/test_suite/multiprocessing_utils.py (7 changes: 0 additions & 7 deletions)
@@ -242,13 +242,6 @@ def build_test_results(file_stem: Path, results: dict[str, str | None]) -> int:

        protobuf_structures[target] = protobuf_struct

        # Write output Protobuf struct to logs
        with open(globals.output_dir / target.stem / (file_stem + ".txt"), "w") as f:
            if protobuf_struct:
                f.write(text_format.MessageToString(protobuf_struct))
            else:
                f.write(str(None))

    test_case_passed = all(
        protobuf_structures[globals.solana_shared_library] == result
        for result in protobuf_structures.values()
src/test_suite/test_suite.py (89 changes: 37 additions & 52 deletions)
@@ -85,58 +85,6 @@ def debug_instruction(
    debug_host(shared_library, instruction_context, gdb=debugger)


@app.command()
def consolidate_logs(
    input_dir: Path = typer.Option(
        Path("test_results"),
        "--input-dir",
        "-i",
        help="Input directory containing test results",
    ),
    output_dir: Path = typer.Option(
        Path("consolidated_logs"),
        "--output-dir",
        "-o",
        help="Output directory for consolidated logs",
    ),
    chunk_size: int = typer.Option(
        10000, "--chunk-size", "-c", help="Number of test results per file"
    ),
):
    # Create the output directory, if necessary
    if output_dir.exists():
        shutil.rmtree(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Iterate through each library
    for lib_dir in filter(lambda x: x.is_dir(), input_dir.iterdir()):
        # Make the lib output directory
        lib = lib_dir.stem
        (output_dir / lib).mkdir(parents=True, exist_ok=True)

        # Grab all log files
        log_files = sorted(list(lib_dir.glob("*.txt")))

        current_log_file = None

        for i in range(len(log_files)):
            # Open a new log file every chunk_size test cases
            if i % chunk_size == 0:
                if current_log_file:
                    current_log_file.close()
                current_log_file = open(
                    output_dir / lib / f"{log_files[i].stem}.txt", "w"
                )

            # Write test case name + log contents + separators
            current_log_file.write(log_files[i].stem + ":\n")
            current_log_file.write(log_files[i].read_text())
            current_log_file.write("\n" + "-" * LOG_FILE_SEPARATOR_LENGTH + "\n")

        if current_log_file:
            current_log_file.close()


@app.command()
def check_consistency(
    input_dir: Path = typer.Option(
@@ -289,6 +237,9 @@ def run_tests(
"-r",
help="Randomizes bytes in output buffer before shared library execution",
),
log_chunk_size: int = typer.Option(
10000, "--chunk-size", "-c", help="Number of test results per file"
),
):
# Add Solana library to shared libraries
shared_libraries = [solana_shared_library] + shared_libraries
@@ -336,6 +287,40 @@
    failed = counts[-1]
    skipped = counts[0]

print("Logging results...")
counter = 0
target_log_files = {target: None for target in shared_libraries}
for file, result in execution_results:
if result is None:
continue

for target, serialized_instruction_effects in result.items():
if counter % log_chunk_size == 0:
if target_log_files[target]:
target_log_files[target].close()
target_log_files[target] = open(
globals.output_dir / target.stem / (file + ".txt"), "w"
)

target_log_files[target].write(file + ":\n")

if serialized_instruction_effects is None:
target_log_files[target].write(str(None))
else:
instruction_effects = pb.InstrEffects()
instruction_effects.ParseFromString(serialized_instruction_effects)
target_log_files[target].write(
text_format.MessageToString(instruction_effects)
)
target_log_files[target].write(
"\n" + "-" * LOG_FILE_SEPARATOR_LENGTH + "\n"
)
counter += 1

for target in shared_libraries:
if target_log_files[target]:
target_log_files[target].close()

print("Cleaning up...")
for target in shared_libraries:
globals.target_libraries[target].sol_compat_fini()