diff --git a/.github/actions-rs/grcov.yml b/.github/actions-rs/grcov.yml
new file mode 100644
index 0000000..5cfdff8
--- /dev/null
+++ b/.github/actions-rs/grcov.yml
@@ -0,0 +1,3 @@
+branch: false
+ignore-not-existing: true
+llvm: true
\ No newline at end of file
diff --git a/.github/workflows/cc.yml b/.github/workflows/cc.yml
new file mode 100644
index 0000000..89751ac
--- /dev/null
+++ b/.github/workflows/cc.yml
@@ -0,0 +1,52 @@
+name: code-coverage
+
+on:
+  push:
+    branches: [ "main"]
+  pull_request:
+    branches: [ "main" ]
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  unittest-cov:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: recursive
+
+    - name: Cargo cache
+      uses: actions/cache@v3
+      with:
+        path: |
+          ~/.cargo/bin/
+          ~/.cargo/registry/index/
+          ~/.cargo/registry/cache/
+          ~/.cargo/git/db/
+        key: ${{ runner.os }}-cargo-nightly-test-${{ hashFiles('**/Cargo.lock') }}
+
+    - name: Install nightly toolchain
+      uses: actions-rs/toolchain@v1
+      with:
+        toolchain: nightly
+        override: true
+
+    - name: Run unittest
+      run: cargo +nightly test --all-features --no-fail-fast
+      env:
+        CARGO_INCREMENTAL: '0'
+        RUSTFLAGS: '-Zprofile -Ccodegen-units=1 -Copt-level=0 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests'
+        RUSTDOCFLAGS: '-Zprofile -Ccodegen-units=1 -Copt-level=0 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests'
+
+    - id: coverage
+      uses: actions-rs/grcov@v0.1
+
+    - name: Upload coverage to Codecov
+      uses: codecov/codecov-action@v3
+      with:
+        file: ${{ steps.coverage.outputs.report }}
+        fail_ci_if_error: true
\ No newline at end of file
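Note on the coverage job above: `CARGO_INCREMENTAL: '0'` plus the `-Zprofile` flags in `RUSTFLAGS`/`RUSTDOCFLAGS` make the nightly `cargo test` run emit gcov-style profiling data; the `actions-rs/grcov` step then assembles a report according to `.github/actions-rs/grcov.yml` (branch coverage disabled, missing source files ignored, LLVM mode on) and exposes its path as the step output `report`, which the Codecov step uploads. A minimal sketch of those last two steps with the config path written out and the upload labelled is shown below; the `config:` input and the `flags`/`name` fields are illustrative assumptions, not part of this change.

```yaml
# Sketch only: the same grcov + Codecov steps as in cc.yml, with assumed extras.
    - id: coverage
      uses: actions-rs/grcov@v0.1
      with:
        config: .github/actions-rs/grcov.yml   # branch: false, ignore-not-existing: true, llvm: true

    - name: Upload coverage to Codecov
      uses: codecov/codecov-action@v3
      with:
        file: ${{ steps.coverage.outputs.report }}
        flags: unittests     # assumed label to keep unit-test coverage separate
        name: unittest-cov   # assumed report name
        fail_ci_if_error: true
```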
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
new file mode 100644
index 0000000..792c2c1
--- /dev/null
+++ b/.github/workflows/rust.yml
@@ -0,0 +1,100 @@
+# Based on https://github.com/actions-rs/meta/blob/master/recipes/quickstart.md
+#
+# While our "example" application has the platform-specific code,
+# for simplicity we are compiling and testing everything on the Ubuntu environment only.
+# For multi-OS testing see the `cross.yml` workflow.
+
+name: rust
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  check:
+    name: check
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v3
+        with:
+          submodules: recursive
+
+      - name: Install stable toolchain
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+
+      - name: Run cargo check
+        uses: actions-rs/cargo@v1
+        with:
+          command: check
+
+  test:
+    name: test
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v3
+        with:
+          submodules: recursive
+
+      - name: Cargo cache
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/bin/
+            ~/.cargo/registry/index/
+            ~/.cargo/registry/cache/
+            ~/.cargo/git/db/
+            target/
+          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+
+      - name: Install stable toolchain
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+
+      - name: Run cargo test
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --release
+
+  lints:
+    name: lints
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v3
+        with:
+          submodules: recursive
+
+      - name: Install stable toolchain
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: stable
+          override: true
+          components: rustfmt, clippy
+
+      - name: Run cargo fmt
+        uses: actions-rs/cargo@v1
+        with:
+          command: fmt
+          args: --all -- --check
+
+      - name: Run cargo clippy
+        uses: actions-rs/cargo@v1
+        with:
+          command: clippy
+          args: -- -D warnings
\ No newline at end of file
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 0000000..d63578b
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,50 @@
+name: functional-test
+
+on:
+  push:
+    branches: [ "main" ]
+  pull_request:
+    branches: [ "main" ]
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  test:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+      with:
+        submodules: recursive
+
+    - name: Cargo cache
+      uses: actions/cache@v3
+      with:
+        path: |
+          ~/.cargo/bin/
+          ~/.cargo/registry/index/
+          ~/.cargo/registry/cache/
+          ~/.cargo/git/db/
+          target/
+        key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+
+    - name: Build
+      run: cargo build --release --verbose
+
+    - name: Set up Python 3.9
+      uses: actions/setup-python@v4
+      with:
+        python-version: '3.9'
+        cache: 'pip'
+
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        if [ -f tests/requirements.txt ]; then pip install -r tests/requirements.txt; fi
+
+    - name: Run tests
+      run: |
+        cd tests
+        python test_all.py
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..00a1d50
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,20 @@
+/target/
+.DS_Store
+/.idea
+/db
+run/db
+run/kv.DB
+run/config.toml
+run/run.sh
+dummy*
+tags*
+test.py
+test.txt
+build.sh
+ccov
+tests/**/__pycache__
+*.pyc
+tests/utility/geth
+tests/utility/conflux
+tests/tmp/**
+zerog-storage-rust-backup
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..45961fc
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "zerog-storage-rust"]
+	path = zerog-storage-rust
+	url = git@github.com:zero-gravity-labs/zerog-storage-rust.git
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..c77a494
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,7428 @@
+# This file is automatically @generated by
Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + +[[package]] +name = "aes" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures 0.2.12", +] + +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] + +[[package]] +name = "ahash" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +dependencies = [ + "getrandom 0.2.12", + "once_cell", + "version_check", +] + +[[package]] +name = "ahash" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704" +dependencies = [ + "backtrace", +] + +[[package]] +name = "append_merkle" +version = "0.1.0" +dependencies = [ + "anyhow", + "eth2_ssz", + "eth2_ssz_derive", + "ethereum-types 0.14.1", + "lazy_static", + "serde", + "tiny-keccak", + "tracing", +] + +[[package]] +name = "arrayref" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" + +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + +[[package]] +name = "ascii-canvas" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" +dependencies = [ + "term", +] + +[[package]] +name = "asn1_der" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" +dependencies = [ + "concurrent-queue", + "event-listener 4.0.3", + "event-listener-strategy", + "futures-core", + "pin-project-lite 0.2.13", +] + +[[package]] +name = "async-executor" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +dependencies = [ + "async-lock 3.2.0", + "async-task", + "concurrent-queue", + "fastrand 2.0.1", + "futures-lite 2.2.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.1.1", + "async-executor", + "async-io 2.2.2", + "async-lock 3.2.0", + "blocking", + "futures-lite 2.2.0", + "once_cell", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" +dependencies = [ + "async-lock 3.2.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.2.0", + "parking", + "polling 3.3.1", + "rustix 0.38.28", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" +dependencies = [ + "event-listener 4.0.3", + "event-listener-strategy", + "pin-project-lite 0.2.13", +] + +[[package]] +name = "async-process" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "ea6438ba0a08d81529c69b36700fa2f95837bfe3e776ab39cde9c14d9149da88" +dependencies = [ + "async-io 1.13.0", + "async-lock 2.8.0", + "async-signal", + "blocking", + "cfg-if", + "event-listener 3.1.0", + "futures-lite 1.13.0", + "rustix 0.38.28", + "windows-sys 0.48.0", +] + +[[package]] +name = "async-signal" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" +dependencies = [ + "async-io 2.2.2", + "async-lock 2.8.0", + "atomic-waker", + "cfg-if", + "futures-core", + "futures-io", + "rustix 0.38.28", + "signal-hook-registry", + "slab", + "windows-sys 0.48.0", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "async-process", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite 0.2.13", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-std-resolver" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f2f8a4a203be3325981310ab243a28e6e4ea55b6519bffce05d41ab60e09ad8" +dependencies = [ + "async-std", + "async-trait", + "futures-io", + "futures-util", + "pin-utils", + "socket2 0.4.10", + "trust-dns-resolver", +] + +[[package]] +name = "async-task" +version = "4.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" + +[[package]] +name = "async-trait" +version = "0.1.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version", +] + +[[package]] +name = "asynchronous-codec" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite 0.2.13", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", +] + +[[package]] +name = "auto_impl" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c79fed4cdb43e993fcdadc7e58a09fd0e3e649c4436fa11da71c9f1f3ee7feb9" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bech32" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" + +[[package]] +name = "beef" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" +dependencies = [ + "serde", +] + +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + +[[package]] +name = "bindgen" +version = "0.65.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +dependencies = [ + "bitflags 1.3.2", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.48", +] + +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + +[[package]] +name = "bitmaps" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703642b98a00b3b90513279a8ede3fcfa479c126c5fb46e78f3051522f021403" + +[[package]] +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty 1.1.0", + "radium 0.6.2", + "tap", + "wyz 0.2.0", +] + +[[package]] +name = "bitvec" 
+version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty 2.0.0", + "radium 0.7.0", + "tap", + "wyz 0.5.1", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blocking" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +dependencies = [ + "async-channel 2.1.1", + "async-lock 3.2.0", + "async-task", + "fastrand 2.0.1", + "futures-io", + "futures-lite 2.2.0", + "piper", + "tracing", +] + +[[package]] +name = "bs58" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + +[[package]] +name = "bs58" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +dependencies = [ + "sha2 0.10.8", + "tinyvec", +] + +[[package]] +name = "bstr" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "bumpalo" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" + +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +dependencies = [ + "serde", +] + +[[package]] +name = "bzip2" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" +dependencies = [ + "bzip2-sys", + "libc", +] + +[[package]] +name = "bzip2-sys" +version = "0.1.11+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = "camino" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +dependencies = [ 
+ "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "jobserver", + "libc", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures 0.2.12", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher", + "poly1305", + "zeroize", +] + +[[package]] +name = "chrono" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-targets 0.48.5", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", + "zeroize", +] + +[[package]] +name = "clang-sys" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +dependencies = [ + "atty", + "bitflags 1.3.2", + "clap_lex", + "indexmap 1.9.3", + "once_cell", + "strsim", + "termcolor", + "textwrap", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "cmake" +version = "0.1.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +dependencies = [ + "cc", +] + +[[package]] +name = "coins-bip32" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "3b6be4a5df2098cd811f3194f64ddb96c267606bffd9689ac7b0160097b01ad3" +dependencies = [ + "bs58 0.5.0", + "coins-core", + "digest 0.10.7", + "hmac 0.12.1", + "k256", + "serde", + "sha2 0.10.8", + "thiserror", +] + +[[package]] +name = "coins-bip39" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8fba409ce3dc04f7d804074039eb68b960b0829161f8e06c95fea3f122528" +dependencies = [ + "bitvec 1.0.1", + "coins-bip32", + "hmac 0.12.1", + "once_cell", + "pbkdf2 0.12.2", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror", +] + +[[package]] +name = "coins-core" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" +dependencies = [ + "base64 0.21.6", + "bech32", + "bs58 0.5.0", + "digest 0.10.7", + "generic-array", + "hex", + "ripemd", + "serde", + "serde_derive", + "sha2 0.10.8", + "sha3", + "thiserror", +] + +[[package]] +name = "concurrent-queue" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "const-hex" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5104de16b218eddf8e34ffe2f86f74bfa4e61e95a1b89732fccf6325efd0557" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.12", + "hex", + "proptest", + "serde", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const_format" +version = "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "contract-interface" +version = "0.1.0" +dependencies = [ + "ethers", + "serde_json", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + +[[package]] +name = "cpufeatures" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" +dependencies = [ + "libc", +] + +[[package]] +name = "cpufeatures" +version = "0.2.12" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "typenum", +] + +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array", + "subtle", +] + +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher", +] + +[[package]] +name = "ctrlc" +version = "3.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b467862cc8610ca6fc9a1532d7777cee0804e678ab45410897b9396495994a0b" +dependencies = [ + "nix 0.27.1", + "windows-sys 0.52.0", +] + +[[package]] +name = "cuckoofilter" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" +dependencies = [ + "byteorder", + "fnv", + "rand 0.7.3", +] + +[[package]] +name = "curve25519-dalek" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "4.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.12", + "curve25519-dalek-derive", + "fiat-crypto", + "platforms", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "darling" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" +dependencies = [ + "darling_core", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "data-encoding" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" + +[[package]] +name = "der" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer 0.10.4", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", 
+] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "dns-parser" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" +dependencies = [ + "byteorder", + "quick-error", +] + +[[package]] +name = "dtoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" + +[[package]] +name = "dunce" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "signature 2.2.0", + "spki", +] + +[[package]] +name = "ed25519" +version = "1.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" +dependencies = [ + "signature 1.6.4", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek 3.2.0", + "ed25519", + "rand 0.7.3", + "serde", + "sha2 0.9.9", + "zeroize", +] + +[[package]] +name = "either" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "ena" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c533630cf40e9caa44bd91aadc88a75d75a4c3a12b4cfde353cbed41daa1e1f1" +dependencies = [ + "log", +] + +[[package]] +name = "encoding_rs" +version = "0.8.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "enr" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" +dependencies = [ + "base64 0.21.6", + "bytes", + "hex", + "k256", + "log", + "rand 0.8.5", + "rlp", + "serde", + "sha3", + "zeroize", +] + +[[package]] +name = "enum-as-inner" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "backtrace", + "version_check", +] + +[[package]] +name = "eth-keystore" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" +dependencies = [ + "aes", + "ctr", + "digest 0.10.7", + "hex", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "rand 0.8.5", + "scrypt", + "serde", + "serde_json", + "sha2 0.10.8", + "sha3", + "thiserror", + "uuid", +] + +[[package]] +name = "eth2_hashing" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b67737df7e3769e823d9d583eb5d60bcc4b2ef97ca674d1964ef287a02f8517" +dependencies = [ + "cpufeatures 0.1.5", + "lazy_static", + "ring 0.16.20", + "sha2 0.9.9", +] + +[[package]] +name = "eth2_serde_utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "477fffc25490dfc866288273f96344c6879676a1337187fc39245cd422e10825" +dependencies = [ + "hex", + "serde", + "serde_derive", +] + +[[package]] +name = "eth2_ssz" +version = "0.4.0" +dependencies = [ + "ethereum-types 0.14.1", + "smallvec", +] + +[[package]] +name = "eth2_ssz_derive" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "635b86d2c941bb71e7419a571e1763d65c93e51a1bafc400352e3bef6ff59fc9" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "eth2_ssz_types" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9423ac7fb37037f828a32b724cdfa65ea62290055811731402a90fb8a5bcbb1" +dependencies = [ + "eth2_serde_utils", + "eth2_ssz", + "serde", + "serde_derive", + "tree_hash", + "typenum", +] + +[[package]] +name = "ethabi" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" +dependencies = [ + "ethereum-types 0.14.1", + "hex", + "once_cell", + "regex", + "serde", + "serde_json", + "sha3", + "thiserror", + "uint", +] + +[[package]] +name = "ethbloom" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8" +dependencies = [ + "crunchy", + "fixed-hash 0.7.0", + "impl-rlp", + "impl-serde 0.3.2", + "tiny-keccak", +] + +[[package]] +name = "ethbloom" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" +dependencies = [ + "crunchy", + "fixed-hash 0.8.0", + "impl-codec 0.6.0", + "impl-rlp", + 
"impl-serde 0.4.0", + "scale-info", + "tiny-keccak", +] + +[[package]] +name = "ethereum-types" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" +dependencies = [ + "ethbloom 0.11.1", + "fixed-hash 0.7.0", + "impl-rlp", + "impl-serde 0.3.2", + "primitive-types 0.10.1", + "uint", +] + +[[package]] +name = "ethereum-types" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" +dependencies = [ + "ethbloom 0.13.0", + "fixed-hash 0.8.0", + "impl-codec 0.6.0", + "impl-rlp", + "impl-serde 0.4.0", + "primitive-types 0.12.2", + "scale-info", + "uint", +] + +[[package]] +name = "ethers" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5344eea9b20effb5efeaad29418215c4d27017639fd1f908260f59cbbd226e" +dependencies = [ + "ethers-addressbook", + "ethers-contract", + "ethers-core", + "ethers-etherscan", + "ethers-middleware", + "ethers-providers", + "ethers-signers", + "ethers-solc", +] + +[[package]] +name = "ethers-addressbook" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c405f24ea3a517899ba7985385c43dc4a7eb1209af3b1e0a1a32d7dcc7f8d09" +dependencies = [ + "ethers-core", + "once_cell", + "serde", + "serde_json", +] + +[[package]] +name = "ethers-contract" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0111ead599d17a7bff6985fd5756f39ca7033edc79a31b23026a8d5d64fa95cd" +dependencies = [ + "const-hex", + "ethers-contract-abigen", + "ethers-contract-derive", + "ethers-core", + "ethers-providers", + "futures-util", + "once_cell", + "pin-project 1.1.3", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "ethers-contract-abigen" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51258120c6b47ea9d9bec0d90f9e8af71c977fbefbef8213c91bfed385fe45eb" +dependencies = [ + "Inflector", + "const-hex", + "dunce", + "ethers-core", + "ethers-etherscan", + "eyre", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "reqwest", + "serde", + "serde_json", + "syn 2.0.48", + "toml 0.8.8", + "walkdir", +] + +[[package]] +name = "ethers-contract-derive" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936e7a0f1197cee2b62dc89f63eff3201dbf87c283ff7e18d86d38f83b845483" +dependencies = [ + "Inflector", + "const-hex", + "ethers-contract-abigen", + "ethers-core", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.48", +] + +[[package]] +name = "ethers-core" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f03e0bdc216eeb9e355b90cf610ef6c5bb8aca631f97b5ae9980ce34ea7878d" +dependencies = [ + "arrayvec", + "bytes", + "cargo_metadata", + "chrono", + "const-hex", + "elliptic-curve", + "ethabi", + "generic-array", + "k256", + "num_enum", + "once_cell", + "open-fastrlp", + "rand 0.8.5", + "rlp", + "serde", + "serde_json", + "strum", + "syn 2.0.48", + "tempfile", + "thiserror", + "tiny-keccak", + "unicode-xid", +] + +[[package]] +name = "ethers-etherscan" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abbac2c890bdbe0f1b8e549a53b00e2c4c1de86bb077c1094d1f38cdf9381a56" +dependencies = [ + "chrono", + "ethers-core", + "reqwest", + "semver", + 
"serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "ethers-middleware" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "681ece6eb1d10f7cf4f873059a77c04ff1de4f35c63dd7bccde8f438374fcb93" +dependencies = [ + "async-trait", + "auto_impl", + "ethers-contract", + "ethers-core", + "ethers-etherscan", + "ethers-providers", + "ethers-signers", + "futures-channel", + "futures-locks", + "futures-util", + "instant", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-futures", + "url", +] + +[[package]] +name = "ethers-providers" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25d6c0c9455d93d4990c06e049abf9b30daf148cf461ee939c11d88907c60816" +dependencies = [ + "async-trait", + "auto_impl", + "base64 0.21.6", + "bytes", + "const-hex", + "enr", + "ethers-core", + "futures-channel", + "futures-core", + "futures-timer", + "futures-util", + "hashers", + "http", + "instant", + "jsonwebtoken", + "once_cell", + "pin-project 1.1.3", + "reqwest", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-tungstenite", + "tracing", + "tracing-futures", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "ws_stream_wasm", +] + +[[package]] +name = "ethers-signers" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cb1b714e227bbd2d8c53528adb580b203009728b17d0d0e4119353aa9bc5532" +dependencies = [ + "async-trait", + "coins-bip32", + "coins-bip39", + "const-hex", + "elliptic-curve", + "eth-keystore", + "ethers-core", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror", + "tracing", +] + +[[package]] +name = "ethers-solc" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a64f710586d147864cff66540a6d64518b9ff37d73ef827fee430538265b595f" +dependencies = [ + "cfg-if", + "const-hex", + "dirs", + "dunce", + "ethers-core", + "glob", + "home", + "md-5", + "num_cpus", + "once_cell", + "path-slash", + "rayon", + "regex", + "semver", + "serde", + "serde_json", + "solang-parser", + "svm-rs", + "thiserror", + "tiny-keccak", + "tokio", + "tracing", + "walkdir", + "yansi", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite 0.2.13", +] + +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite 0.2.13", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite 0.2.13", +] + +[[package]] +name = "exit-future" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" +dependencies = [ + "futures", 
+] + +[[package]] +name = "eyre" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6267a1fa6f59179ea4afc8e50fd8612a3cc60bc858f786ff877a4a8cb042799" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" + +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "flate2" +version = "1.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +dependencies = [ + "crc32fast", + "libz-sys", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + +[[package]] +name = "funty" +version = "1.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-executor" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite 0.2.13", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" +dependencies = [ + "fastrand 2.0.1", + "futures-core", + "futures-io", + "parking", + "pin-project-lite 0.2.13", +] + +[[package]] +name = "futures-locks" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" +dependencies = [ + "futures-channel", + "futures-task", +] + +[[package]] +name = "futures-macro" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "futures-rustls" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" +dependencies = [ + "futures-io", + "rustls 0.20.9", + "webpki", +] + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = 
"futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +dependencies = [ + "gloo-timers", + "send_wrapper 0.4.0", +] + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite 0.2.13", + "pin-utils", + "slab", +] + +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "ghash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +dependencies = [ + "opaque-debug", + "polyval", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "globset" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" +dependencies = [ + "aho-corasick", + "bstr", + "log", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", +] + +[[package]] +name = "gloo-net" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9902a044653b26b99f7e3693a42f171312d9be8b26b5697bd1e43ad1f8a35e10" +dependencies = [ + "futures-channel", + "futures-core", + "futures-sink", + "gloo-utils", + "js-sys", + "pin-project 1.1.3", + "serde", + "serde_json", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "gloo-utils" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"037fcb07216cb3a30f7292bd0176b050b7b9a052ba830ef7d5d65f6dc64ba58e" +dependencies = [ + "js-sys", + "serde", + "serde_json", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "h2" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap 2.1.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.7", +] + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +dependencies = [ + "ahash 0.8.7", + "allocator-api2", +] + +[[package]] +name = "hashers" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" +dependencies = [ + "fxhash", +] + +[[package]] +name = "hashlink" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +dependencies = [ + "hashbrown 0.14.3", +] + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" + +[[package]] +name = "hex_fmt" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac", + "digest 0.9.0", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array", + "hmac 0.8.1", +] + +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi", +] + +[[package]] +name = "http" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http", + "pin-project-lite 0.2.13", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "0.14.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite 0.2.13", + "socket2 0.5.5", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +dependencies = [ + "http", + "hyper", + "log", + "rustls 0.20.9", + "rustls-native-certs", + "tokio", + "tokio-rustls 0.23.4", + "webpki-roots 0.22.6", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http", + "hyper", + "rustls 0.21.10", + "tokio", + "tokio-rustls 0.24.1", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "if-addrs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "if-watch" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "015a7df1eb6dda30df37f34b63ada9b7b352984b0e84de2a20ed526345000791" +dependencies = [ + "async-io 1.13.0", + "core-foundation", + "fnv", + "futures", + "if-addrs", + "ipnet", + "log", + "rtnetlink", + "system-configuration", + "windows", +] + +[[package]] +name = "impl-codec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" +dependencies = [ + "parity-scale-codec 2.3.1", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec 3.6.9", +] + +[[package]] +name = "impl-rlp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" +dependencies = [ + "rlp", +] + +[[package]] +name = "impl-serde" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-serde" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +dependencies = [ + "equivalent", + "hashbrown 0.14.3", +] + +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi 0.3.3", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "ipconfig" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2 0.5.5", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + +[[package]] +name = "ipnet" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + +[[package]] +name = "is-terminal" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bad00257d07be169d870ab665980b06cdb366d792ad690bf2e76876dc503455" +dependencies = [ + "hermit-abi 0.3.3", + "rustix 0.38.28", + "windows-sys 0.52.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" + +[[package]] +name = "jobserver" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "jsonrpsee" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11e017217fcd18da0a25296d3693153dd19c8a6aadab330b3595285d075385d1" +dependencies = [ + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-http-client", + "jsonrpsee-http-server", + "jsonrpsee-proc-macros", + "jsonrpsee-types", + "jsonrpsee-wasm-client", + "jsonrpsee-ws-client", + "jsonrpsee-ws-server", + "tracing", +] + +[[package]] +name = "jsonrpsee-client-transport" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce395539a14d3ad4ec1256fde105abd36a2da25d578a291cabe98f45adfdb111" +dependencies = [ + "anyhow", + "futures-channel", + "futures-timer", + "futures-util", + 
"gloo-net", + "http", + "jsonrpsee-core", + "jsonrpsee-types", + "pin-project 1.1.3", + "rustls-native-certs", + "soketto", + "thiserror", + "tokio", + "tokio-rustls 0.23.4", + "tokio-util", + "tracing", + "webpki-roots 0.22.6", +] + +[[package]] +name = "jsonrpsee-core" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16efcd4477de857d4a2195a45769b2fe9ebb54f3ef5a4221d3b014a4fe33ec0b" +dependencies = [ + "anyhow", + "arrayvec", + "async-lock 2.8.0", + "async-trait", + "beef", + "futures-channel", + "futures-timer", + "futures-util", + "globset", + "hyper", + "jsonrpsee-types", + "lazy_static", + "parking_lot 0.12.1", + "rand 0.8.5", + "rustc-hash", + "serde", + "serde_json", + "soketto", + "thiserror", + "tokio", + "tracing", + "unicase", + "wasm-bindgen-futures", +] + +[[package]] +name = "jsonrpsee-http-client" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fc1d8c0e4f455c47df21f8a29f4bbbcb75eb71bfee919b92e92502b48358392" +dependencies = [ + "async-trait", + "hyper", + "hyper-rustls 0.23.2", + "jsonrpsee-core", + "jsonrpsee-types", + "rustc-hash", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "jsonrpsee-http-server" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdd69efeb3ce2cba767f126872f4eeb4624038a29098e75d77608b2b4345ad03" +dependencies = [ + "futures-channel", + "futures-util", + "hyper", + "jsonrpsee-core", + "jsonrpsee-types", + "serde", + "serde_json", + "tokio", + "tracing", +] + +[[package]] +name = "jsonrpsee-proc-macros" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "874cf3f6a027cebf36cae767feca9aa2e8a8f799880e49eb5540819fcbd8eada" +dependencies = [ + "proc-macro-crate 1.1.3", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bcf76cd316f5d3ad48138085af1f45e2c58c98e02f0779783dbb034d43f7c86" +dependencies = [ + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "jsonrpsee-wasm-client" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1fcb257e9b60de22ec3c151106ad1e089ab1c0691d25e3282f1cc7a0c7ba651" +dependencies = [ + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", +] + +[[package]] +name = "jsonrpsee-ws-client" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee043cb5dd0d51d3eb93432e998d5bae797691a7b10ec4a325e036bcdb48c48a" +dependencies = [ + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", +] + +[[package]] +name = "jsonrpsee-ws-server" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd2e4d266774a671f8def3794255b28eddd09b18d76e0b913fa439f34588c0a" +dependencies = [ + "futures-channel", + "futures-util", + "jsonrpsee-core", + "jsonrpsee-types", + "serde_json", + "soketto", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", +] + +[[package]] +name = "jsonwebtoken" +version = "8.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +dependencies = [ + "base64 0.21.6", + "pem", + "ring 0.16.20", + "serde", + "serde_json", + "simple_asn1", +] + 
+[[package]] +name = "k256" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "sha2 0.10.8", + "signature 2.2.0", +] + +[[package]] +name = "keccak" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures 0.2.12", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "kvdb" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7d770dcb02bf6835887c3a979b5107a04ff4bbde97a5f0928d27404a155add9" +dependencies = [ + "smallvec", +] + +[[package]] +name = "kvdb-memorydb" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7a85fe66f9ff9cd74e169fdd2c94c6e1e74c412c99a73b4df3200b5d3760b2" +dependencies = [ + "kvdb", + "parking_lot 0.12.1", +] + +[[package]] +name = "kvdb-rocksdb" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b644c70b92285f66bfc2032922a79000ea30af7bc2ab31902992a5dcb9b434f6" +dependencies = [ + "kvdb", + "num_cpus", + "parking_lot 0.12.1", + "regex", + "rocksdb", + "smallvec", +] + +[[package]] +name = "lalrpop" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da4081d44f4611b66c6dd725e6de3169f9f63905421e8626fcb86b6a898998b8" +dependencies = [ + "ascii-canvas", + "bit-set", + "diff", + "ena", + "is-terminal", + "itertools 0.10.5", + "lalrpop-util", + "petgraph", + "regex", + "regex-syntax 0.7.5", + "string_cache", + "term", + "tiny-keccak", + "unicode-xid", +] + +[[package]] +name = "lalrpop-util" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f35c735096c0293d313e8f2a641627472b83d01b937177fe76e5e2708d31e0d" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.152" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" + +[[package]] +name = "libloading" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libp2p" +version = "0.45.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "getrandom 0.2.12", + 
"instant", + "lazy_static", + "libp2p-autonat", + "libp2p-core 0.33.0", + "libp2p-deflate", + "libp2p-dns", + "libp2p-floodsub", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-kad", + "libp2p-mdns", + "libp2p-metrics", + "libp2p-mplex", + "libp2p-noise", + "libp2p-ping", + "libp2p-plaintext", + "libp2p-pnet", + "libp2p-relay", + "libp2p-rendezvous", + "libp2p-request-response", + "libp2p-swarm", + "libp2p-swarm-derive", + "libp2p-tcp", + "libp2p-uds", + "libp2p-wasm-ext", + "libp2p-websocket", + "libp2p-yamux", + "multiaddr", + "parking_lot 0.12.1", + "pin-project 1.1.3", + "rand 0.7.3", + "smallvec", +] + +[[package]] +name = "libp2p-autonat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d45945fd2f96c4b133c23d5c28a8b7fc8d7138e6dd8d5a8cd492dd384f888e3" +dependencies = [ + "async-trait", + "futures", + "futures-timer", + "instant", + "libp2p-core 0.33.0", + "libp2p-request-response", + "libp2p-swarm", + "log", + "prost 0.10.4", + "prost-build 0.10.4", + "rand 0.8.5", +] + +[[package]] +name = "libp2p-core" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db5b02602099fb75cb2d16f9ea860a320d6eb82ce41e95ab680912c454805cd5" +dependencies = [ + "asn1_der", + "bs58 0.4.0", + "ed25519-dalek", + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "lazy_static", + "log", + "multiaddr", + "multihash", + "multistream-select", + "parking_lot 0.12.1", + "pin-project 1.1.3", + "prost 0.9.0", + "prost-build 0.9.0", + "rand 0.8.5", + "ring 0.16.20", + "rw-stream-sink 0.2.1", + "sha2 0.10.8", + "smallvec", + "thiserror", + "unsigned-varint", + "void", + "zeroize", +] + +[[package]] +name = "libp2p-core" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979" +dependencies = [ + "asn1_der", + "bs58 0.4.0", + "ed25519-dalek", + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "lazy_static", + "libsecp256k1", + "log", + "multiaddr", + "multihash", + "multistream-select", + "parking_lot 0.12.1", + "pin-project 1.1.3", + "prost 0.10.4", + "prost-build 0.10.4", + "rand 0.8.5", + "ring 0.16.20", + "rw-stream-sink 0.3.0", + "sha2 0.10.8", + "smallvec", + "thiserror", + "unsigned-varint", + "void", + "zeroize", +] + +[[package]] +name = "libp2p-deflate" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86adefc55ea4ed8201149f052fb441210727481dff1fb0b8318460206a79f5fb" +dependencies = [ + "flate2", + "futures", + "libp2p-core 0.33.0", +] + +[[package]] +name = "libp2p-dns" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268" +dependencies = [ + "async-std-resolver", + "futures", + "libp2p-core 0.33.0", + "log", + "parking_lot 0.12.1", + "smallvec", + "trust-dns-resolver", +] + +[[package]] +name = "libp2p-floodsub" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a505d0c6f851cbf2919535150198e530825def8bd3757477f13dc3a57f46cbcc" +dependencies = [ + "cuckoofilter", + "fnv", + "futures", + "libp2p-core 0.33.0", + "libp2p-swarm", + "log", + "prost 0.10.4", + "prost-build 0.10.4", + "rand 0.7.3", + "smallvec", +] + +[[package]] +name = "libp2p-gossipsub" +version = "0.38.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"43e064ba4d7832e01c738626c6b274ae100baba05f5ffcc7b265c2a3ed398108" +dependencies = [ + "asynchronous-codec", + "base64 0.13.1", + "byteorder", + "bytes", + "fnv", + "futures", + "hex_fmt", + "instant", + "libp2p-core 0.33.0", + "libp2p-swarm", + "log", + "prometheus-client", + "prost 0.10.4", + "prost-build 0.10.4", + "rand 0.7.3", + "regex", + "sha2 0.10.8", + "smallvec", + "unsigned-varint", + "wasm-timer", +] + +[[package]] +name = "libp2p-identify" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984" +dependencies = [ + "asynchronous-codec", + "futures", + "futures-timer", + "libp2p-core 0.33.0", + "libp2p-swarm", + "log", + "lru", + "prost 0.10.4", + "prost-build 0.10.4", + "prost-codec", + "smallvec", + "thiserror", + "void", +] + +[[package]] +name = "libp2p-kad" +version = "0.37.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6b5d4de90fcd35feb65ea6223fd78f3b747a64ca4b65e0813fbe66a27d56aa" +dependencies = [ + "arrayvec", + "asynchronous-codec", + "bytes", + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "libp2p-core 0.33.0", + "libp2p-swarm", + "log", + "prost 0.10.4", + "prost-build 0.10.4", + "rand 0.7.3", + "sha2 0.10.8", + "smallvec", + "thiserror", + "uint", + "unsigned-varint", + "void", +] + +[[package]] +name = "libp2p-mdns" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4783f8cf00c7b6c1ff0f1870b4fcf50b042b45533d2e13b6fb464caf447a6951" +dependencies = [ + "async-io 1.13.0", + "data-encoding", + "dns-parser", + "futures", + "if-watch", + "lazy_static", + "libp2p-core 0.33.0", + "libp2p-swarm", + "log", + "rand 0.8.5", + "smallvec", + "socket2 0.4.10", + "void", +] + +[[package]] +name = "libp2p-metrics" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "564a7e5284d7d9b3140fdfc3cb6567bc32555e86a21de5604c2ec85da05cf384" +dependencies = [ + "libp2p-core 0.33.0", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-kad", + "libp2p-ping", + "libp2p-relay", + "libp2p-swarm", + "prometheus-client", +] + +[[package]] +name = "libp2p-mplex" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "libp2p-core 0.33.0", + "log", + "nohash-hasher", + "parking_lot 0.12.1", + "rand 0.7.3", + "smallvec", + "unsigned-varint", +] + +[[package]] +name = "libp2p-noise" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad" +dependencies = [ + "bytes", + "curve25519-dalek 3.2.0", + "futures", + "lazy_static", + "libp2p-core 0.33.0", + "log", + "prost 0.10.4", + "prost-build 0.10.4", + "rand 0.8.5", + "sha2 0.10.8", + "snow", + "static_assertions", + "x25519-dalek", + "zeroize", +] + +[[package]] +name = "libp2p-ping" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d41516c82fe8dd148ec925eead0c5ec08a0628f7913597e93e126e4dfb4e0787" +dependencies = [ + "futures", + "futures-timer", + "instant", + "libp2p-core 0.33.0", + "libp2p-swarm", + "log", + "rand 0.7.3", + "void", +] + +[[package]] +name = "libp2p-plaintext" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "libp2p-core 0.33.0", + "log", + "prost 0.10.4", + "prost-build 0.10.4", + "unsigned-varint", + "void", +] + +[[package]] +name = "libp2p-pnet" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6468f382568da936b4fa1cff273ce59b1debf873ff5f4ca412c3b91d0b37442c" +dependencies = [ + "futures", + "log", + "pin-project 1.1.3", + "rand 0.8.5", + "salsa20", + "sha3", +] + +[[package]] +name = "libp2p-relay" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624ead3406f64437a0d4567c31bd128a9a0b8226d5f16c074038f5d0fc32f650" +dependencies = [ + "asynchronous-codec", + "bytes", + "either", + "futures", + "futures-timer", + "instant", + "libp2p-core 0.33.0", + "libp2p-swarm", + "log", + "pin-project 1.1.3", + "prost 0.10.4", + "prost-build 0.10.4", + "prost-codec", + "rand 0.8.5", + "smallvec", + "static_assertions", + "thiserror", + "void", +] + +[[package]] +name = "libp2p-rendezvous" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59967ea2db2c7560f641aa58ac05982d42131863fcd3dd6dcf0dd1daf81c60c" +dependencies = [ + "asynchronous-codec", + "bimap", + "futures", + "futures-timer", + "instant", + "libp2p-core 0.33.0", + "libp2p-swarm", + "log", + "prost 0.10.4", + "prost-build 0.10.4", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror", + "unsigned-varint", + "void", +] + +[[package]] +name = "libp2p-request-response" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b02e0acb725e5a757d77c96b95298fd73a7394fe82ba7b8bbeea510719cbe441" +dependencies = [ + "async-trait", + "bytes", + "futures", + "instant", + "libp2p-core 0.33.0", + "libp2p-swarm", + "log", + "rand 0.7.3", + "smallvec", + "unsigned-varint", +] + +[[package]] +name = "libp2p-swarm" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "libp2p-core 0.33.0", + "log", + "pin-project 1.1.3", + "rand 0.7.3", + "smallvec", + "thiserror", + "void", +] + +[[package]] +name = "libp2p-swarm-derive" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f693c8c68213034d472cbb93a379c63f4f307d97c06f1c41e4985de481687a5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "libp2p-tcp" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892" +dependencies = [ + "async-io 1.13.0", + "futures", + "futures-timer", + "if-addrs", + "if-watch", + "ipnet", + "libc", + "libp2p-core 0.33.0", + "log", + "socket2 0.4.10", + "tokio", +] + +[[package]] +name = "libp2p-uds" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24bdab114f7f2701757d6541266e1131b429bbae382008f207f2114ee4222dcb" +dependencies = [ + "async-std", + "futures", + "libp2p-core 0.32.1", + "log", +] + +[[package]] +name = "libp2p-wasm-ext" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f066f2b8b1a1d64793f05da2256e6842ecd0293d6735ca2e9bda89831a1bdc06" +dependencies = [ + "futures", + "js-sys", + "libp2p-core 0.33.0", + 
"parity-send-wrapper", + "wasm-bindgen", + "wasm-bindgen-futures", +] + +[[package]] +name = "libp2p-websocket" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5" +dependencies = [ + "either", + "futures", + "futures-rustls", + "libp2p-core 0.33.0", + "log", + "parking_lot 0.12.1", + "quicksink", + "rw-stream-sink 0.3.0", + "soketto", + "url", + "webpki-roots 0.22.6", +] + +[[package]] +name = "libp2p-yamux" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" +dependencies = [ + "futures", + "libp2p-core 0.33.0", + "parking_lot 0.12.1", + "thiserror", + "yamux", +] + +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.4.1", + "libc", + "redox_syscall 0.4.1", +] + +[[package]] +name = "librocksdb-sys" +version = "0.11.0+8.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", + "tikv-jemalloc-sys", +] + +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64 0.13.1", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "295c17e837573c8c821dbaeb3cceb3d745ad082f7572191409e69cbc1b3fd050" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "lighthouse_metrics" +version = "0.2.0" +dependencies = [ + "lazy_static", + "prometheus", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + +[[package]] +name = "linux-raw-sys" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" + +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +dependencies = [ + "value-bag", +] + +[[package]] +name = "log_entry_sync" +version = "0.1.0" +dependencies = [ + "anyhow", + "append_merkle", + "async-trait", + "contract-interface", + "ethereum-types 0.14.1", + "ethers", + "futures", + "futures-core", + "futures-util", + "jsonrpsee", + "serde_json", + "shared_types", + "storage_with_stream", + "task_executor", + "thiserror", + "tokio", +] + +[[package]] +name = "lru" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" +dependencies = [ + "hashbrown 0.12.3", +] + +[[package]] +name = "lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "matches" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest 0.10.7", +] + +[[package]] +name = "memchr" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" + +[[package]] +name = "merkle_light" +version = "0.4.0" +dependencies = [ + "rayon", +] + +[[package]] +name = "merkle_tree" +version = "0.1.0" +dependencies = [ + "hex", + "merkle_light", + "tiny-keccak", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" 
+dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.48.0", +] + +[[package]] +name = "multiaddr" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c580bfdd8803cce319b047d239559a22f809094aaea4ac13902a1fdcfcd4261" +dependencies = [ + "arrayref", + "bs58 0.4.0", + "byteorder", + "data-encoding", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint", + "url", +] + +[[package]] +name = "multihash" +version = "0.16.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" +dependencies = [ + "core2", + "digest 0.10.7", + "multihash-derive", + "sha2 0.10.8", + "unsigned-varint", +] + +[[package]] +name = "multihash-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" +dependencies = [ + "proc-macro-crate 1.1.3", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + +[[package]] +name = "multistream-select" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "363a84be6453a70e63513660f4894ef815daf88e3356bffcda9ca27d810ce83b" +dependencies = [ + "bytes", + "futures", + "log", + "pin-project 1.1.3", + "smallvec", + "unsigned-varint", +] + +[[package]] +name = "netlink-packet-core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "345b8ab5bd4e71a2986663e88c56856699d060e78e152e6e9d7966fcd5491297" +dependencies = [ + "anyhow", + "byteorder", + "libc", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" +dependencies = [ + "anyhow", + "bitflags 1.3.2", + "byteorder", + "libc", + "netlink-packet-core", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-utils" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" +dependencies = [ + "anyhow", + "byteorder", + "paste", + "thiserror", +] + +[[package]] +name = "netlink-proto" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" +dependencies = [ + "bytes", + "futures", + "log", + "netlink-packet-core", + "netlink-sys", + "thiserror", + "tokio", +] + +[[package]] +name = "netlink-sys" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" +dependencies = [ + "async-io 1.13.0", + "bytes", + "futures", + "libc", + "log", +] + +[[package]] +name = "new_debug_unreachable" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54" + +[[package]] +name = "nix" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.4.1", + "cfg-if", + "libc", +] + +[[package]] +name = "nohash-hasher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.3", + "libc", +] + +[[package]] +name = "num_enum" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +dependencies = [ + "proc-macro-crate 3.0.0", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "open-fastrlp" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", + "ethereum-types 0.14.1", + "open-fastrlp-derive", +] + +[[package]] +name = "open-fastrlp-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" +dependencies = [ + "bytes", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "owning_ref" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" +dependencies = [ + "stable_deref_trait", +] + +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec", + "bitvec 0.20.4", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive 2.3.1", + "serde", +] + +[[package]] +name = "parity-scale-codec" +version = "3.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" +dependencies = [ + "arrayvec", + "bitvec 1.0.1", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive 3.6.9", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" +dependencies = [ + "proc-macro-crate 1.1.3", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +dependencies = [ + "proc-macro-crate 2.0.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "parity-send-wrapper" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" + +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", 
+ "lock_api", + "parking_lot_core 0.8.6", +] + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core 0.9.9", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall 0.4.1", + "smallvec", + "windows-targets 0.48.5", +] + +[[package]] +name = "password-hash" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "paste" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" + +[[package]] +name = "path-slash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e91099d4268b0e11973f036e885d652fb0b21fedcf69738c627f94db6a44f42" + +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.7", + "hmac 0.12.1", + "password-hash", + "sha2 0.10.8", +] + +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest 0.10.7", + "hmac 0.12.1", +] + +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + +[[package]] +name = "pem" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +dependencies = [ + "base64 0.13.1", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "petgraph" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +dependencies = [ + "fixedbitset", + "indexmap 2.1.0", +] + +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version", +] + +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_macros", + "phf_shared 0.11.2", 
+] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared 0.11.2", + "rand 0.8.5", +] + +[[package]] +name = "phf_macros" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +dependencies = [ + "phf_generator", + "phf_shared 0.11.2", + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "phf_shared" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" +dependencies = [ + "siphasher", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" +dependencies = [ + "pin-project-internal 0.4.30", +] + +[[package]] +name = "pin-project" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +dependencies = [ + "pin-project-internal 1.1.3", +] + +[[package]] +name = "pin-project-internal" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "pin-project-lite" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.1", + "futures-io", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a" + +[[package]] +name = "platforms" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" + +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite 0.2.13", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" +dependencies = [ + "cfg-if", + "concurrent-queue", + "pin-project-lite 0.2.13", + "rustix 0.38.28", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures 0.2.12", + "opaque-debug", + "universal-hash", +] + +[[package]] +name = "polyval" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.12", + "opaque-debug", + "universal-hash", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "precomputed-hash" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" + +[[package]] +name = "prettyplease" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" +dependencies = [ + "proc-macro2", + "syn 2.0.48", +] + +[[package]] +name = "primitive-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" +dependencies = [ + "fixed-hash 0.7.0", + "impl-codec 0.5.1", + "impl-rlp", + "impl-serde 0.3.2", + "uint", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash 0.8.0", + "impl-codec 0.6.0", + "impl-rlp", + "impl-serde 0.4.0", + "scale-info", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +dependencies = [ + "thiserror", + "toml 0.5.11", +] + +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", +] + +[[package]] +name = "proc-macro-crate" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6b2685dd208a3771337d8d386a89840f0f43cd68be8dae90a5f8c2384effc9cd" +dependencies = [ + "toml_edit 0.21.0", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prometheus" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +dependencies = [ + "cfg-if", + "fnv", + "lazy_static", + "memchr", + "parking_lot 0.12.1", + "protobuf", + "thiserror", +] + +[[package]] +name = "prometheus-client" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" +dependencies = [ + "dtoa", + "itoa", + "owning_ref", + "prometheus-client-derive-text-encode", +] + +[[package]] +name = "prometheus-client-derive-text-encode" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8e12d01b9d66ad9eb4529c57666b6263fc1993cb30261d83ead658fdd932652" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "proptest" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +dependencies = [ + "bitflags 2.4.1", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift", + "regex-syntax 0.8.2", + "unarray", +] + +[[package]] +name = "prost" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" +dependencies = [ + "bytes", + "prost-derive 0.9.0", +] + +[[package]] +name = "prost" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +dependencies = [ + "bytes", + "prost-derive 0.10.1", +] + +[[package]] +name = "prost-build" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" +dependencies = [ + "bytes", + "heck 0.3.3", + "itertools 0.10.5", + "lazy_static", + "log", + "multimap", + "petgraph", + "prost 0.9.0", + "prost-types 0.9.0", + "regex", + "tempfile", + "which", +] + +[[package]] +name = "prost-build" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" +dependencies = [ + "bytes", + "cfg-if", + "cmake", + "heck 0.4.1", + "itertools 0.10.5", + "lazy_static", + "log", + "multimap", + "petgraph", + "prost 0.10.4", + "prost-types 0.10.1", + "regex", + 
"tempfile", + "which", +] + +[[package]] +name = "prost-codec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" +dependencies = [ + "asynchronous-codec", + "bytes", + "prost 0.10.4", + "thiserror", + "unsigned-varint", +] + +[[package]] +name = "prost-derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" +dependencies = [ + "anyhow", + "itertools 0.10.5", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-derive" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" +dependencies = [ + "anyhow", + "itertools 0.10.5", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "prost-types" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" +dependencies = [ + "bytes", + "prost 0.9.0", +] + +[[package]] +name = "prost-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" +dependencies = [ + "bytes", + "prost 0.10.4", +] + +[[package]] +name = "protobuf" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quicksink" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" +dependencies = [ + "futures-core", + "futures-sink", + "pin-project-lite 0.1.12", +] + +[[package]] +name = "quote" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +dependencies = [ + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" 
+dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.12", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + +[[package]] +name = "rayon" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_users" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +dependencies = [ + "getrandom 0.2.12", + "libredox", + "thiserror", +] + +[[package]] +name = "regex" +version = 
"1.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.2", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" + +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "reqwest" +version = "0.11.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" +dependencies = [ + "base64 0.21.6", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-rustls 0.24.2", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite 0.2.13", + "rustls 0.21.10", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "system-configuration", + "tokio", + "tokio-rustls 0.24.1", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots 0.25.3", + "winreg", +] + +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted 0.7.1", + "web-sys", + "winapi", +] + +[[package]] +name = "ring" +version = "0.17.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +dependencies = [ + "cc", + "getrandom 0.2.12", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + +[[package]] +name = "ripemd" +version = 
"0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rlp-derive", + "rustc-hex", +] + +[[package]] +name = "rlp-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "rocksdb" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +dependencies = [ + "libc", + "librocksdb-sys", +] + +[[package]] +name = "rpc" +version = "0.1.0" +dependencies = [ + "append_merkle", + "base64 0.13.1", + "ethereum-types 0.14.1", + "futures", + "futures-channel", + "jsonrpsee", + "merkle_light", + "merkle_tree", + "serde", + "serde_json", + "shared_types", + "storage_with_stream", + "task_executor", + "tokio", + "tracing", +] + +[[package]] +name = "rtnetlink" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" +dependencies = [ + "async-global-executor", + "futures", + "log", + "netlink-packet-route", + "netlink-proto", + "nix 0.24.3", + "thiserror", +] + +[[package]] +name = "rusqlite" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +dependencies = [ + "bitflags 1.3.2", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +dependencies = [ + "bitflags 2.4.1", + "errno", + "libc", + "linux-raw-sys 0.4.12", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.20.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" +dependencies = [ + "log", + "ring 0.16.20", + "sct", + "webpki", +] + +[[package]] +name = "rustls" +version = "0.21.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +dependencies = [ + "log", + "ring 0.17.7", + "rustls-webpki", + "sct", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.6", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring 0.17.7", + "untrusted 0.9.0", +] + +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + +[[package]] +name = "rw-stream-sink" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" +dependencies = [ + "futures", + "pin-project 0.4.30", + "static_assertions", +] + +[[package]] +name = "rw-stream-sink" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26338f5e09bb721b85b135ea05af7767c90b52f6de4f087d4f4a3a9d64e7dc04" +dependencies = [ + "futures", + "pin-project 1.1.3", + "static_assertions", +] + +[[package]] +name = "ryu" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" + +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scale-info" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" +dependencies = [ + "cfg-if", + "derive_more", + "parity-scale-codec 3.6.9", + "scale-info-derive", +] + +[[package]] +name = "scale-info-derive" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" +dependencies = [ + "proc-macro-crate 1.1.3", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "schannel" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +dependencies = [ + 
"windows-sys 0.52.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "scrypt" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" +dependencies = [ + "hmac 0.12.1", + "pbkdf2 0.11.0", + "salsa20", + "sha2 0.10.8", +] + +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring 0.17.7", + "untrusted 0.9.0", +] + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +dependencies = [ + "serde", +] + +[[package]] +name = "send_wrapper" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" + +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + +[[package]] +name = "serde" +version = "1.0.195" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.195" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "serde_json" +version = "1.0.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + 
"serde", +] + +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures 0.2.12", + "digest 0.9.0", + "opaque-debug", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.12", + "digest 0.10.7", +] + +[[package]] +name = "sha2" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures 0.2.12", + "digest 0.9.0", + "opaque-debug", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures 0.2.12", + "digest 0.10.7", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shared_types" +version = "0.1.0" +dependencies = [ + "anyhow", + "append_merkle", + "chrono", + "eth2_ssz", + "eth2_ssz_derive", + "eth2_ssz_types", + "ethereum-types 0.14.1", + "merkle_light", + "merkle_tree", + "serde", + "tiny-keccak", + "tracing", + "typenum", +] + +[[package]] +name = "shlex" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + +[[package]] +name = "simple_asn1" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror", + "time", +] + +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", 
+] + +[[package]] +name = "smallvec" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" + +[[package]] +name = "snow" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" +dependencies = [ + "aes-gcm", + "blake2", + "chacha20poly1305", + "curve25519-dalek 4.1.1", + "rand_core 0.6.4", + "ring 0.17.7", + "rustc_version", + "sha2 0.10.8", + "subtle", +] + +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "socket2" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "soketto" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +dependencies = [ + "base64 0.13.1", + "bytes", + "flate2", + "futures", + "httparse", + "log", + "rand 0.8.5", + "sha-1", +] + +[[package]] +name = "solang-parser" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26" +dependencies = [ + "itertools 0.11.0", + "lalrpop", + "lalrpop-util", + "phf", + "thiserror", + "unicode-xid", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "storage" +version = "0.1.0" +dependencies = [ + "anyhow", + "append_merkle", + "bitmaps", + "eth2_ssz", + "eth2_ssz_derive", + "ethereum-types 0.14.1", + "hex", + "hex-literal", + "itertools 0.10.5", + "kvdb", + "kvdb-memorydb", + "kvdb-rocksdb", + "merkle_light", + "merkle_tree", + "rand 0.8.5", + "rayon", + "shared_types", + "static_assertions", + "tempdir", + "tiny-keccak", + "tracing", + "typenum", + "zgs_seal", + "zgs_spec", +] + +[[package]] +name = "storage_with_stream" +version = "0.1.0" +dependencies = [ + "anyhow", + "append_merkle", + "async-trait", + "const_format", + "eth2_ssz", + "eth2_ssz_derive", + "ethereum-types 0.14.1", + "hex", + "kvdb", + "kvdb-memorydb", + "kvdb-rocksdb", + "merkle_light", + "merkle_tree", + "rand 0.8.5", + "rayon", + 
"rusqlite", + "shared_types", + "storage", + "tempdir", + "tokio-rusqlite", + "tracing", + "typenum", +] + +[[package]] +name = "stream" +version = "0.1.0" +dependencies = [ + "anyhow", + "append_merkle", + "async-trait", + "contract-interface", + "eth2_ssz", + "eth2_ssz_derive", + "ethereum-types 0.14.1", + "ethers", + "futures", + "jsonrpsee", + "rpc", + "rusqlite", + "serde_json", + "shared_types", + "storage_with_stream", + "task_executor", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "string_cache" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +dependencies = [ + "new_debug_unreachable", + "once_cell", + "parking_lot 0.12.1", + "phf_shared 0.10.0", + "precomputed-hash", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "strum" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.25.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.48", +] + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "svm-rs" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20689c7d03b6461b502d0b95d6c24874c7d24dea2688af80486a130a06af3b07" +dependencies = [ + "dirs", + "fs2", + "hex", + "once_cell", + "reqwest", + "semver", + "serde", + "serde_json", + "sha2 0.10.8", + "thiserror", + "url", + "zip", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "unicode-xid", +] + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tap" +version = "1.0.1" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "task_executor" +version = "0.1.0" +dependencies = [ + "exit-future", + "futures", + "lazy_static", + "lighthouse_metrics", + "tokio", + "tracing", +] + +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +dependencies = [ + "rand 0.4.6", + "remove_dir_all", +] + +[[package]] +name = "tempfile" +version = "3.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +dependencies = [ + "cfg-if", + "fastrand 2.0.1", + "redox_syscall 0.4.1", + "rustix 0.38.28", + "windows-sys 0.52.0", +] + +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] + +[[package]] +name = "termcolor" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" + +[[package]] +name = "thiserror" +version = "1.0.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "thread_local" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tikv-jemalloc-sys" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "time" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +dependencies = [ + "deranged", + "itoa", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +dependencies = [ + "time-core", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ 
+ "crunchy", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.35.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "parking_lot 0.12.1", + "pin-project-lite 0.2.13", + "signal-hook-registry", + "socket2 0.5.5", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "tokio-rusqlite" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31dfc8c4778d61428ef6b5d0eba26a5953314f307f10fb278f9839d0a1baae5" +dependencies = [ + "crossbeam-channel", + "rusqlite", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls 0.20.9", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.10", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite 0.2.13", + "tokio", + "tokio-util", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" +dependencies = [ + "futures-util", + "log", + "rustls 0.21.10", + "tokio", + "tokio-rustls 0.24.1", + "tungstenite", + "webpki-roots 0.25.3", +] + +[[package]] +name = "tokio-util" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +dependencies = [ + "bytes", + "futures-core", + "futures-io", + "futures-sink", + "pin-project-lite 0.2.13", + "tokio", + "tracing", +] + +[[package]] +name = "toml" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +dependencies = [ + "serde", +] + +[[package]] +name = "toml" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.21.0", +] + +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.1.0", + "toml_datetime", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +dependencies = [ + "indexmap 2.1.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite 0.2.13", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project 1.1.3", + "tracing", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "tree_hash" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9c8a86fad3169a65aad2265d3c6a8bc119d0b771046af3c1b2fb0e9b12182b" +dependencies = [ + "eth2_hashing", + "ethereum-types 0.12.1", + "smallvec", +] + +[[package]] +name = "trust-dns-proto" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.2.3", + "ipnet", + "lazy_static", + "log", + "rand 0.8.5", + "smallvec", + "thiserror", + "tinyvec", + "tokio", + "url", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.21.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" +dependencies = [ + "cfg-if", + "futures-util", + "ipconfig", + "lazy_static", + "log", + "lru-cache", + "parking_lot 0.12.1", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "trust-dns-proto", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tungstenite" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "rand 0.8.5", + "rustls 0.21.10", + "sha1", + "thiserror", + "url", + "utf-8", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicase" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" + +[[package]] +name = "unicode-xid" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + +[[package]] +name = "unsigned-varint" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures-io", + "futures-util", +] + +[[package]] +name = "untrusted" +version = "0.7.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna 0.5.0", + "percent-encoding", +] + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +dependencies = [ + "getrandom 0.2.12", + "serde", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "value-bag" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cdbaf5e132e593e9fc1de6a15bbec912395b11fb9719e061cf64f804524c503" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "waker-fn" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" + +[[package]] +name = "walkdir" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.48", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" + +[[package]] +name = "wasm-timer" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" +dependencies = [ + "futures", + "js-sys", + "parking_lot 0.11.2", + "pin-utils", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "web-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" +dependencies = [ + "ring 0.17.7", + "untrusted 0.9.0", +] + +[[package]] +name = "webpki-roots" +version = "0.22.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" +dependencies = [ + "webpki", +] + +[[package]] +name = "webpki-roots" +version = "0.25.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10" + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.28", +] + +[[package]] +name = "widestring" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +dependencies = [ + "windows_aarch64_msvc 0.34.0", + "windows_i686_gnu 0.34.0", + "windows_i686_msvc 0.34.0", + "windows_x86_64_gnu 0.34.0", + "windows_x86_64_msvc 0.34.0", +] + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + +[[package]] +name = "windows_i686_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = "windows_i686_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "winnow" +version = "0.5.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7520bbdec7211caa7c4e682eb1fbe07abe20cee6756b6e00f537c82c11816aa" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "ws_stream_wasm" +version = "0.7.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "log", + "pharos", + "rustc_version", + "send_wrapper 0.6.0", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "x25519-dalek" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" +dependencies = [ + "curve25519-dalek 3.2.0", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "yamux" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot 0.12.1", + "rand 0.8.5", + "static_assertions", +] + +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + +[[package]] +name = "zgs_kv" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "ctrlc", + "error-chain", + "ethereum-types 0.14.1", + "exit-future", + "futures", + "http", + "libp2p", + "log_entry_sync", + "rpc", + "shared_types", + "storage_with_stream", + "stream", + "task_executor", + "tokio", + "tokio-stream", + "toml 0.5.11", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "zgs_seal" +version = "0.1.0" +dependencies = [ + "ethereum-types 0.14.1", + "tiny-keccak", + "zgs_spec", +] + +[[package]] +name = "zgs_spec" +version = "0.1.0" + +[[package]] +name = "zip" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" +dependencies = [ + "aes", + "byteorder", + "bzip2", + "constant_time_eq", + "crc32fast", + "crossbeam-utils", + "flate2", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "sha1", + "time", + "zstd", +] + +[[package]] +name = 
"zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.9+zstd.1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..3f673c3 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,20 @@ +[workspace] + +members = [ + "node", + "node/stream", + "node/log_entry_sync", + "node/rpc", + "node/shared_types", + "node/storage", + "node/storage_with_stream", + + "zerog-storage-rust/common/merkle_tree", + "zerog-storage-rust/common/merkle_light", + "zerog-storage-rust/common/spec", + "zerog-storage-rust/common/append_merkle" +] +resolver = "2" + +[patch.crates-io] +eth2_ssz = { path = "version-meld/eth2_ssz" } diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/node/Cargo.toml b/node/Cargo.toml new file mode 100644 index 0000000..0ec6e67 --- /dev/null +++ b/node/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "zgs_kv" +version = "0.1.0" +edition = "2021" +build = "build.rs" + +[dependencies] +anyhow = { version = "=1.0.58", features = ["backtrace"] } +clap = { version = "3.2.5", features = ["cargo"] } +ctrlc = "3.2.2" +error-chain = "0.12.4" +ethereum-types = "0.14" +exit-future = "0.2.0" +futures = "0.3.21" +log_entry_sync = { path = "./log_entry_sync" } +rpc = { path = "./rpc" } +shared_types = { path = "./shared_types" } +storage_with_stream = { path = "./storage_with_stream" } +stream = { path = "./stream" } +task_executor = { path = "../zerog-storage-rust/common/task_executor" } +tokio = { version = "1.19.2", features = ["full"] } +tokio-stream = { version = "0.1.9", features = ["sync"] } +toml = "0.5.9" +tracing = "0.1.35" +tracing-subscriber = { version = "0.3.11", features = ["env-filter"] } +http = "0.2.8" + +[dependencies.libp2p] +version = "0.45.1" +default-features = true +features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"] diff --git a/node/build.rs b/node/build.rs new file mode 100644 index 0000000..2a4c30b --- /dev/null +++ b/node/build.rs @@ -0,0 +1,13 @@ +use std::process::Command; + +fn main() { + println!("cargo:rerun-if-changed=../zerog-storage-rust"); + + let status = Command::new("cargo") + .current_dir("../zerog-storage-rust") + .args(vec!["build", "--release"]) + .status() + .unwrap(); + + println!("build zerog-storage-rust with status {}", status); +} diff --git a/node/log_entry_sync/Cargo.toml b/node/log_entry_sync/Cargo.toml new file mode 100644 index 0000000..7e481bb --- /dev/null +++ b/node/log_entry_sync/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "log_entry_sync" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = { version = "=1.0.58", features = ["backtrace"] } +append_merkle = { path = "../../zerog-storage-rust/common/append_merkle" } +async-trait = "0.1.56" +ethereum-types = "0.14" +futures = "0.3.21" +jsonrpsee = { version = "0.14.0", features = ["full"] } +shared_types = { path = "../shared_types" } +task_executor = { path = "../../zerog-storage-rust/common/task_executor" } +tokio = "1.19.2" +ethers = { version = "^2", features = ["ws"] } +serde_json = "1.0.82" +storage_with_stream = { path = "../storage_with_stream" } +contract-interface = { path = "../../zerog-storage-rust/common/contract-interface" } +futures-core = "0.3.28" +futures-util = "0.3.28" +thiserror = "1.0.44" \ No newline at end of file diff --git a/node/log_entry_sync/src/lib.rs b/node/log_entry_sync/src/lib.rs new file mode 100644 index 0000000..9f27bd2 --- /dev/null +++ b/node/log_entry_sync/src/lib.rs @@ -0,0 +1,10 @@ +extern crate core; + +pub(crate) mod rpc_proxy; +mod sync_manager; + +pub use rpc_proxy::ContractAddress; +pub use sync_manager::{ + config::{CacheConfig, LogSyncConfig}, + LogSyncEvent, LogSyncManager, +}; diff --git a/node/log_entry_sync/src/rpc_proxy/eth.rs b/node/log_entry_sync/src/rpc_proxy/eth.rs new file mode 100644 index 0000000..f7a9506 --- /dev/null +++ b/node/log_entry_sync/src/rpc_proxy/eth.rs @@ -0,0 +1,29 @@ +use crate::rpc_proxy::{ContractAddress, EvmRpcProxy, SubEvent, SubFilter}; +use async_trait::async_trait; +use ethers::prelude::{Bytes, Middleware, Provider, Ws}; +use ethers::types::TransactionRequest; 
+use jsonrpsee::core::client::Subscription; + +pub struct EthClient { + client: Provider<Ws>, +} + +impl EthClient { + #[allow(unused)] + pub async fn new(url: &str) -> anyhow::Result<Self> { + let client = Provider::new(Ws::connect(url).await?); + Ok(Self { client }) + } +} + +#[async_trait] +impl EvmRpcProxy for EthClient { + async fn call(&self, to: ContractAddress, data: Bytes) -> anyhow::Result<Bytes> { + let request = TransactionRequest::new().to(to).data(data); + Ok(self.client.call(&request.into(), None).await?) + } + + async fn sub_events(&self, _filter: SubFilter) -> Subscription<SubEvent> { + todo!() + } +} diff --git a/node/log_entry_sync/src/rpc_proxy/mod.rs b/node/log_entry_sync/src/rpc_proxy/mod.rs new file mode 100644 index 0000000..a1efe63 --- /dev/null +++ b/node/log_entry_sync/src/rpc_proxy/mod.rs @@ -0,0 +1,38 @@ +use anyhow::Result; +use async_trait::async_trait; +use ethereum_types::{H160, H256}; +use ethers::prelude::Bytes; +use jsonrpsee::core::client::Subscription; + +// TODO: Define accounts/filter/events as associated types? +// TODO: Define an abstraction suitable for other chains. +#[async_trait] +pub trait EvmRpcProxy { + async fn call(&self, to: ContractAddress, data: Bytes) -> Result<Bytes>; + + async fn sub_events(&self, filter: SubFilter) -> Subscription<SubEvent>; +} + +pub type ContractAddress = H160; + +pub type Topic = H256; + +#[allow(unused)] +pub struct SubFilter { + to: Option<ContractAddress>, + topics: Vec<Topic>, +} + +#[allow(unused)] +pub struct SubEvent { + /// Address + pub address: ContractAddress, + + /// Topics + pub topics: Vec<Topic>, + + /// Data + pub data: Bytes, +} + +pub(crate) mod eth; diff --git a/node/log_entry_sync/src/sync_manager/config.rs b/node/log_entry_sync/src/sync_manager/config.rs new file mode 100644 index 0000000..fae6e7b --- /dev/null +++ b/node/log_entry_sync/src/sync_manager/config.rs @@ -0,0 +1,65 @@ +use crate::rpc_proxy::ContractAddress; + +pub struct LogSyncConfig { + pub rpc_endpoint_url: String, + pub contract_address: ContractAddress, + pub cache_config: CacheConfig, + + /// The block number where we start to sync data. + /// This is usually the block number when the Zgs contract is deployed. + pub start_block_number: u64, + /// The number of blocks needed for confirmation on the blockchain. + /// This is used to roll back to a stable height if a reorg happens during node restart. + /// TODO(zz): Some blockchains have better confirmation/finalization mechanisms. + pub confirmation_block_count: u64, + /// Maximum number of event logs to poll at a time. + pub log_page_size: u64, + + // blockchain provider retry params + // the number of retries for rate-limited responses + pub rate_limit_retries: u32, + // the number of retries after a connection times out + pub timeout_retries: u32, + // the duration to wait before retry, in ms + pub initial_backoff: u64, + // the duration between each paginated getLogs RPC call, in ms. + // This is set to avoid triggering the throttling mechanism in the RPC server. + pub recover_query_delay: u64, +} + +#[derive(Clone)] +pub struct CacheConfig { + /// The data with a size larger than this will not be cached. 
+ /// This is reasonable because uploading + pub max_data_size: usize, + pub tx_seq_ttl: usize, +} + +impl LogSyncConfig { + #[allow(clippy::too_many_arguments)] + pub fn new( + rpc_endpoint_url: String, + contract_address: ContractAddress, + start_block_number: u64, + confirmation_block_count: u64, + cache_config: CacheConfig, + log_page_size: u64, + rate_limit_retries: u32, + timeout_retries: u32, + initial_backoff: u64, + recover_query_delay: u64, + ) -> Self { + Self { + rpc_endpoint_url, + contract_address, + cache_config, + start_block_number, + confirmation_block_count, + log_page_size, + rate_limit_retries, + timeout_retries, + initial_backoff, + recover_query_delay, + } + } +} diff --git a/node/log_entry_sync/src/sync_manager/data_cache.rs b/node/log_entry_sync/src/sync_manager/data_cache.rs new file mode 100644 index 0000000..00397d6 --- /dev/null +++ b/node/log_entry_sync/src/sync_manager/data_cache.rs @@ -0,0 +1,58 @@ +use crate::sync_manager::config::CacheConfig; +use shared_types::DataRoot; +use std::cmp; +use std::collections::HashMap; + +struct CachedData { + /// Used for garbage collection. + last_seen_tx_seq: u64, + /// Complete data for a given DataRoot. + data: Vec, +} + +pub struct DataCache { + root_to_data: HashMap, + config: CacheConfig, +} + +impl DataCache { + pub fn new(config: CacheConfig) -> Self { + Self { + root_to_data: HashMap::new(), + config, + } + } + + pub fn add_data(&mut self, root: DataRoot, tx_seq: u64, data: Vec) -> bool { + if data.len() > self.config.max_data_size { + // large data are not cached. + return false; + } + // TODO: insert partial data and merge here. + self.root_to_data + .entry(root) + .and_modify(|cached| { + cached.last_seen_tx_seq = cmp::max(tx_seq, cached.last_seen_tx_seq) + }) + .or_insert(CachedData { + last_seen_tx_seq: tx_seq, + data, + }); + true + } + + /// Remove and return the data of a given `DataRoot`. + /// If two completed reverted transactions have the same root and both appear later, + /// the second one will have its data copied in `put_tx`. + pub fn pop_data(&mut self, root: &DataRoot) -> Option> { + self.root_to_data.remove(root).map(|e| e.data) + } + + /// Remove timeout data entries according to TTL. + pub fn garbage_collect(&mut self, latest_tx_seq: u64) { + // We won't keep too many data, so it's okay to just iterate here. 
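+ // An entry is kept as long as `latest_tx_seq - last_seen_tx_seq` does not exceed the configured `tx_seq_ttl`.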
+ self.root_to_data.retain(|_, cached| { + cached.last_seen_tx_seq + self.config.tx_seq_ttl as u64 >= latest_tx_seq + }) + } +} diff --git a/node/log_entry_sync/src/sync_manager/log_entry_fetcher.rs b/node/log_entry_sync/src/sync_manager/log_entry_fetcher.rs new file mode 100644 index 0000000..e58976c --- /dev/null +++ b/node/log_entry_sync/src/sync_manager/log_entry_fetcher.rs @@ -0,0 +1,377 @@ +use crate::rpc_proxy::ContractAddress; +use crate::sync_manager::log_query::LogQuery; +use crate::sync_manager::{repeat_run_and_log, RETRY_WAIT_MS}; +use anyhow::{anyhow, bail, Result}; +use append_merkle::{Algorithm, Sha3Algorithm}; +use contract_interface::{SubmissionNode, SubmitFilter, ZgsFlow}; +use ethers::abi::RawLog; +use ethers::prelude::{BlockNumber, EthLogDecode, Http, Log, Middleware, Provider, U256}; +use ethers::providers::{FilterKind, HttpRateLimitRetryPolicy, RetryClient, RetryClientBuilder}; +use ethers::types::H256; +use futures::StreamExt; +use jsonrpsee::tracing::{debug, error, info}; +use shared_types::{DataRoot, Transaction}; +use std::collections::{BTreeMap, VecDeque}; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; +use task_executor::TaskExecutor; +use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; + +// SHA256("STREAM") +// df2ff3bb0af36c6384e6206552a4ed807f6f6a26e7d0aa6bff772ddc9d4307aa +const STREAM_DOMAIN: H256 = H256([ + 223, 47, 243, 187, 10, 243, 108, 99, 132, 230, 32, 101, 82, 164, 237, 128, 127, 111, 106, 38, + 231, 208, 170, 107, 255, 119, 45, 220, 157, 67, 7, 170, +]); + +pub struct LogEntryFetcher { + contract_address: ContractAddress, + log_page_size: u64, + provider: Arc>>, + + confirmation_delay: u64, +} + +impl LogEntryFetcher { + pub async fn new( + url: &str, + contract_address: ContractAddress, + log_page_size: u64, + confirmation_delay: u64, + rate_limit_retries: u32, + timeout_retries: u32, + initial_backoff: u64, + ) -> Result { + let provider = Arc::new(Provider::new( + RetryClientBuilder::default() + .rate_limit_retries(rate_limit_retries) + .timeout_retries(timeout_retries) + .initial_backoff(Duration::from_millis(initial_backoff)) + .build(Http::from_str(url)?, Box::new(HttpRateLimitRetryPolicy)), + )); + // TODO: `error` types are removed from the ABI json file. 
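+ // The HTTP transport is wrapped in a RetryClient, so rate-limited and timed-out RPC requests are retried with the configured backoff.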
+ Ok(Self { + contract_address, + provider, + log_page_size, + confirmation_delay, + }) + } + + pub fn start_recover( + &self, + start_block_number: u64, + end_block_number: u64, + executor: &TaskExecutor, + log_query_delay: Duration, + ) -> UnboundedReceiver { + let provider = self.provider.clone(); + let (recover_tx, recover_rx) = tokio::sync::mpsc::unbounded_channel(); + let contract = ZgsFlow::new(self.contract_address, provider.clone()); + let log_page_size = self.log_page_size; + + executor.spawn( + async move { + let mut progress = start_block_number; + let mut filter = contract + .submit_filter() + .from_block(progress) + .to_block(end_block_number) + .filter; + let mut stream = LogQuery::new(&provider, &filter, log_query_delay) + .with_page_size(log_page_size); + debug!( + "start_recover starts, start={} end={}", + start_block_number, end_block_number + ); + while let Some(maybe_log) = stream.next().await { + match maybe_log { + Ok(log) => { + let sync_progress = + if log.block_hash.is_some() && log.block_number.is_some() { + let synced_block = LogFetchProgress::SyncedBlock(( + log.block_number.unwrap().as_u64(), + log.block_hash.unwrap(), + )); + progress = log.block_number.unwrap().as_u64(); + Some(synced_block) + } else { + None + }; + debug!("recover: progress={:?}", sync_progress); + + match SubmitFilter::decode_log(&RawLog { + topics: log.topics, + data: log.data.to_vec(), + }) { + Ok(event) => { + if let Err(e) = recover_tx + .send(submission_event_to_transaction(event)) + .and_then(|_| match sync_progress { + Some(b) => recover_tx.send(b), + None => Ok(()), + }) + { + error!("send error: e={:?}", e); + } + } + Err(e) => { + error!("log decode error: e={:?}", e); + } + } + } + Err(e) => { + error!("log query error: e={:?}", e); + filter = filter.from_block(progress); + stream = LogQuery::new(&provider, &filter, log_query_delay) + .with_page_size(log_page_size); + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + } + } + } + }, + "log recover", + ); + recover_rx + } + + pub fn start_watch( + &self, + start_block_number: u64, + executor: &TaskExecutor, + ) -> UnboundedReceiver { + let (watch_tx, watch_rx) = tokio::sync::mpsc::unbounded_channel(); + let contract = ZgsFlow::new(self.contract_address, self.provider.clone()); + let provider = self.provider.clone(); + let mut log_confirmation_queue = LogConfirmationQueue::new(self.confirmation_delay); + executor.spawn( + async move { + let mut filter = contract + .submit_filter() + .from_block(start_block_number) + .filter; + debug!("start_watch starts, start={}", start_block_number); + let mut filter_id = + repeat_run_and_log(|| provider.new_filter(FilterKind::Logs(&filter))).await; + let mut progress = start_block_number; + + loop { + match Self::watch_loop( + provider.as_ref(), + filter_id, + &watch_tx, + &mut log_confirmation_queue, + ) + .await + { + Err(e) => { + error!("log sync watch error: e={:?}", e); + filter = filter.from_block(progress); + filter_id = repeat_run_and_log(|| { + provider.new_filter(FilterKind::Logs(&filter)) + }) + .await; + } + Ok(Some(p)) => { + progress = p; + info!("log sync to block number {:?}", progress); + } + Ok(None) => { + error!( + "log sync gets entries without progress? 
old_progress={}", + progress + ) + } + } + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + } + }, + "log watch", + ); + watch_rx + } + + async fn watch_loop( + provider: &Provider<RetryClient<Http>>, + filter_id: U256, + watch_tx: &UnboundedSender<LogFetchProgress>, + log_confirmation_queue: &mut LogConfirmationQueue, + ) -> Result<Option<u64>> { + debug!("get block"); + let latest_block = provider + .get_block(BlockNumber::Latest) + .await? + .ok_or_else(|| anyhow!("None for latest block"))?; + debug!("get filter changes"); + let logs: Vec<Log> = provider.get_filter_changes(filter_id).await?; + if let Some(reverted) = log_confirmation_queue.push(logs)? { + watch_tx.send(LogFetchProgress::Reverted(reverted))?; + } + debug!("get filter end"); + for log in log_confirmation_queue.confirm_logs(latest_block.number.unwrap().as_u64()) { + assert!(!log.removed.unwrap_or(false)); + // TODO(zz): Log parse error means logs might be lost here. + let tx = SubmitFilter::decode_log(&RawLog { + topics: log.topics, + data: log.data.to_vec(), + })?; + watch_tx.send(submission_event_to_transaction(tx))?; + } + let progress = if latest_block.hash.is_some() && latest_block.number.is_some() { + Some(( + latest_block.number.unwrap().as_u64(), + latest_block.hash.unwrap(), + )) + } else { + None + }; + if let Some(p) = &progress { + watch_tx.send(LogFetchProgress::SyncedBlock(*p))?; + } + Ok(progress.map(|p| p.0)) + } + + pub fn provider(&self) -> &Provider<RetryClient<Http>> { + self.provider.as_ref() + } +} + +struct LogConfirmationQueue { + /// Keep the unconfirmed new logs. + /// The key is the block number and the value is the set of needed logs in that block. + queue: VecDeque<(u64, Vec<Log>)>, + + latest_block_number: u64, + confirmation_delay: u64, +} + +impl LogConfirmationQueue { + fn new(confirmation_delay: u64) -> Self { + Self { + queue: VecDeque::new(), + latest_block_number: 0, + confirmation_delay, + } + } + /// Push a set of new logs. + /// We assume that these logs are in order, and removed logs are returned first. + /// + /// Return `Ok(Some(tx_seq))` with the first reverted tx_seq if a chain reorg happens. + /// `Err` is returned if assumptions are violated (like a log with missing fields). + fn push(&mut self, logs: Vec<Log>) -> Result<Option<u64>> { + let mut revert_to = None; + // First merge logs according to the block number. + let mut block_logs: BTreeMap<u64, Vec<Log>> = BTreeMap::new(); + let mut removed_block_logs = BTreeMap::new(); + for log in logs { + let set = if log.removed.unwrap_or(false) { + &mut removed_block_logs + } else { + &mut block_logs + }; + let block_number = log + .block_number + .ok_or_else(|| anyhow!("block number missing"))? + .as_u64(); + set.entry(block_number).or_default().push(log); + } + + // Handle revert if it happens. + for (block_number, removed_logs) in &removed_block_logs { + if revert_to.is_none() { + let reverted_index = match self.queue.binary_search_by_key(block_number, |e| e.0) { + Ok(x) => x, + Err(x) => x, + }; + self.queue.truncate(reverted_index); + let first = removed_logs.first().expect("not empty"); + let first_reverted_tx_seq = SubmitFilter::decode_log(&RawLog { + topics: first.topics.clone(), + data: first.data.to_vec(), + })? + .submission_index + .as_u64(); + revert_to = Some(first_reverted_tx_seq); + } else { + // Other removed logs should have larger tx seq, so no need to process them. + break; + } + } + + // Add new logs to the queue. 
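+ // New logs must arrive with increasing block numbers; a block at or below the last queued one means a revert was missed.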
+ for (block_number, new_logs) in block_logs { + if block_number <= self.queue.back().map(|e| e.0).unwrap_or(0) { + bail!("reverted without being notified"); + } + self.queue.push_back((block_number, new_logs)); + } + + Ok(revert_to) + } + + /// Pass in the latest block number and return the confirmed logs. + fn confirm_logs(&mut self, latest_block_number: u64) -> Vec { + self.latest_block_number = latest_block_number; + let mut confirmed_logs = Vec::new(); + while let Some((block_number, _)) = self.queue.front() { + if *block_number + > self + .latest_block_number + .wrapping_sub(self.confirmation_delay) + { + break; + } + let (_, mut logs) = self.queue.pop_front().unwrap(); + confirmed_logs.append(&mut logs); + } + confirmed_logs + } +} + +#[derive(Debug)] +pub enum LogFetchProgress { + SyncedBlock((u64, H256)), + Transaction(Transaction), + Reverted(u64), +} + +fn submission_topic_to_stream_ids(topic: Vec) -> Vec { + if topic.is_empty() || topic.len() % 32 != 0 || H256::from_slice(&topic[..32]) != STREAM_DOMAIN + { + return vec![]; + } + + let mut stream_ids = Vec::new(); + for i in (32..topic.len()).step_by(32) { + stream_ids.push(H256::from_slice(&topic[i..i + 32])) + } + stream_ids +} + +fn submission_event_to_transaction(e: SubmitFilter) -> LogFetchProgress { + LogFetchProgress::Transaction(Transaction { + stream_ids: submission_topic_to_stream_ids(e.submission.tags.to_vec()), + sender: e.sender, + data: vec![], + data_merkle_root: nodes_to_root(&e.submission.nodes), + merkle_nodes: e + .submission + .nodes + .iter() + // the submission height is the height of the root node starting from height 0. + .map(|SubmissionNode { root, height }| (height.as_usize() + 1, root.into())) + .collect(), + start_entry_index: e.start_pos.as_u64(), + size: e.submission.length.as_u64(), + seq: e.submission_index.as_u64(), + }) +} + +fn nodes_to_root(node_list: &Vec) -> DataRoot { + let mut root: DataRoot = node_list.last().expect("not empty").root.into(); + for next_node in node_list[..node_list.len() - 1].iter().rev() { + root = Sha3Algorithm::parent(&next_node.root.into(), &root); + } + root +} diff --git a/node/log_entry_sync/src/sync_manager/log_query.rs b/node/log_entry_sync/src/sync_manager/log_query.rs new file mode 100644 index 0000000..ee7f412 --- /dev/null +++ b/node/log_entry_sync/src/sync_manager/log_query.rs @@ -0,0 +1,183 @@ +use ethers::prelude::{Filter, JsonRpcClient, Log, Middleware, Provider, ProviderError, U64}; +use futures_core::stream::Stream; +use jsonrpsee::tracing::trace; +use std::future::Future; +use std::time::Duration; +use std::{ + collections::VecDeque, + pin::Pin, + task::{Context, Poll}, +}; +use thiserror::Error; + +pub(crate) type PinBoxFut<'a, T> = + Pin> + Send + 'a>>; + +/// A log query provides streaming access to historical logs via a paginated +/// request. For streaming access to future logs, use [`Middleware::watch`] or +/// [`Middleware::subscribe_logs`] +pub struct LogQuery<'a, P> { + provider: &'a Provider
<P>, + filter: Filter, + from_block: Option<U64>, + page_size: u64, + current_logs: VecDeque<Log>, + last_block: Option<U64>, + state: LogQueryState<'a>, + delay: Duration, +} + +enum LogQueryState<'a> { + Initial, + LoadLastBlock(PinBoxFut<'a, U64>), + LoadLogs(PinBoxFut<'a, Vec<Log>>), + Consume, +} + +impl<'a, P> LogQuery<'a, P> +where + P: JsonRpcClient, +{ + /// Instantiate a new `LogQuery` + pub fn new(provider: &'a Provider<P>
, filter: &Filter, delay: Duration) -> Self { + Self { + provider, + filter: filter.clone(), + from_block: filter.get_from_block(), + page_size: 10000, + current_logs: VecDeque::new(), + last_block: None, + state: LogQueryState::Initial, + delay, + } + } + + /// set page size for pagination + pub fn with_page_size(mut self, page_size: u64) -> Self { + self.page_size = page_size; + self + } +} + +macro_rules! rewake_with_new_state { + ($ctx:ident, $this:ident, $new_state:expr) => { + $this.state = $new_state; + $ctx.waker().wake_by_ref(); + return Poll::Pending + }; +} + +/// Errors while querying for logs +#[derive(Error, Debug)] +pub enum LogQueryError { + /// Error loading latest block + #[error(transparent)] + LoadLastBlockError(E), + /// Error loading logs from block range + #[error(transparent)] + LoadLogsError(E), +} + +impl<'a, P> Stream for LogQuery<'a, P> +where + P: JsonRpcClient, +{ + type Item = Result>; + + fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll> { + let delay = self.delay; + match &mut self.state { + LogQueryState::Initial => { + if !self.filter.is_paginatable() { + // if not paginatable, load logs and consume + let filter = self.filter.clone(); + let provider = self.provider; + #[allow(clippy::redundant_async_block)] + let fut = Box::pin(async move { + tokio::time::sleep(delay).await; + provider.get_logs(&filter).await + }); + rewake_with_new_state!(ctx, self, LogQueryState::LoadLogs(fut)); + } else { + // if paginatable, load last block + let fut = self.provider.get_block_number(); + rewake_with_new_state!(ctx, self, LogQueryState::LoadLastBlock(fut)); + } + } + LogQueryState::LoadLastBlock(fut) => { + match futures_util::ready!(fut.as_mut().poll(ctx)) { + Ok(last_block) => { + trace!("log_query: last_block={}", last_block); + self.last_block = Some(last_block); + + // this is okay because we will only enter this state when the filter is + // paginatable i.e. 
from block is set + let from_block = self.filter.get_from_block().unwrap(); + let to_block = from_block + self.page_size; + self.from_block = Some(to_block + 1); + + let filter = self + .filter + .clone() + .from_block(from_block) + .to_block(to_block); + let provider = self.provider; + // load first page of logs + #[allow(clippy::redundant_async_block)] + let fut = Box::pin(async move { + tokio::time::sleep(delay).await; + provider.get_logs(&filter).await + }); + rewake_with_new_state!(ctx, self, LogQueryState::LoadLogs(fut)); + } + Err(err) => Poll::Ready(Some(Err(LogQueryError::LoadLastBlockError(err)))), + } + } + LogQueryState::LoadLogs(fut) => match futures_util::ready!(fut.as_mut().poll(ctx)) { + Ok(logs) => { + self.current_logs = VecDeque::from(logs); + rewake_with_new_state!(ctx, self, LogQueryState::Consume); + } + Err(err) => Poll::Ready(Some(Err(LogQueryError::LoadLogsError(err)))), + }, + LogQueryState::Consume => { + let log = self.current_logs.pop_front(); + if log.is_none() { + // consumed all the logs + if !self.filter.is_paginatable() { + Poll::Ready(None) + } else { + // load new logs if there are still more pages to go through + // can safely assume this will always be set in this state + let from_block = self.from_block.unwrap(); + let to_block = from_block + self.page_size; + + // no more pages to load, and everything is consumed + // can safely assume this will always be set in this state + if from_block > self.last_block.unwrap() { + return Poll::Ready(None); + } + // load next page + self.from_block = Some(to_block + 1); + + let filter = self + .filter + .clone() + .from_block(from_block) + .to_block(to_block); + let provider = self.provider; + #[allow(clippy::redundant_async_block)] + let fut = Box::pin(async move { + tokio::time::sleep(delay).await; + provider.get_logs(&filter).await + }); + + rewake_with_new_state!(ctx, self, LogQueryState::LoadLogs(fut)); + } + } else { + Poll::Ready(log.map(Ok)) + } + } + } + } +} diff --git a/node/log_entry_sync/src/sync_manager/mod.rs b/node/log_entry_sync/src/sync_manager/mod.rs new file mode 100644 index 0000000..af33943 --- /dev/null +++ b/node/log_entry_sync/src/sync_manager/mod.rs @@ -0,0 +1,321 @@ +use crate::sync_manager::config::LogSyncConfig; +use crate::sync_manager::data_cache::DataCache; +use crate::sync_manager::log_entry_fetcher::{LogEntryFetcher, LogFetchProgress}; +use anyhow::{bail, Result}; +use ethers::prelude::Middleware; +use futures::FutureExt; +use jsonrpsee::tracing::{debug, error, trace, warn}; +use shared_types::{ChunkArray, Transaction}; +use std::fmt::Debug; +use std::future::Future; +use std::sync::Arc; +use std::time::Duration; +use storage_with_stream::Store; +use task_executor::{ShutdownReason, TaskExecutor}; +use tokio::sync::broadcast; +use tokio::sync::mpsc::UnboundedReceiver; +use tokio::sync::RwLock; + +const RETRY_WAIT_MS: u64 = 500; +const BROADCAST_CHANNEL_CAPACITY: usize = 16; + +#[derive(Clone, Debug)] +pub enum LogSyncEvent { + /// Chain reorg detected without any operation yet. + ReorgDetected { tx_seq: u64 }, + /// Transaction reverted in storage. + Reverted { tx_seq: u64 }, + /// Synced a transaction from blockchain + TxSynced { tx: Transaction }, +} + +pub struct LogSyncManager { + config: LogSyncConfig, + log_fetcher: LogEntryFetcher, + store: Arc>, + data_cache: DataCache, + + next_tx_seq: u64, + + /// To broadcast events to handle in advance. 
+ event_send: broadcast::Sender, +} + +impl LogSyncManager { + pub async fn spawn( + config: LogSyncConfig, + executor: TaskExecutor, + store: Arc>, + ) -> Result> { + let next_tx_seq = store.read().await.next_tx_seq(); + + let executor_clone = executor.clone(); + let mut shutdown_sender = executor.shutdown_sender(); + + let (event_send, _) = broadcast::channel(BROADCAST_CHANNEL_CAPACITY); + let event_send_cloned = event_send.clone(); + + // Spawn the task to sync log entries from the blockchain. + executor.spawn( + run_and_log( + move || { + shutdown_sender + .try_send(ShutdownReason::Failure("log sync failure")) + .expect("shutdown send error") + }, + async move { + let log_fetcher = LogEntryFetcher::new( + &config.rpc_endpoint_url, + config.contract_address, + config.log_page_size, + config.confirmation_block_count, + config.rate_limit_retries, + config.timeout_retries, + config.initial_backoff, + ) + .await?; + let data_cache = DataCache::new(config.cache_config.clone()); + let mut log_sync_manager = Self { + config, + log_fetcher, + next_tx_seq, + store, + data_cache, + event_send, + }; + + // Load previous progress from db and check if chain reorg happens after restart. + // TODO(zz): Handle reorg instead of return. + let start_block_number = + match log_sync_manager.store.read().await.get_sync_progress()? { + // No previous progress, so just use config. + None => log_sync_manager.config.start_block_number, + Some((block_number, block_hash)) => { + match log_sync_manager + .log_fetcher + .provider() + .get_block(block_number) + .await + { + Ok(Some(b)) => { + if b.hash == Some(block_hash) { + block_number + } else { + warn!( + "log sync progress check hash fails, \ + block_number={:?} expect={:?} get={:?}", + block_number, block_hash, b.hash + ); + // Assume the blocks before this are not reverted. + block_number.saturating_sub( + log_sync_manager.config.confirmation_block_count, + ) + } + } + e => { + error!("log sync progress check rpc fails, e={:?}", e); + bail!("log sync start error"); + } + } + } + }; + let latest_block_number = log_sync_manager + .log_fetcher + .provider() + .get_block_number() + .await? + .as_u64(); + + // Start watching before recovery to ensure that no log is skipped. + // TODO(zz): Rate limit to avoid OOM during recovery. + let watch_rx = log_sync_manager + .log_fetcher + .start_watch(latest_block_number, &executor_clone); + let recover_rx = log_sync_manager.log_fetcher.start_recover( + start_block_number, + // -1 so the recover and watch ranges do not overlap. + latest_block_number.wrapping_sub(1), + &executor_clone, + Duration::from_millis(log_sync_manager.config.recover_query_delay), + ); + log_sync_manager.handle_data(recover_rx).await?; + // Syncing `watch_rx` is supposed to block forever. + log_sync_manager.handle_data(watch_rx).await?; + Ok(()) + }, + ) + .map(|_| ()), + "log_sync", + ); + Ok(event_send_cloned) + } + + async fn put_tx(&mut self, tx: Transaction) -> bool { + // We call this after process chain reorg, so the sequence number should match. + match tx.seq.cmp(&self.next_tx_seq) { + std::cmp::Ordering::Less => true, + std::cmp::Ordering::Equal => { + debug!("log entry sync get entry: {:?}", tx); + self.put_tx_inner(tx).await + } + std::cmp::Ordering::Greater => { + error!( + "Unexpected transaction seq: next={} get={}", + self.next_tx_seq, tx.seq + ); + false + } + } + } + + /// `tx_seq` is the first reverted tx seq. 
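+ /// Cache the data of completed reverted transactions for reuse, roll back the stream store to before `tx_seq`, and broadcast reorg events.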
+ async fn process_reverted(&mut self, tx_seq: u64) { + warn!("revert for chain reorg: seq={}", tx_seq); + { + let store = self.store.read().await; + for seq in tx_seq..self.next_tx_seq { + if matches!(store.check_tx_completed(seq), Ok(true)) { + if let Ok(Some(tx)) = store.get_tx_by_seq_number(seq) { + // TODO(zz): Skip reading the rear padding data? + if let Ok(Some(data)) = + store.get_chunks_by_tx_and_index_range(seq, 0, tx.num_entries()) + { + if !self + .data_cache + .add_data(tx.data_merkle_root, seq, data.data) + { + // TODO(zz): Data too large. Save to disk? + warn!("large reverted data dropped for tx={:?}", tx); + } + } + } + } + } + } + + let _ = self.event_send.send(LogSyncEvent::ReorgDetected { tx_seq }); + + // TODO(zz): `wrapping_sub` here is a hack to handle the case of tx_seq=0. + if let Err(e) = self + .store + .write() + .await + .revert_stream(tx_seq.wrapping_sub(1)) + .await + { + error!("revert_to fails: e={:?}", e); + return; + } + self.next_tx_seq = tx_seq; + + let _ = self.event_send.send(LogSyncEvent::Reverted { tx_seq }); + } + + async fn handle_data(&mut self, mut rx: UnboundedReceiver) -> Result<()> { + while let Some(data) = rx.recv().await { + trace!("handle_data: data={:?}", data); + match data { + LogFetchProgress::SyncedBlock(progress) => { + match self + .log_fetcher + .provider() + .get_block( + progress + .0 + .saturating_sub(self.config.confirmation_block_count), + ) + .await + { + Ok(Some(b)) => { + if let (Some(block_number), Some(block_hash)) = (b.number, b.hash) { + self.store + .write() + .await + .put_sync_progress((block_number.as_u64(), block_hash))?; + } + } + e => { + error!("log put progress check rpc fails, e={:?}", e); + } + } + } + LogFetchProgress::Transaction(tx) => { + if !self.put_tx(tx.clone()).await { + // Unexpected error. + error!("log sync write error"); + break; + } + } + LogFetchProgress::Reverted(reverted) => { + self.process_reverted(reverted).await; + } + } + } + Ok(()) + } + + async fn put_tx_inner(&mut self, tx: Transaction) -> bool { + if let Err(e) = self.store.write().await.put_tx(tx.clone()) { + error!("put_tx error: e={:?}", e); + false + } else { + if let Some(data) = self.data_cache.pop_data(&tx.data_merkle_root) { + let mut store = self.store.write().await; + // We are holding a mutable reference of LogSyncManager, so no chain reorg is + // possible after put_tx. 
+ if let Err(e) = store + .put_chunks_with_tx_hash( + tx.seq, + tx.hash(), + ChunkArray { + data, + start_index: 0, + }, + ) + .and_then(|_| store.finalize_tx_with_hash(tx.seq, tx.hash())) + { + error!("put_tx data error: e={:?}", e); + return false; + } + } + self.data_cache.garbage_collect(self.next_tx_seq); + self.next_tx_seq += 1; + true + } + } +} + +async fn run_and_log( + mut on_error: impl FnMut(), + f: impl Future> + Send, +) -> Option +where + E: Debug, +{ + match f.await { + Err(e) => { + error!("log sync failure: e={:?}", e); + on_error(); + None + } + Ok(r) => Some(r), + } +} + +async fn repeat_run_and_log(f: impl Fn() -> F) -> R +where + E: Debug, + F: Future> + Send, +{ + loop { + if let Some(r) = run_and_log(|| {}, f()).await { + break r; + } + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + } +} + +pub(crate) mod config; +mod data_cache; +mod log_entry_fetcher; +mod log_query; diff --git a/node/rpc/Cargo.toml b/node/rpc/Cargo.toml new file mode 100644 index 0000000..a8a8c43 --- /dev/null +++ b/node/rpc/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "rpc" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +append_merkle = { path = "../../zerog-storage-rust/common/append_merkle" } +futures = "0.3.21" +jsonrpsee = { version = "0.14.0", features = ["full"] } +serde = { version = "1.0.137", features = ["derive"] } +base64 = "0.13.0" +shared_types = { path = "../shared_types" } +task_executor = { path = "../../zerog-storage-rust/common/task_executor" } +tokio = { version = "1.19.2", features = ["macros", "sync"] } +tracing = "0.1.35" +merkle_light = { path = "../../zerog-storage-rust/common/merkle_light" } +merkle_tree = { path = "../../zerog-storage-rust/common/merkle_tree" } +futures-channel = "^0.3" +ethereum-types = "0.14" +storage_with_stream = { path = "../storage_with_stream" } + +[dev-dependencies] +serde_json = "1.0.82" diff --git a/node/rpc/src/config.rs b/node/rpc/src/config.rs new file mode 100644 index 0000000..9f805fe --- /dev/null +++ b/node/rpc/src/config.rs @@ -0,0 +1,11 @@ +use std::net::SocketAddr; + +#[derive(Clone)] +pub struct Config { + pub enabled: bool, + pub listen_address: SocketAddr, + pub chunks_per_segment: usize, + pub zgs_nodes: Vec, + pub max_query_len_in_bytes: u64, + pub max_response_body_in_bytes: u32, +} diff --git a/node/rpc/src/error.rs b/node/rpc/src/error.rs new file mode 100644 index 0000000..9369a4a --- /dev/null +++ b/node/rpc/src/error.rs @@ -0,0 +1,30 @@ +#![allow(dead_code)] + +use jsonrpsee::core::Error; +use jsonrpsee::types::error::{CallError, ErrorCode, ErrorObject}; + +pub fn not_supported() -> Error { + Error::Call(CallError::Custom(ErrorObject::borrowed( + ErrorCode::MethodNotFound.code(), + &"Not supported", + None, + ))) +} + +pub fn internal_error(msg: impl std::convert::AsRef) -> Error { + Error::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InternalError.code(), + "Internal error", + Some(msg.as_ref()), + ))) +} + +pub fn invalid_params(param: &str, msg: impl std::convert::AsRef) -> Error { + let error = &format!("Invalid params: {:}", param); + + Error::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + error, + Some(msg.as_ref()), + ))) +} diff --git a/node/rpc/src/kv_rpc_server/api.rs b/node/rpc/src/kv_rpc_server/api.rs new file mode 100644 index 0000000..b90ac63 --- /dev/null +++ b/node/rpc/src/kv_rpc_server/api.rs @@ -0,0 +1,109 @@ +use 
ethereum_types::{H160, H256}; +use jsonrpsee::core::RpcResult; +use jsonrpsee::proc_macros::rpc; + +use crate::types::{KeyValueSegment, Segment, ValueSegment}; + +#[rpc(server, client, namespace = "kv")] +pub trait KeyValueRpc { + #[method(name = "getStatus")] + async fn get_status(&self) -> RpcResult; + + #[method(name = "getValue")] + async fn get_value( + &self, + stream_id: H256, + key: Segment, + start_index: u64, + len: u64, + version: Option, + ) -> RpcResult>; + + #[method(name = "getNext")] + async fn get_next( + &self, + stream_id: H256, + key: Segment, + start_index: u64, + len: u64, + inclusive: bool, + version: Option, + ) -> RpcResult>; + + #[method(name = "getPrev")] + async fn get_prev( + &self, + stream_id: H256, + key: Segment, + start_index: u64, + len: u64, + inclusive: bool, + version: Option, + ) -> RpcResult>; + + #[method(name = "getFirst")] + async fn get_first( + &self, + stream_id: H256, + start_index: u64, + len: u64, + version: Option, + ) -> RpcResult>; + + #[method(name = "getLast")] + async fn get_last( + &self, + stream_id: H256, + start_index: u64, + len: u64, + version: Option, + ) -> RpcResult>; + + #[method(name = "getTransactionResult")] + async fn get_trasanction_result(&self, tx_seq: u64) -> RpcResult>; + + #[method(name = "getHoldingStreamIds")] + async fn get_holding_stream_ids(&self) -> RpcResult>; + + #[method(name = "hasWritePermission")] + async fn has_write_permission( + &self, + account: H160, + stream_id: H256, + key: Segment, + version: Option, + ) -> RpcResult; + + #[method(name = "isAdmin")] + async fn is_admin( + &self, + account: H160, + stream_id: H256, + version: Option, + ) -> RpcResult; + + #[method(name = "isSpecialKey")] + async fn is_special_key( + &self, + stream_id: H256, + key: Segment, + version: Option, + ) -> RpcResult; + + #[method(name = "isWriterOfKey")] + async fn is_writer_of_key( + &self, + account: H160, + stream_id: H256, + key: Segment, + version: Option, + ) -> RpcResult; + + #[method(name = "isWriterOfStream")] + async fn is_writer_of_stream( + &self, + account: H160, + stream_id: H256, + version: Option, + ) -> RpcResult; +} diff --git a/node/rpc/src/kv_rpc_server/impl.rs b/node/rpc/src/kv_rpc_server/impl.rs new file mode 100644 index 0000000..0cd415e --- /dev/null +++ b/node/rpc/src/kv_rpc_server/impl.rs @@ -0,0 +1,415 @@ +use std::sync::Arc; + +use crate::error; +use crate::types::KeyValueSegment; +use crate::types::Segment; +use crate::types::ValueSegment; +use crate::Context; +use ethereum_types::H160; +use shared_types::KeyValuePair; +use storage_with_stream::log_store::log_manager::ENTRY_SIZE; + +use super::api::KeyValueRpcServer; +use ethereum_types::H256; +use jsonrpsee::core::async_trait; +use jsonrpsee::core::RpcResult; +pub struct KeyValueRpcServerImpl { + pub ctx: Context, +} + +impl KeyValueRpcServerImpl { + async fn get_value_segment( + &self, + pair: KeyValuePair, + start_index: u64, + len: u64, + ) -> RpcResult> { + if start_index > pair.end_index - pair.start_index { + return Err(error::invalid_params( + "start_index", + "start index is greater than value length", + )); + } + let start_byte_index = pair.start_index + start_index; + let end_byte_index = std::cmp::min(start_byte_index + len, pair.end_index); + let start_entry_index = start_byte_index / ENTRY_SIZE as u64; + let end_entry_index = if end_byte_index % ENTRY_SIZE as u64 == 0 { + end_byte_index / ENTRY_SIZE as u64 + } else { + end_byte_index / ENTRY_SIZE as u64 + 1 + }; + if let Some(entry_array) = self + .ctx + .store + .read() + 
.await + .get_chunk_by_flow_index(start_entry_index, end_entry_index - start_entry_index)? + { + return Ok(Some(ValueSegment { + version: pair.version, + data: entry_array.data[(start_byte_index as usize + - start_entry_index as usize * ENTRY_SIZE) + ..(end_byte_index as usize - start_entry_index as usize * ENTRY_SIZE) as usize] + .to_vec(), + size: pair.end_index - pair.start_index, + })); + } + Err(error::internal_error("key data is missing")) + } + + async fn get_key_value_segment( + &self, + pair: KeyValuePair, + start_index: u64, + len: u64, + ) -> RpcResult> { + if start_index > pair.end_index - pair.start_index { + return Err(error::invalid_params( + "start_index", + "start index is greater than value length", + )); + } + let start_byte_index = pair.start_index + start_index; + let end_byte_index = std::cmp::min(start_byte_index + len, pair.end_index); + let start_entry_index = start_byte_index / ENTRY_SIZE as u64; + let end_entry_index = if end_byte_index % ENTRY_SIZE as u64 == 0 { + end_byte_index / ENTRY_SIZE as u64 + } else { + end_byte_index / ENTRY_SIZE as u64 + 1 + }; + if let Some(entry_array) = self + .ctx + .store + .read() + .await + .get_chunk_by_flow_index(start_entry_index, end_entry_index - start_entry_index)? + { + return Ok(Some(KeyValueSegment { + version: pair.version, + key: pair.key, + data: entry_array.data[(start_byte_index as usize + - start_entry_index as usize * ENTRY_SIZE) + ..(end_byte_index as usize - start_entry_index as usize * ENTRY_SIZE) as usize] + .to_vec(), + size: pair.end_index - pair.start_index, + })); + } + Err(error::internal_error("key data is missing")) + } +} + +#[async_trait] +impl KeyValueRpcServer for KeyValueRpcServerImpl { + async fn get_status(&self) -> RpcResult { + debug!("kv_getStatus()"); + + Ok(true) + } + + async fn get_value( + &self, + stream_id: H256, + key: Segment, + start_index: u64, + len: u64, + version: Option, + ) -> RpcResult> { + debug!("kv_getValue()"); + + if len > self.ctx.config.max_query_len_in_bytes { + return Err(error::invalid_params("len", "query length too large")); + } + + if key.0.is_empty() { + return Err(error::invalid_params("key", "key is empty")); + } + + let before_version = version.unwrap_or(u64::MAX); + + let store_read = self.ctx.store.read().await; + if let Some(pair) = store_read + .get_stream_key_value(stream_id, Arc::new(key.0), before_version) + .await? + { + if pair.end_index == pair.start_index { + return Ok(Some(ValueSegment { + version: pair.version, + data: vec![], + size: 0, + })); + } + drop(store_read); + return self.get_value_segment(pair, start_index, len).await; + } + Ok(Some(ValueSegment { + version: 0, + data: vec![], + size: 0, + })) + } + + async fn get_next( + &self, + stream_id: H256, + key: Segment, + start_index: u64, + len: u64, + inclusive: bool, + version: Option, + ) -> RpcResult> { + debug!("kv_getNext()"); + + if len > self.ctx.config.max_query_len_in_bytes { + return Err(error::invalid_params("len", "query length too large")); + } + + if key.0.is_empty() { + return Err(error::invalid_params("key", "key is empty")); + } + + let before_version = version.unwrap_or(u64::MAX); + + let store_read = self.ctx.store.read().await; + let mut next_key = Arc::new(key.0); + while let Some(pair) = store_read + .get_next_stream_key_value(stream_id, next_key.clone(), inclusive, before_version) + .await? + { + // skip deleted keys + // TODO: resolve this in sql statements? 
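+            // A pair whose `end_index` equals its `start_index` carries no value bytes,
+            // i.e. the key has been deleted; advance the cursor to that key and keep scanning.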
+ if pair.end_index == pair.start_index { + next_key = Arc::new(pair.key); + continue; + } + drop(store_read); + return self.get_key_value_segment(pair, start_index, len).await; + } + Ok(None) + } + + async fn get_prev( + &self, + stream_id: H256, + key: Segment, + start_index: u64, + len: u64, + inclusive: bool, + version: Option, + ) -> RpcResult> { + debug!("kv_getPrev()"); + + if len > self.ctx.config.max_query_len_in_bytes { + return Err(error::invalid_params("len", "query length too large")); + } + + if key.0.is_empty() { + return Err(error::invalid_params("key", "key is empty")); + } + + let before_version = version.unwrap_or(u64::MAX); + + let store_read = self.ctx.store.read().await; + let mut next_key = Arc::new(key.0); + while let Some(pair) = store_read + .get_prev_stream_key_value(stream_id, next_key.clone(), inclusive, before_version) + .await? + { + // skip deleted keys + // TODO: resolve this in sql statements? + if pair.end_index == pair.start_index { + next_key = Arc::new(pair.key); + continue; + } + drop(store_read); + return self.get_key_value_segment(pair, start_index, len).await; + } + Ok(None) + } + + async fn get_first( + &self, + stream_id: H256, + start_index: u64, + len: u64, + version: Option, + ) -> RpcResult> { + debug!("kv_getFirst()"); + + if len > self.ctx.config.max_query_len_in_bytes { + return Err(error::invalid_params("len", "query length too large")); + } + + let before_version = version.unwrap_or(u64::MAX); + + let store_read = self.ctx.store.read().await; + + let mut result = store_read.get_first(stream_id, before_version).await?; + while let Some(pair) = result { + // skip deleted keys + // TODO: resolve this in sql statements? + if pair.end_index == pair.start_index { + result = store_read + .get_next_stream_key_value(stream_id, Arc::new(pair.key), false, before_version) + .await?; + continue; + } + drop(store_read); + return self.get_key_value_segment(pair, start_index, len).await; + } + Ok(None) + } + + async fn get_last( + &self, + stream_id: H256, + start_index: u64, + len: u64, + version: Option, + ) -> RpcResult> { + debug!("kv_getLast()"); + + if len > self.ctx.config.max_query_len_in_bytes { + return Err(error::invalid_params("len", "query length too large")); + } + + let before_version = version.unwrap_or(u64::MAX); + + let store_read = self.ctx.store.read().await; + + let mut result = store_read.get_last(stream_id, before_version).await?; + while let Some(pair) = result { + // skip deleted keys + // TODO: resolve this in sql statements? + if pair.end_index == pair.start_index { + result = store_read + .get_prev_stream_key_value(stream_id, Arc::new(pair.key), false, before_version) + .await?; + continue; + } + drop(store_read); + return self.get_key_value_segment(pair, start_index, len).await; + } + Ok(None) + } + + async fn get_trasanction_result(&self, tx_seq: u64) -> RpcResult> { + debug!("kv_getTransactionResult()"); + + Ok(self.ctx.store.read().await.get_tx_result(tx_seq).await?) + } + + async fn get_holding_stream_ids(&self) -> RpcResult> { + debug!("kv_getHoldingStreamIds()"); + + Ok(self.ctx.store.read().await.get_holding_stream_ids().await?) 
+ } + + async fn has_write_permission( + &self, + account: H160, + stream_id: H256, + key: Segment, + version: Option, + ) -> RpcResult { + debug!("kv_hasWritePermission()"); + + if key.0.is_empty() { + return Err(error::invalid_params("key", "key is empty")); + } + + let before_version = version.unwrap_or(u64::MAX); + + Ok(self + .ctx + .store + .read() + .await + .has_write_permission(account, stream_id, Arc::new(key.0), before_version) + .await?) + } + + async fn is_admin( + &self, + account: H160, + stream_id: H256, + version: Option, + ) -> RpcResult { + debug!("kv_isAdmin()"); + + let before_version = version.unwrap_or(u64::MAX); + + Ok(self + .ctx + .store + .read() + .await + .is_admin(account, stream_id, before_version) + .await?) + } + + async fn is_special_key( + &self, + stream_id: H256, + key: Segment, + version: Option, + ) -> RpcResult { + debug!("kv_isSpecialKey()"); + + if key.0.is_empty() { + return Err(error::invalid_params("key", "key is empty")); + } + + let before_version = version.unwrap_or(u64::MAX); + + Ok(self + .ctx + .store + .read() + .await + .is_special_key(stream_id, Arc::new(key.0), before_version) + .await?) + } + + async fn is_writer_of_key( + &self, + account: H160, + stream_id: H256, + key: Segment, + version: Option, + ) -> RpcResult { + debug!("kv_isWriterOfKey()"); + + if key.0.is_empty() { + return Err(error::invalid_params("key", "key is empty")); + } + + let before_version = version.unwrap_or(u64::MAX); + + Ok(self + .ctx + .store + .read() + .await + .is_writer_of_key(account, stream_id, Arc::new(key.0), before_version) + .await?) + } + + async fn is_writer_of_stream( + &self, + account: H160, + stream_id: H256, + version: Option, + ) -> RpcResult { + debug!("kv_isWriterOfStream()"); + + let before_version = version.unwrap_or(u64::MAX); + + Ok(self + .ctx + .store + .read() + .await + .is_writer_of_stream(account, stream_id, before_version) + .await?) + } +} diff --git a/node/rpc/src/kv_rpc_server/mod.rs b/node/rpc/src/kv_rpc_server/mod.rs new file mode 100644 index 0000000..84e4e64 --- /dev/null +++ b/node/rpc/src/kv_rpc_server/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod r#impl; + +pub use api::KeyValueRpcServer; +pub use r#impl::KeyValueRpcServerImpl; diff --git a/node/rpc/src/lib.rs b/node/rpc/src/lib.rs new file mode 100644 index 0000000..077cce2 --- /dev/null +++ b/node/rpc/src/lib.rs @@ -0,0 +1,55 @@ +#[macro_use] +extern crate tracing; + +mod config; +mod error; +mod kv_rpc_server; +mod types; +mod zgs_rpc_client; + +use futures::channel::mpsc::Sender; +pub use jsonrpsee::http_client::HttpClient; +use jsonrpsee::http_client::HttpClientBuilder; +use jsonrpsee::http_server::{HttpServerBuilder, HttpServerHandle}; +use kv_rpc_server::KeyValueRpcServer; +use std::error::Error; +use std::sync::Arc; +use storage_with_stream::Store; +use task_executor::ShutdownReason; +use tokio::sync::RwLock; +pub use zgs_rpc_client::ZgsRpcClient; + +pub use config::Config as RPCConfig; + +/// A wrapper around all the items required to spawn the HTTP server. +/// +/// The server will gracefully handle the case where any fields are `None`. 
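+//
+// The HTTP server started by `run_server` below serves the `kv_*` methods as plain
+// JSON-RPC. An illustrative client-side call (sketch only, assuming the generated
+// `KeyValueRpcClient` trait from `kv_rpc_server/api.rs` and the default
+// `rpc_listen_address` of 127.0.0.1:5678):
+//
+//     let client = HttpClientBuilder::default().build("http://127.0.0.1:5678")?;
+//     let ok = client.get_status().await?;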
+#[derive(Clone)] +pub struct Context { + pub config: RPCConfig, + pub shutdown_sender: Sender, + pub store: Arc>, +} + +pub fn zgs_clients(ctx: &Context) -> Result, Box> { + ctx.config + .zgs_nodes + .iter() + .map(|url| Ok(HttpClientBuilder::default().build(url)?)) + .collect() +} + +pub async fn run_server(ctx: Context) -> Result> { + let server = HttpServerBuilder::default() + .max_response_body_size(ctx.config.max_response_body_in_bytes) + .build(ctx.config.listen_address) + .await?; + + let kv = (kv_rpc_server::KeyValueRpcServerImpl { ctx: ctx.clone() }).into_rpc(); + + let addr = server.local_addr()?; + let handle = server.start(kv)?; + info!("Server started http://{}", addr); + + Ok(handle) +} diff --git a/node/rpc/src/types.rs b/node/rpc/src/types.rs new file mode 100644 index 0000000..0b1199e --- /dev/null +++ b/node/rpc/src/types.rs @@ -0,0 +1,235 @@ +use crate::error; +use jsonrpsee::core::RpcResult; +use merkle_light::{hash::Algorithm, merkle::MerkleTree}; +use merkle_tree::RawLeafSha3Algorithm; +use serde::{Deserialize, Serialize}; +use shared_types::{ + compute_padded_chunk_size, compute_segment_size, DataRoot, FileProof, Transaction, CHUNK_SIZE, +}; +use std::hash::Hasher; + +const ZERO_HASH: [u8; 32] = [ + 0xd3, 0x97, 0xb3, 0xb0, 0x43, 0xd8, 0x7f, 0xcd, 0x6f, 0xad, 0x12, 0x91, 0xff, 0xb, 0xfd, 0x16, + 0x40, 0x1c, 0x27, 0x48, 0x96, 0xd8, 0xc6, 0x3a, 0x92, 0x37, 0x27, 0xf0, 0x77, 0xb8, 0xe0, 0xb5, +]; + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Status { + pub connected_peers: usize, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct FileInfo { + pub tx: Transaction, + pub finalized: bool, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Segment(#[serde(with = "base64")] pub Vec); + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SegmentWithProof { + /// File merkle root. + pub root: DataRoot, + #[serde(with = "base64")] + /// With fixed data size except the last segment. + pub data: Vec, + /// Segment index. + pub index: usize, + /// File merkle proof whose leaf node is segment root. + pub proof: FileProof, + /// File size + pub file_size: usize, +} + +impl SegmentWithProof { + /// Splits file into segments and returns the total number of segments and the last segment size. + pub fn split_file_into_segments( + file_size: usize, + chunks_per_segment: usize, + ) -> RpcResult<(usize, usize)> { + if file_size == 0 { + return Err(error::invalid_params("file_size", "file is empty")); + } + + let segment_size = chunks_per_segment * CHUNK_SIZE; + let remaining_size = file_size % segment_size; + let mut num_segments = file_size / segment_size; + + if remaining_size == 0 { + return Ok((num_segments, segment_size)); + } + + // Otherwise, the last segment is not full. + num_segments += 1; + + let last_chunk_size = remaining_size % CHUNK_SIZE; + if last_chunk_size == 0 { + Ok((num_segments, remaining_size)) + } else { + // Padding last chunk with zeros. 
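+            // The last segment is rounded up to a whole number of chunks. For example,
+            // with CHUNK_SIZE = 256 and the default 1024 chunks per segment, a
+            // 300_000-byte file splits into 2 segments: one full 262_144-byte segment
+            // and a last segment of 37_856 bytes padded up to 37_888 bytes (148 chunks).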
+ let last_segment_size = remaining_size - last_chunk_size + CHUNK_SIZE; + Ok((num_segments, last_segment_size)) + } + } + + fn validate_data_size_and_index( + &self, + file_size: usize, + chunks_per_segment: usize, + ) -> RpcResult { + let (num_segments, last_segment_size) = + SegmentWithProof::split_file_into_segments(file_size, chunks_per_segment)?; + + if self.index >= num_segments { + return Err(error::invalid_params("index", "index out of bound")); + } + + let data_size = if self.index == num_segments - 1 { + last_segment_size + } else { + chunks_per_segment * CHUNK_SIZE + }; + + if self.data.len() != data_size { + return Err(error::invalid_params("data", "invalid data length")); + } + + Ok(num_segments) + } + + fn calculate_segment_merkle_root(&self, extend_chunk_length: usize) -> [u8; 32] { + let mut a = RawLeafSha3Algorithm::default(); + let hashes = self.data.chunks_exact(CHUNK_SIZE).map(|x| { + a.reset(); + a.write(x); + a.hash() + }); + let mut hash_data = hashes.collect::>(); + hash_data.append(&mut vec![ZERO_HASH; extend_chunk_length]); + + MerkleTree::<_, RawLeafSha3Algorithm>::new(hash_data).root() + } + + fn validate_proof(&self, num_segments: usize, expected_data_length: usize) -> RpcResult<()> { + // Validate proof data format at first. + if self.proof.path.is_empty() { + if self.proof.lemma.len() != 1 { + return Err(error::invalid_params("proof", "invalid proof")); + } + } else if self.proof.lemma.len() != self.proof.path.len() + 2 { + return Err(error::invalid_params("proof", "invalid proof")); + } + + // Calculate segment merkle root to verify proof. + let extend_chunk_length = if expected_data_length > self.data.len() { + let extend_data_length = expected_data_length - self.data.len(); + if extend_data_length % CHUNK_SIZE != 0 { + return Err(error::invalid_params("proof", "invalid data len")); + } + + extend_data_length / CHUNK_SIZE + } else { + 0 + }; + + let segment_root = self.calculate_segment_merkle_root(extend_chunk_length); + if !self + .proof + .validate(&segment_root, &self.root, self.index, num_segments)? + { + return Err(error::invalid_params("proof", "validation failed")); + } + + Ok(()) + } + + /// Validates the segment data size and proof. + pub fn validate(&self, chunks_per_segment: usize) -> RpcResult<()> { + self.validate_data_size_and_index(self.file_size, chunks_per_segment)?; + + let (chunks, _) = compute_padded_chunk_size(self.file_size); + let (segments_for_proof, last_segment_size) = + compute_segment_size(chunks, chunks_per_segment); + + let expected_data_length = if self.index == segments_for_proof - 1 { + last_segment_size * CHUNK_SIZE + } else { + chunks_per_segment * CHUNK_SIZE + }; + + debug!( + "data len: {}, expected len: {}", + self.data.len(), + expected_data_length + ); + + self.validate_proof(segments_for_proof, expected_data_length)?; + Ok(()) + } + + /// Returns the index of first chunk in the segment. 
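+    //
+    // E.g. with the default 1024 chunks per segment, segment index 2 starts at chunk 2048.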
+ #[allow(dead_code)] + pub fn chunk_index(&self, chunks_per_segment: usize) -> usize { + self.index * chunks_per_segment + } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ValueSegment { + // key version + pub version: u64, + // data + #[serde(with = "base64")] + pub data: Vec, + // value total size + pub size: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct KeyValueSegment { + // key version + pub version: u64, + // key + #[serde(with = "base64")] + pub key: Vec, + // data + #[serde(with = "base64")] + pub data: Vec, + // value total size + pub size: u64, +} + +mod base64 { + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + pub fn serialize(v: &Vec, s: S) -> Result { + let base64 = base64::encode(v); + String::serialize(&base64, s) + } + + pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result, D::Error> { + let base64 = String::deserialize(d)?; + base64::decode(base64.as_bytes()).map_err(serde::de::Error::custom) + } +} + +#[cfg(test)] +mod tests { + use super::Segment; + + #[test] + fn test_segment_serde() { + let seg = Segment("hello, world".as_bytes().to_vec()); + let result = serde_json::to_string(&seg).unwrap(); + assert_eq!(result.as_str(), "\"aGVsbG8sIHdvcmxk\""); + + let seg2: Segment = serde_json::from_str("\"aGVsbG8sIHdvcmxk\"").unwrap(); + assert_eq!(String::from_utf8(seg2.0).unwrap().as_str(), "hello, world"); + } +} diff --git a/node/rpc/src/zgs_rpc_client/api.rs b/node/rpc/src/zgs_rpc_client/api.rs new file mode 100644 index 0000000..ea4c29a --- /dev/null +++ b/node/rpc/src/zgs_rpc_client/api.rs @@ -0,0 +1,31 @@ +use crate::types::{FileInfo, Segment, SegmentWithProof, Status}; +use jsonrpsee::core::RpcResult; +use jsonrpsee::proc_macros::rpc; +use shared_types::DataRoot; + +#[rpc(client, namespace = "zgs")] +pub trait ZgsRpc { + #[method(name = "getStatus")] + async fn get_status(&self) -> RpcResult; + + #[method(name = "uploadSegment")] + async fn upload_segment(&self, segment: SegmentWithProof) -> RpcResult<()>; + + #[method(name = "downloadSegment")] + async fn download_segment( + &self, + data_root: DataRoot, + start_index: usize, + end_index: usize, + ) -> RpcResult>; + + #[method(name = "downloadSegmentWithProof")] + async fn download_segment_with_proof( + &self, + data_root: DataRoot, + index: usize, + ) -> RpcResult>; + + #[method(name = "getFileInfo")] + async fn get_file_info(&self, data_root: DataRoot) -> RpcResult>; +} diff --git a/node/rpc/src/zgs_rpc_client/mod.rs b/node/rpc/src/zgs_rpc_client/mod.rs new file mode 100644 index 0000000..3f4b21e --- /dev/null +++ b/node/rpc/src/zgs_rpc_client/mod.rs @@ -0,0 +1,3 @@ +mod api; + +pub use api::ZgsRpcClient; diff --git a/node/shared_types/Cargo.toml b/node/shared_types/Cargo.toml new file mode 100644 index 0000000..0e15572 --- /dev/null +++ b/node/shared_types/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "shared_types" +version = "0.1.0" +edition = "2021" + +[dependencies] +append_merkle = { path = "../../zerog-storage-rust/common/append_merkle" } +merkle_tree = { path = "../../zerog-storage-rust/common/merkle_tree" } +anyhow = { version = "=1.0.58", features = ["backtrace"] } +ethereum-types = "0.14" +eth2_ssz = "0.4.0" +eth2_ssz_derive = "0.3.0" +eth2_ssz_types = "0.2.1" +merkle_light = { path = "../../zerog-storage-rust/common/merkle_light" } +tiny-keccak = "2.0.2" +tracing = "0.1.35" +typenum = "1.15.0" +serde = { version = "1.0.137", features = ["derive"] } +chrono = "0.4.19" diff 
--git a/node/shared_types/src/lib.rs b/node/shared_types/src/lib.rs new file mode 100644 index 0000000..078916e --- /dev/null +++ b/node/shared_types/src/lib.rs @@ -0,0 +1,401 @@ +mod proof; + +use anyhow::bail; +use append_merkle::{ + AppendMerkleTree, Proof as RawProof, RangeProof as RawRangeProof, Sha3Algorithm, +}; +use ethereum_types::{H160, H256}; +use merkle_light::merkle::MerkleTree; +use merkle_light::proof::Proof as RawFileProof; +use merkle_light::{hash::Algorithm, merkle::next_pow2}; +use merkle_tree::RawLeafSha3Algorithm; +use serde::{Deserialize, Serialize}; +use ssz::Encode; +use ssz_derive::{Decode as DeriveDecode, Encode as DeriveEncode}; +use std::collections::HashSet; +use std::fmt; +use std::hash::Hasher; +use std::sync::Arc; +use tiny_keccak::{Hasher as KeccakHasher, Keccak}; +use tracing::debug; + +const ZERO_HASH: [u8; 32] = [ + 0xd3, 0x97, 0xb3, 0xb0, 0x43, 0xd8, 0x7f, 0xcd, 0x6f, 0xad, 0x12, 0x91, 0xff, 0xb, 0xfd, 0x16, + 0x40, 0x1c, 0x27, 0x48, 0x96, 0xd8, 0xc6, 0x3a, 0x92, 0x37, 0x27, 0xf0, 0x77, 0xb8, 0xe0, 0xb5, +]; + +/// Application level requests sent to the network. +#[derive(Debug, Clone, Copy)] +pub enum RequestId { + Router, +} + +pub type DataRoot = H256; + +pub type FlowProof = RawProof; +pub type FlowRangeProof = RawRangeProof; +pub type Merkle = AppendMerkleTree; + +// Each chunk is 32 bytes. +pub const CHUNK_SIZE: usize = 256; + +pub fn bytes_to_chunks(size_bytes: usize) -> usize { + if size_bytes % CHUNK_SIZE == 0 { + size_bytes / CHUNK_SIZE + } else { + size_bytes / CHUNK_SIZE + 1 + } +} + +pub fn compute_padded_chunk_size(size_bytes: usize) -> (usize, usize) { + let chunk_len = bytes_to_chunks(size_bytes); + let chunks_next_pow2 = next_pow2(chunk_len); + + if chunks_next_pow2 == chunk_len { + return (chunks_next_pow2, chunks_next_pow2); + } + + let min_chunk = if chunks_next_pow2 < 16 { + 1 + } else { + chunks_next_pow2 >> 4 + }; + + // chunk_len will be always greater than 0, size_byte comes from tx.size which is file size, the flow contract doesn't allowy upload 0-size file + let padded_chunks = ((chunk_len - 1) / min_chunk + 1) * min_chunk; + + (padded_chunks, chunks_next_pow2) +} + +pub fn compute_segment_size(chunks: usize, chunks_per_segment: usize) -> (usize, usize) { + if chunks % chunks_per_segment == 0 { + (chunks / chunks_per_segment, chunks_per_segment) + } else { + (chunks / chunks_per_segment + 1, chunks % chunks_per_segment) + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct Chunk(pub [u8; CHUNK_SIZE]); + +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, DeriveDecode, DeriveEncode)] +pub struct TxID { + pub seq: u64, + pub hash: H256, +} + +impl TxID { + pub fn random_hash(seq: u64) -> Self { + Self { + seq, + hash: H256::random(), + } + } +} + +#[derive(Clone, Debug, Eq, PartialEq, DeriveDecode, DeriveEncode, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct Transaction { + pub stream_ids: Vec, + pub sender: H160, + /// In-place data. 
+ pub data: Vec, + pub data_merkle_root: DataRoot, + /// `(subtree_depth, subtree_root)` + pub merkle_nodes: Vec<(usize, DataRoot)>, + + pub start_entry_index: u64, + pub size: u64, + pub seq: u64, +} + +impl Transaction { + pub fn num_entries_of_node(depth: usize) -> usize { + 1 << (depth - 1) + } + + pub fn num_entries(&self) -> usize { + self.merkle_nodes.iter().fold(0, |size, &(depth, _)| { + size + Transaction::num_entries_of_node(depth) + }) + } + + pub fn hash(&self) -> H256 { + let bytes = self.as_ssz_bytes(); + let mut h = Keccak::v256(); + let mut e = H256::zero(); + h.update(&bytes); + h.finalize(e.as_mut()); + e + } + + pub fn id(&self) -> TxID { + TxID { + seq: self.seq, + hash: self.hash(), + } + } +} + +pub struct ChunkWithProof { + pub chunk: Chunk, + pub proof: FlowProof, +} + +#[derive(Debug, Clone, PartialEq, Eq, DeriveEncode, DeriveDecode)] +pub struct ChunkArrayWithProof { + pub chunks: ChunkArray, + // TODO: The top levels of the two proofs can be merged. + pub proof: FlowRangeProof, +} + +#[derive(Clone, Eq, PartialEq, DeriveEncode, DeriveDecode)] +pub struct ChunkArray { + // The length is exactly a multiple of `CHUNK_SIZE` + pub data: Vec, + pub start_index: u64, +} + +impl fmt::Debug for ChunkArray { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "ChunkArray: start_index={} data_len={}", + self.start_index, + self.data.len() + ) + } +} + +impl ChunkArray { + pub fn first_chunk(&self) -> Option { + self.chunk_at(self.start_index as usize) + } + + pub fn last_chunk(&self) -> Option { + let last_index = + (self.start_index as usize + self.data.len() / CHUNK_SIZE).checked_sub(1)?; + self.chunk_at(last_index) + } + + pub fn chunk_at(&self, index: usize) -> Option { + if index >= self.data.len() / CHUNK_SIZE + self.start_index as usize + || index < self.start_index as usize + { + return None; + } + let offset = (index - self.start_index as usize) * CHUNK_SIZE; + Some(Chunk( + self.data[offset..offset + CHUNK_SIZE] + .try_into() + .expect("length match"), + )) + } + + pub fn sub_array(&self, start: u64, end: u64) -> Option { + if start >= (self.data.len() / CHUNK_SIZE) as u64 + self.start_index + || start < self.start_index + || end > (self.data.len() / CHUNK_SIZE) as u64 + self.start_index + || end <= self.start_index + || end <= start + { + return None; + } + let start_offset = (start - self.start_index) as usize * CHUNK_SIZE; + let end_offset = (end - self.start_index) as usize * CHUNK_SIZE; + Some(ChunkArray { + data: self.data[start_offset..end_offset].to_vec(), + start_index: start, + }) + } +} + +impl std::fmt::Display for ChunkArray { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "ChunkArray {{ chunks = {}, start_index = {} }}", + self.data.len() / CHUNK_SIZE, + self.start_index + ) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEncode, DeriveDecode, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct FileProof { + pub lemma: Vec, + pub path: Vec, +} +impl FileProof { + pub fn new(mut lemma: Vec, path: Vec) -> Self { + if path.is_empty() { + lemma.truncate(1); + } + + FileProof { lemma, path } + } + + pub fn validate( + &self, + leaf_hash: &[u8; 32], + root: &DataRoot, + position: usize, + leaf_count: usize, + ) -> anyhow::Result { + let proof_position = self.position(leaf_count)?; + if proof_position != position { + bail!( + "wrong position: proof_pos={} provided={}", + proof_position, + position + ); + } + + let proof: RawFileProof<[u8; 32]> = self.try_into()?; 
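+        // The converted merkle_light proof is verified in three steps below: the
+        // lemma/path chain itself, then the recovered root against the expected file
+        // root, and finally the proof's leaf item against the provided leaf hash.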
+ + if !proof.validate::() { + debug!("Proof validate fails"); + return Ok(false); + } + + if proof.root() != root.0 { + bail!( + "root mismatch, proof_root={:?} provided={:?}", + proof.root(), + root.0 + ); + } + + if proof.item() != *leaf_hash { + bail!( + "data hash mismatch: leaf_hash={:?} proof_item={:?}", + leaf_hash, + proof.item(), + ); + } + + Ok(true) + } + + fn position(&self, total_chunk_count: usize) -> anyhow::Result { + let mut left_chunk_count = total_chunk_count; + let mut proof_position = 0; + // TODO: After the first `is_left == true`, the tree depth is fixed. + for is_left in self.path.iter().rev() { + if left_chunk_count <= 1 { + bail!( + "Proof path too long for a tree size: path={:?}, size={}", + self.path, + total_chunk_count + ); + } + let subtree_size = next_pow2(left_chunk_count) >> 1; + if !is_left { + proof_position += subtree_size; + left_chunk_count -= subtree_size; + } else { + left_chunk_count = subtree_size; + } + } + if left_chunk_count != 1 { + bail!( + "Proof path too short for a tree size: path={:?}, size={}", + self.path, + total_chunk_count + ); + } + Ok(proof_position) + } +} + +#[derive(Debug)] +pub struct StreamRead { + pub stream_id: H256, + pub key: Arc>, +} + +#[derive(Debug)] +pub struct StreamReadSet { + pub stream_reads: Vec, +} + +#[derive(Debug)] +pub struct StreamWrite { + pub stream_id: H256, + pub key: Arc>, + // start index in bytes + pub start_index: u64, + // end index in bytes + pub end_index: u64, +} + +#[derive(Debug)] +pub struct StreamWriteSet { + pub stream_writes: Vec, +} + +#[derive(Debug)] +pub struct AccessControlSet { + pub access_controls: Vec, + pub is_admin: HashSet, +} + +#[derive(Debug)] +pub struct AccessControl { + pub op_type: u8, + pub stream_id: H256, + pub key: Arc>, + pub account: H160, + pub operator: H160, +} + +#[derive(Debug)] +pub struct KeyValuePair { + pub stream_id: H256, + pub key: Vec, + pub start_index: u64, + pub end_index: u64, + pub version: u64, +} + +impl TryFrom<&FileProof> for RawFileProof<[u8; 32]> { + type Error = anyhow::Error; + + fn try_from(value: &FileProof) -> std::result::Result { + if (value.lemma.len() == 1 && value.path.is_empty()) + || (value.lemma.len() > 2 && value.lemma.len() == value.path.len() + 2) + { + Ok(RawFileProof::<[u8; 32]>::new( + value.lemma.iter().map(|e| e.0).collect(), + value.path.clone(), + )) + } else { + bail!("Invalid proof: proof={:?}", value) + } + } +} + +pub fn timestamp_now() -> u32 { + let timestamp = chrono::Utc::now().timestamp(); + u32::try_from(timestamp).expect("The year is between 1970 and 2106") +} + +pub fn compute_segment_merkle_root(data: &[u8], segment_chunks: usize) -> [u8; 32] { + let mut a = RawLeafSha3Algorithm::default(); + let mut hashes: Vec<[u8; 32]> = data + .chunks_exact(CHUNK_SIZE) + .map(|x| { + a.reset(); + a.write(x); + a.hash() + }) + .collect(); + + let num_chunks = data.len() / CHUNK_SIZE; + if num_chunks < segment_chunks { + hashes.append(&mut vec![ZERO_HASH; segment_chunks - num_chunks]); + } + + MerkleTree::<_, RawLeafSha3Algorithm>::new(hashes).root() +} diff --git a/node/shared_types/src/proof.rs b/node/shared_types/src/proof.rs new file mode 100644 index 0000000..e2c8ea3 --- /dev/null +++ b/node/shared_types/src/proof.rs @@ -0,0 +1,220 @@ +use crate::{compute_segment_merkle_root, ChunkArrayWithProof, FileProof, Transaction, CHUNK_SIZE}; +use anyhow::{bail, Result}; +use append_merkle::{Algorithm, Sha3Algorithm}; +use ethereum_types::H256; +use merkle_light::merkle::log2_pow2; + +enum NodeProofLeaf { + /// Segment in a 
single sub-tree. + Full { node_depth: usize }, + + /// Segment is not full and is made up of one or multiple sub-trees + /// that smaller than a full segment. + Partial, +} + +/// NodeProof represents a merkle proof from submission node to file root. +struct NodeProof { + proof: FileProof, + leaf: NodeProofLeaf, +} + +impl NodeProof { + fn compute_segment_proof( + self, + segment: &ChunkArrayWithProof, + chunks_per_segment: usize, + ) -> Result { + let mut node_depth = match self.leaf { + // In this case, some proof pathes missed between segment + // root and submission node, which could be retrieved from + // the flow proof. + NodeProofLeaf::Full { node_depth } => node_depth, + + // Client could compute the segment root via returned + // segment data, and use the NodeProof to validate. + NodeProofLeaf::Partial => return Ok(self.proof), + }; + + // node depth in Transaction is by number of nodes, + // here we use depth by number of edges. + node_depth -= 1; + + self.compute_segment_proof_full(node_depth, segment, chunks_per_segment) + } + + fn compute_segment_proof_full( + self, + node_depth: usize, // node depth by edge + segment: &ChunkArrayWithProof, + chunks_per_segment: usize, + ) -> Result { + // when segment equals to sub-tree, just return the proof directly + let segment_depth = log2_pow2(chunks_per_segment); + if node_depth == segment_depth { + return Ok(self.proof); + } + + // otherwise, segment should be smaller than sub-tree + assert!(node_depth > segment_depth); + + // flow tree should not be smaller than any sub-tree + let flow_proof_path_len = segment.proof.left_proof.path().len(); + if flow_proof_path_len < node_depth { + bail!( + "flow proof path too small, path_len = {}, node_depth = {}", + flow_proof_path_len, + node_depth + ); + } + + // segment root as proof leaf + let segment_root: H256 = + compute_segment_merkle_root(&segment.chunks.data, chunks_per_segment).into(); + let mut lemma = vec![segment_root]; + let mut path = vec![]; + + // copy from flow proof + for i in segment_depth..node_depth { + lemma.push(segment.proof.left_proof.lemma()[i + 1]); + path.push(segment.proof.left_proof.path()[i]); + } + + // combine with node proof + if self.proof.path.is_empty() { + // append file root only + lemma.push(self.proof.lemma[0]); + } else { + // append lemma/path and file root, and ignore the sub-tree root + // which could be constructed via proof. + lemma.extend_from_slice(&self.proof.lemma[1..]); + path.extend_from_slice(&self.proof.path); + } + + Ok(FileProof::new(lemma, path)) + } +} + +impl Transaction { + /// Computes file merkle proof for the specified segment. + /// + /// The leaf of proof is segment root, and root of proof is file merkle root. 
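+    //
+    // Illustrative call site (variable names hypothetical): pair a downloaded segment
+    // and its flow range proof with the owning transaction, e.g.
+    //
+    //     let file_proof = tx.compute_segment_proof(&chunks_with_proof, chunks_per_segment)?;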
+ pub fn compute_segment_proof( + &self, + segment: &ChunkArrayWithProof, + chunks_per_segment: usize, + ) -> Result { + // validate index + let chunk_start_index = segment.chunks.start_index as usize; + if chunk_start_index % chunks_per_segment != 0 { + bail!("start index not aligned"); + } + + let total_entries = self.num_entries(); + if chunk_start_index >= total_entries { + bail!("start index out of bound"); + } + + let data_len = segment.chunks.data.len(); + if chunk_start_index + data_len / CHUNK_SIZE > total_entries { + bail!("end index out of bound"); + } + + // compute merkle proof from node root to file root + let node_proof = self.compute_node_proof(chunk_start_index, chunks_per_segment); + + node_proof.compute_segment_proof(segment, chunks_per_segment) + } + + fn compute_node_proof(&self, chunk_start_index: usize, chunks_per_segment: usize) -> NodeProof { + // construct `lemma` in the order: root -> interier nodes -> leaf, + // and reverse the `lemma` and `path` to create proof. + let mut lemma = vec![self.data_merkle_root]; + let mut path = vec![]; + + // Basically, a segment should be in a sub-tree except the last segment. + // As for the last segment, it should be also in a sub-tree if the smallest + // sub-tree is bigger than a segment. Otherwise, segment is made up of + // all sub-trees that smaller than a segment. + + // try to find a single node for segment + let mut node = None; + let mut total_chunks = 0; + for (depth, root) in self.merkle_nodes.iter().cloned() { + let node_chunks = Transaction::num_entries_of_node(depth); + + // ignore nodes that smaller than a segment + if node_chunks < chunks_per_segment { + break; + } + + total_chunks += node_chunks; + + // segment in a single node + if chunk_start_index < total_chunks { + node = Some((depth, root)); + break; + } + + // otherwise, segment not in current node + lemma.push(root); + path.push(false); + } + + let leaf = match node { + // segment in a single node + Some((depth, root)) => { + // append lemma and path if right sibling exists + if let Some(right_root) = self.compute_merkle_root(depth) { + lemma.push(right_root); + path.push(true); + } + + // append leaf + lemma.push(root); + + NodeProofLeaf::Full { node_depth: depth } + } + + // segment is made up of multiple nodes + None => { + let segment_depth = log2_pow2(chunks_per_segment) + 1; + let right_root = self + .compute_merkle_root(segment_depth) + .expect("merkle root should be exists"); + + // append leaf + lemma.push(right_root); + + NodeProofLeaf::Partial + } + }; + + // change to bottom-top order: leaf -> (interiers) -> root + lemma.reverse(); + path.reverse(); + + NodeProof { + proof: FileProof::new(lemma, path), + leaf, + } + } + + /// Computes the merkle root of nodes that of depth less than `max_depth_exclusive`. 
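+    //
+    // The trailing (shallowest) entries of `merkle_nodes` are folded right to left with
+    // `Sha3Algorithm::parent` until a node of depth `max_depth_exclusive` or more is
+    // reached; `None` is returned if even the last node is already that deep.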
+ fn compute_merkle_root(&self, max_depth_exclusive: usize) -> Option { + let (depth, mut root) = self.merkle_nodes.last().cloned()?; + if depth >= max_depth_exclusive { + return None; + } + + for (node_depth, node_root) in self.merkle_nodes.iter().rev().skip(1) { + if *node_depth >= max_depth_exclusive { + break; + } + + root = Sha3Algorithm::parent(node_root, &root); + } + + Some(root) + } +} diff --git a/node/src/cli/mod.rs b/node/src/cli/mod.rs new file mode 100644 index 0000000..804ec59 --- /dev/null +++ b/node/src/cli/mod.rs @@ -0,0 +1,7 @@ +use clap::{arg, command, Command}; + +pub fn cli_app<'a>() -> Command<'a> { + command!() + .arg(arg!(-c --config "Sets a custom config file")) + .allow_external_subcommands(true) +} diff --git a/node/src/client/builder.rs b/node/src/client/builder.rs new file mode 100644 index 0000000..693173e --- /dev/null +++ b/node/src/client/builder.rs @@ -0,0 +1,135 @@ +use super::{Client, RuntimeContext}; +use log_entry_sync::{LogSyncConfig, LogSyncEvent, LogSyncManager}; +use rpc::HttpClient; +use rpc::RPCConfig; +use std::sync::Arc; +use storage_with_stream::log_store::log_manager::LogConfig; +use storage_with_stream::Store; +use storage_with_stream::{StorageConfig, StoreManager}; +use stream::{StreamConfig, StreamManager}; +use tokio::sync::broadcast; +use tokio::sync::RwLock; + +macro_rules! require { + ($component:expr, $self:ident, $e:ident) => { + $self + .$e + .as_ref() + .ok_or(format!("{} requires {}", $component, std::stringify!($e)))? + }; +} + +struct LogSyncComponents { + send: broadcast::Sender, +} + +/// Builds a `Client` instance. +/// +/// ## Notes +/// +/// The builder may start some services (e.g.., libp2p, http server) immediately after they are +/// initialized, _before_ the `self.build(..)` method has been called. +#[derive(Default)] +pub struct ClientBuilder { + runtime_context: Option, + store: Option>>, + zgs_clients: Option>, + log_sync: Option, +} + +impl ClientBuilder { + /// Specifies the runtime context (tokio executor, logger, etc) for client services. + pub fn with_runtime_context(mut self, context: RuntimeContext) -> Self { + self.runtime_context = Some(context); + self + } + + /// Initializes in-memory storage. + pub async fn with_memory_store(mut self) -> Result { + // TODO(zz): Set config. + let store = Arc::new(RwLock::new( + StoreManager::memorydb(LogConfig::default()) + .await + .map_err(|e| format!("Unable to start in-memory store: {:?}", e))?, + )); + + self.store = Some(store); + + Ok(self) + } + + /// Initializes RocksDB storage. 
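+    //
+    // Both the log database directory (`db_dir`) and the key-value database file
+    // (`kv_db_file`) come from `StorageConfig`; see `ZgsKVConfig::storage_config`.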
+ pub async fn with_rocksdb_store(mut self, config: &StorageConfig) -> Result { + let store = Arc::new(RwLock::new( + StoreManager::rocks_db( + LogConfig::default(), + &config.log_config.db_dir, + &config.kv_db_file, + ) + .await + .map_err(|e| format!("Unable to start RocksDB store: {:?}", e))?, + )); + + self.store = Some(store); + + Ok(self) + } + + pub async fn with_rpc(mut self, rpc_config: RPCConfig) -> Result { + if !rpc_config.enabled { + return Ok(self); + } + + let executor = require!("rpc", self, runtime_context).clone().executor; + let store = require!("stream", self, store).clone(); + + let ctx = rpc::Context { + config: rpc_config, + shutdown_sender: executor.shutdown_sender(), + store, + }; + + self.zgs_clients = Some( + rpc::zgs_clients(&ctx).map_err(|e| format!("Unable to create rpc client: {:?}", e))?, + ); + + let rpc_handle = rpc::run_server(ctx) + .await + .map_err(|e| format!("Unable to start HTTP RPC server: {:?}", e))?; + + executor.spawn(rpc_handle, "rpc"); + + Ok(self) + } + + pub async fn with_stream(self, config: &StreamConfig) -> Result { + let executor = require!("stream", self, runtime_context).clone().executor; + let store = require!("stream", self, store).clone(); + let zgs_clients = require!("stream", self, zgs_clients).clone(); + let (stream_data_fetcher, stream_replayer) = + StreamManager::initialize(config, store, zgs_clients, executor.clone()) + .await + .map_err(|e| e.to_string())?; + StreamManager::spawn(stream_data_fetcher, stream_replayer, executor) + .map_err(|e| e.to_string())?; + Ok(self) + } + + pub async fn with_log_sync(mut self, config: LogSyncConfig) -> Result { + let executor = require!("log_sync", self, runtime_context).clone().executor; + let store = require!("log_sync", self, store).clone(); + let send = LogSyncManager::spawn(config, executor, store) + .await + .map_err(|e| e.to_string())?; + self.log_sync = Some(LogSyncComponents { send }); + Ok(self) + } + + /// Consumes the builder, returning a `Client` if all necessary components have been + /// specified. + pub fn build(self) -> Result { + require!("client", self, runtime_context); + + Ok(Client {}) + } +} diff --git a/node/src/client/environment.rs b/node/src/client/environment.rs new file mode 100644 index 0000000..a484a98 --- /dev/null +++ b/node/src/client/environment.rs @@ -0,0 +1,253 @@ +//! This crate aims to provide a common set of tools that can be used to create a "environment" to +//! run Zgs services. This allows for the unification of creating tokio runtimes, etc. +//! +//! The idea is that the main thread creates an `Environment`, which is then used to spawn a +//! `Context` which can be handed to any service that wishes to start async tasks. + +use futures::channel::mpsc::{channel, Receiver, Sender}; +use futures::{future, StreamExt}; +use std::sync::Arc; +use task_executor::{ShutdownReason, TaskExecutor}; +use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; + +#[cfg(target_family = "unix")] +use { + futures::Future, + std::{pin::Pin, task::Context, task::Poll}, + tokio::signal::unix::{signal, Signal, SignalKind}, +}; + +#[cfg(not(target_family = "unix"))] +use {futures::channel::oneshot, std::cell::RefCell}; + +/// The maximum time in seconds the client will wait for all internal tasks to shutdown. +const MAXIMUM_SHUTDOWN_TIME: u64 = 15; + +/// Builds an `Environment`. 
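+//
+// The expected construction flow (as in `main.rs`) is:
+//
+//     let mut environment = EnvironmentBuilder::new()
+//         .multi_threaded_tokio_runtime()?
+//         .build()?;
+//     let context = environment.core_context();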
+pub struct EnvironmentBuilder { + runtime: Option>, +} + +impl EnvironmentBuilder { + pub fn new() -> Self { + Self { runtime: None } + } +} + +impl EnvironmentBuilder { + /// Specifies that a multi-threaded tokio runtime should be used. Ideal for production uses. + /// + /// The `Runtime` used is just the standard tokio runtime. + pub fn multi_threaded_tokio_runtime(mut self) -> Result { + self.runtime = Some(Arc::new( + RuntimeBuilder::new_multi_thread() + .enable_all() + .build() + .map_err(|e| format!("Failed to start runtime: {:?}", e))?, + )); + Ok(self) + } + + /// Consumes the builder, returning an `Environment`. + pub fn build(self) -> Result { + let (signal, exit) = exit_future::signal(); + let (signal_tx, signal_rx) = channel(1); + Ok(Environment { + runtime: self + .runtime + .ok_or("Cannot build environment without runtime")?, + signal_tx, + signal_rx: Some(signal_rx), + signal: Some(signal), + exit, + }) + } +} + +/// An execution context that can be used by a service. +/// +/// Distinct from an `Environment` because a `Context` is not able to give a mutable reference to a +/// `Runtime`, instead it only has access to a `Runtime`. +#[derive(Clone)] +pub struct RuntimeContext { + pub executor: TaskExecutor, +} + +/// An environment where Zgs services can run. +pub struct Environment { + runtime: Arc, + + /// Receiver side of an internal shutdown signal. + signal_rx: Option>, + + /// Sender to request shutting down. + signal_tx: Sender, + signal: Option, + exit: exit_future::Exit, +} + +impl Environment { + /// Returns a mutable reference to the `tokio` runtime. + /// + /// Useful in the rare scenarios where it's necessary to block the current thread until a task + /// is finished (e.g., during testing). + pub fn runtime(&self) -> &Arc { + &self.runtime + } + + /// Returns a `Context`. + pub fn core_context(&mut self) -> RuntimeContext { + RuntimeContext { + executor: TaskExecutor::new( + self.runtime().handle().clone(), + self.exit.clone(), + self.signal_tx.clone(), + ), + } + } + + /// Block the current thread until a shutdown signal is received. + /// + /// This can be either the user Ctrl-C'ing or a task requesting to shutdown. 
+ #[cfg(target_family = "unix")] + pub fn block_until_shutdown_requested(&mut self) -> Result { + // future of a task requesting to shutdown + let mut rx = self + .signal_rx + .take() + .ok_or("Inner shutdown already received")?; + let inner_shutdown = + async move { rx.next().await.ok_or("Internal shutdown channel exhausted") }; + futures::pin_mut!(inner_shutdown); + + match self.runtime().block_on(async { + let mut handles = vec![]; + + // setup for handling SIGTERM + match signal(SignalKind::terminate()) { + Ok(terminate_stream) => { + let terminate = SignalFuture::new(terminate_stream, "Received SIGTERM"); + handles.push(terminate); + } + Err(e) => error!(error = %e, "Could not register SIGTERM handler"), + }; + + // setup for handling SIGINT + match signal(SignalKind::interrupt()) { + Ok(interrupt_stream) => { + let interrupt = SignalFuture::new(interrupt_stream, "Received SIGINT"); + handles.push(interrupt); + } + Err(e) => error!(error = %e, "Could not register SIGINT handler"), + } + + // setup for handling a SIGHUP + match signal(SignalKind::hangup()) { + Ok(hup_stream) => { + let hup = SignalFuture::new(hup_stream, "Received SIGHUP"); + handles.push(hup); + } + Err(e) => error!(error = %e, "Could not register SIGHUP handler"), + } + + future::select(inner_shutdown, future::select_all(handles.into_iter())).await + }) { + future::Either::Left((Ok(reason), _)) => { + info!(reason = reason.message(), "Internal shutdown received"); + Ok(reason) + } + future::Either::Left((Err(e), _)) => Err(e.into()), + future::Either::Right(((res, _, _), _)) => { + res.ok_or_else(|| "Handler channel closed".to_string()) + } + } + } + + /// Block the current thread until a shutdown signal is received. + /// + /// This can be either the user Ctrl-C'ing or a task requesting to shutdown. + #[cfg(not(target_family = "unix"))] + pub fn block_until_shutdown_requested(&mut self) -> Result { + // future of a task requesting to shutdown + let mut rx = self + .signal_rx + .take() + .ok_or("Inner shutdown already received")?; + let inner_shutdown = + async move { rx.next().await.ok_or("Internal shutdown channel exhausted") }; + futures::pin_mut!(inner_shutdown); + + // setup for handling a Ctrl-C + let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); + let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); + ctrlc::set_handler(move || { + if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { + if let Err(e) = ctrlc_send.send(()) { + error!("Error sending ctrl-c message: {:?}", e); + } + } + }) + .map_err(|e| format!("Could not set ctrlc handler: {:?}", e))?; + + // Block this thread until a shutdown signal is received. + match self + .runtime() + .block_on(future::select(inner_shutdown, ctrlc_oneshot)) + { + future::Either::Left((Ok(reason), _)) => { + info!(reasion = reason.message(), "Internal shutdown received"); + Ok(reason) + } + future::Either::Left((Err(e), _)) => Err(e.into()), + future::Either::Right((x, _)) => x + .map(|()| ShutdownReason::Success("Received Ctrl+C")) + .map_err(|e| format!("Ctrlc oneshot failed: {}", e)), + } + } + + /// Shutdown the `tokio` runtime when all tasks are idle. 
+ pub fn shutdown_on_idle(self) { + match Arc::try_unwrap(self.runtime) { + Ok(runtime) => { + runtime.shutdown_timeout(std::time::Duration::from_secs(MAXIMUM_SHUTDOWN_TIME)) + } + Err(e) => warn!( + error = ?e, + "Failed to obtain runtime access to shutdown gracefully", + ), + } + } + + /// Fire exit signal which shuts down all spawned services + pub fn fire_signal(&mut self) { + if let Some(signal) = self.signal.take() { + let _ = signal.fire(); + } + } +} + +#[cfg(target_family = "unix")] +struct SignalFuture { + signal: Signal, + message: &'static str, +} + +#[cfg(target_family = "unix")] +impl SignalFuture { + pub fn new(signal: Signal, message: &'static str) -> SignalFuture { + SignalFuture { signal, message } + } +} + +#[cfg(target_family = "unix")] +impl Future for SignalFuture { + type Output = Option; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.signal.poll_recv(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(Some(_)) => Poll::Ready(Some(ShutdownReason::Success(self.message))), + Poll::Ready(None) => Poll::Ready(None), + } + } +} diff --git a/node/src/client/mod.rs b/node/src/client/mod.rs new file mode 100644 index 0000000..93b6c14 --- /dev/null +++ b/node/src/client/mod.rs @@ -0,0 +1,12 @@ +#![allow(dead_code)] + +mod builder; +mod environment; + +pub use builder::ClientBuilder; +pub use environment::{Environment, EnvironmentBuilder, RuntimeContext}; + +/// The core Zgs client. +/// +/// Holds references to running services, cleanly shutting them down when dropped. +pub struct Client; diff --git a/node/src/config/config_macro.rs b/node/src/config/config_macro.rs new file mode 100644 index 0000000..26ae965 --- /dev/null +++ b/node/src/config/config_macro.rs @@ -0,0 +1,86 @@ +// Copyright 2019 Conflux Foundation. All rights reserved. +// Conflux is free software and distributed under GNU General Public License. +// See http://www.gnu.org/licenses/ + +macro_rules! if_option { + (Option<$type:ty>, THEN {$($then:tt)*} ELSE {$($otherwise:tt)*}) => ( + $($then)* + ); + ($type:ty, THEN {$($then:tt)*} ELSE {$($otherwise:tt)*}) => ( + $($otherwise)* + ); +} + +macro_rules! if_not_vector { + (Vec<$type:ty>, THEN {$($then:tt)*}) => ( + {} + ); + ($type:ty, THEN {$($then:tt)*}) => ( + $($then)* + ); +} + +macro_rules! underscore_to_hyphen { + ($e:expr) => { + str::replace($e, "_", "-") + }; +} + +macro_rules! build_config{ + ($(($name:ident, ($($type:tt)+), $default:expr))*) => { + #[derive(Debug, PartialEq, Eq, Clone)] + pub struct RawConfiguration { + $(pub $name: $($type)+,)* + } + + impl Default for RawConfiguration { + fn default() -> Self { + RawConfiguration { + $($name: $default,)* + } + } + } + + impl RawConfiguration { + // First parse arguments from config file, and then parse them from command line. + // Replace the ones from config file with the ones from commandline for duplicates. + pub fn parse(matches: &clap::ArgMatches) -> Result { + let mut config = RawConfiguration::default(); + + // read from config file + if let Some(config_file) = matches.value_of("config") { + let config_value = std::fs::read_to_string(config_file) + .map_err(|e| format!("failed to read configuration file: {:?}", e))? + .parse::() + .map_err(|e| format!("failed to parse configuration file: {:?}", e))?; + + $( + if let Some(value) = config_value.get(stringify!($name)) { + config.$name = if_option!($($type)+, + THEN { Some(value.clone().try_into().map_err(|e| format!("Invalid {}: err={:?}", stringify!($name), e).to_owned())?) 
} + ELSE { value.clone().try_into().map_err(|e| format!("Invalid {}: err={:?}", stringify!($name), e).to_owned())? } + ); + } + )* + } + + // read from command line + $( + #[allow(unused_variables)] + if let Some(value) = matches.value_of(underscore_to_hyphen!(stringify!($name))) { + if_not_vector!($($type)+, THEN { + config.$name = if_option!($($type)+, + THEN{ Some(value.parse().map_err(|_| concat!("Invalid ", stringify!($name)).to_owned())?) } + ELSE{ value.parse().map_err(|_| concat!("Invalid ", stringify!($name)).to_owned())? } + )} + ) + } + )* + + Ok(config) + } + } + } +} + +pub(crate) use {build_config, if_not_vector, if_option, underscore_to_hyphen}; diff --git a/node/src/config/convert.rs b/node/src/config/convert.rs new file mode 100644 index 0000000..2616344 --- /dev/null +++ b/node/src/config/convert.rs @@ -0,0 +1,100 @@ +#![allow(clippy::field_reassign_with_default)] + +use std::{collections::HashSet, str::FromStr}; + +use crate::ZgsKVConfig; +use ethereum_types::H256; +use http::Uri; +use log_entry_sync::{CacheConfig, ContractAddress, LogSyncConfig}; +use rpc::RPCConfig; +use storage_with_stream::{LogStorageConfig, StorageConfig}; +use stream::StreamConfig; + +impl ZgsKVConfig { + pub fn storage_config(&self) -> Result { + Ok(StorageConfig { + log_config: LogStorageConfig { + db_dir: self.db_dir.clone().into(), + }, + kv_db_file: self.kv_db_file.clone().into(), + }) + } + + pub fn stream_config(&self) -> Result { + let mut stream_ids: Vec = vec![]; + for id in &self.stream_ids { + stream_ids.push( + H256::from_str(id) + .map_err(|e| format!("Unable to parse stream id: {:?}, error: {:?}", id, e))?, + ); + } + stream_ids.sort(); + stream_ids.dedup(); + if stream_ids.is_empty() { + error!("{}", format!("stream ids is empty")) + } + let stream_set = HashSet::from_iter(stream_ids.iter().cloned()); + Ok(StreamConfig { + stream_ids, + stream_set, + }) + } + + pub fn rpc_config(&self) -> Result { + let listen_address = self + .rpc_listen_address + .parse::() + .map_err(|e| format!("Unable to parse rpc_listen_address: {:?}", e))?; + + Ok(RPCConfig { + enabled: self.rpc_enabled, + listen_address, + chunks_per_segment: self.rpc_chunks_per_segment, + zgs_nodes: to_zgs_nodes(self.zgs_node_urls.clone()) + .map_err(|e| format!("failed to parse zgs_node_urls: {}", e))?, + max_query_len_in_bytes: self.max_query_len_in_bytes, + max_response_body_in_bytes: self.max_response_body_in_bytes, + }) + } + + pub fn log_sync_config(&self) -> Result { + let contract_address = self + .log_contract_address + .parse::() + .map_err(|e| format!("Unable to parse log_contract_address: {:?}", e))?; + let cache_config = CacheConfig { + // 100 MB. + max_data_size: self.max_cache_data_size, + // This should be enough if we have about one Zgs tx per block. 
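+            // `cache_tx_seq_ttl` (default 500) roughly bounds how long reverted data stays
+            // in the in-memory cache, measured in transaction sequence numbers.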
+ tx_seq_ttl: self.cache_tx_seq_ttl, + }; + Ok(LogSyncConfig::new( + self.blockchain_rpc_endpoint.clone(), + contract_address, + self.log_sync_start_block_number, + self.confirmation_block_count, + cache_config, + self.log_page_size, + self.rate_limit_retries, + self.timeout_retries, + self.initial_backoff, + self.recover_query_delay, + )) + } +} + +pub fn to_zgs_nodes(zgs_node_urls: String) -> Result, String> { + if zgs_node_urls.is_empty() { + return Err("zgs_node_urls is empty".to_string()); + } + + zgs_node_urls + .split(',') + .map(|url| { + url.parse::() + .map_err(|e| format!("Invalid URL: {}", e))?; + + Ok(url.to_owned()) + }) + .collect() +} diff --git a/node/src/config/mod.rs b/node/src/config/mod.rs new file mode 100644 index 0000000..1a0390e --- /dev/null +++ b/node/src/config/mod.rs @@ -0,0 +1,60 @@ +mod config_macro; + +mod convert; +use config_macro::*; +use std::ops::Deref; + +build_config! { + // stream + (stream_ids, (Vec), vec![]) + + // log sync + (blockchain_rpc_endpoint, (String), "http://127.0.0.1:8545".to_string()) + (log_contract_address, (String), "".to_string()) + (log_sync_start_block_number, (u64), 0) + (confirmation_block_count, (u64), 12) + (log_page_size, (u64), 1000) + (max_cache_data_size, (usize), 100 * 1024 * 1024) // 100 MB + (cache_tx_seq_ttl, (usize), 500) + + (rate_limit_retries, (u32), 100) + (timeout_retries, (u32), 100) + (initial_backoff, (u64), 500) + (recover_query_delay, (u64), 50) + + // rpc + (rpc_enabled, (bool), true) + (rpc_listen_address, (String), "127.0.0.1:5678".to_string()) + (rpc_chunks_per_segment, (usize), 1024) + (zgs_node_urls, (String), "http://127.0.0.1:5678".to_string()) + (max_query_len_in_bytes, (u64), 1024 * 256) // 256 KB + (max_response_body_in_bytes, (u32), 1024 * 1024 * 30) // 30MB + + // db + (db_dir, (String), "db".to_string()) + (kv_db_file, (String), "kv.DB".to_string()) + + // misc + (log_config_file, (String), "log_config".to_string()) +} + +#[derive(Debug)] +pub struct ZgsKVConfig { + pub raw_conf: RawConfiguration, +} + +impl Deref for ZgsKVConfig { + type Target = RawConfiguration; + + fn deref(&self) -> &Self::Target { + &self.raw_conf + } +} + +impl ZgsKVConfig { + pub fn parse(matches: &clap::ArgMatches) -> Result { + Ok(ZgsKVConfig { + raw_conf: RawConfiguration::parse(matches)?, + }) + } +} diff --git a/node/src/log.rs b/node/src/log.rs new file mode 100644 index 0000000..81df978 --- /dev/null +++ b/node/src/log.rs @@ -0,0 +1,55 @@ +use task_executor::TaskExecutor; +use tracing::Level; +use tracing_subscriber::EnvFilter; + +const LOG_RELOAD_PERIOD_SEC: u64 = 30; + +pub fn configure(logfile: &str, executor: TaskExecutor) { + let builder = tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .with_env_filter(EnvFilter::default()) + // .with_file(true) + // .with_line_number(true) + // .with_thread_names(true) + .with_filter_reloading(); + + let handle = builder.reload_handle(); + builder.init(); + + let logfile = logfile.to_string(); + + // load config synchronously + let mut config = std::fs::read_to_string(&logfile).unwrap_or_default(); + let _ = handle.reload(&config); + + // periodically check for config changes + executor.spawn( + async move { + let mut interval = + tokio::time::interval(std::time::Duration::from_secs(LOG_RELOAD_PERIOD_SEC)); + + loop { + interval.tick().await; + + let new_config = match tokio::fs::read_to_string(&logfile).await { + Ok(c) if c == config => continue, + Ok(c) => c, + Err(e) => { + println!("Unable to read log file {}: {:?}", logfile, e); + continue; + } + }; + + 
println!("Updating log config to {:?}", new_config); + + match handle.reload(&new_config) { + Ok(()) => config = new_config, + Err(e) => { + println!("Failed to load new config: {:?}", e); + } + } + } + }, + "log_reload", + ); +} diff --git a/node/src/main.rs b/node/src/main.rs new file mode 100644 index 0000000..86051d2 --- /dev/null +++ b/node/src/main.rs @@ -0,0 +1,83 @@ +#[macro_use] +extern crate tracing; + +mod cli; +mod client; +mod config; +mod log; + +use client::{Client, ClientBuilder, RuntimeContext}; +use config::ZgsKVConfig; +use std::error::Error; + +async fn start_node(context: RuntimeContext, config: ZgsKVConfig) -> Result { + let storage_config = config.storage_config()?; + let rpc_config = config.rpc_config()?; + let log_sync_config = config.log_sync_config()?; + let stream_config = config.stream_config()?; + + ClientBuilder::default() + .with_runtime_context(context) + .with_rocksdb_store(&storage_config) + .await? + .with_rpc(rpc_config) + .await? + .with_stream(&stream_config) + .await? + .with_log_sync(log_sync_config) + .await? + .build() +} + +fn main() -> Result<(), Box> { + // enable backtraces + std::env::set_var("RUST_BACKTRACE", "1"); + + // runtime environment + let mut environment = client::EnvironmentBuilder::new() + .multi_threaded_tokio_runtime()? + .build()?; + + let context = environment.core_context(); + let executor = context.executor.clone(); + + // CLI, config, and logs + let matches = cli::cli_app().get_matches(); + let config = ZgsKVConfig::parse(&matches)?; + log::configure(&config.log_config_file, executor.clone()); + + // start services + executor.clone().spawn( + async move { + info!("Starting services..."); + if let Err(e) = start_node(context.clone(), config).await { + error!(reason = %e, "Failed to start zgs kv node"); + // Ignore the error since it always occurs during normal operation when + // shutting down. + let _ = + executor + .shutdown_sender() + .try_send(task_executor::ShutdownReason::Failure( + "Failed to start zgs kv node", + )); + } else { + info!("Services started"); + } + }, + "zgs_kv_node", + ); + + // Block this thread until we get a ctrl-c or a task sends a shutdown signal. + let shutdown_reason = environment.block_until_shutdown_requested()?; + info!(reason = ?shutdown_reason, "Shutting down..."); + + environment.fire_signal(); + + // Shutdown the environment once all tasks have completed. 
+ environment.shutdown_on_idle(); + + match shutdown_reason { + task_executor::ShutdownReason::Success(_) => Ok(()), + task_executor::ShutdownReason::Failure(msg) => Err(msg.to_string().into()), + } +} diff --git a/node/storage/Cargo.toml b/node/storage/Cargo.toml new file mode 100644 index 0000000..cd3d6df --- /dev/null +++ b/node/storage/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "storage" +version = "0.1.0" +edition = "2021" + +[dependencies] +anyhow = { version = "=1.0.58", features = ["backtrace"] } +append_merkle = { path = "../../zerog-storage-rust/common/append_merkle" } +zgs_spec = { path = "../../zerog-storage-rust/common/spec" } +zgs_seal = { path = "../../zerog-storage-rust/common/zgs_seal" } +eth2_ssz = "0.4.0" +eth2_ssz_derive = "0.3.0" +ethereum-types = "0.14" +hex = "0.4.3" +kvdb = "0.13.0" +kvdb-memorydb = "0.13.0" +kvdb-rocksdb = "0.19.0" +#merkle_light = {git = "https://github.com/sitano/merkle_light.git", rev = "fe31d4e" } +merkle_light = { path = "../../zerog-storage-rust/common/merkle_light" } +merkle_tree = { path = "../../zerog-storage-rust/common/merkle_tree" } +rayon = "1.5.3" +shared_types = { path = "../shared_types" } +tracing = "0.1.35" +typenum = "1.15.0" +bitmaps = "^3.2" +static_assertions = "1.1" +tiny-keccak = "*" +itertools = "0.10.5" + +[dev-dependencies] +tempdir = "0.3.7" +rand = "0.8.5" +hex-literal = "0.3.4" diff --git a/node/storage/src/config.rs b/node/storage/src/config.rs new file mode 100644 index 0000000..1f537d3 --- /dev/null +++ b/node/storage/src/config.rs @@ -0,0 +1,6 @@ +use std::path::PathBuf; + +#[derive(Clone)] +pub struct Config { + pub db_dir: PathBuf, +} diff --git a/node/storage/src/error.rs b/node/storage/src/error.rs new file mode 100644 index 0000000..3a7082e --- /dev/null +++ b/node/storage/src/error.rs @@ -0,0 +1,42 @@ +use anyhow; +use ssz::DecodeError; +use std::error::Error as ErrorTrait; +use std::fmt::{Debug, Display, Formatter}; +use std::io::Error as IoError; + +pub type Result = anyhow::Result; + +#[derive(Debug)] +pub enum Error { + Io(IoError), + /// A partial chunk batch is written. 
+ InvalidBatchBoundary, + ValueDecodingError(DecodeError), + Custom(String), +} + +impl From for Error { + fn from(e: IoError) -> Self { + Error::Io(e) + } +} + +impl From for Error { + fn from(e: DecodeError) -> Self { + Error::ValueDecodingError(e) + } +} + +impl From for Error { + fn from(e: anyhow::Error) -> Self { + Error::Custom(e.to_string()) + } +} + +impl Display for Error { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "StorageError: {:?}", self) + } +} + +impl ErrorTrait for Error {} diff --git a/node/storage/src/lib.rs b/node/storage/src/lib.rs new file mode 100644 index 0000000..dd5ad7b --- /dev/null +++ b/node/storage/src/lib.rs @@ -0,0 +1,32 @@ +use kvdb::KeyValueDB; + +pub mod config; +pub mod error; +pub mod log_store; + +pub use config::Config as StorageConfig; +pub use log_store::log_manager::LogManager; + +pub use ethereum_types::H256; + +pub trait ZgsKeyValueDB: KeyValueDB { + fn put(&self, col: u32, key: &[u8], value: &[u8]) -> std::io::Result<()> { + let mut tx = self.transaction(); + tx.put(col, key, value); + self.write(tx) + } + + fn delete(&self, col: u32, key: &[u8]) -> std::io::Result<()> { + let mut tx = self.transaction(); + tx.delete(col, key); + self.write(tx) + } + + fn delete_with_prefix(&self, col: u32, key_prefix: &[u8]) -> std::io::Result<()> { + let mut tx = self.transaction(); + tx.delete_prefix(col, key_prefix); + self.write(tx) + } +} + +impl ZgsKeyValueDB for T {} diff --git a/node/storage/src/log_store/config.rs b/node/storage/src/log_store/config.rs new file mode 100644 index 0000000..ace314a --- /dev/null +++ b/node/storage/src/log_store/config.rs @@ -0,0 +1,85 @@ +use anyhow::{anyhow, Result}; +use kvdb::{DBKey, DBOp}; +use ssz::{Decode, Encode}; + +use crate::LogManager; + +use super::log_manager::COL_MISC; + +pub trait Configurable { + fn get_config(&self, key: &[u8]) -> Result>>; + fn set_config(&self, key: &[u8], value: &[u8]) -> Result<()>; + fn remove_config(&self, key: &[u8]) -> Result<()>; + + fn exec_configs(&self, tx: ConfigTx) -> Result<()>; +} + +#[derive(Default)] +pub struct ConfigTx { + ops: Vec, +} + +impl ConfigTx { + pub fn append(&mut self, other: &mut Self) { + self.ops.append(&mut other.ops); + } + + pub fn set_config, T: Encode>(&mut self, key: &K, value: &T) { + self.ops.push(DBOp::Insert { + col: COL_MISC, + key: DBKey::from_slice(key.as_ref()), + value: value.as_ssz_bytes(), + }); + } + + pub fn remove_config>(&mut self, key: &K) { + self.ops.push(DBOp::Delete { + col: COL_MISC, + key: DBKey::from_slice(key.as_ref()), + }); + } +} + +pub trait ConfigurableExt: Configurable { + fn get_config_decoded, T: Decode>(&self, key: &K) -> Result> { + match self.get_config(key.as_ref())? { + Some(val) => Ok(Some( + T::from_ssz_bytes(&val).map_err(|e| anyhow!("SSZ decode error: {:?}", e))?, + )), + None => Ok(None), + } + } + + fn set_config_encoded, T: Encode>(&self, key: &K, value: &T) -> Result<()> { + self.set_config(key.as_ref(), &value.as_ssz_bytes()) + } + + fn remove_config_by_key>(&self, key: &K) -> Result<()> { + self.remove_config(key.as_ref()) + } +} + +impl ConfigurableExt for T {} + +impl Configurable for LogManager { + fn get_config(&self, key: &[u8]) -> Result>> { + Ok(self.db.get(COL_MISC, key)?) + } + + fn set_config(&self, key: &[u8], value: &[u8]) -> Result<()> { + self.db.put(COL_MISC, key, value)?; + Ok(()) + } + + fn remove_config(&self, key: &[u8]) -> Result<()> { + Ok(self.db.delete(COL_MISC, key)?) 
+ } + + fn exec_configs(&self, tx: ConfigTx) -> Result<()> { + let mut db_tx = self.db.transaction(); + db_tx.ops = tx.ops; + self.db.write(db_tx)?; + + Ok(()) + } +} diff --git a/node/storage/src/log_store/flow_store.rs b/node/storage/src/log_store/flow_store.rs new file mode 100644 index 0000000..412286d --- /dev/null +++ b/node/storage/src/log_store/flow_store.rs @@ -0,0 +1,539 @@ +use super::load_chunk::EntryBatch; +use super::{MineLoadChunk, SealAnswer, SealTask}; +use crate::error::Error; +use crate::log_store::log_manager::{bytes_to_entries, COL_ENTRY_BATCH, COL_ENTRY_BATCH_ROOT}; +use crate::log_store::{FlowRead, FlowSeal, FlowWrite}; +use crate::{try_option, ZgsKeyValueDB}; +use anyhow::{anyhow, bail, Result}; +use append_merkle::{MerkleTreeInitialData, MerkleTreeRead}; +use itertools::Itertools; +use shared_types::{ChunkArray, DataRoot, FlowProof}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode as DeriveDecode, Encode as DeriveEncode}; +use std::cmp::Ordering; +use std::collections::BTreeMap; +use std::fmt::Debug; +use std::sync::Arc; +use std::{cmp, mem}; +use tracing::{debug, error, trace}; +use zgs_spec::{BYTES_PER_SECTOR, SEALS_PER_LOAD, SECTORS_PER_LOAD, SECTORS_PER_SEAL}; + +pub struct FlowStore { + db: FlowDBStore, + // TODO(kevin): This is an in-memory cache for recording which chunks are ready for sealing. It should be persisted on disk. + to_seal_set: BTreeMap, + // Data sealing is an asynchronized process. + // The sealing service uses the version number to distinguish if revert happens during sealing. + to_seal_version: usize, + config: FlowConfig, +} + +impl FlowStore { + pub fn new(db: Arc, config: FlowConfig) -> Self { + Self { + db: FlowDBStore::new(db), + to_seal_set: Default::default(), + to_seal_version: 0, + config, + } + } + + pub fn put_batch_root_list(&self, root_map: BTreeMap) -> Result<()> { + self.db.put_batch_root_list(root_map) + } + + pub fn insert_subtree_list_for_batch( + &self, + batch_index: usize, + subtree_list: Vec<(usize, usize, DataRoot)>, + ) -> Result<()> { + let mut batch = self + .db + .get_entry_batch(batch_index as u64)? + .unwrap_or_else(|| EntryBatch::new(batch_index as u64)); + batch.set_subtree_list(subtree_list); + self.db.put_entry_raw(vec![(batch_index as u64, batch)])?; + + Ok(()) + } + + pub fn gen_proof_in_batch(&self, batch_index: usize, sector_index: usize) -> Result { + let batch = self + .db + .get_entry_batch(batch_index as u64)? + .ok_or_else(|| anyhow!("batch missing, index={}", batch_index))?; + let merkle = batch.to_merkle_tree(batch_index == 0)?.ok_or_else(|| { + anyhow!( + "batch data incomplete for building a merkle tree, index={}", + batch_index + ) + })?; + merkle.gen_proof(sector_index) + } +} + +#[derive(Clone, Debug)] +pub struct FlowConfig { + pub batch_size: usize, +} + +impl Default for FlowConfig { + fn default() -> Self { + Self { + batch_size: SECTORS_PER_LOAD, + } + } +} + +impl FlowRead for FlowStore { + /// Return `Ok(None)` if only partial data are available. 
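+    /// A sketch of how a read is split across batches (using an illustrative batch
+    /// size of 1024 sectors, not necessarily the configured one):
+    /// `batch_iter(1000, 3000, 1024)` yields `[(1000, 1024), (1024, 2048), (2048, 3000)]`,
+    /// and each sub-range is then served from its own `EntryBatch`.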
+ fn get_entries(&self, index_start: u64, index_end: u64) -> Result> { + if index_end <= index_start { + bail!( + "invalid entry index: start={} end={}", + index_start, + index_end + ); + } + let mut data = Vec::with_capacity((index_end - index_start) as usize * BYTES_PER_SECTOR); + for (start_entry_index, end_entry_index) in + batch_iter(index_start, index_end, self.config.batch_size) + { + let chunk_index = start_entry_index / self.config.batch_size as u64; + let mut offset = start_entry_index - chunk_index * self.config.batch_size as u64; + let mut length = end_entry_index - start_entry_index; + + // Tempfix: for first chunk, its offset is always 1 + if chunk_index == 0 && offset == 0 { + offset = 1; + length -= 1; + } + + let entry_batch = try_option!(self.db.get_entry_batch(chunk_index)?); + let mut entry_batch_data = + try_option!(entry_batch.get_unsealed_data(offset as usize, length as usize)); + data.append(&mut entry_batch_data); + } + Ok(Some(ChunkArray { + data, + start_index: index_start, + })) + } + + fn get_available_entries(&self, index_start: u64, index_end: u64) -> Result> { + // Both `index_start` and `index_end` are at the batch boundaries, so we do not need + // to check if the data is within range when we process each batch. + if index_end <= index_start + || index_start % self.config.batch_size as u64 != 0 + || index_end % self.config.batch_size as u64 != 0 + { + bail!( + "invalid entry index: start={} end={}", + index_start, + index_end + ); + } + let mut entry_list = Vec::::new(); + for (start_entry_index, _) in batch_iter(index_start, index_end, self.config.batch_size) { + let chunk_index = start_entry_index / self.config.batch_size as u64; + + if let Some(mut data_list) = self + .db + .get_entry_batch(chunk_index)? + .map(|b| b.into_data_list(start_entry_index)) + { + if data_list.is_empty() { + continue; + } + // This will not happen for now because we only get entries for the last chunk. + if let Some(last) = entry_list.last_mut() { + if last.start_index + bytes_to_entries(last.data.len() as u64) + == data_list[0].start_index + { + // Merge the first element with the previous one. + last.data.append(&mut data_list.remove(0).data); + } + } + for data in data_list { + entry_list.push(data); + } + } + } + Ok(entry_list) + } + + /// Return the list of all stored chunk roots. + fn get_chunk_root_list(&self) -> Result> { + self.db.get_batch_root_list() + } + + fn load_sealed_data(&self, chunk_index: u64) -> Result> { + let batch = try_option!(self.db.get_entry_batch(chunk_index)?); + let mut mine_chunk = MineLoadChunk::default(); + for (seal_index, (sealed, validity)) in mine_chunk + .loaded_chunk + .iter_mut() + .zip(mine_chunk.avalibilities.iter_mut()) + .enumerate() + { + if let Some(data) = batch.get_sealed_data(seal_index as u16) { + *validity = true; + *sealed = data; + } + } + Ok(Some(mine_chunk)) + } +} + +impl FlowWrite for FlowStore { + /// Return the roots of completed chunks. The order is guaranteed to be increasing + /// by chunk index. + fn append_entries(&mut self, data: ChunkArray) -> Result> { + trace!("append_entries: {} {}", data.start_index, data.data.len()); + if data.data.len() % BYTES_PER_SECTOR != 0 { + bail!("append_entries: invalid data size, len={}", data.data.len()); + } + let mut batch_list = Vec::new(); + for (start_entry_index, end_entry_index) in batch_iter( + data.start_index, + data.start_index + bytes_to_entries(data.data.len() as u64), + self.config.batch_size, + ) { + // TODO: Avoid mem-copy if possible. 
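+            // Each batch-aligned sub-range is written into the batch that owns it:
+            // `chunk_index = start_index / batch_size` picks the batch, and
+            // `start_index % batch_size` is the sector offset inside it. Seal chunks
+            // completed by this write are queued into `to_seal_set` further below.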
+ let chunk = data + .sub_array(start_entry_index, end_entry_index) + .expect("in range"); + + let chunk_index = chunk.start_index / self.config.batch_size as u64; + + // TODO: Try to avoid loading from db if possible. + let mut batch = self + .db + .get_entry_batch(chunk_index)? + .unwrap_or_else(|| EntryBatch::new(chunk_index)); + let completed_seals = batch.insert_data( + (chunk.start_index % self.config.batch_size as u64) as usize, + chunk.data, + )?; + completed_seals.into_iter().for_each(|x| { + self.to_seal_set.insert( + chunk_index as usize * SEALS_PER_LOAD + x as usize, + self.to_seal_version, + ); + }); + + batch_list.push((chunk_index, batch)); + } + self.db.put_entry_batch_list(batch_list) + } + + fn truncate(&mut self, start_index: u64) -> crate::error::Result<()> { + let to_reseal = self.db.truncate(start_index, self.config.batch_size)?; + + self.to_seal_set + .split_off(&(start_index as usize / SECTORS_PER_SEAL)); + self.to_seal_version += 1; + + to_reseal.into_iter().for_each(|x| { + self.to_seal_set.insert(x, self.to_seal_version); + }); + Ok(()) + } +} + +impl FlowSeal for FlowStore { + fn pull_seal_chunk(&self, seal_index_max: usize) -> Result>> { + let mut to_seal_iter = self.to_seal_set.iter(); + let (&first_index, &first_version) = try_option!(to_seal_iter.next()); + if first_index >= seal_index_max { + return Ok(None); + } + + let mut tasks = Vec::with_capacity(SEALS_PER_LOAD); + + let batch_data = self + .db + .get_entry_batch((first_index / SEALS_PER_LOAD) as u64)? + .expect("Lost data chunk in to_seal_set"); + + for (&seal_index, &version) in + std::iter::once((&first_index, &first_version)).chain(to_seal_iter.filter(|(&x, _)| { + first_index / SEALS_PER_LOAD == x / SEALS_PER_LOAD && x < seal_index_max + })) + { + let seal_index_local = seal_index % SEALS_PER_LOAD; + let non_sealed_data = batch_data + .get_non_sealed_data(seal_index_local as u16) + .expect("Lost seal chunk in to_seal_set"); + tasks.push(SealTask { + seal_index: seal_index as u64, + version, + non_sealed_data, + }) + } + + Ok(Some(tasks)) + } + + fn submit_seal_result(&mut self, answers: Vec) -> Result<()> { + let is_consistent = |answer: &SealAnswer| { + self.to_seal_set + .get(&(answer.seal_index as usize)) + .map_or(false, |cur_ver| cur_ver == &answer.version) + }; + + let mut updated_chunk = vec![]; + let mut removed_seal_index = Vec::new(); + for (load_index, answers_in_chunk) in &answers + .into_iter() + .filter(is_consistent) + .group_by(|answer| answer.seal_index / SEALS_PER_LOAD as u64) + { + let mut batch_chunk = self + .db + .get_entry_batch(load_index)? + .expect("Can not find chunk data"); + for answer in answers_in_chunk { + removed_seal_index.push(answer.seal_index as usize); + batch_chunk.submit_seal_result(answer)?; + } + updated_chunk.push((load_index, batch_chunk)); + } + + debug!("Seal chunks: indices = {:?}", removed_seal_index); + + for idx in removed_seal_index.into_iter() { + self.to_seal_set.remove(&idx); + } + + self.db.put_entry_raw(updated_chunk)?; + + Ok(()) + } +} + +pub struct FlowDBStore { + kvdb: Arc, +} + +impl FlowDBStore { + pub fn new(kvdb: Arc) -> Self { + Self { kvdb } + } + + fn put_entry_batch_list( + &self, + batch_list: Vec<(u64, EntryBatch)>, + ) -> Result> { + let mut completed_batches = Vec::new(); + let mut tx = self.kvdb.transaction(); + for (batch_index, batch) in batch_list { + tx.put( + COL_ENTRY_BATCH, + &batch_index.to_be_bytes(), + &batch.as_ssz_bytes(), + ); + if let Some(root) = batch.build_root(batch_index == 0)? 
{ + trace!("complete batch: index={}", batch_index); + tx.put( + COL_ENTRY_BATCH_ROOT, + // (batch_index, subtree_depth) + &encode_batch_root_key(batch_index as usize, 1), + root.as_bytes(), + ); + completed_batches.push((batch_index, root)); + } + } + self.kvdb.write(tx)?; + Ok(completed_batches) + } + + fn put_entry_raw(&self, batch_list: Vec<(u64, EntryBatch)>) -> Result<()> { + let mut tx = self.kvdb.transaction(); + for (batch_index, batch) in batch_list { + tx.put( + COL_ENTRY_BATCH, + &batch_index.to_be_bytes(), + &batch.as_ssz_bytes(), + ); + } + self.kvdb.write(tx)?; + Ok(()) + } + + fn get_entry_batch(&self, batch_index: u64) -> Result> { + let raw = try_option!(self.kvdb.get(COL_ENTRY_BATCH, &batch_index.to_be_bytes())?); + Ok(Some(EntryBatch::from_ssz_bytes(&raw).map_err(Error::from)?)) + } + + fn put_batch_root_list(&self, root_map: BTreeMap) -> Result<()> { + let mut tx = self.kvdb.transaction(); + for (batch_index, (root, subtree_depth)) in root_map { + tx.put( + COL_ENTRY_BATCH_ROOT, + &encode_batch_root_key(batch_index, subtree_depth), + root.as_bytes(), + ); + } + Ok(self.kvdb.write(tx)?) + } + + fn get_batch_root_list(&self) -> Result> { + let mut range_root = None; + // A list of `BatchRoot` that can reconstruct the whole merkle tree structure. + let mut root_list = Vec::new(); + // A list of leaf `(index, root_hash)` in the subtrees of some nodes in `root_list`, + // and they will be updated in the merkle tree with `fill_leaf` by the caller. + let mut leaf_list = Vec::new(); + let mut expected_index = 0; + for r in self.kvdb.iter(COL_ENTRY_BATCH_ROOT) { + let (index_bytes, root_bytes) = r?; + let (batch_index, subtree_depth) = decode_batch_root_key(index_bytes.as_ref())?; + debug!( + "load root depth={}, index expected={} get={}", + subtree_depth, expected_index, batch_index + ); + let root = DataRoot::from_slice(root_bytes.as_ref()); + if subtree_depth == 1 { + if range_root.is_none() { + // This is expected to be the next leaf. + if batch_index == expected_index { + root_list.push((1, root)); + expected_index += 1; + } else { + bail!( + "unexpected chunk leaf, expected={}, get={}", + expected_index, + batch_index + ); + } + } else { + match batch_index.cmp(&expected_index) { + Ordering::Less => { + // This leaf is within a subtree whose root is known. + leaf_list.push((batch_index, root)); + } + Ordering::Equal => { + // A subtree range ends. + range_root = None; + root_list.push((1, root)); + expected_index += 1; + } + Ordering::Greater => { + bail!( + "unexpected chunk leaf in range, expected={}, get={}, range={:?}", + expected_index, + batch_index, + range_root, + ); + } + } + } + } else if expected_index == batch_index { + range_root = Some(BatchRoot::Multiple((subtree_depth, root))); + root_list.push((subtree_depth, root)); + expected_index += 1 << (subtree_depth - 1); + } else { + bail!( + "unexpected range root: expected={} get={}", + expected_index, + batch_index + ); + } + } + Ok(MerkleTreeInitialData { + subtree_list: root_list, + known_leaves: leaf_list, + }) + } + + fn truncate(&self, start_index: u64, batch_size: usize) -> crate::error::Result> { + let mut tx = self.kvdb.transaction(); + let mut start_batch_index = start_index / batch_size as u64; + let first_batch_offset = start_index as usize % batch_size; + let mut index_to_reseal = Vec::new(); + if first_batch_offset != 0 { + if let Some(mut first_batch) = self.get_entry_batch(start_batch_index)? 
{ + index_to_reseal = first_batch + .truncate(first_batch_offset) + .into_iter() + .map(|x| start_batch_index as usize * SEALS_PER_LOAD + x as usize) + .collect(); + if !first_batch.is_empty() { + tx.put( + COL_ENTRY_BATCH, + &start_batch_index.to_be_bytes(), + &first_batch.as_ssz_bytes(), + ); + } else { + tx.delete(COL_ENTRY_BATCH, &start_batch_index.to_be_bytes()); + } + } + + start_batch_index += 1; + } + // TODO: `kvdb` and `kvdb-rocksdb` does not support `seek_to_last` yet. + // We'll need to fork it or use another wrapper for a better performance in this. + let end = match self.kvdb.iter(COL_ENTRY_BATCH).last() { + Some(Ok((k, _))) => decode_batch_index(k.as_ref())?, + Some(Err(e)) => { + error!("truncate db error: e={:?}", e); + return Err(e.into()); + } + None => { + // The db has no data, so we can just return; + return Ok(index_to_reseal); + } + }; + for batch_index in start_batch_index as usize..=end { + tx.delete(COL_ENTRY_BATCH, &batch_index.to_be_bytes()); + tx.delete_prefix(COL_ENTRY_BATCH_ROOT, &batch_index.to_be_bytes()); + } + self.kvdb.write(tx)?; + Ok(index_to_reseal) + } +} + +#[derive(DeriveEncode, DeriveDecode, Clone, Debug)] +#[ssz(enum_behaviour = "union")] +pub enum BatchRoot { + Single(DataRoot), + Multiple((usize, DataRoot)), +} + +/// Return the batch boundaries `(batch_start_index, batch_end_index)` given the index range. +pub fn batch_iter(start: u64, end: u64, batch_size: usize) -> Vec<(u64, u64)> { + let mut list = Vec::new(); + for i in (start / batch_size as u64 * batch_size as u64..end).step_by(batch_size) { + let batch_start = cmp::max(start, i); + let batch_end = cmp::min(end, i + batch_size as u64); + list.push((batch_start, batch_end)); + } + list +} + +fn try_decode_usize(data: &[u8]) -> Result { + Ok(usize::from_be_bytes( + data.try_into().map_err(|e| anyhow!("{:?}", e))?, + )) +} + +fn decode_batch_index(data: &[u8]) -> Result { + try_decode_usize(data) +} + +/// For the same batch_index, we want to process the larger subtree_depth first in iteration. 
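+/// A worked example of the encoding below: the key is the big-endian `batch_index`
+/// followed by the big-endian `usize::MAX - subtree_depth`, so for the same batch a
+/// larger depth produces a smaller suffix and sorts earlier in the lexicographic
+/// iteration over `COL_ENTRY_BATCH_ROOT`; `decode_batch_root_key` inverts both halves.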
+fn encode_batch_root_key(batch_index: usize, subtree_depth: usize) -> Vec { + let mut key = batch_index.to_be_bytes().to_vec(); + key.extend_from_slice(&(usize::MAX - subtree_depth).to_be_bytes()); + key +} + +fn decode_batch_root_key(data: &[u8]) -> Result<(usize, usize)> { + if data.len() != mem::size_of::() * 2 { + bail!("invalid data length"); + } + let batch_index = try_decode_usize(&data[..mem::size_of::()])?; + let subtree_depth = usize::MAX - try_decode_usize(&data[mem::size_of::()..])?; + Ok((batch_index, subtree_depth)) +} diff --git a/node/storage/src/log_store/load_chunk/bitmap.rs b/node/storage/src/log_store/load_chunk/bitmap.rs new file mode 100644 index 0000000..0269428 --- /dev/null +++ b/node/storage/src/log_store/load_chunk/bitmap.rs @@ -0,0 +1,175 @@ +use std::ops::{Deref, DerefMut}; + +use ssz::{Decode, DecodeError, Encode}; + +use bitmaps::{Bitmap, Bits, BitsImpl}; + +#[derive(Default, Debug)] +pub struct WrappedBitmap(pub Bitmap) +where + BitsImpl<{ N }>: Bits; +type PrimitveInner = as Bits>::Store; + +impl Encode for WrappedBitmap +where + BitsImpl<{ N }>: Bits, + PrimitveInner<{ N }>: Encode, +{ + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_append(&self, buf: &mut Vec) { + buf.append(&mut self.0.into_value().as_ssz_bytes()) + } + + fn ssz_bytes_len(&self) -> usize { + ::ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + as Encode>::ssz_fixed_len() + } +} + +impl Decode for WrappedBitmap +where + BitsImpl<{ N }>: Bits, + PrimitveInner<{ N }>: Decode, +{ + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + as Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + Ok(Self(Bitmap::<{ N }>::from_value( + PrimitveInner::from_ssz_bytes(bytes)?, + ))) + } +} + +impl Deref for WrappedBitmap +where + BitsImpl<{ N }>: Bits, +{ + type Target = Bitmap<{ N }>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for WrappedBitmap +where + BitsImpl<{ N }>: Bits, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +pub trait TruncateBitmap { + fn truncate(&mut self, index: u16); +} + +impl TruncateBitmap for u16 { + fn truncate(&mut self, index: u16) { + let mask: u16 = (1 << index) - 1; + *self &= mask + } +} + +impl TruncateBitmap for u32 { + fn truncate(&mut self, index: u16) { + let mask: u32 = (1 << index) - 1; + *self &= mask + } +} + +impl TruncateBitmap for u64 { + fn truncate(&mut self, index: u16) { + let mask: u64 = (1 << index) - 1; + *self &= mask + } +} + +impl TruncateBitmap for u128 { + fn truncate(&mut self, index: u16) { + let mask: u128 = (1 << index) - 1; + *self &= mask + } +} + +impl TruncateBitmap for [u128; N] { + fn truncate(&mut self, index: u16) { + let blob_index = index as usize / u128::BITS as usize; + let bit_index = index as usize % u128::BITS as usize; + let mask: u128 = (1 << (bit_index as u128)) - 1; + self[blob_index] &= mask; + for blob in &mut self[(blob_index + 1)..N] { + *blob = 0; + } + } +} + +impl WrappedBitmap +where + BitsImpl<{ N }>: Bits, + PrimitveInner<{ N }>: TruncateBitmap, +{ + /// Set the position large or equal to `index` to false + pub fn truncate(&mut self, index: u16) { + let mut current = *self.as_value(); + TruncateBitmap::truncate(&mut current, index); + self.0 = Bitmap::<{ N }>::from_value(current) + } +} + +#[test] +fn bitmap_serde() { + let mut bitmap = WrappedBitmap::<64>::default(); + bitmap.set(10, true); + bitmap.set(29, true); + + let serialized = bitmap.as_ssz_bytes(); + let deserialized = 
WrappedBitmap::<64>::from_ssz_bytes(&serialized).unwrap(); + assert_eq!(bitmap.into_value(), deserialized.into_value()); +} + +#[test] +fn bitmap_truncate() { + let mut bitmap = WrappedBitmap::<64>::default(); + bitmap.set(10, true); + bitmap.set(29, true); + bitmap.set(30, true); + bitmap.set(55, true); + + bitmap.truncate(30); + + assert!(bitmap.get(10)); + assert!(bitmap.get(29)); + assert!(!bitmap.get(30)); + assert!(!bitmap.get(55)); +} + +#[test] +fn bitmap_big_truncate() { + let mut bitmap = WrappedBitmap::<300>::default(); + bitmap.set(110, true); + bitmap.set(129, true); + bitmap.set(130, true); + bitmap.set(155, true); + bitmap.set(299, true); + + bitmap.truncate(130); + + assert!(bitmap.get(110)); + assert!(bitmap.get(129)); + assert!(!bitmap.get(130)); + assert!(!bitmap.get(155)); + assert!(!bitmap.get(299)); +} diff --git a/node/storage/src/log_store/load_chunk/chunk_data.rs b/node/storage/src/log_store/load_chunk/chunk_data.rs new file mode 100644 index 0000000..30b0ec2 --- /dev/null +++ b/node/storage/src/log_store/load_chunk/chunk_data.rs @@ -0,0 +1,497 @@ +use anyhow::{bail, Result}; +use shared_types::{bytes_to_chunks, DataRoot}; +use ssz_derive::{Decode, Encode}; +use std::fmt::{Debug, Formatter}; +use std::mem; +use tracing::error; +use zgs_spec::{BYTES_PER_LOAD, BYTES_PER_SECTOR, SECTORS_PER_LOAD, SECTORS_PER_SEAL}; + +pub enum EntryBatchData { + Complete(Vec), + /// All `PartialBatch`s are ordered based on `start_index`. + Incomplete(IncompleteData), +} + +#[derive(Default, Debug, Encode, Decode)] +pub struct IncompleteData { + pub subtrees: Vec, + pub known_data: Vec, +} + +impl IncompleteData { + fn truncate(&mut self, sector_index: usize) { + let partial_batch_truncate = match PartialBatch::find(&self.known_data, sector_index) { + Ok(x) => { + let p = &mut self.known_data[x]; + let truncated_byte = (sector_index - p.start_sector) * BYTES_PER_SECTOR; + p.data.truncate(truncated_byte); + if p.data.is_empty() { + x + } else { + x + 1 + } + } + Err(x) => x, + }; + self.known_data.truncate(partial_batch_truncate); + + let subtree_truncate = match Subtree::find(&self.subtrees, sector_index) { + Ok(x) => x, + Err(x) => x, + }; + self.subtrees.truncate(subtree_truncate); + } + + /// This is only called once when the batch is removed from the memory and fully stored in db. + pub fn set_subtree_list(&mut self, subtree_list: Vec) { + self.subtrees = subtree_list; + } + + pub fn get(&self, mut start_byte: usize, length_byte: usize) -> Option<&[u8]> { + let p = &self.known_data + [PartialBatch::find(&self.known_data, start_byte / BYTES_PER_SECTOR).ok()?]; + // Rebase the start_byte and end_byte w.r.t. to hit partial batch. + start_byte -= p.start_sector * BYTES_PER_SECTOR; + p.data.get(start_byte..(start_byte + length_byte)) + } +} + +#[derive(Default, Debug, Encode, Decode)] +pub struct Subtree { + pub start_sector: usize, + pub subtree_height: usize, + pub root: DataRoot, +} + +#[derive(PartialEq, Eq)] +pub struct PartialBatch { + /// Offset in this batch. 
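+    /// (Measured in sectors, i.e. units of `BYTES_PER_SECTOR`, not in bytes.)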
+ pub(super) start_sector: usize, + pub(super) data: Vec, +} + +impl Debug for PartialBatch { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "PartialBatch: start_offset={} data_len={}", + self.start_sector, + self.data.len() + ) + } +} + +trait DataRange: Sized { + fn start_sector(&self) -> usize; + fn size(&self) -> usize; + + fn end_sector(&self) -> usize { + self.start_sector() + self.size() + } + + fn find(data_list: &[Self], sector_index: usize) -> Result { + let possible_index = + match data_list.binary_search_by_key(§or_index, |x| x.start_sector()) { + Ok(x) => x, + Err(0) => { + return Err(0); + } + Err(x) => x - 1, + }; + if sector_index < data_list[possible_index].end_sector() { + Ok(possible_index) + } else { + Err(possible_index + 1) + } + } +} + +impl DataRange for PartialBatch { + fn start_sector(&self) -> usize { + self.start_sector + } + + fn size(&self) -> usize { + bytes_to_chunks(self.data.len()) + } +} + +impl DataRange for Subtree { + fn start_sector(&self) -> usize { + self.start_sector + } + + fn size(&self) -> usize { + 1 << (self.subtree_height - 1) + } +} + +impl EntryBatchData { + pub fn new() -> Self { + EntryBatchData::Incomplete(IncompleteData { + subtrees: vec![], + known_data: vec![], + }) + } + + pub fn is_empty(&self) -> bool { + matches!(self,EntryBatchData::Incomplete(x) if x.known_data.is_empty()) + } + + pub fn get(&self, start_byte: usize, length_byte: usize) -> Option<&[u8]> { + assert!(start_byte + length_byte <= BYTES_PER_LOAD); + + match self { + EntryBatchData::Complete(data) => data.get(start_byte..(start_byte + length_byte)), + EntryBatchData::Incomplete(data) => data.get(start_byte, length_byte), + } + } + + pub fn get_mut(&mut self, mut start_byte: usize, length_byte: usize) -> Option<&mut [u8]> { + assert!(start_byte + length_byte <= BYTES_PER_LOAD); + + match self { + EntryBatchData::Complete(data) => data.get_mut(start_byte..(start_byte + length_byte)), + EntryBatchData::Incomplete(data) => { + let index = + PartialBatch::find(&data.known_data, start_byte / BYTES_PER_SECTOR).ok()?; + let p = &mut data.known_data[index]; + + // Rebase the start_byte and end_byte w.r.t. to hit partial batch. + start_byte -= p.start_sector * BYTES_PER_SECTOR; + + p.data.get_mut(start_byte..(start_byte + length_byte)) + } + } + } + + pub fn truncate(&mut self, truncated_byte: usize) { + assert!(truncated_byte % BYTES_PER_SECTOR == 0); + *self = match self { + EntryBatchData::Complete(data) => { + data.truncate(truncated_byte); + let known_data = vec![PartialBatch { + start_sector: 0, + data: std::mem::take(data), + }]; + EntryBatchData::Incomplete(IncompleteData { + subtrees: vec![], + known_data, + }) + } + EntryBatchData::Incomplete(batch_list) => { + batch_list.truncate(truncated_byte / BYTES_PER_SECTOR); + EntryBatchData::Incomplete(std::mem::take(batch_list)) + } + }; + } + + pub fn insert_data(&mut self, start_byte: usize, mut data: Vec) -> Result> { + assert!(start_byte % BYTES_PER_SECTOR == 0); + assert!(data.len() % BYTES_PER_SECTOR == 0); + + if data.is_empty() || self.get(start_byte, data.len()) == Some(&data) { + // TODO(zz): This assumes the caller has processed chain reorg (truncate flow) before + // inserting new data, and the data of the same file are always inserted with the + // same pattern. 
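+            // E.g. re-submitting a segment that is already stored byte-for-byte lands
+            // here and is treated as a no-op, reporting no newly sealable chunks.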
+ return Ok(vec![]); + } + + // Check if the entry is completed + let (list, subtree_list) = if let EntryBatchData::Incomplete(x) = self { + (&mut x.known_data, &mut x.subtrees) + } else { + bail!( + "overwriting a completed PoRA Chunk with conflict data, start_byte={}", + start_byte + ); + }; + + let start_sector = start_byte / BYTES_PER_SECTOR; + let end_sector = start_sector + data.len() / BYTES_PER_SECTOR; + let length_sector = data.len() / BYTES_PER_SECTOR; + + // Check if the entry is completed + let start_insert_position = match PartialBatch::find(list, start_sector) { + Ok(x) => { + bail!( + "start position overlapped with existing batch: start {}, len {}", + list[x].start_sector, + list[x].data.len() + ); + } + Err(x) => x, + }; + + let end_insert_position = match PartialBatch::find(list, end_sector - 1) { + Ok(x) => { + bail!( + "end position overlapped with existing batch: start {}, len {}", + list[x].start_sector, + list[x].data.len() + ); + } + Err(x) => x, + }; + + let position = if start_insert_position != end_insert_position { + bail!("data overlapped with existing batches"); + } else { + start_insert_position + }; + + let merge_prev = position != 0 && start_sector == list[position - 1].end_sector(); + let merge_next = position != list.len() && end_sector == list[position].start_sector; + + let updated_segment = match (merge_prev, merge_next) { + (false, false) => { + list.insert(position, PartialBatch { start_sector, data }); + &list[position] + } + (true, false) => { + list[position - 1].data.append(&mut data); + &list[position - 1] + } + (false, true) => { + data.append(&mut list[position].data); + list[position] = PartialBatch { start_sector, data }; + &list[position] + } + (true, true) => { + // Merge the new data with the two around partial batches to + // a single one. + list[position - 1].data.append(&mut data); + let mut next = list.remove(position); + list[position - 1].data.append(&mut next.data); + &list[position - 1] + } + }; + + // Remove completed subtrees. + let updated_end_sector = + updated_segment.start_sector + updated_segment.data.len() / BYTES_PER_SECTOR; + let start_subtree_index = match subtree_list + .binary_search_by_key(&updated_segment.start_sector, |tree| tree.start_sector) + { + Ok(x) => x, + Err(x) => x, + }; + for subtree_index in start_subtree_index..subtree_list.len() { + assert!(subtree_list[subtree_index].start_sector >= updated_segment.start_sector); + if subtree_list[subtree_index].end_sector() > updated_end_sector { + subtree_list.drain(start_subtree_index..subtree_index); + break; + } + } + + // Find which seal chunks are made intact by this submission. + // It will be notified to the sealer later. + let intact_seal_idxs = get_intact_sealing_index( + updated_segment.start_sector, + updated_segment.data.len() / BYTES_PER_SECTOR, + ); + + // TODO(zz): Use config here? + if list.len() == 1 + && list[0].start_sector == 0 + && bytes_to_chunks(list[0].data.len()) == SECTORS_PER_LOAD + { + // All data in this batch have been filled. 
+ *self = EntryBatchData::Complete(mem::take(&mut list[0].data)); + } + + let ready_for_seal_idxs: Vec = get_covered_sealing_index(start_sector, length_sector) + .filter(|x| intact_seal_idxs.contains(x)) + .collect(); + + Ok(ready_for_seal_idxs) + } + + pub(super) fn available_range_entries(&self) -> Vec<(usize, usize)> { + match self { + EntryBatchData::Complete(data) => { + vec![(0, data.len() / BYTES_PER_SECTOR)] + } + EntryBatchData::Incomplete(batch_list) => batch_list + .known_data + .iter() + .map(|b| (b.start_sector, b.data.len() / BYTES_PER_SECTOR)) + .collect(), + } + } + + /// This is only called once when the batch is removed from the memory and fully stored in db. + pub fn set_subtree_list(&mut self, subtree_list: Vec<(usize, usize, DataRoot)>) { + let subtree_list: Vec = subtree_list + .into_iter() + .filter(|(start_sector, subtree_height, root)| { + self.get(*start_sector * BYTES_PER_SECTOR, (1 << (*subtree_height - 1)) * BYTES_PER_SECTOR) + .is_none() + // The first sector is regarded as known. + && *root != DataRoot::zero() + }) + .map(|(start_sector, subtree_height, root)| Subtree { + start_sector, + subtree_height, + root, + }) + .collect(); + match self { + EntryBatchData::Complete(_) => { + // This should not happen if the data in memory matches the data in DB. + if !subtree_list.is_empty() { + error!(?subtree_list, "store subtrees for a completed chunk"); + } + } + EntryBatchData::Incomplete(data) => data.set_subtree_list(subtree_list), + } + } + + pub fn get_subtree_list(&self) -> &[Subtree] { + match self { + EntryBatchData::Complete(_) => &[], + EntryBatchData::Incomplete(data) => &data.subtrees, + } + } +} + +fn get_intact_sealing_index(start_sector: usize, length_sector: usize) -> std::ops::Range { + // Inclusive + let start_index = ((start_sector + SECTORS_PER_SEAL - 1) / SECTORS_PER_SEAL) as u16; + // Exclusive + let end_index = ((start_sector + length_sector) / SECTORS_PER_SEAL) as u16; + start_index..end_index +} + +fn get_covered_sealing_index(start_sector: usize, length_sector: usize) -> std::ops::Range { + // Inclusive + let start_index = (start_sector / SECTORS_PER_SEAL) as u16; + // Exclusive + let end_index = + ((start_sector + length_sector + SECTORS_PER_SEAL - 1) / SECTORS_PER_SEAL) as u16; + start_index..end_index +} + +#[cfg(test)] +mod tests { + use crate::log_store::load_chunk::chunk_data::PartialBatch; + + use super::EntryBatchData; + use rand::{rngs::StdRng, RngCore, SeedableRng}; + use zgs_spec::{BYTES_PER_LOAD, BYTES_PER_SECTOR, SECTORS_PER_LOAD}; + + fn test_data() -> Vec { + let mut data = vec![0u8; BYTES_PER_LOAD]; + let mut random = StdRng::seed_from_u64(73); + random.fill_bytes(&mut data); + data + } + + #[test] + fn test_data_chunk_insert() { + let data = test_data(); + let mut chunk_batch = EntryBatchData::new(); + + for i in [2usize, 0, 1, 3].into_iter() { + chunk_batch + .insert_data( + BYTES_PER_LOAD / 4 * i, + data[(BYTES_PER_LOAD / 4) * i..(BYTES_PER_LOAD / 4) * (i + 1)].to_vec(), + ) + .unwrap(); + } + + assert!(matches!(chunk_batch, EntryBatchData::Complete(_))); + } + + #[test] + fn test_data_chunk_truncate() { + let data = test_data(); + let mut chunk_batch = EntryBatchData::new(); + + for i in [3, 1].into_iter() { + chunk_batch + .insert_data( + BYTES_PER_LOAD / 4 * i, + data[(BYTES_PER_LOAD / 4) * i..(BYTES_PER_LOAD / 4) * (i + 1)].to_vec(), + ) + .unwrap(); + } + + chunk_batch.truncate(BYTES_PER_LOAD / 4 * 3 + BYTES_PER_SECTOR); + + let chunks = if let EntryBatchData::Incomplete(chunks) = chunk_batch { + chunks + } else { + 
unreachable!(); + }; + + assert!(chunks.known_data.len() == 2); + assert!( + chunks.known_data[0] + == PartialBatch { + start_sector: SECTORS_PER_LOAD / 4, + data: data[(BYTES_PER_LOAD / 4)..(BYTES_PER_LOAD / 4) * 2].to_vec() + } + ); + assert!( + chunks.known_data[1] + == PartialBatch { + start_sector: SECTORS_PER_LOAD / 4 * 3, + data: data[BYTES_PER_LOAD / 4 * 3..BYTES_PER_LOAD / 4 * 3 + BYTES_PER_SECTOR] + .to_vec() + } + ); + } + + #[test] + fn test_data_chunk_get_slice() { + let data = test_data(); + let mut chunk_batch = EntryBatchData::new(); + + const N: usize = BYTES_PER_LOAD; + const B: usize = N / 16; + + // Skip batch 5,7,10,11 + for i in [3, 8, 12, 15, 6, 1, 4, 13, 0, 2, 9, 14].into_iter() { + chunk_batch + .insert_data(B * i, data[B * i..B * (i + 1)].to_vec()) + .unwrap(); + assert_eq!( + chunk_batch.get(B * i, B).unwrap(), + &data[B * i..B * (i + 1)] + ); + assert_eq!( + chunk_batch.get_mut(B * i, B).unwrap(), + &data[B * i..B * (i + 1)] + ); + } + + const S: usize = B / BYTES_PER_SECTOR; + assert_eq!( + chunk_batch.available_range_entries(), + vec![(0, 5 * S), (6 * S, S), (8 * S, 2 * S), (12 * S, 4 * S)] + ); + + assert_eq!(chunk_batch.get(B * 8, B * 2).unwrap(), &data[B * 8..B * 10]); + assert_eq!( + chunk_batch.get_mut(B * 8, B * 2).unwrap(), + &data[B * 8..B * 10] + ); + + assert_eq!(chunk_batch.get(0, B * 4).unwrap(), &data[0..B * 4]); + assert_eq!(chunk_batch.get_mut(0, B * 4).unwrap(), &data[0..B * 4]); + + assert!(chunk_batch.get(0, B * 5 + 32).is_none()); + assert!(chunk_batch.get_mut(0, B * 5 + 32).is_none()); + + assert!(chunk_batch.get(B * 7 - 32, B + 32).is_none()); + assert!(chunk_batch.get_mut(B * 7 - 32, B + 32).is_none()); + + assert!(chunk_batch.get(B * 7, B + 32).is_none()); + assert!(chunk_batch.get_mut(B * 7, B + 32).is_none()); + + assert!(chunk_batch.get(B * 12 - 32, B + 32).is_none()); + assert!(chunk_batch.get_mut(B * 12 - 32, B + 32).is_none()); + } +} diff --git a/node/storage/src/log_store/load_chunk/mod.rs b/node/storage/src/log_store/load_chunk/mod.rs new file mode 100644 index 0000000..f1b35bd --- /dev/null +++ b/node/storage/src/log_store/load_chunk/mod.rs @@ -0,0 +1,425 @@ +mod bitmap; +mod chunk_data; +mod seal; +mod serde; + +use std::cmp::min; + +use anyhow::Result; +use ethereum_types::H256; +use ssz_derive::{Decode, Encode}; + +use crate::log_store::log_manager::data_to_merkle_leaves; +use crate::try_option; +use append_merkle::{Algorithm, MerkleTreeRead, Sha3Algorithm}; +use shared_types::{ChunkArray, DataRoot, Merkle}; +use tracing::trace; +use zgs_spec::{ + BYTES_PER_LOAD, BYTES_PER_SEAL, BYTES_PER_SECTOR, SEALS_PER_LOAD, SECTORS_PER_LOAD, + SECTORS_PER_SEAL, +}; + +use super::SealAnswer; +use chunk_data::EntryBatchData; +use seal::SealInfo; + +#[derive(Encode, Decode)] +pub struct EntryBatch { + seal: SealInfo, + // the inner data + data: EntryBatchData, +} + +impl EntryBatch { + pub fn new(load_index_global: u64) -> Self { + Self { + seal: SealInfo::new(load_index_global), + data: EntryBatchData::new(), + } + } + + pub fn is_empty(&self) -> bool { + self.data.is_empty() + } +} + +impl EntryBatch { + pub fn get_sealed_data(&self, seal_index: u16) -> Option<[u8; BYTES_PER_SEAL]> { + if self.seal.is_sealed(seal_index) { + let loaded_slice = self + .data + .get(seal_index as usize * BYTES_PER_SEAL, BYTES_PER_SEAL)?; + Some(loaded_slice.try_into().unwrap()) + } else { + None + } + } + + pub fn get_non_sealed_data(&self, seal_index: u16) -> Option<[u8; BYTES_PER_SEAL]> { + if !self.seal.is_sealed(seal_index) { + let loaded_slice = self + 
.data + .get(seal_index as usize * BYTES_PER_SEAL, BYTES_PER_SEAL)?; + Some(loaded_slice.try_into().unwrap()) + } else { + None + } + } + + /// Get unsealed data + pub fn get_unsealed_data(&self, start_sector: usize, length_sector: usize) -> Option> { + // If the start position is not aligned and is sealed, we need to load one more word (32 bytes) for unsealing + let advanced_by_one = if start_sector % SECTORS_PER_SEAL == 0 { + // If the start position is not aligned, it is no need to load one more word + false + } else { + // otherwise, it depends on if the given offset is seal + self.seal + .is_sealed((start_sector / SECTORS_PER_SEAL) as u16) + }; + + let start_byte = start_sector * BYTES_PER_SECTOR; + let length_byte = length_sector * BYTES_PER_SECTOR; + + // Load data slice with the word for unsealing + let (mut loaded_data, unseal_mask_seed) = if advanced_by_one { + let loaded_data_with_hint = self.data.get(start_byte - 32, length_byte + 32)?; + + // TODO (api stable): use `split_array_ref` instead when this api is stable. + let (unseal_mask_seed, loaded_data) = loaded_data_with_hint.split_at(32); + let unseal_mask_seed = <[u8; 32]>::try_from(unseal_mask_seed).unwrap(); + (loaded_data.to_vec(), Some(unseal_mask_seed)) + } else { + (self.data.get(start_byte, length_byte)?.to_vec(), None) + }; + + let incomplete_seal_chunk_length = (BYTES_PER_LOAD - start_byte) % BYTES_PER_SEAL; + + // Unseal the first incomplete sealing chunk (if exists) + if let Some(unseal_mask_seed) = unseal_mask_seed { + let data_to_unseal = if loaded_data.len() < incomplete_seal_chunk_length { + // The loaded data does not cross sealings + loaded_data.as_mut() + } else { + loaded_data[..incomplete_seal_chunk_length].as_mut() + }; + + zgs_seal::unseal_with_mask_seed(data_to_unseal, unseal_mask_seed); + } + + if loaded_data.len() > incomplete_seal_chunk_length { + let complete_chunks = &mut loaded_data[incomplete_seal_chunk_length..]; + let start_seal = (start_byte + incomplete_seal_chunk_length) / BYTES_PER_SEAL; + + for (seal_index, data_to_unseal) in complete_chunks + .chunks_mut(BYTES_PER_SEAL) + .enumerate() + .map(|(idx, chunk)| (start_seal + idx, chunk)) + { + self.seal.unseal(data_to_unseal, seal_index as u16); + } + } + + Some(loaded_data) + } + + /// Return `Error` if the new data overlaps with old data. + /// Convert `Incomplete` to `Completed` if the chunk is completed after the insertion. 
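+    /// A minimal usage sketch, mirroring the tests below: on a fresh batch,
+    /// `batch.insert_data(0, vec![11; BYTES_PER_SEAL])` writes one full seal chunk at
+    /// sector offset 0 and should return `vec![0]`, the seal index made intact.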
+ pub fn insert_data(&mut self, offset: usize, data: Vec) -> Result> { + self.data.insert_data(offset * BYTES_PER_SECTOR, data) + } + + pub fn truncate(&mut self, truncated_sector: usize) -> Vec { + assert!(truncated_sector > 0 && truncated_sector < SECTORS_PER_LOAD); + + self.data.truncate(truncated_sector * BYTES_PER_SECTOR); + self.truncate_seal(truncated_sector) + } + + pub fn into_data_list(self, global_start_entry: u64) -> Vec { + self.data + .available_range_entries() + .into_iter() + .map(|(start_entry, length_entry)| ChunkArray { + data: self + .get_unsealed_data(start_entry, length_entry) + .unwrap() + .to_vec(), + start_index: global_start_entry + start_entry as u64, + }) + .collect() + } + + fn truncate_seal(&mut self, truncated_sector: usize) -> Vec { + let reverted_seal_index = (truncated_sector / SECTORS_PER_SEAL) as u16; + + let first_unseal_index = self.seal.truncated_seal_index(reverted_seal_index); + let last_unseal_index = ((truncated_sector - 1) / SECTORS_PER_SEAL) as u16; + + let mut to_reseal_set = Vec::with_capacity(SEALS_PER_LOAD); + + for unseal_index in first_unseal_index..=last_unseal_index { + if !self.seal.is_sealed(unseal_index) { + continue; + } + + let truncated_byte = truncated_sector * BYTES_PER_SECTOR; + let first_unseal_byte = unseal_index as usize * BYTES_PER_SEAL; + let length = min(truncated_byte - first_unseal_byte, BYTES_PER_SEAL); + let to_unseal = self + .data + .get_mut(first_unseal_byte, length) + .expect("Sealed chunk should be complete"); + self.seal.unseal(to_unseal, unseal_index); + + to_reseal_set.push(unseal_index) + } + + // truncate the bitmap + self.seal.truncate(reverted_seal_index); + + to_reseal_set + } + + pub fn build_root(&self, is_first_chunk: bool) -> Result> { + // Fast check if an incomplete chunk is a full chunk. + if let EntryBatchData::Incomplete(d) = &self.data { + if self.get_unsealed_data(SECTORS_PER_LOAD - 1, 1).is_none() { + if let Some(last_subtree) = d.subtrees.last() { + if last_subtree.start_sector + (1 << (last_subtree.subtree_height - 1)) + != SECTORS_PER_LOAD + { + return Ok(None); + } + } else { + return Ok(None); + } + } + } + Ok(Some( + *try_option!(self.to_merkle_tree(is_first_chunk)?).root(), + )) + } + + pub fn submit_seal_result(&mut self, answer: SealAnswer) -> Result<()> { + let local_seal_index = answer.seal_index as usize % SEALS_PER_LOAD; + assert!( + !self.seal.is_sealed(local_seal_index as u16), + "Duplicated sealing" + ); + assert_eq!( + answer.seal_index / SEALS_PER_LOAD as u64, + self.seal.load_index() + ); + + self.seal.set_seal_context( + answer.seal_context, + answer.context_end_seal, + answer.miner_id, + ); + let sealing_segment = self + .data + .get_mut(local_seal_index * BYTES_PER_SEAL, BYTES_PER_SEAL) + .expect("Sealing segment should exist"); + + sealing_segment.copy_from_slice(&answer.sealed_data); + self.seal.mark_sealed(local_seal_index as u16); + + Ok(()) + } + + /// This is only called once when the batch is removed from the memory and fully stored in db. 
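+    /// (The stored subtree roots are what later allow `to_merkle_tree` to rebuild the
+    /// chunk's merkle structure even though the underlying sector data is incomplete.)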
+ pub fn set_subtree_list(&mut self, subtree_list: Vec<(usize, usize, DataRoot)>) { + self.data.set_subtree_list(subtree_list) + } + + pub fn to_merkle_tree(&self, is_first_chunk: bool) -> Result> { + let initial_leaves = if is_first_chunk { + vec![H256::zero()] + } else { + vec![] + }; + let mut merkle = Merkle::new(initial_leaves, 0, None); + for subtree in self.data.get_subtree_list() { + trace!(?subtree, "get subtree, leaves={}", merkle.leaves()); + if subtree.start_sector != merkle.leaves() { + let leaf_data = try_option!( + self.get_unsealed_data(merkle.leaves(), subtree.start_sector - merkle.leaves()) + ); + merkle.append_list(data_to_merkle_leaves(&leaf_data).expect("aligned")); + } + merkle.append_subtree(subtree.subtree_height, subtree.root)?; + } + if merkle.leaves() != SECTORS_PER_LOAD { + let leaf_data = try_option!( + self.get_unsealed_data(merkle.leaves(), SECTORS_PER_LOAD - merkle.leaves()) + ); + merkle.append_list(data_to_merkle_leaves(&leaf_data).expect("aligned")); + } + // TODO(zz): Optimize. + for index in 0..merkle.leaves() { + if merkle.leaf_at(index)?.is_none() { + if let Some(leaf_data) = self.get_unsealed_data(index, 1) { + merkle.fill_leaf(index, Sha3Algorithm::leaf(&leaf_data)); + } + } + } + Ok(Some(merkle)) + } +} + +#[cfg(test)] +mod tests { + use super::{EntryBatch, SealAnswer}; + use ethereum_types::H256; + use zgs_spec::{ + BYTES_PER_SEAL, BYTES_PER_SECTOR, SEALS_PER_LOAD, SECTORS_PER_LOAD, SECTORS_PER_SEAL, + }; + const LOAD_INDEX: u64 = 1; + fn seal( + batch: &mut EntryBatch, + seal_index: u16, + context_digest: H256, + context_end_seal_local: u64, + ) { + let miner_id = H256([33u8; 32]); + let mut data = batch.get_non_sealed_data(seal_index).unwrap(); + zgs_seal::seal( + &mut data, + &miner_id, + &context_digest, + LOAD_INDEX * SECTORS_PER_LOAD as u64 + seal_index as u64 * SECTORS_PER_SEAL as u64, + ); + batch + .submit_seal_result(SealAnswer { + seal_index: LOAD_INDEX * SEALS_PER_LOAD as u64 + seal_index as u64, + version: 0, + sealed_data: data, + miner_id, + seal_context: context_digest, + context_end_seal: LOAD_INDEX * SEALS_PER_LOAD as u64 + context_end_seal_local, + }) + .unwrap(); + } + + #[test] + fn test_seal_single() { + let mut batch = EntryBatch::new(LOAD_INDEX); + batch.insert_data(0, vec![11; BYTES_PER_SEAL]).unwrap(); + + const DIGEST: H256 = H256([22u8; 32]); + seal(&mut batch, 0, DIGEST, 1); + + assert_eq!( + batch.get_unsealed_data(0, SECTORS_PER_SEAL).unwrap(), + vec![11; SECTORS_PER_SEAL * BYTES_PER_SECTOR] + ); + assert_eq!( + batch.get_unsealed_data(1, SECTORS_PER_SEAL - 1).unwrap(), + vec![11; (SECTORS_PER_SEAL - 1) * BYTES_PER_SECTOR] + ); + } + + fn check_two_seals(batch: &EntryBatch) { + assert_eq!( + batch.get_unsealed_data(0, SECTORS_PER_SEAL).unwrap(), + vec![11; SECTORS_PER_SEAL * BYTES_PER_SECTOR] + ); + assert_eq!( + batch + .get_unsealed_data(SECTORS_PER_SEAL, SECTORS_PER_SEAL) + .unwrap(), + vec![11; SECTORS_PER_SEAL * BYTES_PER_SECTOR] + ); + assert_eq!( + batch.get_unsealed_data(1, SECTORS_PER_SEAL - 1).unwrap(), + vec![11; (SECTORS_PER_SEAL - 1) * BYTES_PER_SECTOR] + ); + assert_eq!( + batch.get_unsealed_data(1, SECTORS_PER_SEAL).unwrap(), + vec![11; SECTORS_PER_SEAL * BYTES_PER_SECTOR] + ); + assert_eq!( + batch + .get_unsealed_data(1, 2 * SECTORS_PER_SEAL - 1) + .unwrap(), + vec![11; (2 * SECTORS_PER_SEAL - 1) * BYTES_PER_SECTOR] + ); + } + + #[test] + fn test_seal_mono_context() { + let mut batch = EntryBatch::new(LOAD_INDEX); + batch.insert_data(0, vec![11; BYTES_PER_SEAL * 2]).unwrap(); + + const DIGEST: H256 = 
H256([22u8; 32]); + seal(&mut batch, 0, DIGEST, 2); + seal(&mut batch, 1, DIGEST, 2); + + check_two_seals(&batch); + } + + #[test] + fn test_seal_mono_context_reorder() { + let mut batch = EntryBatch::new(LOAD_INDEX); + batch.insert_data(0, vec![11; BYTES_PER_SEAL * 2]).unwrap(); + + const DIGEST: H256 = H256([22u8; 32]); + seal(&mut batch, 1, DIGEST, 2); + seal(&mut batch, 0, DIGEST, 2); + + check_two_seals(&batch); + } + + #[test] + fn test_seal_mono_context_partial() { + let mut batch = EntryBatch::new(LOAD_INDEX); + batch.insert_data(0, vec![11; BYTES_PER_SEAL * 2]).unwrap(); + + const DIGEST: H256 = H256([22u8; 32]); + seal(&mut batch, 1, DIGEST, 2); + + check_two_seals(&batch); + } + + #[test] + fn test_seal_hete_context() { + let mut batch = EntryBatch::new(LOAD_INDEX); + batch.insert_data(0, vec![11; BYTES_PER_SEAL * 2]).unwrap(); + + const DIGEST0: H256 = H256([22u8; 32]); + const DIGEST1: H256 = H256([33u8; 32]); + + seal(&mut batch, 0, DIGEST0, 1); + seal(&mut batch, 1, DIGEST1, 2); + + check_two_seals(&batch); + } + + #[test] + fn test_seal_hete_context_reord() { + let mut batch = EntryBatch::new(LOAD_INDEX); + batch.insert_data(0, vec![11; BYTES_PER_SEAL * 2]).unwrap(); + + const DIGEST0: H256 = H256([22u8; 32]); + const DIGEST1: H256 = H256([33u8; 32]); + + seal(&mut batch, 1, DIGEST1, 2); + seal(&mut batch, 0, DIGEST0, 1); + + check_two_seals(&batch); + } + + #[test] + fn test_seal_hete_context_partial() { + let mut batch = EntryBatch::new(LOAD_INDEX); + batch.insert_data(0, vec![11; BYTES_PER_SEAL * 2]).unwrap(); + + // const DIGEST0: H256 = H256([22u8; 32]); + const DIGEST1: H256 = H256([33u8; 32]); + + seal(&mut batch, 1, DIGEST1, 2); + + check_two_seals(&batch); + } +} diff --git a/node/storage/src/log_store/load_chunk/seal.rs b/node/storage/src/log_store/load_chunk/seal.rs new file mode 100644 index 0000000..6b705e3 --- /dev/null +++ b/node/storage/src/log_store/load_chunk/seal.rs @@ -0,0 +1,303 @@ +use ethereum_types::H256; +use ssz_derive::{Decode as DeriveDecode, Encode as DeriveEncode}; +use static_assertions::const_assert; + +use zgs_seal; +use zgs_spec::{SEALS_PER_LOAD, SECTORS_PER_LOAD, SECTORS_PER_SEAL}; + +use super::bitmap::WrappedBitmap; + +#[derive(DeriveEncode, DeriveDecode)] +pub struct SealContextInfo { + /// The context digest for this seal group + context_digest: H256, + /// The end position (exclusive) indexed by sectors + end_seal_index: u16, +} + +type ChunkSealBitmap = WrappedBitmap; +const_assert!(SEALS_PER_LOAD <= u128::BITS as usize); + +#[derive(Default, DeriveEncode, DeriveDecode)] +pub struct SealInfo { + // a bitmap specify which sealing chunks have been sealed + bitmap: ChunkSealBitmap, + // the batch_offset (seal chunks) of the EntryBatch this seal info belongs to + load_index: u64, + // the miner Id for sealing this chunk, zero representing doesn't exists + miner_id: H256, + // seal context information, indexed by u16. Get a position has never been set is undefined behaviour. 
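+    // For intuition (this mirrors the `get_seal_context` test below): with contexts
+    // whose `end_seal_index` values are [2, 3, 6], seal indices 0..=1 resolve to the
+    // first context, 2 to the second, 3..=5 to the third, and 6 has no context.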
+ seal_contexts: Vec, +} + +// Basic interfaces +impl SealInfo { + pub fn new(load_index: u64) -> Self { + Self { + load_index, + ..Default::default() + } + } + + pub fn is_sealed(&self, seal_index: u16) -> bool { + self.bitmap.get(seal_index as usize) + } + + pub fn mark_sealed(&mut self, seal_index: u16) { + self.bitmap.set(seal_index as usize, true); + } + + pub fn load_index(&self) -> u64 { + self.load_index + } + + pub fn global_seal_sector(&self, index: u16) -> u64 { + (self.load_index as usize * SECTORS_PER_LOAD + index as usize * SECTORS_PER_SEAL) as u64 + } +} + +// Interfaces for maintaining context info +impl SealInfo { + fn context_index(&self, seal_index: u16) -> usize { + match self + .seal_contexts + .binary_search_by_key(&(seal_index + 1), |x| x.end_seal_index) + { + Ok(x) | Err(x) => x, + } + } + + pub fn get_seal_context_digest(&self, seal_index: u16) -> Option { + self.seal_contexts + .get(self.context_index(seal_index)) + .map(|x| x.context_digest) + } + + pub fn set_seal_context( + &mut self, + context_digest: H256, + global_end_seal_index: u64, + miner_id: H256, + ) { + // 1. Check consistency of the miner id. + if self.miner_id.is_zero() { + self.miner_id = miner_id; + } else { + assert!( + self.miner_id == miner_id, + "miner_id setting is inconsistent with db" + ); + } + + // 2. Compute the local end_seal_index + let end_seal_index = global_end_seal_index - self.load_index * SEALS_PER_LOAD as u64; + let end_seal_index = std::cmp::min(end_seal_index as u16, SEALS_PER_LOAD as u16); + let new_context = SealContextInfo { + context_digest, + end_seal_index, + }; + + // 3. Update the seal context array by cases + let insert_position = self.context_index(end_seal_index - 1); + + if let Some(existing_context) = self.seal_contexts.get(insert_position) { + if existing_context.context_digest == new_context.context_digest { + // Case 1: the new context is consistent with existing contexts (nothing to do) + } else { + // Case 2: the new context should be inserted in the middle (may not happen) + self.seal_contexts.insert(insert_position, new_context); + } + } else { + // Case 3: the new context exceeds the upper bound of existing contexts + self.seal_contexts.push(new_context); + } + } +} + +impl SealInfo { + pub fn truncate(&mut self, reverted_seal_index: u16) { + // TODO (kevin): have issue in some cases + let truncated_context_index = self.context_index(reverted_seal_index); + let truncated_seal_index = self.truncated_seal_index(reverted_seal_index); + + self.bitmap.truncate(truncated_seal_index); + self.seal_contexts.truncate(truncated_context_index); + } + + pub fn truncated_seal_index(&self, reverted_seal_index: u16) -> u16 { + let truncated_context = self.context_index(reverted_seal_index); + if truncated_context == 0 { + 0 + } else { + self.seal_contexts + .get(truncated_context - 1) + .unwrap() + .end_seal_index + } + } +} + +impl SealInfo { + pub fn unseal(&self, data: &mut [u8], index: u16) { + if !self.is_sealed(index) { + return; + } + let seal_context = self + .get_seal_context_digest(index) + .expect("cannot unseal non-sealed data"); + zgs_seal::unseal( + data, + &self.miner_id, + &seal_context, + self.global_seal_sector(index), + ); + } + + #[cfg(test)] + pub fn seal(&self, data: &mut [u8], index: u16) { + if self.is_sealed(index) { + return; + } + let seal_context = self + .get_seal_context_digest(index) + .expect("cannot unseal non-sealed data"); + zgs_seal::seal( + data, + &self.miner_id, + &seal_context, + self.global_seal_sector(index), + ); + } +} + 
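+// A rough sketch of the round trip exercised by the tests below: a seal chunk is
+// masked with a stream derived from `(miner_id, context_digest, global_seal_sector)`
+// (as `unseal_with_mask_seed` suggests), and `unseal` reapplies it to recover the
+// plaintext, e.g.
+//
+//     let mut buf = original.clone();            // one full BYTES_PER_SEAL chunk
+//     sealer.seal(&mut buf, 5);                  // test-only helper
+//     sealer.bitmap.set(5, true);                // mark it sealed
+//     sealer.unseal(&mut buf, 5);
+//     assert_eq!(buf, original);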
+#[cfg(test)] +mod tests { + use ethereum_types::H256; + use hex_literal::hex; + use rand::{rngs::StdRng, RngCore, SeedableRng}; + use zgs_seal; + use zgs_spec::BYTES_PER_SEAL; + + use super::{SealContextInfo, SealInfo}; + + const TEST_MINER_ID: H256 = H256(hex!( + "003d82782c78262bada18a22f5f982d2b43934d5541e236ca3781ddc8c911cb8" + )); + + #[test] + fn get_seal_context() { + let mut random = StdRng::seed_from_u64(149); + + let mut context1 = H256::default(); + let mut context2 = H256::default(); + let mut context3 = H256::default(); + random.fill_bytes(&mut context1.0); + random.fill_bytes(&mut context2.0); + random.fill_bytes(&mut context3.0); + + let mut sealer = SealInfo::new(0); + sealer.seal_contexts.push(SealContextInfo { + context_digest: context1, + end_seal_index: 2, + }); + sealer.seal_contexts.push(SealContextInfo { + context_digest: context2, + end_seal_index: 3, + }); + sealer.seal_contexts.push(SealContextInfo { + context_digest: context3, + end_seal_index: 6, + }); + + assert_eq!(sealer.get_seal_context_digest(0), Some(context1)); + assert_eq!(sealer.get_seal_context_digest(1), Some(context1)); + assert_eq!(sealer.get_seal_context_digest(2), Some(context2)); + assert_eq!(sealer.get_seal_context_digest(3), Some(context3)); + assert_eq!(sealer.get_seal_context_digest(4), Some(context3)); + assert_eq!(sealer.get_seal_context_digest(5), Some(context3)); + assert_eq!(sealer.get_seal_context_digest(6), None); + + assert_eq!(sealer.truncated_seal_index(0), 0); + assert_eq!(sealer.truncated_seal_index(1), 0); + assert_eq!(sealer.truncated_seal_index(2), 2); + assert_eq!(sealer.truncated_seal_index(3), 3); + assert_eq!(sealer.truncated_seal_index(4), 3); + assert_eq!(sealer.truncated_seal_index(5), 3); + assert_eq!(sealer.truncated_seal_index(6), 6); + } + + #[test] + fn unseal_chunks() { + let mut random = StdRng::seed_from_u64(137); + let mut unsealed_data = vec![0u8; BYTES_PER_SEAL * 10]; + random.fill_bytes(&mut unsealed_data); + let mut data = unsealed_data.clone(); + + let mut context1 = H256::default(); + let mut context2 = H256::default(); + let mut context3 = H256::default(); + random.fill_bytes(&mut context1.0); + random.fill_bytes(&mut context2.0); + random.fill_bytes(&mut context3.0); + + let mut sealer = SealInfo::new(100); + sealer.miner_id = TEST_MINER_ID; + + sealer.seal_contexts.push(SealContextInfo { + context_digest: context1, + end_seal_index: 2, + }); + sealer.seal_contexts.push(SealContextInfo { + context_digest: context2, + end_seal_index: 5, + }); + sealer.seal_contexts.push(SealContextInfo { + context_digest: context3, + end_seal_index: 10, + }); + + // skip seal 6, 3, 9 + for idx in [1, 7, 2, 5, 0, 8, 4].into_iter() { + sealer.seal( + &mut data[idx * BYTES_PER_SEAL..(idx + 1) * BYTES_PER_SEAL], + idx as u16, + ); + sealer.bitmap.set(idx, true); + } + + let partial_hint = &data[BYTES_PER_SEAL * 5 + 64..BYTES_PER_SEAL * 5 + 96]; + let mut tmp_data = data.clone(); + zgs_seal::unseal_with_mask_seed( + &mut tmp_data[BYTES_PER_SEAL * 5 + 96..BYTES_PER_SEAL * 6], + partial_hint, + ); + assert_eq!( + &tmp_data[BYTES_PER_SEAL * 5 + 96..BYTES_PER_SEAL * 6], + &unsealed_data[BYTES_PER_SEAL * 5 + 96..BYTES_PER_SEAL * 6] + ); + + let mut tmp_data = data.clone(); + sealer.unseal(&mut tmp_data[BYTES_PER_SEAL * 5..BYTES_PER_SEAL * 6], 5); + assert_eq!( + &tmp_data[BYTES_PER_SEAL * 5..BYTES_PER_SEAL * 6], + &unsealed_data[BYTES_PER_SEAL * 5..BYTES_PER_SEAL * 6] + ); + + let mut tmp_data = data.clone(); + sealer.unseal(&mut tmp_data[BYTES_PER_SEAL * 6..BYTES_PER_SEAL * 7], 
6); + assert_eq!( + &tmp_data[BYTES_PER_SEAL * 6..BYTES_PER_SEAL * 7], + &unsealed_data[BYTES_PER_SEAL * 6..BYTES_PER_SEAL * 7] + ); + + let mut tmp_data = data.clone(); + sealer.unseal( + &mut tmp_data[BYTES_PER_SEAL * 7..BYTES_PER_SEAL * 7 + 96], + 7, + ); + assert_eq!( + &tmp_data[BYTES_PER_SEAL * 7..BYTES_PER_SEAL * 7 + 96], + &unsealed_data[BYTES_PER_SEAL * 7..BYTES_PER_SEAL * 7 + 96] + ); + } +} diff --git a/node/storage/src/log_store/load_chunk/serde.rs b/node/storage/src/log_store/load_chunk/serde.rs new file mode 100644 index 0000000..4c7fa50 --- /dev/null +++ b/node/storage/src/log_store/load_chunk/serde.rs @@ -0,0 +1,83 @@ +use super::{chunk_data::PartialBatch, EntryBatchData}; + +use crate::log_store::load_chunk::chunk_data::IncompleteData; +use ssz::{Decode, DecodeError, Encode}; +use std::mem; + +const COMPLETE_BATCH_TYPE: u8 = 0; +const INCOMPLETE_BATCH_TYPE: u8 = 1; + +impl Encode for EntryBatchData { + fn is_ssz_fixed_len() -> bool { + false + } + + fn ssz_append(&self, buf: &mut Vec) { + match &self { + EntryBatchData::Complete(data) => { + buf.extend_from_slice(&[COMPLETE_BATCH_TYPE]); + buf.extend_from_slice(data.as_slice()); + } + EntryBatchData::Incomplete(data_list) => { + buf.extend_from_slice(&[INCOMPLETE_BATCH_TYPE]); + buf.extend_from_slice(&data_list.as_ssz_bytes()); + } + } + } + + fn ssz_bytes_len(&self) -> usize { + match &self { + EntryBatchData::Complete(data) => 1 + data.len(), + EntryBatchData::Incomplete(batch_list) => 1 + batch_list.ssz_bytes_len(), + } + } +} + +impl Decode for EntryBatchData { + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { + match *bytes.first().ok_or(DecodeError::ZeroLengthItem)? { + COMPLETE_BATCH_TYPE => Ok(EntryBatchData::Complete(bytes[1..].to_vec())), + INCOMPLETE_BATCH_TYPE => Ok(EntryBatchData::Incomplete( + IncompleteData::from_ssz_bytes(&bytes[1..])?, + )), + unknown => Err(DecodeError::BytesInvalid(format!( + "Unrecognized EntryBatchData indentifier {}", + unknown + ))), + } + } +} + +impl Encode for PartialBatch { + fn is_ssz_fixed_len() -> bool { + false + } + + fn ssz_append(&self, buf: &mut Vec) { + buf.extend_from_slice(&self.start_sector.to_be_bytes()); + buf.extend_from_slice(&self.data); + } + + fn ssz_bytes_len(&self) -> usize { + 1 + self.data.len() + } +} + +impl Decode for PartialBatch { + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { + Ok(Self { + start_sector: usize::from_be_bytes( + bytes[..mem::size_of::()].try_into().unwrap(), + ), + data: bytes[mem::size_of::()..].to_vec(), + }) + } +} diff --git a/node/storage/src/log_store/log_manager.rs b/node/storage/src/log_store/log_manager.rs new file mode 100644 index 0000000..4f231ce --- /dev/null +++ b/node/storage/src/log_store/log_manager.rs @@ -0,0 +1,1077 @@ +use crate::log_store::flow_store::{batch_iter, FlowConfig, FlowStore}; +use crate::log_store::tx_store::TransactionStore; +use crate::log_store::{ + FlowRead, FlowWrite, LogStoreChunkRead, LogStoreChunkWrite, LogStoreRead, LogStoreWrite, +}; +use crate::{try_option, ZgsKeyValueDB}; +use anyhow::{anyhow, bail, Result}; +use append_merkle::{Algorithm, MerkleTreeRead, Sha3Algorithm}; +use ethereum_types::H256; +use kvdb_rocksdb::{Database, DatabaseConfig}; +use merkle_light::merkle::{log2_pow2, MerkleTree}; +use merkle_tree::RawLeafSha3Algorithm; +use rayon::iter::ParallelIterator; +use rayon::prelude::ParallelSlice; +use shared_types::{ + bytes_to_chunks, compute_padded_chunk_size, 
compute_segment_size, Chunk, ChunkArray, + ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowProof, FlowRangeProof, Merkle, Transaction, +}; +use std::cmp::Ordering; +use std::collections::BTreeMap; +use std::path::Path; +use std::sync::Arc; +use tracing::{debug, error, info, instrument, trace, warn}; + +use super::LogStoreInner; + +/// 256 Bytes +pub const ENTRY_SIZE: usize = 256; +/// 1024 Entries. +pub const PORA_CHUNK_SIZE: usize = 1024; + +pub const COL_TX: u32 = 0; +pub const COL_ENTRY_BATCH: u32 = 1; +pub const COL_TX_DATA_ROOT_INDEX: u32 = 2; +pub const COL_ENTRY_BATCH_ROOT: u32 = 3; +pub const COL_TX_COMPLETED: u32 = 4; +pub const COL_MISC: u32 = 5; +pub const COL_SEAL_CONTEXT: u32 = 6; +pub const COL_NUM: u32 = 7; + +pub struct LogManager { + pub(crate) db: Arc, + tx_store: TransactionStore, + flow_store: FlowStore, + // TODO(zz): Refactor the in-memory merkle and in-disk storage together. + pora_chunks_merkle: Merkle, + /// The in-memory structure of the sub merkle tree of the last chunk. + /// The size is always less than `PORA_CHUNK_SIZE`. + last_chunk_merkle: Merkle, +} + +#[derive(Clone, Default)] +pub struct LogConfig { + pub flow: FlowConfig, +} + +impl LogStoreInner for LogManager { + fn flow(&self) -> &dyn super::Flow { + &self.flow_store + } + + fn flow_mut(&mut self) -> &mut dyn super::Flow { + &mut self.flow_store + } +} + +impl LogStoreChunkWrite for LogManager { + fn put_chunks(&mut self, tx_seq: u64, chunks: ChunkArray) -> Result<()> { + let tx = self + .tx_store + .get_tx_by_seq_number(tx_seq)? + .ok_or_else(|| anyhow!("put chunks with missing tx: tx_seq={}", tx_seq))?; + let (chunks_for_proof, _) = compute_padded_chunk_size(tx.size as usize); + if chunks.start_index.saturating_mul(ENTRY_SIZE as u64) + chunks.data.len() as u64 + > (chunks_for_proof * ENTRY_SIZE) as u64 + { + bail!( + "put chunks with data out of tx range: tx_seq={} start_index={} data_len={}", + tx_seq, + chunks.start_index, + chunks.data.len() + ); + } + // TODO: Use another struct to avoid confusion. + let mut flow_entry_array = chunks; + flow_entry_array.start_index += tx.start_entry_index; + self.append_entries(flow_entry_array)?; + Ok(()) + } + + fn put_chunks_with_tx_hash( + &mut self, + tx_seq: u64, + tx_hash: H256, + chunks: ChunkArray, + ) -> Result { + let tx = self + .tx_store + .get_tx_by_seq_number(tx_seq)? + .ok_or_else(|| anyhow!("put chunks with missing tx: tx_seq={}", tx_seq))?; + if tx.hash() != tx_hash { + return Ok(false); + } + let (chunks_for_proof, _) = compute_padded_chunk_size(tx.size as usize); + if chunks.start_index.saturating_mul(ENTRY_SIZE as u64) + chunks.data.len() as u64 + > (chunks_for_proof * ENTRY_SIZE) as u64 + { + bail!( + "put chunks with data out of tx range: tx_seq={} start_index={} data_len={}", + tx_seq, + chunks.start_index, + chunks.data.len() + ); + } + // TODO: Use another struct to avoid confusion. + let mut flow_entry_array = chunks; + flow_entry_array.start_index += tx.start_entry_index; + self.append_entries(flow_entry_array)?; + Ok(true) + } + + fn remove_all_chunks(&self, _tx_seq: u64) -> crate::error::Result<()> { + todo!() + } +} + +impl LogStoreWrite for LogManager { + #[instrument(skip(self))] + /// Insert the tx and update the flow store if needed. + /// + /// We assumes that all transactions are inserted in order sequentially. + /// We always write the database in the following order: + /// 1. Insert the tx (the tx and the root to tx_seq map are inserted atomically). + /// 2. 
Update the flow store(pad data for alignment and copy data in `put_tx`, write data in + /// `put_chunks`, pad rear data in `finalize_tx`). + /// 3. Mark tx as finalized. + /// + /// Step 1 and 3 are both atomic operations. + /// * If a tx has been finalized, the data in flow must + /// have been updated correctly. + /// * If `put_tx` succeeds but not finalized, we rely on the upper layer + /// operations (client/auto-sync) to insert needed data (`put_chunks`) and trigger + /// finalization (`finalize_tx`). + /// * If `put_tx` fails in the middle, the tx is inserted but the flow is not updated correctly. + /// Only the last tx may have this case, so we rerun + /// `put_tx` for the last tx when we restart the node to ensure that it succeeds. + /// + fn put_tx(&mut self, tx: Transaction) -> Result<()> { + debug!("put_tx: tx={:?}", tx); + let expected_seq = self.next_tx_seq(); + if tx.seq != expected_seq { + if tx.seq + 1 == expected_seq && !self.check_tx_completed(tx.seq)? { + // special case for rerun the last tx during recovery. + debug!("recovery with tx_seq={}", tx.seq); + } else { + // This is not supposed to happen since we have checked the tx seq in log entry sync. + error!("tx unmatch, expected={} get={:?}", expected_seq, tx); + bail!("unexpected tx!"); + } + } + let maybe_same_data_tx_seq = self.tx_store.put_tx(tx.clone())?.first().cloned(); + // TODO(zz): Should we validate received tx? + self.append_subtree_list(tx.merkle_nodes.clone())?; + self.commit_merkle(tx.seq)?; + + if let Some(old_tx_seq) = maybe_same_data_tx_seq { + if self.check_tx_completed(old_tx_seq)? { + self.copy_tx_data(old_tx_seq, vec![tx.seq])?; + self.tx_store.finalize_tx(tx.seq)?; + } + } + Ok(()) + } + + fn finalize_tx(&mut self, tx_seq: u64) -> Result<()> { + let tx = self + .tx_store + .get_tx_by_seq_number(tx_seq)? + .ok_or_else(|| anyhow!("finalize_tx with tx missing: tx_seq={}", tx_seq))?; + + self.padding_rear_data(&tx)?; + + let tx_end_index = tx.start_entry_index + bytes_to_entries(tx.size); + // TODO: Check completeness without loading all data in memory. + // TODO: Should we double check the tx merkle root? + if self + .flow_store + .get_entries(tx.start_entry_index, tx_end_index)? + .is_some() + { + let same_root_seq_list = self + .tx_store + .get_tx_seq_list_by_data_root(&tx.data_merkle_root)?; + // Check if there are other same-root transaction not finalized. + if same_root_seq_list.first() == Some(&tx_seq) { + self.copy_tx_data(tx_seq, same_root_seq_list[1..].to_vec())?; + } + self.tx_store.finalize_tx(tx_seq)?; + Ok(()) + } else { + bail!("finalize tx with data missing: tx_seq={}", tx_seq) + } + } + + fn finalize_tx_with_hash(&mut self, tx_seq: u64, tx_hash: H256) -> crate::error::Result { + trace!( + "finalize_tx_with_hash: tx_seq={} tx_hash={:?}", + tx_seq, + tx_hash + ); + let tx = self + .tx_store + .get_tx_by_seq_number(tx_seq)? + .ok_or_else(|| anyhow!("finalize_tx with tx missing: tx_seq={}", tx_seq))?; + debug!("finalize_tx_with_hash: tx={:?}", tx); + if tx.hash() != tx_hash { + return Ok(false); + } + + self.padding_rear_data(&tx)?; + + let tx_end_index = tx.start_entry_index + bytes_to_entries(tx.size); + // TODO: Check completeness without loading all data in memory. + // TODO: Should we double check the tx merkle root? + if self + .flow_store + .get_entries(tx.start_entry_index, tx_end_index)? 
+ .is_some() + { + self.tx_store.finalize_tx(tx_seq)?; + let same_root_seq_list = self + .tx_store + .get_tx_seq_list_by_data_root(&tx.data_merkle_root)?; + // Check if there are other same-root transaction not finalized. + if same_root_seq_list.first() == Some(&tx_seq) { + self.copy_tx_data(tx_seq, same_root_seq_list[1..].to_vec())?; + } + Ok(true) + } else { + bail!("finalize tx with data missing: tx_seq={}", tx_seq) + } + } + + fn put_sync_progress(&self, progress: (u64, H256)) -> Result<()> { + self.tx_store.put_progress(progress) + } + + /// Return the reverted Transactions in order. + /// `tx_seq == u64::MAX` is a special case for reverting all transactions. + fn revert_to(&mut self, tx_seq: u64) -> Result> { + // FIXME(zz): If this revert is triggered by chain reorg after restarts, this will fail. + self.revert_merkle_tree(tx_seq)?; + let start_index = self.last_chunk_start_index() * PORA_CHUNK_SIZE as u64 + + self.last_chunk_merkle.leaves() as u64; + self.flow_store.truncate(start_index)?; + let start = if tx_seq != u64::MAX { tx_seq + 1 } else { 0 }; + self.tx_store.remove_tx_after(start) + } +} + +impl LogStoreChunkRead for LogManager { + fn get_chunk_by_tx_and_index( + &self, + tx_seq: u64, + index: usize, + ) -> crate::error::Result> { + // TODO(zz): This is not needed? + let single_chunk_array = + try_option!(self.get_chunks_by_tx_and_index_range(tx_seq, index, index + 1)?); + Ok(Some(Chunk(single_chunk_array.data.as_slice().try_into()?))) + } + + fn get_chunks_by_tx_and_index_range( + &self, + tx_seq: u64, + index_start: usize, + index_end: usize, + ) -> crate::error::Result> { + let tx = try_option!(self.get_tx_by_seq_number(tx_seq)?); + + if index_end as u64 > bytes_to_entries(tx.size) { + bail!( + "end entry index exceeds tx size: end={} tx size={}", + index_start, + tx.size + ); + } + + let start_flow_index = tx.start_entry_index + index_start as u64; + let end_flow_index = tx.start_entry_index + index_end as u64; + // TODO: Use another struct. + // Set returned chunk start index as the offset in the tx data. + let mut tx_chunk = try_option!(self + .flow_store + .get_entries(start_flow_index, end_flow_index)?); + tx_chunk.start_index -= tx.start_entry_index; + Ok(Some(tx_chunk)) + } + + fn get_chunk_by_data_root_and_index( + &self, + _data_root: &DataRoot, + _index: usize, + ) -> crate::error::Result> { + todo!() + } + + fn get_chunks_by_data_root_and_index_range( + &self, + data_root: &DataRoot, + index_start: usize, + index_end: usize, + ) -> crate::error::Result> { + let tx_seq = try_option!(self.get_tx_seq_by_data_root(data_root)?); + self.get_chunks_by_tx_and_index_range(tx_seq, index_start, index_end) + } + + fn get_chunk_index_list(&self, _tx_seq: u64) -> crate::error::Result> { + todo!() + } + + fn get_chunk_by_flow_index( + &self, + index: u64, + length: u64, + ) -> crate::error::Result> { + let start_flow_index = index; + let end_flow_index = index + length; + self.flow_store + .get_entries(start_flow_index, end_flow_index) + } +} + +impl LogStoreRead for LogManager { + fn get_tx_by_seq_number(&self, seq: u64) -> crate::error::Result> { + self.tx_store.get_tx_by_seq_number(seq) + } + + fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> crate::error::Result> { + self.tx_store.get_first_tx_seq_by_data_root(data_root) + } + + fn get_chunk_with_proof_by_tx_and_index( + &self, + tx_seq: u64, + index: usize, + ) -> crate::error::Result> { + // TODO(zz): Optimize for mining. 
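// [Editor's note, illustrative only: the read and proof paths in this impl all
// translate tx-local entry indices into global flow indices as
// `tx.start_entry_index + local_index`, and shift results back before returning.
// With hypothetical numbers, a tx at start_entry_index = 4096 holding 1 MiB
// (4096 entries of 256 bytes): local range [10, 20) reads flow entries
// [4106, 4116), and the returned ChunkArray's start_index is rebased to 10.]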
+ let single_chunk_array = try_option!(self.get_chunks_with_proof_by_tx_and_index_range( + tx_seq, + index, + index + 1 + )?); + Ok(Some(ChunkWithProof { + chunk: Chunk(single_chunk_array.chunks.data.as_slice().try_into()?), + proof: single_chunk_array.proof.left_proof, + })) + } + + fn get_chunks_with_proof_by_tx_and_index_range( + &self, + tx_seq: u64, + index_start: usize, + index_end: usize, + ) -> crate::error::Result> { + let tx = try_option!(self.tx_store.get_tx_by_seq_number(tx_seq)?); + let chunks = + try_option!(self.get_chunks_by_tx_and_index_range(tx_seq, index_start, index_end)?); + let left_proof = self.gen_proof(tx.start_entry_index + index_start as u64, None)?; + let right_proof = self.gen_proof(tx.start_entry_index + index_end as u64 - 1, None)?; + Ok(Some(ChunkArrayWithProof { + chunks, + proof: FlowRangeProof { + left_proof, + right_proof, + }, + })) + } + + fn check_tx_completed(&self, tx_seq: u64) -> crate::error::Result { + self.tx_store.check_tx_completed(tx_seq) + } + + fn validate_range_proof(&self, tx_seq: u64, data: &ChunkArrayWithProof) -> Result { + let tx = self + .get_tx_by_seq_number(tx_seq)? + .ok_or_else(|| anyhow!("tx missing"))?; + let leaves = data_to_merkle_leaves(&data.chunks.data)?; + data.proof.validate::( + &leaves, + (data.chunks.start_index + tx.start_entry_index) as usize, + )?; + Ok(self.pora_chunks_merkle.check_root(&data.proof.root())) + } + + fn get_sync_progress(&self) -> Result> { + self.tx_store.get_progress() + } + + fn next_tx_seq(&self) -> u64 { + self.tx_store.next_tx_seq() + } + + fn get_proof_at_root( + &self, + root: &DataRoot, + index: u64, + length: u64, + ) -> crate::error::Result { + let left_proof = self.gen_proof(index, Some(*root))?; + let right_proof = self.gen_proof(index + length - 1, Some(*root))?; + Ok(FlowRangeProof { + left_proof, + right_proof, + }) + } + + fn get_context(&self) -> crate::error::Result<(DataRoot, u64)> { + Ok(( + *self.pora_chunks_merkle.root(), + self.last_chunk_start_index() + self.last_chunk_merkle.leaves() as u64, + )) + } +} + +impl LogManager { + pub fn rocksdb(config: LogConfig, path: impl AsRef) -> Result { + let mut db_config = DatabaseConfig::with_columns(COL_NUM); + db_config.enable_statistics = true; + let db = Arc::new(Database::open(&db_config, path)?); + Self::new(db, config) + } + + pub fn memorydb(config: LogConfig) -> Result { + let db = Arc::new(kvdb_memorydb::create(COL_NUM)); + Self::new(db, config) + } + + fn new(db: Arc, config: LogConfig) -> Result { + let tx_store = TransactionStore::new(db.clone())?; + let flow_store = FlowStore::new(db.clone(), config.flow); + let mut initial_data = flow_store.get_chunk_root_list()?; + // If the last tx `put_tx` does not complete, we will revert it in `initial_data.subtree_list` + // first and call `put_tx` later. The known leaves in its data will be saved in `extra_leaves` + // and inserted later. + let mut extra_leaves = Vec::new(); + + let next_tx_seq = tx_store.next_tx_seq(); + let mut start_tx_seq = if next_tx_seq > 0 { + Some(next_tx_seq - 1) + } else { + None + }; + let mut last_tx_to_insert = None; + if let Some(last_tx_seq) = start_tx_seq { + if !tx_store.check_tx_completed(last_tx_seq)? { + // Last tx not finalized, we need to check if its `put_tx` is completed. + let last_tx = tx_store + .get_tx_by_seq_number(last_tx_seq)? 
+ .expect("tx missing"); + let mut current_len = initial_data.leaves(); + let expected_len = (last_tx.start_entry_index + last_tx.num_entries() as u64) + / PORA_CHUNK_SIZE as u64; + match expected_len.cmp(&(current_len as u64)) { + Ordering::Less => { + bail!( + "Unexpected DB: merkle tree larger than the known data size,\ + expected={} get={}", + expected_len, + current_len + ); + } + Ordering::Equal => {} + Ordering::Greater => { + // Flow updates are not complete. + // For simplicity, we build the merkle tree for the previous tx and update + // the flow for the last tx again. + info!("revert last tx: last_tx={:?}", last_tx); + last_tx_to_insert = Some(last_tx); + if last_tx_seq == 0 { + start_tx_seq = None; + } else { + // truncate until we get the pora chunks merkle for the previous tx. + let previous_tx = tx_store + .get_tx_by_seq_number(last_tx_seq - 1)? + .expect("tx missing"); + let expected_len = ((previous_tx.start_entry_index + + previous_tx.num_entries() as u64) + / PORA_CHUNK_SIZE as u64) + as usize; + assert!(current_len > expected_len); + while let Some((subtree_depth, _)) = initial_data.subtree_list.pop() { + current_len -= 1 << (subtree_depth - 1); + if current_len == expected_len { + break; + } + } + assert_eq!(current_len, expected_len); + while let Some((index, h)) = initial_data.known_leaves.pop() { + if index < current_len { + initial_data.known_leaves.push((index, h)); + break; + } else { + extra_leaves.push((index, h)); + } + } + start_tx_seq = Some(last_tx_seq - 1); + }; + } + } + } + } + + let mut pora_chunks_merkle = + Merkle::new_with_subtrees(initial_data, log2_pow2(PORA_CHUNK_SIZE), start_tx_seq)?; + let last_chunk_merkle = match start_tx_seq { + Some(tx_seq) => { + tx_store.rebuild_last_chunk_merkle(pora_chunks_merkle.leaves(), tx_seq)? 
+ } + // Initialize + None => Merkle::new_with_depth(vec![], log2_pow2(PORA_CHUNK_SIZE) + 1, None), + }; + + debug!( + "LogManager::new() with chunk_list_len={} start_tx_seq={:?} last_chunk={}", + pora_chunks_merkle.leaves(), + start_tx_seq, + last_chunk_merkle.leaves(), + ); + if last_chunk_merkle.leaves() != 0 { + pora_chunks_merkle.append(*last_chunk_merkle.root()); + // update the merkle root + pora_chunks_merkle.commit(start_tx_seq); + } + let mut log_manager = Self { + db, + tx_store, + flow_store, + pora_chunks_merkle, + last_chunk_merkle, + }; + + if let Some(tx) = last_tx_to_insert { + log_manager.put_tx(tx)?; + for (index, h) in extra_leaves { + if index < log_manager.pora_chunks_merkle.leaves() { + log_manager.pora_chunks_merkle.fill_leaf(index, h); + } else { + error!("out of range extra leaf: index={} hash={:?}", index, h); + } + } + } else { + assert!(extra_leaves.is_empty()); + } + log_manager.try_initialize()?; + Ok(log_manager) + } + + fn try_initialize(&mut self) -> Result<()> { + if self.pora_chunks_merkle.leaves() == 0 && self.last_chunk_merkle.leaves() == 0 { + self.last_chunk_merkle.append(H256::zero()); + self.pora_chunks_merkle + .update_last(*self.last_chunk_merkle.root()); + } else if self.last_chunk_merkle.leaves() != 0 { + let last_chunk_start_index = self.last_chunk_start_index(); + let last_chunk_data = self.flow_store.get_available_entries( + last_chunk_start_index, + last_chunk_start_index + PORA_CHUNK_SIZE as u64, + )?; + for e in last_chunk_data { + let start_index = e.start_index - last_chunk_start_index; + for i in 0..e.data.len() / ENTRY_SIZE { + let index = i + start_index as usize; + if index >= self.last_chunk_merkle.leaves() { + // We revert the merkle tree before truncate the flow store, + // so last_chunk_data may include data that should have been truncated. + break; + } + self.last_chunk_merkle.fill_leaf( + index, + Sha3Algorithm::leaf(&e.data[i * ENTRY_SIZE..(i + 1) * ENTRY_SIZE]), + ); + } + } + } + Ok(()) + } + + fn gen_proof(&self, flow_index: u64, maybe_root: Option) -> Result { + let chunk_index = flow_index / PORA_CHUNK_SIZE as u64; + let top_proof = match maybe_root { + None => self.pora_chunks_merkle.gen_proof(chunk_index as usize)?, + Some(root) => self + .pora_chunks_merkle + .at_root_version(&root)? + .gen_proof(chunk_index as usize)?, + }; + + // TODO(zz): Maybe we can decide that all proofs are at the PoRA chunk level, so + // we do not need to maintain the proof at the entry level below. + // Condition (self.last_chunk_merkle.leaves() == 0): When last chunk size is exactly PORA_CHUNK_SIZE, proof should be generated from flow data, as last_chunk_merkle.leaves() is zero at this time + // TODO(zz): In the current use cases, `maybe_root` is only `Some` for mining + // and `flow_index` must be within a complete PoRA chunk. For possible future usages, + // we'll need to find the flow length at the given root and load a partial chunk + // if `flow_index` is in the last chunk. + let sub_proof = if chunk_index as usize != self.pora_chunks_merkle.leaves() - 1 + || self.last_chunk_merkle.leaves() == 0 + { + self.flow_store + .gen_proof_in_batch(chunk_index as usize, flow_index as usize % PORA_CHUNK_SIZE)? + } else { + match maybe_root { + None => self + .last_chunk_merkle + .gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?, + Some(root) => self + .last_chunk_merkle + .at_root_version(&root)? 
+ .gen_proof(flow_index as usize % PORA_CHUNK_SIZE)?, + } + }; + entry_proof(&top_proof, &sub_proof) + } + + #[instrument(skip(self))] + fn append_subtree_list(&mut self, merkle_list: Vec<(usize, DataRoot)>) -> Result<()> { + if merkle_list.is_empty() { + return Ok(()); + } + + self.pad_tx(1 << (merkle_list[0].0 - 1))?; + + let mut batch_root_map = BTreeMap::new(); + for (subtree_depth, subtree_root) in merkle_list { + let subtree_size = 1 << (subtree_depth - 1); + if self.last_chunk_merkle.leaves() + subtree_size <= PORA_CHUNK_SIZE { + self.last_chunk_merkle + .append_subtree(subtree_depth, subtree_root)?; + if self.last_chunk_merkle.leaves() == subtree_size { + // `last_chunk_merkle` was empty, so this is a new leaf in the top_tree. + self.pora_chunks_merkle + .append_subtree(1, *self.last_chunk_merkle.root())?; + } else { + self.pora_chunks_merkle + .update_last(*self.last_chunk_merkle.root()); + } + if self.last_chunk_merkle.leaves() == PORA_CHUNK_SIZE { + batch_root_map.insert( + self.pora_chunks_merkle.leaves() - 1, + (*self.last_chunk_merkle.root(), 1), + ); + self.complete_last_chunk_merkle(self.pora_chunks_merkle.leaves() - 1)?; + } + } else { + // `last_chunk_merkle` has been padded here, so a subtree should not be across + // the chunks boundary. + assert_eq!(self.last_chunk_merkle.leaves(), 0); + assert!(subtree_size >= PORA_CHUNK_SIZE); + batch_root_map.insert( + self.pora_chunks_merkle.leaves(), + (subtree_root, subtree_depth - log2_pow2(PORA_CHUNK_SIZE)), + ); + self.pora_chunks_merkle + .append_subtree(subtree_depth - log2_pow2(PORA_CHUNK_SIZE), subtree_root)?; + } + } + self.flow_store.put_batch_root_list(batch_root_map)?; + Ok(()) + } + + #[instrument(skip(self))] + fn pad_tx(&mut self, first_subtree_size: u64) -> Result<()> { + // Check if we need to pad the flow. + let tx_start_flow_index = + self.last_chunk_start_index() + self.last_chunk_merkle.leaves() as u64; + let extra = tx_start_flow_index % first_subtree_size; + trace!( + "before pad_tx {} {}", + self.pora_chunks_merkle.leaves(), + self.last_chunk_merkle.leaves() + ); + if extra != 0 { + let pad_data = Self::padding((first_subtree_size - extra) as usize); + + // Update the in-memory merkle tree. + let mut root_map = BTreeMap::new(); + let last_chunk_pad = if self.last_chunk_merkle.leaves() == 0 { + 0 + } else { + (PORA_CHUNK_SIZE - self.last_chunk_merkle.leaves()) * ENTRY_SIZE + }; + + let mut completed_chunk_index = None; + if pad_data.len() < last_chunk_pad { + self.last_chunk_merkle + .append_list(data_to_merkle_leaves(&pad_data)?); + self.pora_chunks_merkle + .update_last(*self.last_chunk_merkle.root()); + } else { + if last_chunk_pad != 0 { + // Pad the last chunk. + self.last_chunk_merkle + .append_list(data_to_merkle_leaves(&pad_data[..last_chunk_pad])?); + self.pora_chunks_merkle + .update_last(*self.last_chunk_merkle.root()); + root_map.insert( + self.pora_chunks_merkle.leaves() - 1, + (*self.last_chunk_merkle.root(), 1), + ); + completed_chunk_index = Some(self.pora_chunks_merkle.leaves() - 1); + } + + // Pad with more complete chunks. 
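// [Editor's note, illustrative only: the amount padded here comes from the
// alignment check at the top of `pad_tx`. With a hypothetical first subtree of
// depth 7 (64 entries) and a current flow length of 1000 entries,
// extra = 1000 % 64 = 40, so 64 - 40 = 24 zero entries are appended and the new
// tx's data starts at entry 1024, a multiple of its first subtree size. The loop
// below only runs when the padding spans whole chunks, carving that remainder
// into complete 1024-entry batches.]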
+ let mut start_index = last_chunk_pad / ENTRY_SIZE; + while pad_data.len() >= (start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE { + let data = pad_data + [start_index * ENTRY_SIZE..(start_index + PORA_CHUNK_SIZE) * ENTRY_SIZE] + .to_vec(); + let root = *Merkle::new(data_to_merkle_leaves(&data)?, 0, None).root(); + self.pora_chunks_merkle.append(root); + root_map.insert(self.pora_chunks_merkle.leaves() - 1, (root, 1)); + start_index += PORA_CHUNK_SIZE; + } + assert_eq!(pad_data.len(), start_index * ENTRY_SIZE); + } + + // Update the root index. + self.flow_store.put_batch_root_list(root_map)?; + // Update the flow database. + // This should be called before `complete_last_chunk_merkle` so that we do not save + // subtrees with data known. + self.flow_store.append_entries(ChunkArray { + data: pad_data, + start_index: tx_start_flow_index, + })?; + if let Some(index) = completed_chunk_index { + self.complete_last_chunk_merkle(index)?; + } + } + trace!( + "after pad_tx {} {}", + self.pora_chunks_merkle.leaves(), + self.last_chunk_merkle.leaves() + ); + Ok(()) + } + + fn append_entries(&mut self, flow_entry_array: ChunkArray) -> Result<()> { + let last_chunk_start_index = self.last_chunk_start_index(); + if flow_entry_array.start_index + bytes_to_chunks(flow_entry_array.data.len()) as u64 + > last_chunk_start_index + { + // Update `last_chunk_merkle` with real data. + let (chunk_start_index, flow_entry_data_index) = if flow_entry_array.start_index + >= last_chunk_start_index + { + // flow_entry_array only fill last chunk + ( + (flow_entry_array.start_index - last_chunk_start_index) as usize, + 0, + ) + } else { + // flow_entry_array fill both last and last - 1 chunk + ( + 0, + (last_chunk_start_index - flow_entry_array.start_index) as usize * ENTRY_SIZE, + ) + }; + + // Since we always put tx before insert its data. Here `last_chunk_merkle` must + // have included the data range. + for (local_index, entry) in flow_entry_array.data[flow_entry_data_index..] + .chunks_exact(ENTRY_SIZE) + .enumerate() + { + self.last_chunk_merkle + .fill_leaf(chunk_start_index + local_index, Sha3Algorithm::leaf(entry)); + } + } + let chunk_roots = self.flow_store.append_entries(flow_entry_array)?; + for (chunk_index, chunk_root) in chunk_roots { + if chunk_index < self.pora_chunks_merkle.leaves() as u64 { + self.pora_chunks_merkle + .fill_leaf(chunk_index as usize, chunk_root); + } else { + // TODO(zz): This assumption may be false in the future. + unreachable!("We always insert tx nodes before put_chunks"); + } + } + Ok(()) + } + + // FIXME(zz): Implement padding. + pub fn padding(len: usize) -> Vec { + vec![0; len * ENTRY_SIZE] + } + + fn last_chunk_start_index(&self) -> u64 { + if self.pora_chunks_merkle.leaves() == 0 { + 0 + } else { + PORA_CHUNK_SIZE as u64 + * if self.last_chunk_merkle.leaves() == 0 { + // The last chunk is empty and its root hash is not in `pora_chunk_merkle`, + // so all chunks in `pora_chunk_merkle` is complete. + self.pora_chunks_merkle.leaves() + } else { + // The last chunk has data, so we need to exclude it from `pora_chunks_merkle`. 
+ self.pora_chunks_merkle.leaves() - 1 + } as u64 + } + } + + #[instrument(skip(self))] + fn commit_merkle(&mut self, tx_seq: u64) -> Result<()> { + self.pora_chunks_merkle.commit(Some(tx_seq)); + self.last_chunk_merkle.commit(Some(tx_seq)); + Ok(()) + } + + fn revert_merkle_tree(&mut self, tx_seq: u64) -> Result<()> { + // Special case for reverting tx_seq == 0 + if tx_seq == u64::MAX { + self.pora_chunks_merkle.reset(); + self.last_chunk_merkle.reset(); + self.try_initialize()?; + return Ok(()); + } + let old_leaves = self.pora_chunks_merkle.leaves(); + self.pora_chunks_merkle.revert_to(tx_seq)?; + if old_leaves == self.pora_chunks_merkle.leaves() { + self.last_chunk_merkle.revert_to(tx_seq)?; + } else { + // We are reverting to a position before the current last_chunk. + self.last_chunk_merkle = self + .tx_store + .rebuild_last_chunk_merkle(self.pora_chunks_merkle.leaves() - 1, tx_seq)?; + self.try_initialize()?; + assert_eq!( + Some(*self.last_chunk_merkle.root()), + self.pora_chunks_merkle + .leaf_at(self.pora_chunks_merkle.leaves() - 1)? + ); + } + Ok(()) + } + + #[cfg(test)] + pub fn flow_store(&self) -> &FlowStore { + &self.flow_store + } + + fn padding_rear_data(&mut self, tx: &Transaction) -> Result<()> { + let (chunks, _) = compute_padded_chunk_size(tx.size as usize); + let (segments_for_proof, last_segment_size_for_proof) = + compute_segment_size(chunks, PORA_CHUNK_SIZE); + debug!( + "segments_for_proof: {}, last_segment_size_for_proof: {}", + segments_for_proof, last_segment_size_for_proof + ); + + let chunks_for_file = bytes_to_entries(tx.size) as usize; + let (mut segments_for_file, mut last_segment_size_for_file) = + compute_segment_size(chunks_for_file, PORA_CHUNK_SIZE); + debug!( + "segments_for_file: {}, last_segment_size_for_file: {}", + segments_for_file, last_segment_size_for_file + ); + + while segments_for_file <= segments_for_proof { + let padding_size = if segments_for_file == segments_for_proof { + (last_segment_size_for_proof - last_segment_size_for_file) * ENTRY_SIZE + } else { + (PORA_CHUNK_SIZE - last_segment_size_for_file) * ENTRY_SIZE + }; + + debug!("Padding size: {}", padding_size); + if padding_size > 0 { + // This tx hash is guaranteed to be consistent. + self.put_chunks_with_tx_hash( + tx.seq, + tx.hash(), + ChunkArray { + data: vec![0u8; padding_size], + start_index: ((segments_for_file - 1) * PORA_CHUNK_SIZE + + last_segment_size_for_file) + as u64, + }, + )?; + } + + last_segment_size_for_file = 0; + segments_for_file += 1; + } + + Ok(()) + } + + fn copy_tx_data(&mut self, from_tx_seq: u64, to_tx_seq_list: Vec) -> Result<()> { + // We have all the data need for this tx, so just copy them. + let old_tx = self + .get_tx_by_seq_number(from_tx_seq)? + .ok_or_else(|| anyhow!("from tx missing"))?; + let mut to_tx_offset_list = Vec::with_capacity(to_tx_seq_list.len()); + for seq in to_tx_seq_list { + // No need to copy data for completed tx. + if self.check_tx_completed(seq)? { + continue; + } + let tx = self + .get_tx_by_seq_number(seq)? + .ok_or_else(|| anyhow!("to tx missing"))?; + to_tx_offset_list.push((tx.seq, tx.start_entry_index - old_tx.start_entry_index)); + } + if to_tx_offset_list.is_empty() { + return Ok(()); + } + // copy data in batches + // TODO(zz): Do this asynchronously and keep atomicity. + for (batch_start, batch_end) in batch_iter( + old_tx.start_entry_index, + old_tx.start_entry_index + old_tx.num_entries() as u64, + PORA_CHUNK_SIZE, + ) { + let batch_data = self + .get_chunk_by_flow_index(batch_start, batch_end - batch_start)? 
+ .ok_or_else(|| anyhow!("tx data missing"))?; + for (_, offset) in &to_tx_offset_list { + let mut data = batch_data.clone(); + data.start_index += offset; + self.append_entries(data)?; + } + } + // num_entries() includes the rear padding data, so no need for more padding. + + for (seq, _) in to_tx_offset_list { + self.tx_store.finalize_tx(seq)?; + } + Ok(()) + } + + /// Here we persist the subtrees with the incomplete data of the last chunk merkle so that + /// we can still provide proof for known data in it. + /// Another choice is to insert these subtrees earlier in `put_tx`. To insert them here can + /// batch them and avoid inserting for the subtrees with all data known. + fn complete_last_chunk_merkle(&mut self, index: usize) -> Result<()> { + let subtree_list = self.last_chunk_merkle.get_subtrees(); + self.last_chunk_merkle = + Merkle::new_with_depth(vec![], log2_pow2(PORA_CHUNK_SIZE) + 1, None); + + // Only insert non-leave subtrees. The leave data should have been available. + let mut to_insert_subtrees = Vec::new(); + let mut start_index = 0; + for (subtree_height, root) in subtree_list { + to_insert_subtrees.push((start_index, subtree_height, root)); + start_index += 1 << (subtree_height - 1); + } + self.flow_store + .insert_subtree_list_for_batch(index, to_insert_subtrees) + } +} + +/// This represents the subtree of a chunk or the whole data merkle tree. +pub type FileMerkleTree = MerkleTree<[u8; 32], RawLeafSha3Algorithm>; + +#[macro_export] +macro_rules! try_option { + ($r: ident) => { + match $r { + Some(v) => v, + None => return Ok(None), + } + }; + ($e: expr) => { + match $e { + Some(v) => v, + None => return Ok(None), + } + }; +} + +/// This should be called with input checked. +pub fn sub_merkle_tree(leaf_data: &[u8]) -> Result { + Ok(FileMerkleTree::new( + data_to_merkle_leaves(leaf_data)? + .into_iter() + .map(|h| h.0) + .collect::>(), + )) +} + +pub fn data_to_merkle_leaves(leaf_data: &[u8]) -> Result> { + if leaf_data.len() % ENTRY_SIZE != 0 { + bail!("merkle_tree: unmatch data size"); + } + // If the data size is small, using `rayon` would introduce more overhead. 
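// [Editor's note: concretely, the cutoff below is ENTRY_SIZE * 8 = 2048 bytes,
// i.e. 8 entries. A 1 KiB buffer (4 entries) is hashed sequentially, while a
// full PoRA chunk of 1024 entries (256 KiB) goes through rayon's parallel
// chunk iterator.]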
+ let r = if leaf_data.len() >= ENTRY_SIZE * 8 { + leaf_data + .par_chunks_exact(ENTRY_SIZE) + .map(Sha3Algorithm::leaf) + .collect() + } else { + leaf_data + .chunks_exact(ENTRY_SIZE) + .map(Sha3Algorithm::leaf) + .collect() + }; + Ok(r) +} + +pub fn bytes_to_entries(size_bytes: u64) -> u64 { + if size_bytes % ENTRY_SIZE as u64 == 0 { + size_bytes / ENTRY_SIZE as u64 + } else { + size_bytes / ENTRY_SIZE as u64 + 1 + } +} + +fn entry_proof(top_proof: &FlowProof, sub_proof: &FlowProof) -> Result { + if top_proof.item() != sub_proof.root() { + bail!( + "top tree and sub tree mismatch: top_leaf={:?}, sub_root={:?}", + top_proof.item(), + sub_proof.root() + ); + } + let mut lemma = sub_proof.lemma().to_vec(); + let mut path = sub_proof.path().to_vec(); + assert!(lemma.pop().is_some()); + lemma.extend_from_slice(&top_proof.lemma()[1..]); + path.extend_from_slice(top_proof.path()); + Ok(FlowProof::new(lemma, path)) +} + +pub fn split_nodes(data_size: usize) -> Vec { + let (mut padded_chunks, chunks_next_pow2) = compute_padded_chunk_size(data_size); + let mut next_chunk_size = chunks_next_pow2; + + let mut nodes = vec![]; + while padded_chunks > 0 { + if padded_chunks >= next_chunk_size { + padded_chunks -= next_chunk_size; + nodes.push(next_chunk_size); + } + + next_chunk_size >>= 1; + } + + nodes +} + +pub fn tx_subtree_root_list_padded(data: &[u8]) -> Vec<(usize, DataRoot)> { + let mut root_list = Vec::new(); + let mut start_index = 0; + let nodes = split_nodes(data.len()); + + for &tree_size in nodes.iter() { + let end = start_index + tree_size * ENTRY_SIZE; + + let submerkle_root = if start_index >= data.len() { + sub_merkle_tree(&vec![0u8; tree_size * ENTRY_SIZE]) + .unwrap() + .root() + } else if end > data.len() { + let mut pad_data = data[start_index..].to_vec(); + pad_data.append(&mut vec![0u8; end - data.len()]); + sub_merkle_tree(&pad_data).unwrap().root() + } else { + sub_merkle_tree(&data[start_index..end]).unwrap().root() + }; + + root_list.push((log2_pow2(tree_size) + 1, submerkle_root.into())); + start_index = end; + } + + root_list +} diff --git a/node/storage/src/log_store/mod.rs b/node/storage/src/log_store/mod.rs new file mode 100644 index 0000000..74e68cd --- /dev/null +++ b/node/storage/src/log_store/mod.rs @@ -0,0 +1,225 @@ +use append_merkle::MerkleTreeInitialData; +use ethereum_types::H256; +use shared_types::{ + Chunk, ChunkArray, ChunkArrayWithProof, ChunkWithProof, DataRoot, FlowRangeProof, Transaction, +}; +use zgs_spec::{BYTES_PER_SEAL, SEALS_PER_LOAD}; + +use crate::error::Result; + +pub mod config; +mod flow_store; +mod load_chunk; +pub mod log_manager; +mod tx_store; + +/// The trait to read the transactions already appended to the log. +/// +/// Implementation Rationale: +/// If the stored chunk is large, we can store the proof together with the chunk. +pub trait LogStoreRead: LogStoreChunkRead { + /// Get a transaction by its global log sequence number. + fn get_tx_by_seq_number(&self, seq: u64) -> Result>; + + /// Get a transaction by the data root of its data. + fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result>; + + fn get_tx_by_data_root(&self, data_root: &DataRoot) -> Result> { + match self.get_tx_seq_by_data_root(data_root)? 
{ + Some(seq) => self.get_tx_by_seq_number(seq), + None => Ok(None), + } + } + + fn get_chunk_with_proof_by_tx_and_index( + &self, + tx_seq: u64, + index: usize, + ) -> Result>; + + fn get_chunks_with_proof_by_tx_and_index_range( + &self, + tx_seq: u64, + index_start: usize, + index_end: usize, + ) -> Result>; + + fn check_tx_completed(&self, tx_seq: u64) -> Result; + + fn next_tx_seq(&self) -> u64; + + fn get_sync_progress(&self) -> Result>; + + fn validate_range_proof(&self, tx_seq: u64, data: &ChunkArrayWithProof) -> Result; + + fn get_proof_at_root(&self, root: &DataRoot, index: u64, length: u64) + -> Result; + + /// Return flow root and length. + fn get_context(&self) -> Result<(DataRoot, u64)>; +} + +pub trait LogStoreChunkRead { + /// Get a data chunk by the transaction sequence number and the chunk offset in the transaction. + /// Accessing a single chunk is mostly used for mining. + fn get_chunk_by_tx_and_index(&self, tx_seq: u64, index: usize) -> Result>; + + /// Get a list of continuous chunks by the transaction sequence number and an index range (`index_end` excluded). + fn get_chunks_by_tx_and_index_range( + &self, + tx_seq: u64, + index_start: usize, + index_end: usize, + ) -> Result>; + + fn get_chunk_by_data_root_and_index( + &self, + data_root: &DataRoot, + index: usize, + ) -> Result>; + + fn get_chunks_by_data_root_and_index_range( + &self, + data_root: &DataRoot, + index_start: usize, + index_end: usize, + ) -> Result>; + + fn get_chunk_index_list(&self, tx_seq: u64) -> Result>; + + /// Accessing chunks by absolute flow index + fn get_chunk_by_flow_index(&self, index: u64, length: u64) -> Result>; +} + +pub trait LogStoreWrite: LogStoreChunkWrite { + /// Store a data entry metadata. + fn put_tx(&mut self, tx: Transaction) -> Result<()>; + + /// Finalize a transaction storage. + /// This will compute and the merkle tree, check the data root, and persist a part of the merkle + /// tree for future queries. + /// + /// This will return error if not all chunks are stored. But since this check can be expensive, + /// the caller is supposed to track chunk statuses and call this after storing all the chunks. + fn finalize_tx(&mut self, tx_seq: u64) -> Result<()>; + fn finalize_tx_with_hash(&mut self, tx_seq: u64, tx_hash: H256) -> Result; + + /// Store the progress of synced block number and its hash. + fn put_sync_progress(&self, progress: (u64, H256)) -> Result<()>; + + /// Revert the log state to a given tx seq. + /// This is needed when transactions are reverted because of chain reorg. + /// + /// Reverted transactions are returned in order. + fn revert_to(&mut self, tx_seq: u64) -> Result>; +} + +pub trait LogStoreChunkWrite { + /// Store data chunks of a data entry. + fn put_chunks(&mut self, tx_seq: u64, chunks: ChunkArray) -> Result<()>; + + fn put_chunks_with_tx_hash( + &mut self, + tx_seq: u64, + tx_hash: H256, + chunks: ChunkArray, + ) -> Result; + + /// Delete all chunks of a tx. 
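// [Editor's sketch, not part of the patch: the intended write path under
// `LogStoreWrite` and `LogStoreChunkWrite` above, assuming a store handle and a
// `tx: Transaction` whose chunk data arrives separately:
//
//     store.put_tx(tx.clone())?;              // 1. record the tx metadata
//     store.put_chunks(tx.seq, chunks)?;      // 2. store data, possibly in several calls
//     store.finalize_tx(tx.seq)?;             // 3. check completeness and mark done
//
// If the process stops before step 3 the tx stays unfinalized, and the upper
// sync layers are expected to re-run steps 2 and 3. `remove_all_chunks`,
// declared next, is the chunk-level deletion counterpart.]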
+ fn remove_all_chunks(&self, tx_seq: u64) -> Result<()>; +} + +pub trait LogChunkStore: LogStoreChunkRead + LogStoreChunkWrite + Send + Sync + 'static {} +impl LogChunkStore for T {} + +pub trait Store: + LogStoreRead + LogStoreWrite + LogStoreInner + config::Configurable + Send + Sync + 'static +{ +} +impl< + T: LogStoreRead + LogStoreWrite + LogStoreInner + config::Configurable + Send + Sync + 'static, + > Store for T +{ +} + +pub trait LogStoreInner { + fn flow(&self) -> &dyn Flow; + fn flow_mut(&mut self) -> &mut dyn Flow; +} + +pub struct MineLoadChunk { + pub loaded_chunk: [[u8; BYTES_PER_SEAL]; SEALS_PER_LOAD], + pub avalibilities: [bool; SEALS_PER_LOAD], +} + +impl Default for MineLoadChunk { + fn default() -> Self { + Self { + loaded_chunk: [[0u8; BYTES_PER_SEAL]; SEALS_PER_LOAD], + avalibilities: [false; SEALS_PER_LOAD], + } + } +} + +pub trait FlowRead { + /// Return the entries in the given range. If some data are missing, `Ok(None)` is returned. + fn get_entries(&self, index_start: u64, index_end: u64) -> Result>; + + /// Return the available entries in the given range. + /// The `ChunkArray` in the returned list are in order and they will not overlap or be adjacent. + /// + /// For simplicity, `index_start` and `index_end` must be at the batch boundaries. + fn get_available_entries(&self, index_start: u64, index_end: u64) -> Result>; + + fn get_chunk_root_list(&self) -> Result>; + + fn load_sealed_data(&self, chunk_index: u64) -> Result>; +} + +pub trait FlowWrite { + /// Append data to the flow. `start_index` is included in `ChunkArray`, so + /// it's possible to append arrays in any place. + /// Return the list of completed chunks. + fn append_entries(&mut self, data: ChunkArray) -> Result>; + + /// Remove all the entries after `start_index`. + /// This is used to remove deprecated data in case of chain reorg. 
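// [Editor's sketch, not part of the patch: a flow-level round trip under the
// `FlowRead`/`FlowWrite` traits above, assuming some `flow: &mut dyn Flow` and
// 256-byte entries:
//
//     // write entries [100, 104)
//     flow.append_entries(ChunkArray { data: vec![0u8; 4 * 256], start_index: 100 })?;
//     // read them back; Ok(None) would mean part of the range is still missing
//     let entries = flow.get_entries(100, 104)?;
//
// `truncate`, declared next, is the reorg path: it drops everything from
// `start_index` onwards.]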
+ fn truncate(&mut self, start_index: u64) -> Result<()>; +} + +pub struct SealTask { + /// The index (in seal) of chunks + pub seal_index: u64, + /// An ephemeral version number to distinguish if revert happending + pub version: usize, + /// The data to be sealed + pub non_sealed_data: [u8; BYTES_PER_SEAL], +} + +#[derive(Debug)] +pub struct SealAnswer { + /// The index (in seal) of chunks + pub seal_index: u64, + /// An ephemeral version number to distinguish if revert happending + pub version: usize, + /// The data to be sealed + pub sealed_data: [u8; BYTES_PER_SEAL], + /// The miner Id + pub miner_id: H256, + /// The seal_context for this chunk + pub seal_context: H256, + pub context_end_seal: u64, +} + +pub trait FlowSeal { + /// Pull a seal chunk ready for sealing + /// Return the global index (in sector) and the data + fn pull_seal_chunk(&self, seal_index_max: usize) -> Result>>; + + /// Submit sealing result + + fn submit_seal_result(&mut self, answers: Vec) -> Result<()>; +} + +pub trait Flow: FlowRead + FlowWrite + FlowSeal {} +impl Flow for T {} diff --git a/node/storage/src/log_store/tx_store.rs b/node/storage/src/log_store/tx_store.rs new file mode 100644 index 0000000..1da6731 --- /dev/null +++ b/node/storage/src/log_store/tx_store.rs @@ -0,0 +1,276 @@ +use crate::error::Error; +use crate::log_store::log_manager::{ + data_to_merkle_leaves, sub_merkle_tree, COL_MISC, COL_TX, COL_TX_COMPLETED, + COL_TX_DATA_ROOT_INDEX, ENTRY_SIZE, PORA_CHUNK_SIZE, +}; +use crate::{try_option, LogManager, ZgsKeyValueDB}; +use anyhow::{anyhow, Result}; +use append_merkle::{AppendMerkleTree, MerkleTreeRead, Sha3Algorithm}; +use ethereum_types::H256; +use merkle_light::merkle::log2_pow2; +use shared_types::{DataRoot, Transaction}; +use ssz::{Decode, Encode}; +use std::cmp; +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use tracing::{error, instrument}; + +const LOG_SYNC_PROGRESS_KEY: &str = "log_sync_progress"; +const NEXT_TX_KEY: &str = "next_tx_seq"; + +pub struct TransactionStore { + kvdb: Arc, + /// This is always updated before writing the database to ensure no intermediate states. + next_tx_seq: AtomicU64, +} + +impl TransactionStore { + pub fn new(kvdb: Arc) -> Result { + let next_tx_seq = kvdb + .get(COL_TX, NEXT_TX_KEY.as_bytes())? + .map(|a| decode_tx_seq(&a)) + .unwrap_or(Ok(0))?; + Ok(Self { + kvdb, + next_tx_seq: AtomicU64::new(next_tx_seq), + }) + } + + #[instrument(skip(self))] + /// Return `Ok(Some(tx_seq))` if a previous transaction has the same tx root. + pub fn put_tx(&self, mut tx: Transaction) -> Result> { + let mut db_tx = self.kvdb.transaction(); + + if !tx.data.is_empty() { + tx.size = tx.data.len() as u64; + let mut padded_data = tx.data.clone(); + let extra = tx.data.len() % ENTRY_SIZE; + if extra != 0 { + padded_data.append(&mut vec![0u8; ENTRY_SIZE - extra]); + } + let data_root = sub_merkle_tree(&padded_data)?.root(); + tx.data_merkle_root = data_root.into(); + } + + db_tx.put(COL_TX, &tx.seq.to_be_bytes(), &tx.as_ssz_bytes()); + db_tx.put(COL_TX, NEXT_TX_KEY.as_bytes(), &(tx.seq + 1).to_be_bytes()); + let old_tx_seq_list = self.get_tx_seq_list_by_data_root(&tx.data_merkle_root)?; + // The list is sorted, and we always call `put_tx` in order. 
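// [Editor's note, illustrative only: for example, a 700-byte payload gets 68
// zero bytes appended to reach 768 bytes, i.e. 3 full 256-byte entries, before
// the sub merkle tree and `data_merkle_root` above are computed. The assert that
// follows then relies on `put_tx` being called with increasing `seq`: any tx
// already recorded under the same data root must have a smaller sequence
// number.]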
+ assert!(old_tx_seq_list + .last() + .map(|last| *last < tx.seq) + .unwrap_or(true)); + let mut new_tx_seq_list = old_tx_seq_list.clone(); + new_tx_seq_list.push(tx.seq); + db_tx.put( + COL_TX_DATA_ROOT_INDEX, + tx.data_merkle_root.as_bytes(), + &new_tx_seq_list.as_ssz_bytes(), + ); + self.next_tx_seq.store(tx.seq + 1, Ordering::SeqCst); + self.kvdb.write(db_tx)?; + Ok(old_tx_seq_list) + } + + pub fn get_tx_by_seq_number(&self, seq: u64) -> Result> { + if seq >= self.next_tx_seq() { + return Ok(None); + } + let value = try_option!(self.kvdb.get(COL_TX, &seq.to_be_bytes())?); + let tx = Transaction::from_ssz_bytes(&value).map_err(Error::from)?; + Ok(Some(tx)) + } + + pub fn remove_tx_after(&self, min_seq: u64) -> Result> { + let mut removed_txs = Vec::new(); + let max_seq = self.next_tx_seq(); + let mut db_tx = self.kvdb.transaction(); + let mut modified_merkle_root_map = HashMap::new(); + for seq in min_seq..max_seq { + let Some(tx) = self.get_tx_by_seq_number(seq)? else { + error!(?seq, ?max_seq, "Transaction missing before the end"); + break; + }; + db_tx.delete(COL_TX, &seq.to_be_bytes()); + db_tx.delete(COL_TX_COMPLETED, &seq.to_be_bytes()); + // We only remove tx when the blockchain reorgs. + // If a tx is reverted, all data after it will also be reverted, so we call remove + // all indices after it. + let tx_seq_list = match modified_merkle_root_map.entry(tx.data_merkle_root) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => { + e.insert(self.get_tx_seq_list_by_data_root(&tx.data_merkle_root)?) + } + }; + tx_seq_list.retain(|e| *e < seq); + removed_txs.push(tx); + } + for (merkle_root, tx_seq_list) in modified_merkle_root_map { + if tx_seq_list.is_empty() { + db_tx.delete(COL_TX_DATA_ROOT_INDEX, merkle_root.as_bytes()); + } else { + db_tx.put( + COL_TX_DATA_ROOT_INDEX, + merkle_root.as_bytes(), + &tx_seq_list.as_ssz_bytes(), + ); + } + } + db_tx.put(COL_TX, NEXT_TX_KEY.as_bytes(), &min_seq.to_be_bytes()); + self.next_tx_seq.store(min_seq, Ordering::SeqCst); + self.kvdb.write(db_tx)?; + Ok(removed_txs) + } + + pub fn get_tx_seq_list_by_data_root(&self, data_root: &DataRoot) -> Result> { + let value = match self + .kvdb + .get(COL_TX_DATA_ROOT_INDEX, data_root.as_bytes())? + { + Some(v) => v, + None => return Ok(Vec::new()), + }; + Ok(Vec::::from_ssz_bytes(&value).map_err(Error::from)?) + } + + pub fn get_first_tx_seq_by_data_root(&self, data_root: &DataRoot) -> Result> { + let value = try_option!(self + .kvdb + .get(COL_TX_DATA_ROOT_INDEX, data_root.as_bytes())?); + let seq_list = Vec::::from_ssz_bytes(&value).map_err(Error::from)?; + Ok(seq_list.first().cloned()) + } + + #[instrument(skip(self))] + pub fn finalize_tx(&self, tx_seq: u64) -> Result<()> { + Ok(self + .kvdb + .put(COL_TX_COMPLETED, &tx_seq.to_be_bytes(), &[0])?) + } + + pub fn check_tx_completed(&self, tx_seq: u64) -> Result { + Ok(self.kvdb.has_key(COL_TX_COMPLETED, &tx_seq.to_be_bytes())?) + } + + pub fn next_tx_seq(&self) -> u64 { + self.next_tx_seq.load(Ordering::SeqCst) + } + + #[instrument(skip(self))] + pub fn put_progress(&self, progress: (u64, H256)) -> Result<()> { + Ok(self.kvdb.put( + COL_MISC, + LOG_SYNC_PROGRESS_KEY.as_bytes(), + &progress.as_ssz_bytes(), + )?) + } + + #[instrument(skip(self))] + pub fn get_progress(&self) -> Result> { + Ok(Some( + <(u64, H256)>::from_ssz_bytes(&try_option!(self + .kvdb + .get(COL_MISC, LOG_SYNC_PROGRESS_KEY.as_bytes())?)) + .map_err(Error::from)?, + )) + } + + /// Build the merkle tree at `pora_chunk_index` with the data before (including) `tx_seq`. 
+ /// This first rebuild the tree with the tx root nodes lists by repeatedly checking previous + /// until we reach the start of this chunk. + /// + /// Note that this can only be called with the last chunk after some transaction is committed, + /// otherwise the start of this chunk might be within some tx subtree and this will panic. + // TODO(zz): Fill the last chunk with data. + pub fn rebuild_last_chunk_merkle( + &self, + pora_chunk_index: usize, + mut tx_seq: u64, + ) -> Result> { + let last_chunk_start_index = pora_chunk_index as u64 * PORA_CHUNK_SIZE as u64; + let mut tx_list = Vec::new(); + // Find the first tx within the last chunk. + loop { + let tx = self.get_tx_by_seq_number(tx_seq)?.expect("tx not removed"); + match tx.start_entry_index.cmp(&last_chunk_start_index) { + cmp::Ordering::Greater => { + tx_list.push((tx_seq, tx.merkle_nodes)); + } + cmp::Ordering::Equal => { + tx_list.push((tx_seq, tx.merkle_nodes)); + break; + } + cmp::Ordering::Less => { + // The transaction data crosses a chunk, so we need to find the subtrees + // within the last chunk. + let mut start_index = tx.start_entry_index; + let mut first_index = None; + for (i, (depth, _)) in tx.merkle_nodes.iter().enumerate() { + start_index += 1 << (depth - 1); + if start_index == last_chunk_start_index { + first_index = Some(i + 1); + break; + } + } + // Some means some subtree ends at the chunk boundary. + // None means there are padding data between the tx data and the boundary, + // so no data belongs to the last chunk. + if let Some(first_index) = first_index { + if first_index != tx.merkle_nodes.len() { + tx_list.push((tx_seq, tx.merkle_nodes[first_index..].to_vec())); + } else { + // If the last subtree ends at the chunk boundary, we also do not need + // to add data of this tx to the last chunk. + // This is only possible if the last chunk is empty, because otherwise + // we should have entered the `Equal` condition before and + // have broken the loop. + assert!(tx_list.is_empty()); + } + } + break; + } + } + if tx_seq == 0 { + break; + } else { + tx_seq -= 1; + } + } + let mut merkle = if last_chunk_start_index == 0 { + // The first entry hash is initialized as zero. + AppendMerkleTree::::new_with_depth( + vec![H256::zero()], + log2_pow2(PORA_CHUNK_SIZE) + 1, + None, + ) + } else { + AppendMerkleTree::::new_with_depth( + vec![], + log2_pow2(PORA_CHUNK_SIZE) + 1, + None, + ) + }; + for (tx_seq, subtree_list) in tx_list.into_iter().rev() { + // Pad the tx. After the first subtree is padded, other subtrees should be aligned. + let first_subtree = 1 << (subtree_list[0].0 - 1); + if merkle.leaves() % first_subtree != 0 { + let pad_len = + cmp::min(first_subtree, PORA_CHUNK_SIZE) - (merkle.leaves() % first_subtree); + merkle.append_list(data_to_merkle_leaves(&LogManager::padding(pad_len))?); + } + // Since we are building the last merkle with a given last tx_seq, it's ensured + // that appending subtrees will not go beyond the max size. 
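// [Editor's note, illustrative only: the padding above keeps each tx's subtrees
// aligned. If the rebuilt merkle currently holds 3 leaves and the next tx's
// first subtree has depth 3 (4 leaves), then 3 % 4 = 3, so
// min(4, PORA_CHUNK_SIZE) - 3 = 1 zero leaf is appended and the subtree list is
// attached at a 4-aligned offset by the call that follows.]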
+ merkle.append_subtree_list(subtree_list)?; + merkle.commit(Some(tx_seq)); + } + Ok(merkle) + } +} + +fn decode_tx_seq(data: &[u8]) -> Result { + Ok(u64::from_be_bytes( + data.try_into().map_err(|e| anyhow!("{:?}", e))?, + )) +} diff --git a/node/storage_with_stream/Cargo.toml b/node/storage_with_stream/Cargo.toml new file mode 100644 index 0000000..dd24cb0 --- /dev/null +++ b/node/storage_with_stream/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "storage_with_stream" +version = "0.1.0" +edition = "2021" + +[dependencies] +anyhow = { version = "=1.0.58", features = ["backtrace"] } +append_merkle = { path = "../../zerog-storage-rust/common/append_merkle" } +eth2_ssz = "0.4.0" +eth2_ssz_derive = "0.3.0" +ethereum-types = "0.14" +hex = "0.4.3" +kvdb = "0.13.0" +kvdb-memorydb = "0.13.0" +kvdb-rocksdb = "0.19.0" +merkle_light = { path = "../../zerog-storage-rust/common/merkle_light" } +merkle_tree = { path = "../../zerog-storage-rust/common/merkle_tree" } +storage = { path = "../storage" } +rayon = "1.5.3" +shared_types = { path = "../shared_types" } +tracing = "0.1.35" +typenum = "1.15.0" +rusqlite = { version = "0.28.0", features = ["bundled"] } +const_format = "0.2.26" +tokio-rusqlite = "0.3.0" +async-trait = "0.1.56" + +[dev-dependencies] +tempdir = "0.3.7" +rand = "0.8.5" diff --git a/node/storage_with_stream/src/config.rs b/node/storage_with_stream/src/config.rs new file mode 100644 index 0000000..438e0e0 --- /dev/null +++ b/node/storage_with_stream/src/config.rs @@ -0,0 +1,8 @@ +use crate::LogStorageConfig; +use std::path::PathBuf; + +#[derive(Clone)] +pub struct Config { + pub log_config: LogStorageConfig, + pub kv_db_file: PathBuf, +} diff --git a/node/storage_with_stream/src/lib.rs b/node/storage_with_stream/src/lib.rs new file mode 100644 index 0000000..cc5467b --- /dev/null +++ b/node/storage_with_stream/src/lib.rs @@ -0,0 +1,12 @@ +pub mod config; +pub mod store; + +pub use storage::error; +pub use storage::log_store; +pub use storage::log_store::log_manager::LogManager; +pub use storage::StorageConfig as LogStorageConfig; + +pub use config::Config as StorageConfig; +pub use store::store_manager::StoreManager; +pub use store::AccessControlOps; +pub use store::Store; diff --git a/node/storage_with_stream/src/store/mod.rs b/node/storage_with_stream/src/store/mod.rs new file mode 100644 index 0000000..236794c --- /dev/null +++ b/node/storage_with_stream/src/store/mod.rs @@ -0,0 +1,133 @@ +use std::sync::Arc; + +use async_trait::async_trait; +use ethereum_types::{H160, H256}; +use shared_types::{AccessControlSet, KeyValuePair, StreamWriteSet, Transaction}; +use storage::log_store::config::Configurable; +use storage::log_store::{LogStoreRead, LogStoreWrite}; + +use crate::error::Result; + +mod sqlite_db_statements; +pub mod store_manager; +mod stream_store; + +pub use stream_store::to_access_control_op_name; +pub use stream_store::AccessControlOps; + +pub trait Store: + LogStoreRead + LogStoreWrite + Configurable + Send + Sync + StreamRead + StreamWrite + 'static +{ +} +impl< + T: LogStoreRead + + LogStoreWrite + + Configurable + + Send + + Sync + + StreamRead + + StreamWrite + + 'static, + > Store for T +{ +} + +#[async_trait] +pub trait StreamRead { + async fn get_holding_stream_ids(&self) -> Result>; + + async fn get_stream_data_sync_progress(&self) -> Result; + + async fn get_stream_replay_progress(&self) -> Result; + + async fn get_latest_version_before( + &self, + stream_id: H256, + key: Arc>, + before: u64, + ) -> Result; + + async fn has_write_permission( + &self, + account: 
H160, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result; + + async fn is_new_stream(&self, stream_id: H256, version: u64) -> Result; + + async fn is_admin(&self, account: H160, stream_id: H256, version: u64) -> Result; + + async fn is_special_key( + &self, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result; + + async fn is_writer_of_key( + &self, + account: H160, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result; + + async fn is_writer_of_stream( + &self, + account: H160, + stream_id: H256, + version: u64, + ) -> Result; + + async fn get_stream_key_value( + &self, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result>; + + async fn get_next_stream_key_value( + &self, + stream_id: H256, + key: Arc>, + inclusive: bool, + version: u64, + ) -> Result>; + + async fn get_prev_stream_key_value( + &self, + stream_id: H256, + key: Arc>, + inclusive: bool, + version: u64, + ) -> Result>; + + async fn get_first(&self, stream_id: H256, version: u64) -> Result>; + + async fn get_last(&self, stream_id: H256, version: u64) -> Result>; +} + +#[async_trait] +pub trait StreamWrite { + async fn reset_stream_sync(&self, stream_ids: Vec) -> Result<()>; + + async fn update_stream_ids(&self, stream_ids: Vec) -> Result<()>; + + async fn update_stream_data_sync_progress(&self, from: u64, progress: u64) -> Result; + + async fn update_stream_replay_progress(&self, from: u64, progress: u64) -> Result; + + async fn put_stream( + &self, + tx_seq: u64, + data_merkle_root: H256, + result: String, + commit_data: Option<(StreamWriteSet, AccessControlSet)>, + ) -> Result<()>; + + async fn get_tx_result(&self, tx_seq: u64) -> Result>; + + async fn revert_stream(&mut self, tx_seq: u64) -> Result>; +} diff --git a/node/storage_with_stream/src/store/sqlite_db_statements.rs b/node/storage_with_stream/src/store/sqlite_db_statements.rs new file mode 100644 index 0000000..e9b5efc --- /dev/null +++ b/node/storage_with_stream/src/store/sqlite_db_statements.rs @@ -0,0 +1,237 @@ +use super::stream_store::AccessControlOps; +use const_format::formatcp; + +pub struct SqliteDBStatements; + +impl SqliteDBStatements { + pub const RESET_STERAM_SYNC_STATEMENT: &'static str = " + INSERT OR REPLACE INTO + t_misc (data_sync_progress, stream_replay_progress, stream_ids, id) + VALUES + (:data_sync_progress, :stream_replay_progress, :stream_ids, :id) + "; + + pub const FINALIZE_TX_STATEMENT: &'static str = " + INSERT OR REPLACE INTO + t_tx (tx_seq, result) + VALUES + (:tx_seq, :result) + "; + + pub const DELETE_TX_STATEMENT: &'static str = "DELETE FROM t_tx WHERE tx_seq > :tx_seq"; + + pub const DELETE_ALL_TX_STATEMENT: &'static str = "DELETE FROM t_tx"; + + pub const GET_TX_RESULT_STATEMENT: &'static str = + "SELECT result FROM t_tx WHERE tx_seq = :tx_seq"; + + pub const GET_STREAM_DATA_SYNC_PROGRESS_STATEMENT: &'static str = + "SELECT data_sync_progress FROM t_misc WHERE id = 0"; + + pub const UPDATE_STREAM_DATA_SYNC_PROGRESS_STATEMENT: &'static str = + "UPDATE t_misc SET data_sync_progress = :data_sync_progress WHERE id = :id AND data_sync_progress = :from"; + + pub const GET_STREAM_REPLAY_PROGRESS_STATEMENT: &'static str = + "SELECT stream_replay_progress FROM t_misc WHERE id = 0"; + + pub const UPDATE_STREAM_REPLAY_PROGRESS_STATEMENT: &'static str = "UPDATE t_misc SET stream_replay_progress = :stream_replay_progress WHERE id = :id AND stream_replay_progress = :from"; + + pub const GET_STREAM_IDS_STATEMENT: &'static str = "SELECT stream_ids FROM t_misc WHERE id = 0"; + + pub const 
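+ // Note: `t_misc` is used as a single-row table (id = 0) holding the data sync
+ // progress, the stream replay progress and the SSZ-encoded list of stream ids
+ // this node follows.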
UPDATE_STREAM_IDS_STATEMENT: &'static str = + "UPDATE t_misc SET stream_ids = :stream_ids WHERE id = :id"; + + pub const GET_LATEST_VERSION_BEFORE_STATEMENT: &'static str = + "SELECT MAX(version) FROM t_stream WHERE stream_id = :stream_id AND key = :key AND version <= :before"; + + pub const IS_NEW_STREAM_STATEMENT: &'static str = + "SELECT 1 FROM t_access_control WHERE stream_id = :stream_id AND version <= :version LIMIT 1"; + + pub const IS_SPECIAL_KEY_STATEMENT: &'static str = formatcp!( + " + SELECT op_type FROM + t_access_control + WHERE + stream_id = :stream_id AND key = :key AND + version <= :version AND op_type in ({}, {}) + ORDER BY version DESC LIMIT 1", + AccessControlOps::SET_KEY_TO_SPECIAL, + AccessControlOps::SET_KEY_TO_NORMAL, + ); + + pub const IS_ADMIN_STATEMENT: &'static str = formatcp!( + " + SELECT op_type FROM + t_access_control + WHERE + stream_id = :stream_id AND account = :account AND + version <= :version AND op_type in ({}, {}) + ORDER BY version DESC LIMIT 1", + AccessControlOps::GRANT_ADMIN_ROLE, + AccessControlOps::RENOUNCE_ADMIN_ROLE + ); + + pub const IS_WRITER_FOR_KEY_STATEMENT: &'static str = formatcp!( + " + SELECT op_type FROM + t_access_control + WHERE + stream_id = :stream_id AND key = :key AND + account = :account AND version <= :version AND + op_type in ({}, {}, {}) + ORDER BY version DESC LIMIT 1 + ", + AccessControlOps::GRANT_SPECIAL_WRITER_ROLE, + AccessControlOps::REVOKE_SPECIAL_WRITER_ROLE, + AccessControlOps::RENOUNCE_SPECIAL_WRITER_ROLE + ); + + pub const IS_WRITER_FOR_STREAM_STATEMENT: &'static str = formatcp!( + " + SELECT op_type FROM + t_access_control + WHERE + stream_id = :stream_id AND account = :account AND + version <= :version AND op_type in ({}, {}, {}) + ORDER BY version DESC LIMIT 1 + ", + AccessControlOps::GRANT_WRITER_ROLE, + AccessControlOps::REVOKE_WRITER_ROLE, + AccessControlOps::RENOUNCE_WRITER_ROLE + ); + + pub const PUT_STREAM_WRITE_STATEMENT: &'static str = " + INSERT OR REPLACE INTO + t_stream (stream_id, key, version, start_index, end_index) + VALUES + (:stream_id, :key, :version, :start_index, :end_index) + "; + + pub const DELETE_STREAM_WRITE_STATEMENT: &'static str = + "DELETE FROM t_stream WHERE version > :version"; + + pub const DELETE_ALL_STREAM_WRITE_STATEMENT: &'static str = "DELETE FROM t_stream"; + + pub const PUT_ACCESS_CONTROL_STATEMENT: &'static str = " + INSERT OR REPLACE INTO + t_access_control (stream_id, key, version, account, op_type, operator) + VALUES + (:stream_id, :key, :version, :account, :op_type, :operator) + "; + + pub const DELETE_ACCESS_CONTROL_STATEMENT: &'static str = + "DELETE FROM t_access_control WHERE version > :version "; + + pub const DELETE_ALL_ACCESS_CONTROL_STATEMENT: &'static str = "DELETE FROM t_access_control"; + + pub const GET_STREAM_KEY_VALUE_STATEMENT: &'static str = " + SELECT version, start_index, end_index FROM + t_stream + WHERE + stream_id = :stream_id AND key = :key AND + version <= :version + ORDER BY version DESC LIMIT 1 + "; + + pub const GET_NEXT_KEY_VALUE_STATEMENT_INCLUSIVE: &'static str = " + SELECT version, key, start_index, end_index FROM + t_stream + WHERE + stream_id = :stream_id AND key >= :key AND version <= :version + ORDER BY key ASC, version DESC LIMIT 1 + "; + + pub const GET_NEXT_KEY_VALUE_STATEMENT: &'static str = " + SELECT version, key, start_index, end_index FROM + t_stream + WHERE + stream_id = :stream_id AND key > :key AND version <= :version + ORDER BY key ASC, version DESC LIMIT 1 + "; + + pub const GET_PREV_KEY_VALUE_STATEMENT_INCLUSIVE: 
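+ // The next/prev iterator queries pick, among the qualifying keys, the one
+ // closest to the probe key and, for that key, its newest version at or below
+ // :version (`ORDER BY key, version DESC LIMIT 1`).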
&'static str = " + SELECT version, key, start_index, end_index FROM + t_stream + WHERE + stream_id = :stream_id AND key <= :key AND version <= :version + ORDER BY key DESC, version DESC LIMIT 1 + "; + + pub const GET_PREV_KEY_VALUE_STATEMENT: &'static str = " + SELECT version, key, start_index, end_index FROM + t_stream + WHERE + stream_id = :stream_id AND key < :key AND version <= :version + ORDER BY key DESC, version DESC LIMIT 1 + "; + + pub const GET_FIRST_KEY_VALUE_STATEMENT: &'static str = " + SELECT version, key, start_index, end_index FROM + t_stream + WHERE + stream_id = :stream_id AND version <= :version + ORDER BY key ASC, version DESC LIMIT 1 + "; + + pub const GET_LAST_KEY_VALUE_STATEMENT: &'static str = " + SELECT version, key, start_index, end_index FROM + t_stream + WHERE + stream_id = :stream_id AND version <= :version + ORDER BY key DESC, version DESC LIMIT 1 + "; + + pub const CREATE_MISC_TABLE_STATEMENT: &'static str = " + CREATE TABLE IF NOT EXISTS t_misc ( + id INTEGER NOT NULL PRIMARY KEY, + data_sync_progress INTEGER NOT NULL, + stream_replay_progress INTEGER NOT NULL, + stream_ids BLOB NOT NULL + ) WITHOUT ROWID + "; + + pub const CREATE_STREAM_TABLE_STATEMENT: &'static str = " + CREATE TABLE IF NOT EXISTS t_stream ( + stream_id BLOB NOT NULL, + key BLOB NOT NULL, + version INTEGER NOT NULL, + start_index INTEGER NOT NULL, + end_index INTEGER NOT NULL, + PRIMARY KEY (stream_id, key, version) + ) WITHOUT ROWID + "; + + pub const CREATE_STREAM_INDEX_STATEMENTS: [&'static str; 2] = [ + "CREATE INDEX IF NOT EXISTS stream_key_idx ON t_stream(stream_id, key)", + "CREATE INDEX IF NOT EXISTS stream_version_idx ON t_stream(version)", + ]; + + pub const CREATE_ACCESS_CONTROL_TABLE_STATEMENT: &'static str = " + CREATE TABLE IF NOT EXISTS t_access_control ( + stream_id BLOB NOT NULL, + key BLOB, + version INTEGER NOT NULL, + account BLOB, + op_type INTEGER NOT NULL, + operator BLOB NOT NULL + ) + "; + + pub const CREATE_ACCESS_CONTROL_INDEX_STATEMENTS: [&'static str; 5] = [ + "CREATE INDEX IF NOT EXISTS ac_version_index ON t_access_control(version)", + "CREATE INDEX IF NOT EXISTS ac_op_type_index ON t_access_control(op_type)", + "CREATE INDEX IF NOT EXISTS ac_account_index ON t_access_control(stream_id, account)", + "CREATE INDEX IF NOT EXISTS ac_key_index ON t_access_control(stream_id, key)", + "CREATE INDEX IF NOT EXISTS ac_account_key_index ON t_access_control(stream_id, key, account)", + ]; + + pub const CREATE_TX_TABLE_STATEMENT: &'static str = " + CREATE TABLE IF NOT EXISTS t_tx ( + tx_seq INTEGER NOT NULL PRIMARY KEY, + result TEXT + ) WITHOUT ROWID + "; + + pub const CREATE_TX_INDEX_STATEMENTS: [&'static str; 1] = + ["CREATE INDEX IF NOT EXISTS tx_result_idex ON t_tx(result)"]; +} diff --git a/node/storage_with_stream/src/store/store_manager.rs b/node/storage_with_stream/src/store/store_manager.rs new file mode 100644 index 0000000..2af0c7d --- /dev/null +++ b/node/storage_with_stream/src/store/store_manager.rs @@ -0,0 +1,445 @@ +use anyhow::{Error, Result}; +use async_trait::async_trait; +use ethereum_types::{H160, H256}; +use shared_types::{ + AccessControlSet, Chunk, ChunkArray, ChunkArrayWithProof, ChunkWithProof, DataRoot, + FlowRangeProof, KeyValuePair, StreamWriteSet, Transaction, +}; +use std::path::Path; +use std::sync::Arc; +use storage::log_store::config::Configurable; +use storage::log_store::log_manager::LogConfig; +use storage::log_store::{LogStoreChunkRead, LogStoreChunkWrite, LogStoreRead, LogStoreWrite}; +use storage::LogManager; +use 
tracing::instrument; + +use super::stream_store::StreamStore; +use super::{StreamRead, StreamWrite}; + +/// 256 Bytes +pub const ENTRY_SIZE: usize = 256; +/// 1024 Entries. +pub const PORA_CHUNK_SIZE: usize = 1024; + +pub const COL_TX: u32 = 0; +pub const COL_ENTRY_BATCH: u32 = 1; +pub const COL_TX_DATA_ROOT_INDEX: u32 = 2; +pub const COL_ENTRY_BATCH_ROOT: u32 = 3; +pub const COL_TX_COMPLETED: u32 = 4; +pub const COL_MISC: u32 = 5; +pub const COL_NUM: u32 = 6; + +pub struct StoreManager { + log_store: LogManager, + stream_store: StreamStore, +} + +impl LogStoreChunkWrite for StoreManager { + fn put_chunks(&mut self, tx_seq: u64, chunks: ChunkArray) -> Result<()> { + self.log_store.put_chunks(tx_seq, chunks) + } + + fn remove_all_chunks(&self, tx_seq: u64) -> crate::error::Result<()> { + self.log_store.remove_all_chunks(tx_seq) + } + + fn put_chunks_with_tx_hash( + &mut self, + tx_seq: u64, + tx_hash: H256, + chunks: ChunkArray, + ) -> storage::error::Result { + self.log_store + .put_chunks_with_tx_hash(tx_seq, tx_hash, chunks) + } +} + +impl LogStoreWrite for StoreManager { + #[instrument(skip(self))] + fn put_tx(&mut self, tx: Transaction) -> Result<()> { + self.log_store.put_tx(tx) + } + + fn finalize_tx(&mut self, tx_seq: u64) -> Result<()> { + self.log_store.finalize_tx(tx_seq) + } + + fn finalize_tx_with_hash( + &mut self, + tx_seq: u64, + tx_hash: H256, + ) -> storage::error::Result { + self.log_store.finalize_tx_with_hash(tx_seq, tx_hash) + } + + fn put_sync_progress(&self, progress: (u64, H256)) -> Result<()> { + self.log_store.put_sync_progress(progress) + } + + fn revert_to(&mut self, tx_seq: u64) -> Result> { + self.log_store.revert_to(tx_seq) + } +} + +impl LogStoreChunkRead for StoreManager { + fn get_chunk_by_tx_and_index( + &self, + tx_seq: u64, + index: usize, + ) -> crate::error::Result> { + self.log_store.get_chunk_by_tx_and_index(tx_seq, index) + } + + fn get_chunks_by_tx_and_index_range( + &self, + tx_seq: u64, + index_start: usize, + index_end: usize, + ) -> crate::error::Result> { + self.log_store + .get_chunks_by_tx_and_index_range(tx_seq, index_start, index_end) + } + + fn get_chunk_by_data_root_and_index( + &self, + data_root: &DataRoot, + index: usize, + ) -> crate::error::Result> { + self.log_store + .get_chunk_by_data_root_and_index(data_root, index) + } + + fn get_chunks_by_data_root_and_index_range( + &self, + data_root: &DataRoot, + index_start: usize, + index_end: usize, + ) -> crate::error::Result> { + self.log_store + .get_chunks_by_data_root_and_index_range(data_root, index_start, index_end) + } + + fn get_chunk_index_list(&self, tx_seq: u64) -> crate::error::Result> { + self.log_store.get_chunk_index_list(tx_seq) + } + + fn get_chunk_by_flow_index( + &self, + index: u64, + length: u64, + ) -> crate::error::Result> { + self.log_store.get_chunk_by_flow_index(index, length) + } +} + +impl LogStoreRead for StoreManager { + fn get_tx_by_seq_number(&self, seq: u64) -> crate::error::Result> { + self.log_store.get_tx_by_seq_number(seq) + } + + fn get_tx_seq_by_data_root(&self, data_root: &DataRoot) -> crate::error::Result> { + self.log_store.get_tx_seq_by_data_root(data_root) + } + + fn get_chunk_with_proof_by_tx_and_index( + &self, + tx_seq: u64, + index: usize, + ) -> crate::error::Result> { + self.log_store + .get_chunk_with_proof_by_tx_and_index(tx_seq, index) + } + + fn get_chunks_with_proof_by_tx_and_index_range( + &self, + tx_seq: u64, + index_start: usize, + index_end: usize, + ) -> crate::error::Result> { + self.log_store + 
.get_chunks_with_proof_by_tx_and_index_range(tx_seq, index_start, index_end) + } + + fn check_tx_completed(&self, tx_seq: u64) -> crate::error::Result { + self.log_store.check_tx_completed(tx_seq) + } + + fn validate_range_proof(&self, tx_seq: u64, data: &ChunkArrayWithProof) -> Result { + self.log_store.validate_range_proof(tx_seq, data) + } + + fn get_sync_progress(&self) -> Result> { + self.log_store.get_sync_progress() + } + + fn next_tx_seq(&self) -> u64 { + self.log_store.next_tx_seq() + } + + fn get_proof_at_root( + &self, + root: &DataRoot, + index: u64, + length: u64, + ) -> Result { + self.log_store.get_proof_at_root(root, index, length) + } + + fn get_context(&self) -> Result<(DataRoot, u64)> { + self.log_store.get_context() + } +} + +impl Configurable for StoreManager { + fn get_config(&self, key: &[u8]) -> Result>> { + self.log_store.get_config(key) + } + + fn set_config(&self, key: &[u8], value: &[u8]) -> Result<()> { + self.log_store.set_config(key, value) + } + + fn remove_config(&self, key: &[u8]) -> Result<()> { + self.log_store.remove_config(key) + } + + fn exec_configs(&self, tx: storage::log_store::config::ConfigTx) -> Result<()> { + self.log_store.exec_configs(tx) + } +} + +#[async_trait] +impl StreamRead for StoreManager { + async fn get_holding_stream_ids(&self) -> crate::error::Result> { + self.stream_store.get_stream_ids().await + } + + async fn get_stream_data_sync_progress(&self) -> Result { + self.stream_store.get_stream_data_sync_progress().await + } + + async fn get_stream_replay_progress(&self) -> Result { + self.stream_store.get_stream_replay_progress().await + } + + async fn get_latest_version_before( + &self, + stream_id: H256, + key: Arc>, + before: u64, + ) -> Result { + self.stream_store + .get_latest_version_before(stream_id, key, before) + .await + } + + async fn has_write_permission( + &self, + account: H160, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result { + self.stream_store + .has_write_permission(account, stream_id, key, version) + .await + } + + async fn is_new_stream(&self, stream_id: H256, version: u64) -> Result { + self.stream_store.is_new_stream(stream_id, version).await + } + + async fn is_admin(&self, account: H160, stream_id: H256, version: u64) -> Result { + self.stream_store + .is_admin(account, stream_id, version) + .await + } + + async fn is_special_key( + &self, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result { + self.stream_store + .is_special_key(stream_id, key, version) + .await + } + + async fn is_writer_of_key( + &self, + account: H160, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result { + self.stream_store + .is_writer_of_key(account, stream_id, key, version) + .await + } + + async fn is_writer_of_stream( + &self, + account: H160, + stream_id: H256, + version: u64, + ) -> Result { + self.stream_store + .is_writer_of_stream(account, stream_id, version) + .await + } + + async fn get_stream_key_value( + &self, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result> { + self.stream_store + .get_stream_key_value(stream_id, key, version) + .await + } + + async fn get_next_stream_key_value( + &self, + stream_id: H256, + key: Arc>, + inclusive: bool, + version: u64, + ) -> Result> { + self.stream_store + .get_next_stream_key_value(stream_id, key, version, inclusive) + .await + } + + async fn get_prev_stream_key_value( + &self, + stream_id: H256, + key: Arc>, + inclusive: bool, + version: u64, + ) -> Result> { + self.stream_store + .get_prev_stream_key_value(stream_id, key, version, 
inclusive) + .await + } + + async fn get_first(&self, stream_id: H256, version: u64) -> Result> { + self.stream_store.get_first(stream_id, version).await + } + + async fn get_last(&self, stream_id: H256, version: u64) -> Result> { + self.stream_store.get_last(stream_id, version).await + } +} + +#[async_trait] +impl StreamWrite for StoreManager { + async fn reset_stream_sync(&self, stream_ids: Vec) -> Result<()> { + self.stream_store.reset_stream_sync(stream_ids).await + } + + async fn update_stream_ids(&self, stream_ids: Vec) -> Result<()> { + self.stream_store.update_stream_ids(stream_ids).await + } + + // update the progress and return the next tx_seq to sync + async fn update_stream_data_sync_progress(&self, from: u64, progress: u64) -> Result { + if self + .stream_store + .update_stream_data_sync_progress(from, progress) + .await? + > 0 + { + Ok(progress) + } else { + Ok(self.stream_store.get_stream_data_sync_progress().await?) + } + } + + // update the progress and return the next tx_seq to replay + async fn update_stream_replay_progress(&self, from: u64, progress: u64) -> Result { + if self + .stream_store + .update_stream_replay_progress(from, progress) + .await? + > 0 + { + Ok(progress) + } else { + Ok(self.stream_store.get_stream_replay_progress().await?) + } + } + + async fn put_stream( + &self, + tx_seq: u64, + data_merkle_root: H256, + result: String, + commit_data: Option<(StreamWriteSet, AccessControlSet)>, + ) -> Result<()> { + match self.log_store.get_tx_by_seq_number(tx_seq) { + Ok(Some(tx)) => { + if tx.data_merkle_root != data_merkle_root { + return Err(Error::msg("data merkle root deos not match")); + } + } + _ => { + return Err(Error::msg("tx does not found")); + } + } + + self.stream_store + .put_stream(tx_seq, result, commit_data) + .await + } + + async fn get_tx_result(&self, tx_seq: u64) -> Result> { + self.stream_store.get_tx_result(tx_seq).await + } + + async fn revert_stream(&mut self, tx_seq: u64) -> Result> { + self.stream_store.revert_to(tx_seq).await?; + self.log_store.revert_to(tx_seq) + } +} + +impl StoreManager { + pub async fn memorydb(config: LogConfig) -> Result { + let stream_store = StreamStore::new_in_memory().await?; + stream_store.create_tables_if_not_exist().await?; + Ok(Self { + log_store: LogManager::memorydb(config)?, + stream_store, + }) + } + + pub async fn rocks_db( + config: LogConfig, + path: impl AsRef, + kv_db_file: impl AsRef, + ) -> Result { + let stream_store = StreamStore::new(kv_db_file.as_ref()).await?; + stream_store.create_tables_if_not_exist().await?; + Ok(Self { + log_store: LogManager::rocksdb(config, path)?, + stream_store, + }) + } +} + +#[macro_export] +macro_rules! 
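+ // Usage sketch (illustrative): inside a function returning `Result<Option<T>>`,
+ // `try_option!` unwraps an `Option` and returns `Ok(None)` early when it is `None`:
+ //     let tx = try_option!(store.get_tx_by_seq_number(seq)?);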
try_option { + ($r: ident) => { + match $r { + Some(v) => v, + None => return Ok(None), + } + }; + ($e: expr) => { + match $e { + Some(v) => v, + None => return Ok(None), + } + }; +} diff --git a/node/storage_with_stream/src/store/stream_store.rs b/node/storage_with_stream/src/store/stream_store.rs new file mode 100644 index 0000000..470f63c --- /dev/null +++ b/node/storage_with_stream/src/store/stream_store.rs @@ -0,0 +1,739 @@ +use anyhow::{bail, Result}; +use ethereum_types::{H160, H256}; +use shared_types::{AccessControlSet, KeyValuePair, StreamWriteSet}; +use ssz::{Decode, Encode}; +use std::{path::Path, sync::Arc}; + +use rusqlite::named_params; +use tokio_rusqlite::Connection; + +use crate::error::Error; + +use super::sqlite_db_statements::SqliteDBStatements; + +pub struct StreamStore { + connection: Connection, +} + +fn convert_to_i64(x: u64) -> i64 { + if x > i64::MAX as u64 { + (x - i64::MAX as u64 - 1) as i64 + } else { + x as i64 - i64::MAX - 1 + } +} + +fn convert_to_u64(x: i64) -> u64 { + if x < 0 { + (x + i64::MAX + 1) as u64 + } else { + x as u64 + i64::MAX as u64 + 1 + } +} + +impl StreamStore { + pub async fn create_tables_if_not_exist(&self) -> Result<()> { + self.connection + .call(|conn| { + // misc table + conn.execute(SqliteDBStatements::CREATE_MISC_TABLE_STATEMENT, [])?; + // stream table + conn.execute(SqliteDBStatements::CREATE_STREAM_TABLE_STATEMENT, [])?; + for stmt in SqliteDBStatements::CREATE_STREAM_INDEX_STATEMENTS.iter() { + conn.execute(stmt, [])?; + } + // access control table + conn.execute( + SqliteDBStatements::CREATE_ACCESS_CONTROL_TABLE_STATEMENT, + [], + )?; + for stmt in SqliteDBStatements::CREATE_ACCESS_CONTROL_INDEX_STATEMENTS.iter() { + conn.execute(stmt, [])?; + } + // tx table + conn.execute(SqliteDBStatements::CREATE_TX_TABLE_STATEMENT, [])?; + for stmt in SqliteDBStatements::CREATE_TX_INDEX_STATEMENTS.iter() { + conn.execute(stmt, [])?; + } + Ok(()) + }) + .await + } + + pub async fn new_in_memory() -> Result { + let connection = Connection::open_in_memory().await?; + Ok(Self { connection }) + } + + pub async fn new(path: impl AsRef) -> Result { + let connection = Connection::open(path).await?; + Ok(Self { connection }) + } + + pub async fn get_stream_ids(&self) -> Result> { + self.connection + .call(|conn| { + let mut stmt = conn.prepare(SqliteDBStatements::GET_STREAM_IDS_STATEMENT)?; + let mut rows = stmt.query_map([], |row| row.get(0))?; + if let Some(raw_data) = rows.next() { + let raw_stream_ids: Vec = raw_data?; + return Ok(Vec::::from_ssz_bytes(&raw_stream_ids).map_err(Error::from)?); + } + Ok(vec![]) + }) + .await + } + + pub async fn update_stream_ids(&self, stream_ids: Vec) -> Result<()> { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::UPDATE_STREAM_IDS_STATEMENT)?; + stmt.execute(named_params! { + ":stream_ids": stream_ids, + ":id": 0, + })?; + Ok(()) + }) + .await + } + + pub async fn reset_stream_sync(&self, stream_ids: Vec) -> Result<()> { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::RESET_STERAM_SYNC_STATEMENT)?; + stmt.execute(named_params! 
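+ // The progress values below are stored via `convert_to_i64`, which together with
+ // `convert_to_u64` maps the full u64 range order-preservingly onto i64 (offset by
+ // 2^63) so it fits SQLite's signed INTEGER column. Illustrative round trips:
+ //     assert_eq!(convert_to_u64(convert_to_i64(0)), 0);
+ //     assert_eq!(convert_to_u64(convert_to_i64(u64::MAX)), u64::MAX);
+ //     assert!(convert_to_i64(1) < convert_to_i64(2));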
{ + ":data_sync_progress": convert_to_i64(0), + ":stream_replay_progress": convert_to_i64(0), + ":stream_ids": stream_ids, + ":id": 0, + })?; + Ok(()) + }) + .await + } + + pub async fn get_stream_data_sync_progress(&self) -> Result { + self.connection + .call(|conn| { + let mut stmt = + conn.prepare(SqliteDBStatements::GET_STREAM_DATA_SYNC_PROGRESS_STATEMENT)?; + let mut rows = stmt.query_map([], |row| row.get(0))?; + if let Some(raw_data) = rows.next() { + return Ok(convert_to_u64(raw_data?)); + } + Ok(0) + }) + .await + } + + pub async fn update_stream_data_sync_progress( + &self, + from: u64, + progress: u64, + ) -> Result { + self.connection + .call(move |conn| { + let mut stmt = + conn.prepare(SqliteDBStatements::UPDATE_STREAM_DATA_SYNC_PROGRESS_STATEMENT)?; + Ok(stmt.execute(named_params! { + ":data_sync_progress": convert_to_i64(progress), + ":id": 0, + ":from": convert_to_i64(from), + })?) + }) + .await + } + + pub async fn get_stream_replay_progress(&self) -> Result { + self.connection + .call(|conn| { + let mut stmt = + conn.prepare(SqliteDBStatements::GET_STREAM_REPLAY_PROGRESS_STATEMENT)?; + let mut rows = stmt.query_map([], |row| row.get(0))?; + if let Some(raw_data) = rows.next() { + return Ok(convert_to_u64(raw_data?)); + } + Ok(0) + }) + .await + } + + pub async fn update_stream_replay_progress(&self, from: u64, progress: u64) -> Result { + self.connection + .call(move |conn| { + let mut stmt = + conn.prepare(SqliteDBStatements::UPDATE_STREAM_REPLAY_PROGRESS_STATEMENT)?; + Ok(stmt.execute(named_params! { + ":stream_replay_progress": convert_to_i64(progress), + ":id": 0, + ":from": convert_to_i64(from), + })?) + }) + .await + } + + pub async fn get_latest_version_before( + &self, + stream_id: H256, + key: Arc>, + before: u64, + ) -> Result { + self.connection + .call(move |conn| { + let mut stmt = + conn.prepare(SqliteDBStatements::GET_LATEST_VERSION_BEFORE_STATEMENT)?; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":key": key, + ":before": convert_to_i64(before), + }, + |row| row.get(0), + )?; + if let Some(raw_data) = rows.next() { + match raw_data { + Ok(x) => { + return Ok(convert_to_u64(x)); + } + Err(_) => return Ok(0), + } + } + Ok(0) + }) + .await + } + + pub async fn is_new_stream(&self, stream_id: H256, version: u64) -> Result { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::IS_NEW_STREAM_STATEMENT)?; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":version": convert_to_i64(version), + }, + |_| Ok(1), + )?; + if rows.next().is_some() { + return Ok(false); + } + Ok(true) + }) + .await + } + + pub async fn is_special_key( + &self, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::IS_SPECIAL_KEY_STATEMENT)?; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":key": key, + ":version": convert_to_i64(version), + }, + |row| row.get(0), + )?; + if let Some(raw_data) = rows.next() { + match raw_data? 
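+ // IS_SPECIAL_KEY_STATEMENT only returns the most recent SET_KEY_TO_SPECIAL /
+ // SET_KEY_TO_NORMAL operation at or before `version`, so the latest toggle
+ // decides whether the key is special.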
{ + AccessControlOps::SET_KEY_TO_NORMAL => Ok(false), + AccessControlOps::SET_KEY_TO_SPECIAL => Ok(true), + _ => { + bail!("is_special_key: unexpected access control op type"); + } + } + } else { + Ok(false) + } + }) + .await + } + + pub async fn is_admin(&self, account: H160, stream_id: H256, version: u64) -> Result { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::IS_ADMIN_STATEMENT)?; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":account": account.as_ssz_bytes(), + ":version": convert_to_i64(version), + }, + |row| row.get(0), + )?; + if let Some(raw_data) = rows.next() { + match raw_data? { + AccessControlOps::GRANT_ADMIN_ROLE => { + return Ok(true); + } + AccessControlOps::RENOUNCE_ADMIN_ROLE => { + return Ok(false); + } + _ => { + bail!("is_admin: unexpected access control type"); + } + } + } + Ok(false) + }) + .await + } + + pub async fn is_writer_of_key( + &self, + account: H160, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::IS_WRITER_FOR_KEY_STATEMENT)?; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":key": key, + ":account": account.as_ssz_bytes(), + ":version": convert_to_i64(version), + }, + |row| row.get(0), + )?; + if let Some(raw_data) = rows.next() { + match raw_data? { + AccessControlOps::GRANT_SPECIAL_WRITER_ROLE => { + return Ok(true); + } + AccessControlOps::REVOKE_SPECIAL_WRITER_ROLE + | AccessControlOps::RENOUNCE_SPECIAL_WRITER_ROLE => return Ok(false), + _ => { + bail!("is_writer_of_key: unexpected access control op type"); + } + } + }; + Ok(false) + }) + .await + } + + pub async fn is_writer_of_stream( + &self, + account: H160, + stream_id: H256, + version: u64, + ) -> Result { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::IS_WRITER_FOR_STREAM_STATEMENT)?; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":account": account.as_ssz_bytes(), + ":version": convert_to_i64(version), + }, + |row| row.get(0), + )?; + if let Some(raw_data) = rows.next() { + match raw_data? { + AccessControlOps::GRANT_WRITER_ROLE => Ok(true), + AccessControlOps::REVOKE_WRITER_ROLE + | AccessControlOps::RENOUNCE_WRITER_ROLE => Ok(false), + _ => { + bail!("is_writer_of_stream: unexpected access control op type"); + } + } + } else { + Ok(false) + } + }) + .await + } + + pub async fn has_write_permission( + &self, + account: H160, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result { + if self.is_new_stream(stream_id, version).await? { + return Ok(true); + } + if self.is_admin(account, stream_id, version).await? { + return Ok(true); + } + if self.is_special_key(stream_id, key.clone(), version).await? { + self.is_writer_of_key(account, stream_id, key.clone(), version) + .await + } else { + self.is_writer_of_stream(account, stream_id, version).await + } + } + + pub async fn put_stream( + &self, + tx_seq: u64, + result: String, + commit_data: Option<(StreamWriteSet, AccessControlSet)>, + ) -> Result<()> { + self.connection + .call(move |conn| { + let tx = conn.transaction()?; + let version = tx_seq; + + if tx.execute( + SqliteDBStatements::UPDATE_STREAM_REPLAY_PROGRESS_STATEMENT, + named_params! { + ":stream_replay_progress": convert_to_i64(version + 1), + ":id": 0, + ":from": convert_to_i64(version), + }, + )? 
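+ // The guarded UPDATE above acts as a compare-and-swap on the replay progress:
+ // it only matches a row when the stored progress still equals `version`, so a
+ // zero row count means this tx_seq is not the next one to replay.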
== 0 + { + return Err(anyhow::Error::msg("tx_seq not match")); + } + + if let Some((stream_write_set, access_control_set)) = commit_data { + for stream_write in stream_write_set.stream_writes.iter() { + tx.execute( + SqliteDBStatements::PUT_STREAM_WRITE_STATEMENT, + named_params! { + ":stream_id": stream_write.stream_id.as_ssz_bytes(), + ":key": stream_write.key, + ":version": convert_to_i64(version), + ":start_index": stream_write.start_index, + ":end_index": stream_write.end_index + }, + )?; + } + for access_control in access_control_set.access_controls.iter() { + tx.execute( + SqliteDBStatements::PUT_ACCESS_CONTROL_STATEMENT, + named_params! { + ":stream_id": access_control.stream_id.as_ssz_bytes(), + ":key": access_control.key, + ":version": convert_to_i64(version), + ":account": access_control.account.as_ssz_bytes(), + ":op_type": access_control.op_type, + ":operator": access_control.operator.as_ssz_bytes(), + }, + )?; + } + } + tx.execute( + SqliteDBStatements::FINALIZE_TX_STATEMENT, + named_params! { + ":tx_seq": convert_to_i64(tx_seq), + ":result": result, + }, + )?; + tx.commit()?; + Ok(()) + }) + .await + } + + pub async fn get_stream_key_value( + &self, + stream_id: H256, + key: Arc>, + version: u64, + ) -> Result> { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::GET_STREAM_KEY_VALUE_STATEMENT)?; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":key": key, + ":version": convert_to_i64(version), + }, + |row| { + Ok(KeyValuePair { + stream_id, + key: vec![], + start_index: row.get(1)?, + end_index: row.get(2)?, + version: convert_to_u64(row.get(0)?), + }) + }, + )?; + if let Some(raw_data) = rows.next() { + return Ok(Some(raw_data?)); + } + Ok(None) + }) + .await + } + + pub async fn get_next_stream_key_value( + &self, + stream_id: H256, + key: Arc>, + version: u64, + inclusive: bool, + ) -> Result> { + self.connection + .call(move |conn| { + let mut stmt = if inclusive { + conn.prepare(SqliteDBStatements::GET_NEXT_KEY_VALUE_STATEMENT_INCLUSIVE)? + } else { + conn.prepare(SqliteDBStatements::GET_NEXT_KEY_VALUE_STATEMENT)? + }; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":key": key, + ":version": convert_to_i64(version), + }, + |row| { + Ok(KeyValuePair { + stream_id, + key: row.get(1)?, + start_index: row.get(2)?, + end_index: row.get(3)?, + version: convert_to_u64(row.get(0)?), + }) + }, + )?; + if let Some(raw_data) = rows.next() { + return Ok(Some(raw_data?)); + } + Ok(None) + }) + .await + } + + pub async fn get_prev_stream_key_value( + &self, + stream_id: H256, + key: Arc>, + version: u64, + inclusive: bool, + ) -> Result> { + self.connection + .call(move |conn| { + let mut stmt = if inclusive { + conn.prepare(SqliteDBStatements::GET_PREV_KEY_VALUE_STATEMENT_INCLUSIVE)? + } else { + conn.prepare(SqliteDBStatements::GET_PREV_KEY_VALUE_STATEMENT)? + }; + let mut rows = stmt.query_map( + named_params! 
{ + ":stream_id": stream_id.as_ssz_bytes(), + ":key": key, + ":version": convert_to_i64(version), + }, + |row| { + Ok(KeyValuePair { + stream_id, + key: row.get(1)?, + start_index: row.get(2)?, + end_index: row.get(3)?, + version: convert_to_u64(row.get(0)?), + }) + }, + )?; + if let Some(raw_data) = rows.next() { + return Ok(Some(raw_data?)); + } + Ok(None) + }) + .await + } + + pub async fn get_first(&self, stream_id: H256, version: u64) -> Result> { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::GET_FIRST_KEY_VALUE_STATEMENT)?; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":version": convert_to_i64(version), + }, + |row| { + Ok(KeyValuePair { + stream_id, + key: row.get(1)?, + start_index: row.get(2)?, + end_index: row.get(3)?, + version: convert_to_u64(row.get(0)?), + }) + }, + )?; + if let Some(raw_data) = rows.next() { + return Ok(Some(raw_data?)); + } + Ok(None) + }) + .await + } + + pub async fn get_last(&self, stream_id: H256, version: u64) -> Result> { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::GET_LAST_KEY_VALUE_STATEMENT)?; + let mut rows = stmt.query_map( + named_params! { + ":stream_id": stream_id.as_ssz_bytes(), + ":version": convert_to_i64(version), + }, + |row| { + Ok(KeyValuePair { + stream_id, + key: row.get(1)?, + start_index: row.get(2)?, + end_index: row.get(3)?, + version: convert_to_u64(row.get(0)?), + }) + }, + )?; + if let Some(raw_data) = rows.next() { + return Ok(Some(raw_data?)); + } + Ok(None) + }) + .await + } + + pub async fn get_tx_result(&self, tx_seq: u64) -> Result> { + self.connection + .call(move |conn| { + let mut stmt = conn.prepare(SqliteDBStatements::GET_TX_RESULT_STATEMENT)?; + let mut rows = stmt + .query_map(named_params! {":tx_seq": convert_to_i64(tx_seq)}, |row| { + row.get(0) + })?; + if let Some(raw_data) = rows.next() { + return Ok(Some(raw_data?)); + } + Ok(None) + }) + .await + } + + pub async fn revert_to(&self, tx_seq: u64) -> Result<()> { + let stream_data_sync_progress = self.get_stream_data_sync_progress().await?; + let stream_replay_progress = self.get_stream_replay_progress().await?; + + assert!( + stream_data_sync_progress >= stream_replay_progress, + "stream replay progress ahead than data sync progress" + ); + + if tx_seq == u64::MAX { + self.connection + .call(move |conn| { + let tx_seq = convert_to_i64(0); + let tx = conn.transaction()?; + tx.execute( + SqliteDBStatements::UPDATE_STREAM_DATA_SYNC_PROGRESS_STATEMENT, + named_params! { + ":data_sync_progress": tx_seq, + ":id": 0, + ":from": convert_to_i64(stream_data_sync_progress), + }, + )?; + + tx.execute( + SqliteDBStatements::UPDATE_STREAM_REPLAY_PROGRESS_STATEMENT, + named_params! { + ":stream_replay_progress": tx_seq, + ":id": 0, + ":from": convert_to_i64(stream_replay_progress), + }, + )?; + + tx.execute(SqliteDBStatements::DELETE_ALL_TX_STATEMENT, [])?; + tx.execute(SqliteDBStatements::DELETE_ALL_STREAM_WRITE_STATEMENT, [])?; + tx.execute(SqliteDBStatements::DELETE_ALL_ACCESS_CONTROL_STATEMENT, [])?; + + tx.commit()?; + Ok::<(), anyhow::Error>(()) + }) + .await?; + } else if tx_seq < stream_data_sync_progress { + if tx_seq < stream_replay_progress { + self.connection + .call(move |conn| { + let tx_seq = convert_to_i64(tx_seq); + let tx = conn.transaction()?; + tx.execute( + SqliteDBStatements::UPDATE_STREAM_DATA_SYNC_PROGRESS_STATEMENT, + named_params! 
{ + ":data_sync_progress": tx_seq + 1, + ":id": 0, + ":from": convert_to_i64(stream_data_sync_progress), + }, + )?; + + tx.execute( + SqliteDBStatements::UPDATE_STREAM_REPLAY_PROGRESS_STATEMENT, + named_params! { + ":stream_replay_progress": tx_seq + 1, + ":id": 0, + ":from": convert_to_i64(stream_replay_progress), + }, + )?; + + tx.execute( + SqliteDBStatements::DELETE_TX_STATEMENT, + named_params! {":tx_seq": tx_seq}, + )?; + tx.execute( + SqliteDBStatements::DELETE_STREAM_WRITE_STATEMENT, + named_params! {":version": tx_seq}, + )?; + tx.execute( + SqliteDBStatements::DELETE_ACCESS_CONTROL_STATEMENT, + named_params! {":version": tx_seq}, + )?; + + tx.commit()?; + Ok::<(), anyhow::Error>(()) + }) + .await?; + } else { + self.update_stream_data_sync_progress(stream_data_sync_progress, tx_seq) + .await?; + } + } + + Ok(()) + } +} + +pub struct AccessControlOps; + +impl AccessControlOps { + pub const GRANT_ADMIN_ROLE: u8 = 0x00; + pub const RENOUNCE_ADMIN_ROLE: u8 = 0x01; + pub const SET_KEY_TO_SPECIAL: u8 = 0x10; + pub const SET_KEY_TO_NORMAL: u8 = 0x11; + pub const GRANT_WRITER_ROLE: u8 = 0x20; + pub const REVOKE_WRITER_ROLE: u8 = 0x21; + pub const RENOUNCE_WRITER_ROLE: u8 = 0x22; + pub const GRANT_SPECIAL_WRITER_ROLE: u8 = 0x30; + pub const REVOKE_SPECIAL_WRITER_ROLE: u8 = 0x31; + pub const RENOUNCE_SPECIAL_WRITER_ROLE: u8 = 0x32; +} + +pub fn to_access_control_op_name(x: u8) -> &'static str { + match x { + 0x00 => "GRANT_ADMIN_ROLE", + 0x01 => "RENOUNCE_ADMIN_ROLE", + 0x10 => "SET_KEY_TO_SPECIAL", + 0x11 => "SET_KEY_TO_NORMAL", + 0x20 => "GRANT_WRITER_ROLE", + 0x21 => "REVOKE_WRITER_ROLE", + 0x22 => "RENOUNCE_WRITER_ROLE", + 0x30 => "GRANT_SPECIAL_WRITER_ROLE", + 0x31 => "REVOKE_SPECIAL_WRITER_ROLE", + 0x32 => "RENOUNCE_SPECIAL_WRITER_ROLE", + _ => "UNKNOWN", + } +} diff --git a/node/stream/Cargo.toml b/node/stream/Cargo.toml new file mode 100644 index 0000000..f0eda56 --- /dev/null +++ b/node/stream/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "stream" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = { version = "=1.0.58", features = ["backtrace"] } +append_merkle = { path = "../../zerog-storage-rust/common/append_merkle" } +async-trait = "0.1.56" +ethereum-types = "0.14" +futures = "0.3.21" +jsonrpsee = { version = "0.14.0", features = ["full"] } +shared_types = { path = "../shared_types" } +task_executor = { path = "../../zerog-storage-rust/common/task_executor" } +tokio = "1.19.2" +ethers = { version = "^2", features = ["ws"] } +serde_json = "1.0.82" +storage_with_stream = { path = "../storage_with_stream" } +rpc = {path = "../rpc"} +contract-interface = { path = "../../zerog-storage-rust/common/contract-interface" } +rusqlite = { version = "0.28.0", features = ["bundled"] } +tracing = "0.1.35" +eth2_ssz = "0.4.0" +eth2_ssz_derive = "0.3.0" +thiserror = "1.0.37" diff --git a/node/stream/src/config.rs b/node/stream/src/config.rs new file mode 100644 index 0000000..5d252ca --- /dev/null +++ b/node/stream/src/config.rs @@ -0,0 +1,9 @@ +use std::collections::HashSet; + +use ethereum_types::H256; + +#[derive(Clone)] +pub struct Config { + pub stream_ids: Vec, + pub stream_set: HashSet, +} diff --git a/node/stream/src/lib.rs b/node/stream/src/lib.rs new file mode 100644 index 0000000..817501e --- /dev/null +++ b/node/stream/src/lib.rs @@ -0,0 +1,8 @@ +#[macro_use] +extern crate tracing; + +mod config; +mod stream_manager; + +pub use config::Config as StreamConfig; +pub use 
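+ // `StreamManager` (re-exported below) wires up this crate's two long-running
+ // tasks: a `StreamDataFetcher` that downloads transaction data segments and a
+ // `StreamReplayer` that replays the decoded writes into the stream KV store.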
stream_manager::StreamManager; diff --git a/node/stream/src/stream_manager/error.rs b/node/stream/src/stream_manager/error.rs new file mode 100644 index 0000000..3c36a50 --- /dev/null +++ b/node/stream/src/stream_manager/error.rs @@ -0,0 +1,11 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum ParseError { + #[error("Invalid stream data")] + InvalidData, + #[error("Stream read/write/access_control list too long")] + ListTooLong, + #[error("Only partial data are available")] + PartialDataAvailable, +} diff --git a/node/stream/src/stream_manager/mod.rs b/node/stream/src/stream_manager/mod.rs new file mode 100644 index 0000000..2f77e7f --- /dev/null +++ b/node/stream/src/stream_manager/mod.rs @@ -0,0 +1,78 @@ +mod error; +mod stream_data_fetcher; +mod stream_replayer; + +use crate::StreamConfig; +use anyhow::Result; +use ethereum_types::H256; +use jsonrpsee::http_client::HttpClient; +use ssz::Encode; +use std::{collections::HashSet, sync::Arc}; +use storage_with_stream::Store; +use task_executor::TaskExecutor; +use tokio::sync::RwLock; + +use self::{stream_data_fetcher::StreamDataFetcher, stream_replayer::StreamReplayer}; + +pub struct StreamManager; + +pub const RETRY_WAIT_MS: u64 = 1000; + +impl StreamManager { + pub async fn initialize( + config: &StreamConfig, + store: Arc>, + clients: Vec, + task_executor: TaskExecutor, + ) -> Result<(StreamDataFetcher, StreamReplayer)> { + // initialize + let holding_stream_ids = store.read().await.get_holding_stream_ids().await?; + let holding_stream_set: HashSet = + HashSet::from_iter(holding_stream_ids.iter().cloned()); + // ensure current stream id set is a subset of streams maintained in db + let mut reseted = false; + for id in config.stream_ids.iter() { + if !holding_stream_set.contains(id) { + // new stream id, replay from start + store + .write() + .await + .reset_stream_sync(config.stream_ids.as_ssz_bytes()) + .await?; + reseted = true; + break; + } + } + // is a subset, update stream ids in db + if !reseted && config.stream_ids.len() != holding_stream_ids.len() { + store + .write() + .await + .update_stream_ids(config.stream_ids.as_ssz_bytes()) + .await?; + } + + // spawn data sync and stream replay threads + let fetcher = + StreamDataFetcher::new(config.clone(), store.clone(), clients, task_executor).await?; + let replayer = StreamReplayer::new(config.clone(), store.clone()).await?; + Ok((fetcher, replayer)) + } + + pub fn spawn( + fetcher: StreamDataFetcher, + replayer: StreamReplayer, + executor: TaskExecutor, + ) -> Result<()> { + executor.spawn( + async move { Box::pin(fetcher.run()).await }, + "stream data fetcher", + ); + + executor.spawn( + async move { Box::pin(replayer.run()).await }, + "stream data replayer", + ); + Ok(()) + } +} diff --git a/node/stream/src/stream_manager/stream_data_fetcher.rs b/node/stream/src/stream_manager/stream_data_fetcher.rs new file mode 100644 index 0000000..81256a1 --- /dev/null +++ b/node/stream/src/stream_manager/stream_data_fetcher.rs @@ -0,0 +1,367 @@ +use crate::StreamConfig; +use anyhow::{anyhow, bail, Result}; +use jsonrpsee::http_client::HttpClient; +use rpc::ZgsRpcClient; +use shared_types::{ChunkArray, Transaction}; +use std::{ + cmp, + collections::{HashMap, VecDeque}, + sync::Arc, + time::Duration, +}; +use storage_with_stream::{log_store::log_manager::ENTRY_SIZE, Store}; +use task_executor::TaskExecutor; +use tokio::sync::{ + mpsc::{self, UnboundedSender}, + RwLock, +}; + +const RETRY_WAIT_MS: u64 = 1000; +const ENTRIES_PER_SEGMENT: usize = 1024; +const MAX_DOWNLOAD_TASK: usize 
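+ // Concurrency knobs: a transaction is fetched in segments of ENTRIES_PER_SEGMENT
+ // entries, with at most MAX_DOWNLOAD_TASK segments in flight round-robined over
+ // the configured RPC clients; each spawned download retries its RPC up to
+ // ALERT_CNT times, and `sync_data` bails out once a segment's failure counter
+ // reaches `clients.len() * MAX_RETRY`.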
= 5; +const ALERT_CNT: i32 = 10; +const MAX_RETRY: usize = 5; + +pub struct StreamDataFetcher { + config: StreamConfig, + store: Arc>, + clients: Vec, + task_executor: TaskExecutor, +} + +async fn download_with_proof( + client: HttpClient, + tx: Arc, + start_index: usize, + end_index: usize, + store: Arc>, + sender: UnboundedSender>, +) { + let mut fail_cnt = 0; + while fail_cnt < ALERT_CNT { + debug!("download_with_proof for {}", start_index); + match client + .download_segment_with_proof(tx.data_merkle_root, start_index / ENTRIES_PER_SEGMENT) + .await + { + Ok(Some(segment)) => { + if segment.data.len() % ENTRY_SIZE != 0 + || segment.data.len() / ENTRY_SIZE != end_index - start_index + { + debug!("invalid data length"); + if let Err(e) = sender.send(Err((start_index, end_index, true))) { + error!("send error: {:?}", e); + } + + return; + } + + if segment.root != tx.data_merkle_root { + debug!("invalid file root"); + if let Err(e) = sender.send(Err((start_index, end_index, true))) { + error!("send error: {:?}", e); + } + + return; + } + + if let Err(e) = segment.validate(ENTRIES_PER_SEGMENT) { + debug!("validate segment with error: {:?}", e); + + if let Err(e) = sender.send(Err((start_index, end_index, true))) { + error!("send error: {:?}", e); + } + return; + } + + if let Err(e) = store.write().await.put_chunks_with_tx_hash( + tx.seq, + tx.hash(), + ChunkArray { + data: segment.data, + start_index: (segment.index * ENTRIES_PER_SEGMENT) as u64, + }, + ) { + debug!("put segment with error: {:?}", e); + + if let Err(e) = sender.send(Err((start_index, end_index, true))) { + error!("send error: {:?}", e); + } + return; + } + + debug!("download start_index {:?} successful", start_index); + if let Err(e) = sender.send(Ok(())) { + error!("send error: {:?}", e); + } + + return; + } + Ok(None) => { + debug!( + "start_index {:?}, end_index {:?}, response is none", + start_index, end_index + ); + fail_cnt += 1; + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + } + Err(e) => { + warn!( + "start_index {:?}, end_index {:?}, response error: {:?}", + start_index, end_index, e + ); + fail_cnt += 1; + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + } + } + } + + if let Err(e) = sender.send(Err((start_index, end_index, false))) { + error!("send error: {:?}", e); + } +} + +impl StreamDataFetcher { + pub async fn new( + config: StreamConfig, + store: Arc>, + clients: Vec, + task_executor: TaskExecutor, + ) -> Result { + Ok(Self { + config, + store, + clients, + task_executor, + }) + } + + fn spawn_download_task( + &self, + client_index: &mut usize, + tx: Arc, + start_index: usize, + end_index: usize, + sender: &UnboundedSender>, + ) { + debug!( + "downloading start_index {:?}, end_index: {:?} from client index: {}", + start_index, end_index, client_index + ); + + self.task_executor.spawn( + download_with_proof( + self.clients[*client_index].clone(), + tx, + start_index, + end_index, + self.store.clone(), + sender.clone(), + ), + "download segment", + ); + + // round robin client + *client_index = (*client_index + 1) % self.clients.len(); + } + + async fn sync_data(&self, tx: &Transaction) -> Result<()> { + if self.store.read().await.check_tx_completed(tx.seq)? 
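+ // Nothing to do if the tx's data is already finalized in the log store.
+ // Otherwise the size is converted to entries by ceiling division below, e.g.
+ // (illustrative) a 1000-byte tx needs 1000 / 256 + 1 = 4 entries of ENTRY_SIZE bytes.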
{ + return Ok(()); + } + let tx_size_in_entry = if tx.size % ENTRY_SIZE as u64 == 0 { + tx.size / ENTRY_SIZE as u64 + } else { + tx.size / ENTRY_SIZE as u64 + 1 + }; + + let mut pending_entries = VecDeque::new(); + let mut task_counter = 0; + let mut client_index = 0; + let (sender, mut rx) = mpsc::unbounded_channel(); + let tx = Arc::new(tx.clone()); + + for i in (0..tx_size_in_entry).step_by(ENTRIES_PER_SEGMENT * MAX_DOWNLOAD_TASK) { + let tasks_end_index = cmp::min( + tx_size_in_entry, + i + (ENTRIES_PER_SEGMENT * MAX_DOWNLOAD_TASK) as u64, + ); + debug!( + "task_start_index: {:?}, tasks_end_index: {:?}, tx_size_in_entry: {:?}, root: {:?}", + i, tasks_end_index, tx_size_in_entry, tx.data_merkle_root + ); + for j in (i..tasks_end_index).step_by(ENTRIES_PER_SEGMENT) { + let task_end_index = cmp::min(tasks_end_index, j + ENTRIES_PER_SEGMENT as u64); + pending_entries.push_back((j as usize, task_end_index as usize)); + } + } + + // spawn download tasks + while task_counter < MAX_DOWNLOAD_TASK && !pending_entries.is_empty() { + let (start_index, end_index) = pending_entries.pop_front().unwrap(); + self.spawn_download_task( + &mut client_index, + tx.clone(), + start_index, + end_index, + &sender, + ); + task_counter += 1; + } + + let mut failed_tasks = HashMap::new(); + while task_counter > 0 { + if let Some(ret) = rx.recv().await { + match ret { + Ok(_) => { + if let Some((start_index, end_index)) = pending_entries.pop_front() { + self.spawn_download_task( + &mut client_index, + tx.clone(), + start_index, + end_index, + &sender, + ); + } else { + task_counter -= 1; + } + } + Err((start_index, end_index, data_err)) => { + warn!("Download data of tx_seq {:?}, start_index {:?}, end_index {:?}, failed",tx.seq, start_index, end_index); + + match failed_tasks.get_mut(&start_index) { + Some(c) => { + if data_err { + *c += 1; + } + + if *c == self.clients.len() * MAX_RETRY { + bail!(anyhow!(format!("Download segment failed, start_index {:?}, end_index: {:?}", start_index, end_index))); + } + } + _ => { + failed_tasks.insert(start_index, 1); + } + } + + self.spawn_download_task( + &mut client_index, + tx.clone(), + start_index, + end_index, + &sender, + ); + } + } + } + } + + self.store + .write() + .await + .finalize_tx_with_hash(tx.seq, tx.hash())?; + Ok(()) + } + + pub async fn run(&self) { + let mut tx_seq; + match self + .store + .read() + .await + .get_stream_data_sync_progress() + .await + { + Ok(progress) => { + tx_seq = progress; + } + Err(e) => { + error!("get stream data sync progress error: e={:?}", e); + return; + } + } + + let mut check_sync_progress = false; + loop { + if check_sync_progress { + match self + .store + .read() + .await + .get_stream_data_sync_progress() + .await + { + Ok(progress) => { + if tx_seq != progress { + debug!("reorg happened: tx_seq {}, progress {}", tx_seq, progress); + tx_seq = progress; + } + } + Err(e) => { + error!("get stream data sync progress error: e={:?}", e); + } + } + + check_sync_progress = false; + } + + info!("checking tx with sequence number {:?}..", tx_seq); + let maybe_tx = self.store.read().await.get_tx_by_seq_number(tx_seq); + match maybe_tx { + Ok(Some(tx)) => { + let mut skip = false; + if tx.stream_ids.is_empty() { + skip = true; + } else { + for id in tx.stream_ids.iter() { + if !self.config.stream_set.contains(id) { + skip = true; + break; + } + } + } + // sync data + if !skip { + info!("syncing data of tx with sequence number {:?}..", tx.seq); + match self.sync_data(&tx).await { + Ok(()) => { + info!("data of tx with sequence number 
{:?} synced.", tx.seq); + } + Err(e) => { + error!("stream data sync error: e={:?}", e); + check_sync_progress = true; + continue; + } + } + } else { + info!("tx {:?} is not in stream, skipped.", tx.seq); + } + // update progress, get next tx_seq to sync + match self + .store + .write() + .await + .update_stream_data_sync_progress(tx_seq, tx_seq + 1) + .await + { + Ok(next_tx_seq) => { + tx_seq = next_tx_seq; + } + Err(e) => { + error!("update stream data sync progress error: e={:?}", e); + } + } + } + Ok(None) => { + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + check_sync_progress = true; + } + Err(e) => { + error!("stream data sync error: e={:?}", e); + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + check_sync_progress = true; + } + } + } + } +} diff --git a/node/stream/src/stream_manager/stream_replayer.rs b/node/stream/src/stream_manager/stream_replayer.rs new file mode 100644 index 0000000..001390b --- /dev/null +++ b/node/stream/src/stream_manager/stream_replayer.rs @@ -0,0 +1,697 @@ +use crate::stream_manager::error::ParseError; +use crate::StreamConfig; +use anyhow::{bail, Result}; +use ethereum_types::{H160, H256}; +use shared_types::{ + AccessControl, AccessControlSet, StreamRead, StreamReadSet, StreamWrite, StreamWriteSet, + Transaction, +}; +use ssz::Decode; +use std::collections::{HashMap, HashSet}; +use std::fmt; +use std::str; +use std::{cmp, sync::Arc, time::Duration}; +use storage_with_stream::error::Error; +use storage_with_stream::log_store::log_manager::ENTRY_SIZE; +use storage_with_stream::store::to_access_control_op_name; +use storage_with_stream::AccessControlOps; +use storage_with_stream::Store; +use tokio::sync::RwLock; + +use super::RETRY_WAIT_MS; + +const MAX_LOAD_ENTRY_SIZE: u64 = 10; +const STREAM_ID_SIZE: u64 = 32; +const STREAM_KEY_LEN_SIZE: u64 = 3; +const SET_LEN_SIZE: u64 = 4; +const DATA_LEN_SIZE: u64 = 8; +const VERSION_SIZE: u64 = 8; +const ACCESS_CONTROL_OP_TYPE_SIZE: u64 = 1; +const ADDRESS_SIZE: u64 = 20; +const MAX_SIZE_LEN: u32 = 65536; + +enum ReplayResult { + Commit(u64, StreamWriteSet, AccessControlSet), + DataParseError(String), + VersionConfliction, + TagsMismatch, + WritePermissionDenied(H256, Arc>), + AccessControlPermissionDenied(u8, H256, Arc>, H160), + DataUnavailable, +} + +fn truncated_key(key: &[u8]) -> String { + if key.is_empty() { + return "NONE".to_owned(); + } + let key_str = str::from_utf8(key).unwrap_or("UNKNOWN"); + match key_str.char_indices().nth(32) { + None => key_str.to_owned(), + Some((idx, _)) => key_str[0..idx].to_owned(), + } +} + +impl fmt::Display for ReplayResult { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ReplayResult::Commit(_, _, _) => write!(f, "Commit"), + ReplayResult::DataParseError(e) => write!(f, "DataParseError: {}", e), + ReplayResult::VersionConfliction => write!(f, "VersionConfliction"), + ReplayResult::TagsMismatch => write!(f, "TagsMismatch"), + ReplayResult::WritePermissionDenied(stream_id, key) => write!( + f, + "WritePermissionDenied: stream: {:?}, key: {}", + stream_id, + truncated_key(key), + ), + ReplayResult::AccessControlPermissionDenied(op_type, stream_id, key, account) => { + write!( + f, + "AccessControlPermissionDenied: operation: {}, stream: {:?}, key: {}, account: {:?}", + to_access_control_op_name(*op_type), + stream_id, + truncated_key(key), + account + ) + } + ReplayResult::DataUnavailable => write!(f, "DataUnavailable"), + } + } +} + +struct StreamReader<'a> { + store: Arc>, + tx: &'a Transaction, + 
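+ // Total number of ENTRY_SIZE-byte entries the tx occupies (ceiling of
+ // tx.size / ENTRY_SIZE); `next` below loads at most MAX_LOAD_ENTRY_SIZE
+ // entries from the store at a time.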
tx_size_in_entry: u64, + current_position: u64, // the index of next entry to read + buffer: Vec, // buffered data +} + +impl<'a> StreamReader<'a> { + pub fn new(store: Arc>, tx: &'a Transaction) -> Self { + Self { + store, + tx, + tx_size_in_entry: if tx.size % ENTRY_SIZE as u64 == 0 { + tx.size / ENTRY_SIZE as u64 + } else { + tx.size / ENTRY_SIZE as u64 + 1 + }, + current_position: 0, + buffer: vec![], + } + } + + pub fn current_position_in_bytes(&self) -> u64 { + (self.current_position + self.tx.start_entry_index) * (ENTRY_SIZE as u64) + - (self.buffer.len() as u64) + } + + async fn load(&mut self, length: u64) -> Result<()> { + match self + .store + .read() + .await + .get_chunk_by_flow_index(self.current_position + self.tx.start_entry_index, length)? + { + Some(mut x) => { + self.buffer.append(&mut x.data); + self.current_position += length; + Ok(()) + } + None => { + bail!(ParseError::PartialDataAvailable); + } + } + } + + // read next ${size} bytes from the stream + pub async fn next(&mut self, size: u64) -> Result> { + if (self.buffer.len() as u64) + + (self.tx_size_in_entry - self.current_position) * (ENTRY_SIZE as u64) + < size + { + bail!(ParseError::InvalidData); + } + while (self.buffer.len() as u64) < size { + self.load(cmp::min( + self.tx_size_in_entry - self.current_position, + MAX_LOAD_ENTRY_SIZE, + )) + .await?; + } + Ok(self.buffer.drain(0..(size as usize)).collect()) + } + + pub async fn skip(&mut self, mut size: u64) -> Result<()> { + if (self.buffer.len() as u64) >= size { + self.buffer.drain(0..(size as usize)); + return Ok(()); + } + size -= self.buffer.len() as u64; + self.buffer.clear(); + let entries_to_skip = size / (ENTRY_SIZE as u64); + self.current_position += entries_to_skip; + if self.current_position > self.tx_size_in_entry { + bail!(ParseError::InvalidData); + } + size -= entries_to_skip * (ENTRY_SIZE as u64); + if size > 0 { + self.next(size).await?; + } + Ok(()) + } +} + +pub struct StreamReplayer { + config: StreamConfig, + store: Arc>, +} + +impl StreamReplayer { + pub async fn new(config: StreamConfig, store: Arc>) -> Result { + Ok(Self { config, store }) + } + + async fn parse_version(&self, stream_reader: &mut StreamReader<'_>) -> Result { + Ok(u64::from_be_bytes( + stream_reader.next(VERSION_SIZE).await?.try_into().unwrap(), + )) + } + + async fn parse_key(&self, stream_reader: &mut StreamReader<'_>) -> Result> { + let mut key_size_in_bytes = vec![0x0; (8 - STREAM_KEY_LEN_SIZE) as usize]; + key_size_in_bytes.append(&mut stream_reader.next(STREAM_KEY_LEN_SIZE).await?); + let key_size = u64::from_be_bytes(key_size_in_bytes.try_into().unwrap()); + // key should not be empty + if key_size == 0 { + bail!(ParseError::InvalidData); + } + stream_reader.next(key_size).await + } + + async fn parse_stream_read_set( + &self, + stream_reader: &mut StreamReader<'_>, + ) -> Result { + let size = u32::from_be_bytes(stream_reader.next(SET_LEN_SIZE).await?.try_into().unwrap()); + if size > MAX_SIZE_LEN { + bail!(ParseError::ListTooLong); + } + let mut stream_read_set = StreamReadSet { + stream_reads: vec![], + }; + for _ in 0..(size as usize) { + stream_read_set.stream_reads.push(StreamRead { + stream_id: H256::from_ssz_bytes(&stream_reader.next(STREAM_ID_SIZE).await?) 
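+ // Wire layout of each read-set entry: a 32-byte stream id followed by a
+ // 3-byte big-endian key length and the key bytes (see `parse_key`).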
+ .map_err(Error::from)?, + key: Arc::new(self.parse_key(stream_reader).await?), + }); + } + Ok(stream_read_set) + } + + async fn validate_stream_read_set( + &self, + stream_read_set: &StreamReadSet, + tx: &Transaction, + version: u64, + ) -> Result> { + for stream_read in stream_read_set.stream_reads.iter() { + if !self.config.stream_set.contains(&stream_read.stream_id) { + return Ok(Some(ReplayResult::TagsMismatch)); + } + // check version confiction + if self + .store + .read() + .await + .get_latest_version_before(stream_read.stream_id, stream_read.key.clone(), tx.seq) + .await? + > version + { + return Ok(Some(ReplayResult::VersionConfliction)); + } + } + Ok(None) + } + + async fn parse_stream_write_set( + &self, + stream_reader: &mut StreamReader<'_>, + ) -> Result { + let size = u32::from_be_bytes(stream_reader.next(SET_LEN_SIZE).await?.try_into().unwrap()); + if size > MAX_SIZE_LEN { + bail!(ParseError::ListTooLong); + } + // load metadata + let mut stream_write_metadata = vec![]; + for _ in 0..(size as usize) { + let stream_id = H256::from_ssz_bytes(&stream_reader.next(STREAM_ID_SIZE).await?) + .map_err(Error::from)?; + let key = Arc::new(self.parse_key(stream_reader).await?); + let data_size = + u64::from_be_bytes(stream_reader.next(DATA_LEN_SIZE).await?.try_into().unwrap()); + stream_write_metadata.push((stream_id, key, data_size)); + } + // use a hashmap to filter out the duplicate writes on same key, only the last one is reserved + let mut start_index = stream_reader.current_position_in_bytes(); + let mut stream_writes = HashMap::new(); + for (stream_id, key, data_size) in stream_write_metadata.iter() { + let end_index = start_index + data_size; + stream_writes.insert( + (stream_id, key.clone()), + StreamWrite { + stream_id: *stream_id, + key: key.clone(), + start_index, + end_index, + }, + ); + start_index = end_index; + } + // skip the write data + stream_reader + .skip(start_index - stream_reader.current_position_in_bytes()) + .await?; + Ok(StreamWriteSet { + stream_writes: stream_writes.into_values().collect(), + }) + } + + async fn validate_stream_write_set( + &self, + stream_write_set: &StreamWriteSet, + tx: &Transaction, + version: u64, + ) -> Result> { + let stream_set = HashSet::::from_iter(tx.stream_ids.iter().cloned()); + let store_read = self.store.read().await; + for stream_write in stream_write_set.stream_writes.iter() { + if !stream_set.contains(&stream_write.stream_id) { + // the write set in data is conflict with tx tags + return Ok(Some(ReplayResult::TagsMismatch)); + } + // check version confiction + if store_read + .get_latest_version_before(stream_write.stream_id, stream_write.key.clone(), tx.seq) + .await? + > version + { + return Ok(Some(ReplayResult::VersionConfliction)); + } + // check write permission + if !(store_read + .has_write_permission( + tx.sender, + stream_write.stream_id, + stream_write.key.clone(), + tx.seq, + ) + .await?) 
+ { + return Ok(Some(ReplayResult::WritePermissionDenied( + stream_write.stream_id, + stream_write.key.clone(), + ))); + } + } + Ok(None) + } + + async fn parse_access_control_data( + &self, + tx: &Transaction, + stream_reader: &mut StreamReader<'_>, + ) -> Result { + let size = u32::from_be_bytes(stream_reader.next(SET_LEN_SIZE).await?.try_into().unwrap()); + if size > MAX_SIZE_LEN { + bail!(ParseError::ListTooLong); + } + // use a hashmap to filter out the useless operations + // all operations can be categorized by op_type & 0xf0 + // for each category, except GRANT_ADMIN_ROLE, only the last operation of each account is reserved + let mut access_ops = HashMap::new(); + // pad GRANT_ADMIN_ROLE prefix to handle the first write to new stream + let mut is_admin = HashSet::new(); + let store_read = self.store.read().await; + for id in &tx.stream_ids { + if store_read.is_new_stream(*id, tx.seq).await? { + let op_meta = ( + AccessControlOps::GRANT_ADMIN_ROLE & 0xf0, + *id, + Arc::new(vec![]), + tx.sender, + ); + access_ops.insert( + op_meta, + AccessControl { + op_type: AccessControlOps::GRANT_ADMIN_ROLE, + stream_id: *id, + key: Arc::new(vec![]), + account: tx.sender, + operator: H160::zero(), + }, + ); + is_admin.insert(*id); + } else if store_read.is_admin(tx.sender, *id, tx.seq).await? { + is_admin.insert(*id); + } + } + drop(store_read); + // ops in transaction + for _ in 0..(size as usize) { + let op_type = u8::from_be_bytes( + stream_reader + .next(ACCESS_CONTROL_OP_TYPE_SIZE) + .await? + .try_into() + .unwrap(), + ); + // parse operation data + let stream_id = H256::from_ssz_bytes(&stream_reader.next(STREAM_ID_SIZE).await?) + .map_err(Error::from)?; + let mut account = H160::zero(); + let mut key = Arc::new(vec![]); + match op_type { + // stream_id + account + AccessControlOps::GRANT_ADMIN_ROLE + | AccessControlOps::GRANT_WRITER_ROLE + | AccessControlOps::REVOKE_WRITER_ROLE => { + account = H160::from_ssz_bytes(&stream_reader.next(ADDRESS_SIZE).await?) + .map_err(Error::from)?; + } + // stream_id + key + AccessControlOps::SET_KEY_TO_NORMAL | AccessControlOps::SET_KEY_TO_SPECIAL => { + key = Arc::new(self.parse_key(stream_reader).await?); + } + // stream_id + key + account + AccessControlOps::GRANT_SPECIAL_WRITER_ROLE + | AccessControlOps::REVOKE_SPECIAL_WRITER_ROLE => { + key = Arc::new(self.parse_key(stream_reader).await?); + account = H160::from_ssz_bytes(&stream_reader.next(ADDRESS_SIZE).await?) 
+ .map_err(Error::from)?; + } + // renounce type + AccessControlOps::RENOUNCE_ADMIN_ROLE | AccessControlOps::RENOUNCE_WRITER_ROLE => { + account = tx.sender; + } + AccessControlOps::RENOUNCE_SPECIAL_WRITER_ROLE => { + key = Arc::new(self.parse_key(stream_reader).await?); + account = tx.sender; + } + // unexpected type + _ => { + bail!(ParseError::InvalidData); + } + } + let op_meta = (op_type & 0xf0, stream_id, key.clone(), account); + if op_type != AccessControlOps::GRANT_ADMIN_ROLE + || (!access_ops.contains_key(&op_meta) && account != tx.sender) + { + access_ops.insert( + op_meta, + AccessControl { + op_type, + stream_id, + key: key.clone(), + account, + operator: tx.sender, + }, + ); + } + } + Ok(AccessControlSet { + access_controls: access_ops.into_values().collect(), + is_admin, + }) + } + + async fn validate_access_control_set( + &self, + access_control_set: &mut AccessControlSet, + tx: &Transaction, + ) -> Result> { + // validate + let stream_set = HashSet::::from_iter(tx.stream_ids.iter().cloned()); + for access_control in &access_control_set.access_controls { + if !stream_set.contains(&access_control.stream_id) { + // the write set in data is conflict with tx tags + return Ok(Some(ReplayResult::TagsMismatch)); + } + match access_control.op_type { + AccessControlOps::GRANT_ADMIN_ROLE + | AccessControlOps::SET_KEY_TO_NORMAL + | AccessControlOps::SET_KEY_TO_SPECIAL + | AccessControlOps::GRANT_WRITER_ROLE + | AccessControlOps::REVOKE_WRITER_ROLE + | AccessControlOps::GRANT_SPECIAL_WRITER_ROLE + | AccessControlOps::REVOKE_SPECIAL_WRITER_ROLE => { + if !access_control_set + .is_admin + .contains(&access_control.stream_id) + { + return Ok(Some(ReplayResult::AccessControlPermissionDenied( + access_control.op_type, + access_control.stream_id, + access_control.key.clone(), + access_control.account, + ))); + } + } + _ => {} + } + } + Ok(None) + } + + async fn replay(&self, tx: &Transaction) -> Result { + if !self.store.read().await.check_tx_completed(tx.seq)? { + return Ok(ReplayResult::DataUnavailable); + } + let mut stream_reader = StreamReader::new(self.store.clone(), tx); + // parse and validate + let version = self.parse_version(&mut stream_reader).await?; + let stream_read_set = match self.parse_stream_read_set(&mut stream_reader).await { + Ok(x) => x, + Err(e) => match e.downcast_ref::() { + Some(ParseError::InvalidData | ParseError::ListTooLong) => { + return Ok(ReplayResult::DataParseError(e.to_string())); + } + Some(ParseError::PartialDataAvailable) | None => { + return Err(e); + } + }, + }; + if let Some(result) = self + .validate_stream_read_set(&stream_read_set, tx, version) + .await? + { + // validation error in stream read set + return Ok(result); + } + let stream_write_set = match self.parse_stream_write_set(&mut stream_reader).await { + Ok(x) => x, + Err(e) => match e.downcast_ref::() { + Some(ParseError::InvalidData | ParseError::ListTooLong) => { + return Ok(ReplayResult::DataParseError(e.to_string())); + } + Some(ParseError::PartialDataAvailable) | None => { + return Err(e); + } + }, + }; + if let Some(result) = self + .validate_stream_write_set(&stream_write_set, tx, version) + .await? 
+ { + // validation error in stream write set + return Ok(result); + } + let mut access_control_set = + match self.parse_access_control_data(tx, &mut stream_reader).await { + Ok(x) => x, + Err(e) => match e.downcast_ref::() { + Some(ParseError::InvalidData | ParseError::ListTooLong) => { + return Ok(ReplayResult::DataParseError(e.to_string())); + } + Some(ParseError::PartialDataAvailable) | None => { + return Err(e); + } + }, + }; + if let Some(result) = self + .validate_access_control_set(&mut access_control_set, tx) + .await? + { + // there is confliction in access control set + return Ok(result); + } + Ok(ReplayResult::Commit( + tx.seq, + stream_write_set, + access_control_set, + )) + } + + pub async fn run(&self) { + let mut tx_seq; + match self.store.read().await.get_stream_replay_progress().await { + Ok(progress) => { + tx_seq = progress; + } + Err(e) => { + error!("get stream replay progress error: e={:?}", e); + return; + } + } + let mut check_replay_progress = false; + loop { + if check_replay_progress { + match self.store.read().await.get_stream_replay_progress().await { + Ok(progress) => { + if tx_seq != progress { + debug!("reorg happend: tx_seq {}, progress {}", tx_seq, progress); + tx_seq = progress; + } + } + Err(e) => { + error!("get stream replay progress error: e={:?}", e); + } + } + + check_replay_progress = false; + } + + info!("checking tx with sequence number {:?}..", tx_seq); + let maybe_tx = self.store.read().await.get_tx_by_seq_number(tx_seq); + match maybe_tx { + Ok(Some(tx)) => { + let mut skip = false; + if tx.stream_ids.is_empty() { + skip = true; + } else { + for id in tx.stream_ids.iter() { + if !self.config.stream_set.contains(id) { + skip = true; + break; + } + } + } + // replay data + if !skip { + info!("replaying data of tx with sequence number {:?}..", tx.seq); + match self.replay(&tx).await { + Ok(result) => { + let result_str = result.to_string(); + match result { + ReplayResult::Commit( + tx_seq, + stream_write_set, + access_control_set, + ) => { + match self + .store + .write() + .await + .put_stream( + tx_seq, + tx.data_merkle_root, + result_str.clone(), + Some((stream_write_set, access_control_set)), + ) + .await + { + Ok(_) => { + info!( + "tx with sequence number {:?} commit.", + tx.seq + ); + } + Err(e) => { + error!("stream replay result finalization error: e={:?}", e); + check_replay_progress = true; + continue; + } + } + } + ReplayResult::DataUnavailable => { + // data not available + info!("data of tx with sequence number {:?} is not available yet, wait..", tx.seq); + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)) + .await; + check_replay_progress = true; + continue; + } + _ => { + match self + .store + .write() + .await + .put_stream( + tx.seq, + tx.data_merkle_root, + result_str.clone(), + None, + ) + .await + { + Ok(_) => { + info!( + "tx with sequence number {:?} reverted with reason {:?}", + tx.seq, result_str + ); + } + Err(e) => { + error!("stream replay result finalization error: e={:?}", e); + check_replay_progress = true; + continue; + } + } + } + } + + if !check_replay_progress { + tx_seq += 1; + } + } + Err(e) => { + error!("replay stream data error: e={:?}", e); + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + check_replay_progress = true; + continue; + } + } + } else { + info!("tx {:?} is not in stream, skipped.", tx.seq); + // parse success + // update progress, get next tx_seq to sync + match self + .store + .write() + .await + .update_stream_replay_progress(tx_seq, tx_seq + 1) + .await + { + 
Ok(next_tx_seq) => { + tx_seq = next_tx_seq; + } + Err(e) => { + error!("update stream replay progress error: e={:?}", e); + } + } + } + } + Ok(None) => { + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + check_replay_progress = true; + } + Err(e) => { + error!("stream replay error: e={:?}", e); + tokio::time::sleep(Duration::from_millis(RETRY_WAIT_MS)).await; + check_replay_progress = true; + } + } + } + } +} diff --git a/run/config_example.toml b/run/config_example.toml new file mode 100644 index 0000000..0e01019 --- /dev/null +++ b/run/config_example.toml @@ -0,0 +1,15 @@ +stream_ids = ["000000000000000000000000000000000000000000000000000000000000f2bd", "000000000000000000000000000000000000000000000000000000000000f009", "0000000000000000000000000000000000000000000000000000000000016879", "0000000000000000000000000000000000000000000000000000000000002e3d"] + + +db_dir = "db" +kv_db_dir = "kv.DB" + +rpc_enabled = true +rpc_listen_address = "127.0.0.1:6789" +zgs_node_urls = "http://127.0.0.1:5678" + +log_config_file = "log_config" + +blockchain_rpc_endpoint = "" +log_contract_address = "" +log_sync_start_block_number = 23231699 \ No newline at end of file diff --git a/run/log_config b/run/log_config new file mode 100644 index 0000000..06e5547 --- /dev/null +++ b/run/log_config @@ -0,0 +1 @@ +info \ No newline at end of file diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 0000000..5e3a425 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@ +1.73.0 diff --git a/tests/ccov.sh b/tests/ccov.sh new file mode 100755 index 0000000..797aa0c --- /dev/null +++ b/tests/ccov.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -euo pipefail +ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )" + +cd $ROOT_DIR +echo "If you are running this script for the first time, please clean previous +debug build first by running \`rm -rf target/debug\`. +This script requires cargo nightly." + +# Install dependencies +cargo install grcov + +# Build binary and run unit tests with code coverage. +export CARGO_INCREMENTAL=0 +export RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Cinline-threshold=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests" + +cargo +nightly build +cargo +nightly test --all + +# Run python integration tests. +# export ZGS="`pwd`/target/debug/zgs_kv" +python ./tests/test_all.py + +# Generate code coverage data +if [ -d "ccov" ] +then + rm -dr ccov +fi + +mkdir ccov +zip -0 ccov/ccov.zip `find . \( -name "*.gc*" \) -print` +grcov ccov/ccov.zip -s . -t html --llvm --branch --ignore-not-existing --ignore "/*" \ +--ignore "*target/debug/build/libp2p-*" \ +--ignore "*target/debug/build/clang-sys*" \ +--ignore "*target/debug/build/librocksdb-sys*" \ +--ignore "*target/debug/build/solang*" -o ccov + +echo "Code coverage result is saved to directory 'ccov'. +You can open 'ccov/index.html' with a web brower to start." 
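Note: the StreamReplayer added above parses each transaction payload as an 8-byte big-endian version, a stream read set, a stream write set (metadata first, then the concatenated write data), and an access control set, using the size constants defined at the top of stream_replayer.rs. The sketch below is illustrative only and is not part of this change set; the helper names are hypothetical, and only the byte layout is taken from the parse_* functions above. It shows how a payload in that layout could be assembled, here with an empty access control set.

// Illustrative sketch, not part of this change set: assembles a payload in the
// layout parsed by StreamReplayer (helper names are hypothetical).
fn encode_key(buf: &mut Vec<u8>, key: &[u8]) {
    // 3-byte big-endian key length (STREAM_KEY_LEN_SIZE) followed by the key bytes
    let len = (key.len() as u64).to_be_bytes();
    buf.extend_from_slice(&len[5..]);
    buf.extend_from_slice(key);
}

fn encode_payload(
    version: u64,
    reads: &[([u8; 32], Vec<u8>)],           // (stream_id, key)
    writes: &[([u8; 32], Vec<u8>, Vec<u8>)], // (stream_id, key, data)
) -> Vec<u8> {
    let mut buf = Vec::new();
    buf.extend_from_slice(&version.to_be_bytes()); // 8-byte version (VERSION_SIZE)
    // read set: 4-byte count, then a 32-byte stream id and a length-prefixed key per entry
    buf.extend_from_slice(&(reads.len() as u32).to_be_bytes());
    for (stream_id, key) in reads {
        buf.extend_from_slice(stream_id);
        encode_key(&mut buf, key);
    }
    // write set metadata: 4-byte count, then stream id + key + 8-byte data length per entry
    buf.extend_from_slice(&(writes.len() as u32).to_be_bytes());
    for (stream_id, key, data) in writes {
        buf.extend_from_slice(stream_id);
        encode_key(&mut buf, key);
        buf.extend_from_slice(&(data.len() as u64).to_be_bytes());
    }
    // the write payloads follow the metadata, concatenated in the same order
    for (_, _, data) in writes {
        buf.extend_from_slice(data);
    }
    // access control set: 4-byte count (left empty in this sketch)
    buf.extend_from_slice(&0u32.to_be_bytes());
    buf
}

fn main() {
    // example usage: one write to stream id 0 with a hypothetical key/value
    let payload = encode_payload(
        u64::MAX, // analogous to MAX_U64 used by the Python tests
        &[],
        &[([0u8; 32], b"example-key".to_vec(), b"example-value".to_vec())],
    );
    println!("payload length: {} bytes", payload.len());
}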
+ diff --git a/tests/config/MockToken.json b/tests/config/MockToken.json new file mode 100644 index 0000000..5d82c15 --- /dev/null +++ b/tests/config/MockToken.json @@ -0,0 +1,286 @@ +{ + "_format": "hh-sol-artifact-1", + "contractName": "MockToken", + "sourceName": "contracts/token/MockToken.sol", + "abi": [ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "decimals", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "subtractedValue", + "type": "uint256" + } + ], + "name": "decreaseAllowance", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "addedValue", + "type": "uint256" + } + ], + "name": "increaseAllowance", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + 
"type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } + ], + "bytecode": "0x608034620003ac576040906001600160401b039080830182811182821017620002ac5783526007815260209166135bd8dad554d160ca1b8383015283519184830183811083821117620002ac578552600391828452621554d160ea1b85850152815192818411620002ac578054936001938486811c96168015620003a1575b888710146200038b578190601f9687811162000335575b508890878311600114620002ce57600092620002c2575b505060001982841b1c191690841b1781555b8451918211620002ac5760049485548481811c91168015620002a1575b888210146200028c5785811162000241575b508690858411600114620001d657938394918492600095620001ca575b50501b92600019911b1c19161782555b33156200018b57505062000130600254620003b1565b6002553360005260008152816000206200014b8154620003b1565b905560007fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef835192633b9aca0084523393a3516108bc9081620003da8239f35b60649284519262461bcd60e51b845283015260248201527f45524332303a206d696e7420746f20746865207a65726f2061646472657373006044820152fd5b0151935038806200010a565b9190601f198416928760005284896000209460005b8b898383106200022957505050106200020e575b50505050811b0182556200011a565b01519060f884600019921b161c1916905538808080620001ff565b868601518955909701969485019488935001620001eb565b86600052876000208680860160051c8201928a871062000282575b0160051c019085905b82811062000275575050620000ed565b6000815501859062000265565b925081926200025c565b602287634e487b7160e01b6000525260246000fd5b90607f1690620000db565b634e487b7160e01b600052604160045260246000fd5b015190503880620000ac565b90869350601f19831691856000528a6000209260005b8c8282106200031e575050841162000305575b505050811b018155620000be565b015160001983861b60f8161c19169055388080620002f7565b8385015186558a97909501949384019301620002e4565b90915083600052886000208780850160051c8201928b861062000381575b918891869594930160051c01915b8281106200037157505062000095565b6000815585945088910162000361565b9250819262000353565b634e487b7160e01b600052602260045260246000fd5b95607f16956200007e565b600080fd5b90633b9aca008201809211620003c357565b634e487b7160e01b600052601160045260246000fdfe608060408181526004918236101561001657600080fd5b600092833560e01c91826306fdde031461049657508163095ea7b31461046c57816318160ddd1461044d57816323b872dd14610383578163313ce56714610367578163395093511461031757816370a08231146102e057816395d89b41146101c1578163a457c2d71461011957508063a9059cbb146100e95763dd62ed3e1461009e57600080fd5b346100e557806003193601126100e557806020926100ba6105bb565b6100c26105d6565b6001600160a01b0391821683526001865283832091168252845220549051908152f35b5080fd5b50346100e557806003193601126100e5576020906101126101086105bb565b602435903361060f565b5160018152f35b905082346101be57826003193601126101be576101346105bb565b918360243592338152600160205281812060018060a01b038616825260205220549082821061016d576020856101128585038733610784565b608490602086519162461bcd60e51b835282015260
2560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f77604482015264207a65726f60d81b6064820152fd5b80fd5b8383346100e557816003193601126100e557805190828454600181811c908083169283156102d6575b60209384841081146102c3578388529081156102a75750600114610252575b505050829003601f01601f191682019267ffffffffffffffff84118385101761023f575082918261023b925282610572565b0390f35b634e487b7160e01b815260418552602490fd5b8787529192508591837f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b5b8385106102935750505050830101858080610209565b80548886018301529301928490820161027d565b60ff1916878501525050151560051b8401019050858080610209565b634e487b7160e01b895260228a52602489fd5b91607f16916101ea565b5050346100e55760203660031901126100e55760209181906001600160a01b036103086105bb565b16815280845220549051908152f35b5050346100e557806003193601126100e55761011260209261036061033a6105bb565b338352600186528483206001600160a01b038216845286529184902054602435906105ec565b9033610784565b5050346100e557816003193601126100e5576020905160128152f35b839150346100e55760603660031901126100e55761039f6105bb565b6103a76105d6565b91846044359460018060a01b0384168152600160205281812033825260205220549060001982036103e1575b60208661011287878761060f565b84821061040a57509183916103ff6020969561011295033383610784565b9193948193506103d3565b606490602087519162461bcd60e51b8352820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152fd5b5050346100e557816003193601126100e5576020906002549051908152f35b5050346100e557806003193601126100e55760209061011261048c6105bb565b6024359033610784565b8490843461056e578260031936011261056e5782600354600181811c90808316928315610564575b60209384841081146102c3578388529081156102a7575060011461050e57505050829003601f01601f191682019267ffffffffffffffff84118385101761023f575082918261023b925282610572565b600387529192508591837fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b5b8385106105505750505050830101858080610209565b80548886018301529301928490820161053a565b91607f16916104be565b8280fd5b6020808252825181830181905290939260005b8281106105a757505060409293506000838284010152601f8019910116010190565b818101860151848201604001528501610585565b600435906001600160a01b03821682036105d157565b600080fd5b602435906001600160a01b03821682036105d157565b919082018092116105f957565b634e487b7160e01b600052601160045260246000fd5b6001600160a01b0390811691821561073157169182156106e05760008281528060205260408120549180831061068c57604082827fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9587602096528286520382822055868152206106818282546105ec565b9055604051908152a3565b60405162461bcd60e51b815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e7420657863656564732062604482015265616c616e636560d01b6064820152608490fd5b60405162461bcd60e51b815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201526265737360e81b6064820152608490fd5b60405162461bcd60e51b815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f206164604482015264647265737360d81b6064820152608490fd5b6001600160a01b0390811691821561083557169182156107e55760207f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925918360005260018252604060002085600052825280604060002055604051908152a3565b60405162461bcd60e51b815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f206164647265604482015261737360f01b6064820152608490fd5b60405162461bcd60e51b8152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f20616464604482015
2637265737360e01b6064820152608490fdfea26469706673582212200239d5fdc8ba06d4c9278a25c561798906e419c37123e597870f8ad34e28da2864736f6c63430008100033", + "deployedBytecode": "0x608060408181526004918236101561001657600080fd5b600092833560e01c91826306fdde031461049657508163095ea7b31461046c57816318160ddd1461044d57816323b872dd14610383578163313ce56714610367578163395093511461031757816370a08231146102e057816395d89b41146101c1578163a457c2d71461011957508063a9059cbb146100e95763dd62ed3e1461009e57600080fd5b346100e557806003193601126100e557806020926100ba6105bb565b6100c26105d6565b6001600160a01b0391821683526001865283832091168252845220549051908152f35b5080fd5b50346100e557806003193601126100e5576020906101126101086105bb565b602435903361060f565b5160018152f35b905082346101be57826003193601126101be576101346105bb565b918360243592338152600160205281812060018060a01b038616825260205220549082821061016d576020856101128585038733610784565b608490602086519162461bcd60e51b8352820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f77604482015264207a65726f60d81b6064820152fd5b80fd5b8383346100e557816003193601126100e557805190828454600181811c908083169283156102d6575b60209384841081146102c3578388529081156102a75750600114610252575b505050829003601f01601f191682019267ffffffffffffffff84118385101761023f575082918261023b925282610572565b0390f35b634e487b7160e01b815260418552602490fd5b8787529192508591837f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b5b8385106102935750505050830101858080610209565b80548886018301529301928490820161027d565b60ff1916878501525050151560051b8401019050858080610209565b634e487b7160e01b895260228a52602489fd5b91607f16916101ea565b5050346100e55760203660031901126100e55760209181906001600160a01b036103086105bb565b16815280845220549051908152f35b5050346100e557806003193601126100e55761011260209261036061033a6105bb565b338352600186528483206001600160a01b038216845286529184902054602435906105ec565b9033610784565b5050346100e557816003193601126100e5576020905160128152f35b839150346100e55760603660031901126100e55761039f6105bb565b6103a76105d6565b91846044359460018060a01b0384168152600160205281812033825260205220549060001982036103e1575b60208661011287878761060f565b84821061040a57509183916103ff6020969561011295033383610784565b9193948193506103d3565b606490602087519162461bcd60e51b8352820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152fd5b5050346100e557816003193601126100e5576020906002549051908152f35b5050346100e557806003193601126100e55760209061011261048c6105bb565b6024359033610784565b8490843461056e578260031936011261056e5782600354600181811c90808316928315610564575b60209384841081146102c3578388529081156102a7575060011461050e57505050829003601f01601f191682019267ffffffffffffffff84118385101761023f575082918261023b925282610572565b600387529192508591837fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b5b8385106105505750505050830101858080610209565b80548886018301529301928490820161053a565b91607f16916104be565b8280fd5b6020808252825181830181905290939260005b8281106105a757505060409293506000838284010152601f8019910116010190565b818101860151848201604001528501610585565b600435906001600160a01b03821682036105d157565b600080fd5b602435906001600160a01b03821682036105d157565b919082018092116105f957565b634e487b7160e01b600052601160045260246000fd5b6001600160a01b0390811691821561073157169182156106e05760008281528060205260408120549180831061068c57604082827fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9587602096528286520382822055868152206106818282546105ec565b9055604051908152a3565b60405162461bcd60e51b8152602060
04820152602660248201527f45524332303a207472616e7366657220616d6f756e7420657863656564732062604482015265616c616e636560d01b6064820152608490fd5b60405162461bcd60e51b815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201526265737360e81b6064820152608490fd5b60405162461bcd60e51b815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f206164604482015264647265737360d81b6064820152608490fd5b6001600160a01b0390811691821561083557169182156107e55760207f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925918360005260018252604060002085600052825280604060002055604051908152a3565b60405162461bcd60e51b815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f206164647265604482015261737360f01b6064820152608490fd5b60405162461bcd60e51b8152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b6064820152608490fdfea26469706673582212200239d5fdc8ba06d4c9278a25c561798906e419c37123e597870f8ad34e28da2864736f6c63430008100033", + "linkReferences": {}, + "deployedLinkReferences": {} +} diff --git a/tests/config/bsc.toml b/tests/config/bsc.toml new file mode 100644 index 0000000..2b5c92b --- /dev/null +++ b/tests/config/bsc.toml @@ -0,0 +1,101 @@ +[Eth] +SyncMode = "snap" +DisablePeerTxBroadcast = false +EthDiscoveryURLs = [] +SnapDiscoveryURLs = [] +TrustDiscoveryURLs = [] +NoPruning = false +NoPrefetch = false +DirectBroadcast = false +DisableSnapProtocol = false +DisableDiffProtocol = false +EnableTrustProtocol = false +DiffSync = false +RangeLimit = false +TxLookupLimit = 2350000 +LightPeers = 100 +UltraLightFraction = 75 +DatabaseCache = 512 +DatabaseFreezer = "" +DatabaseDiff = "" +TrieCleanCache = 154 +TrieCleanCacheJournal = "triecache" +TrieCleanCacheRejournal = 3600000000000 +TrieDirtyCache = 256 +TrieTimeout = 3600000000000 +SnapshotCache = 102 +TriesInMemory = 128 +TriesVerifyMode = "local" +Preimages = false +PersistDiff = false +DiffBlock = 86400 +PruneAncientData = false +EnablePreimageRecording = false +EWASMInterpreter = "" +EVMInterpreter = "" +RPCGasCap = 50000000 +RPCEVMTimeout = 5000000000 +RPCTxFeeCap = 1e+00 + +[Eth.Miner] +DelayLeftOver = 50000000 +GasFloor = 0 +GasCeil = 8000000 +GasPrice = 1000000000 +Recommit = 3000000000 +Noverify = false + +[Eth.Ethash] +CacheDir = "ethash" +CachesInMem = 2 +CachesOnDisk = 3 +CachesLockMmap = false +DatasetDir = "" +DatasetsInMem = 1 +DatasetsOnDisk = 2 +DatasetsLockMmap = false +PowMode = 2 +NotifyFull = false + +[Eth.TxPool] +Locals = [] +NoLocals = false +Journal = "transactions.rlp" +Rejournal = 3600000000000 +PriceLimit = 1 +PriceBump = 10 +AccountSlots = 16 +GlobalSlots = 5120 +AccountQueue = 64 +GlobalQueue = 1024 +Lifetime = 10800000000000 +ReannounceTime = 315360000000000000 + +[Eth.GPO] +Blocks = 20 +Percentile = 60 +MaxHeaderHistory = 0 +MaxBlockHistory = 0 +MaxPrice = 100000000000 +IgnorePrice = 4 +OracleThreshold = 1000 + +[Node] +IPCPath = "geth.ipc" +HTTPHost = "127.0.0.1" +HTTPVirtualHosts = ["localhost"] +HTTPModules = ["personal", "eth", "net", "web3", "admin", "txpool", "miner"] +GraphQLVirtualHosts = ["localhost"] + +[Node.P2P] +MaxPeers = 50 +NoDiscovery = false +StaticNodes = [] +VerifyNodes = [] +TrustedNodes = [] +EnableMsgEvents = false + +[Node.HTTPTimeouts] +ReadTimeout = 30000000000 +WriteTimeout = 30000000000 +IdleTimeout = 120000000000 \ No newline at end of file diff --git a/tests/config/genesis.json b/tests/config/genesis.json new file mode 100644 
index 0000000..7112690 --- /dev/null +++ b/tests/config/genesis.json @@ -0,0 +1,32 @@ +{ + "config":{ + "chainId":2001, + "homesteadBlock": 0, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "ramanujanBlock": 0, + "nielsBlock": 0 + }, + "nonce":"0x0000000000000061", + "timestamp":"0x0", + "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "gasLimit":"0x8000000", + "difficulty":"0x100", + "mixhash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase":"0x3333333333333333333333333333331234567890", + "alloc":{ + "fbe45681Ac6C53D5a40475F7526baC1FE7590fb8":{ + "balance":"400000000000000000000000000" + }, + "0e768D12395C8ABFDEdF7b1aEB0Dd1D27d5E2A7F":{ + "balance":"400000000000000000000000000" + } + } + } \ No newline at end of file diff --git a/tests/config/node_config.py b/tests/config/node_config.py new file mode 100644 index 0000000..d537ad3 --- /dev/null +++ b/tests/config/node_config.py @@ -0,0 +1,41 @@ +from web3 import Web3 + +ZGS_CONFIG = dict(log_config_file="log_config") + +KV_CONFIG = dict(log_config_file="log_config") + +BSC_CONFIG = dict( + NetworkId=1000, + HTTPPort=8545, + HTTPHost="127.0.0.1", + Etherbase="0x7df9a875a174b3bc565e6424a0050ebc1b2d1d82", + DataDir="test/local_ethereum_blockchain/node1", + Port=30303, + Verbosity=5, +) + +CONFLUX_CONFIG = dict( + mode="test", + chain_id=10, + jsonrpc_http_eth_port=8545, + tcp_port=32323, + log_level="debug", + log_file="./conflux.log", + public_address="127.0.0.1", + poll_lifetime_in_seconds=60, + dev_allow_phase_change_without_peer="true", + # dev_block_interval_ms=50, +) + +BLOCK_SIZE_LIMIT = 200 * 1024 +GENESIS_PRIV_KEY = "46b9e861b63d3509c88b7817275a30d22d62c8cd8fa6486ddee35ef0d8e0495f" +MINER_ID = "308a6e102a5829ba35e4ba1da0473c3e8bd45f5d3ffb91e31adb43f25463dddb" +GENESIS_ACCOUNT = Web3().eth.account.from_key(GENESIS_PRIV_KEY) +TX_PARAMS = {"gasPrice": 10_000_000_000, "from": GENESIS_ACCOUNT.address, "gas": 10_000_000} + +GENESIS_PRIV_KEY1 = "9a6d3ba2b0c7514b16a006ee605055d71b9edfad183aeb2d9790e9d4ccced471" +GENESIS_ACCOUNT1 = Web3().eth.account.from_key(GENESIS_PRIV_KEY1) +TX_PARAMS1 = {"gasPrice": 10_000_000_000, "from": GENESIS_ACCOUNT1.address, "gas": 10_000_000} + +NO_SEAL_FLAG = 0x1 +NO_MERKLE_PROOF_FLAG = 0x2 diff --git a/tests/dep_pip3.sh b/tests/dep_pip3.sh new file mode 100755 index 0000000..8686bcd --- /dev/null +++ b/tests/dep_pip3.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e + +function install() { + if [ "`pip3 show ${1%%=*}`" = "" ]; then + pip3 install $1 + fi +} + +install jsonrpcclient +install pyyaml +install pysha3 +install coincurve +install eth_utils +install py-ecc +install web3 +install eth_tester \ No newline at end of file diff --git a/tests/kv_access_control_test.py b/tests/kv_access_control_test.py new file mode 100644 index 0000000..fdeba2b --- /dev/null +++ b/tests/kv_access_control_test.py @@ -0,0 +1,338 @@ +#!/usr/bin/env python3 +import base64 +from this import d +import time +from os import access +import random +from test_framework.test_framework import TestFramework +from utility.kv import (MAX_U64, op_with_address, op_with_key, STREAM_DOMAIN, with_prefix, is_access_control_permission_denied, is_write_permission_denied, + MAX_STREAM_ID, pad, to_key_with_size, to_stream_id, create_kv_data, AccessControlOps, rand_key, 
rand_write) +from utility.submission import submit_data +from utility.submission import create_submission +from utility.utils import ( + assert_equal, + wait_until, +) +from config.node_config import TX_PARAMS, TX_PARAMS1, GENESIS_ACCOUNT, GENESIS_ACCOUNT1 + + +class KVAccessControlTest(TestFramework): + def setup_params(self): + self.num_blockchain_nodes = 1 + self.num_nodes = 1 + + def run_test(self): + # setup kv node, watch stream with id [0,100) + self.stream_ids = [to_stream_id(i) for i in range(MAX_STREAM_ID)] + self.setup_kv_node(0, self.stream_ids) + # tx_seq and data mapping + self.next_tx_seq = 0 + self.data = {} + self.cross_test(self.stream_ids[0]) + self.revoke_test(self.stream_ids[1]) + self.renounce_admin_test(self.stream_ids[2]) + + def submit(self, version, reads, writes, access_controls, tx_params=TX_PARAMS, given_tags=None, trunc=False): + chunk_data, tags = create_kv_data( + version, reads, writes, access_controls) + if trunc: + chunk_data = chunk_data[:random.randrange( + len(chunk_data) / 2, len(chunk_data))] + submissions, data_root = create_submission( + chunk_data, tags if given_tags is None else given_tags) + self.log.info("data root: %s, submissions: %s", data_root, submissions) + self.contract.submit(submissions, tx_params=tx_params) + wait_until(lambda: self.contract.num_submissions() + == self.next_tx_seq + 1) + + client = self.nodes[0] + wait_until(lambda: client.zgs_get_file_info(data_root) is not None) + + segments = submit_data(client, chunk_data) + self.log.info("segments: %s", [ + (s["root"], s["index"], s["proof"]) for s in segments]) + wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"]) + + def update_data(self, writes): + for write in writes: + self.data[','.join([write[0], write[1]])] = write[3] + + def renounce_admin_test(self, stream_id): + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT.address, stream_id, rand_key()), True) + # first put + writes = [rand_write(stream_id)] + access_controls = [AccessControlOps.renounce_admin_role(stream_id)] + # grant writer role + self.submit(MAX_U64, [], writes, access_controls) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + first_version = self.next_tx_seq + self.next_tx_seq += 1 + + # check data and role + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value, first_version) + assert_equal(self.kv_nodes[0].kv_is_admin( + GENESIS_ACCOUNT.address, stream_id), False) + + def revoke_test(self, stream_id): + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT.address, stream_id, rand_key()), True) + special_key = rand_key() + # first put + writes = [rand_write(stream_id), rand_write(stream_id, special_key)] + access_controls = [] + # grant writer role + access_controls.append(AccessControlOps.grant_writer_role( + stream_id, GENESIS_ACCOUNT1.address)) + access_controls.append( + AccessControlOps.set_key_to_special(stream_id, special_key)) + access_controls.append(AccessControlOps.grant_special_writer_role( + stream_id, special_key, GENESIS_ACCOUNT1.address)) + self.submit(MAX_U64, [], writes, access_controls) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + first_version = self.next_tx_seq + self.next_tx_seq += 1 + + # check data and role + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = 
stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value) + assert_equal(self.kv_nodes[0].kv_is_admin( + GENESIS_ACCOUNT.address, stream_id), True) + assert_equal(self.kv_nodes[0].kv_is_writer_of_stream( + GENESIS_ACCOUNT1.address, stream_id), True) + assert_equal(self.kv_nodes[0].kv_is_writer_of_key( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_is_special_key( + stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, rand_key()), True) + + # write and renounce + writes = [rand_write(stream_id, writes[0][1]), + rand_write(stream_id)] + access_controls = [] + access_controls.append(AccessControlOps.revoke_writer_role( + stream_id, GENESIS_ACCOUNT1.address)) + access_controls.append(AccessControlOps.revoke_special_writer_role( + stream_id, special_key, GENESIS_ACCOUNT1.address)) + # no permission + self.submit(first_version, [], writes, + access_controls, tx_params=TX_PARAMS1) + wait_until(lambda: is_access_control_permission_denied(self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq))) + self.next_tx_seq += 1 + + # commit + self.submit(first_version, [], writes, access_controls) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + second_version = self.next_tx_seq + self.next_tx_seq += 1 + + # check data and role + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value, second_version) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), False) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, rand_key()), False) + + # grant and revoke in one tx + access_controls = [] + access_controls.append(AccessControlOps.grant_writer_role( + stream_id, GENESIS_ACCOUNT1.address)) + access_controls.append(AccessControlOps.revoke_writer_role( + stream_id, GENESIS_ACCOUNT1.address)) + access_controls.append(AccessControlOps.grant_special_writer_role( + stream_id, special_key, GENESIS_ACCOUNT1.address)) + access_controls.append(AccessControlOps.revoke_special_writer_role( + stream_id, special_key, GENESIS_ACCOUNT1.address)) + self.submit(MAX_U64, [], writes, access_controls) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + self.next_tx_seq += 1 + assert_equal(self.kv_nodes[0].kv_is_writer_of_stream( + GENESIS_ACCOUNT1.address, stream_id), False) + assert_equal(self.kv_nodes[0].kv_is_writer_of_key( + GENESIS_ACCOUNT1.address, stream_id, special_key), False) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), False) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, rand_key()), False) + + access_controls = [] + access_controls.append(AccessControlOps.revoke_writer_role( + stream_id, GENESIS_ACCOUNT1.address)) + access_controls.append(AccessControlOps.grant_writer_role( + stream_id, GENESIS_ACCOUNT1.address)) + access_controls.append(AccessControlOps.revoke_special_writer_role( + stream_id, special_key, GENESIS_ACCOUNT1.address)) + access_controls.append(AccessControlOps.grant_special_writer_role( + stream_id, special_key, 
GENESIS_ACCOUNT1.address)) + self.submit(MAX_U64, [], writes, access_controls) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + self.next_tx_seq += 1 + assert_equal(self.kv_nodes[0].kv_is_writer_of_stream( + GENESIS_ACCOUNT1.address, stream_id), True) + assert_equal(self.kv_nodes[0].kv_is_writer_of_key( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, rand_key()), True) + + def cross_test(self, stream_id): + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT.address, stream_id, rand_key()), True) + special_key = rand_key() + # first put + writes = [rand_write(stream_id), rand_write(stream_id, special_key)] + access_controls = [] + # grant writer role + access_controls.append(AccessControlOps.grant_writer_role( + stream_id, GENESIS_ACCOUNT1.address)) + # set special key + access_controls.append( + AccessControlOps.set_key_to_special(stream_id, special_key)) + self.submit(MAX_U64, [], writes, access_controls) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + first_version = self.next_tx_seq + self.next_tx_seq += 1 + + # check data and role + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value) + assert_equal(self.kv_nodes[0].kv_is_admin( + GENESIS_ACCOUNT.address, stream_id), True) + assert_equal(self.kv_nodes[0].kv_is_writer_of_stream( + GENESIS_ACCOUNT1.address, stream_id), True) + assert_equal(self.kv_nodes[0].kv_is_special_key( + stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT.address, stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), False) + + # write special key but no permission + writes = [rand_write(stream_id, special_key)] + self.submit(MAX_U64, [], writes, access_controls, tx_params=TX_PARAMS1) + wait_until(lambda: is_write_permission_denied( + self.kv_nodes[0].kv_get_trasanction_result(self.next_tx_seq))) + self.next_tx_seq += 1 + + # grant special writer role + self.submit(MAX_U64, [], [], [AccessControlOps.grant_special_writer_role( + stream_id, special_key, GENESIS_ACCOUNT1.address)]) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + self.next_tx_seq += 1 + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_is_writer_of_key( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_is_writer_of_key( + GENESIS_ACCOUNT1.address, stream_id, special_key, first_version), False) + + # write and renounce + writes = [rand_write(stream_id, writes[0][1]), + rand_write(stream_id)] + access_controls = [AccessControlOps.renounce_writer_role( + stream_id)] + self.submit(first_version, [], writes, + access_controls, tx_params=TX_PARAMS1) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + second_version = self.next_tx_seq + self.next_tx_seq += 1 + + # check data and role + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') 
+ self.kv_nodes[0].check_equal(stream_id, key, value, first_version) + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value, second_version) + assert_equal(self.kv_nodes[0].kv_is_writer_of_stream( + GENESIS_ACCOUNT1.address, stream_id), False) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, rand_key()), False) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_is_writer_of_key( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + + # write special key and renounce + writes = [rand_write(stream_id, special_key)] + access_controls = [AccessControlOps.renounce_special_writer_role( + stream_id, special_key)] + self.submit(MAX_U64, [], writes, access_controls, tx_params=TX_PARAMS1) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + third_version = self.next_tx_seq + self.next_tx_seq += 1 + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value, third_version) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), False) + assert_equal(self.kv_nodes[0].kv_is_writer_of_key( + GENESIS_ACCOUNT1.address, stream_id, special_key), False) + + # grant special writer role + access_controls = [] + access_controls.append(AccessControlOps.grant_special_writer_role( + stream_id, special_key, GENESIS_ACCOUNT1.address)) + # no permission + self.submit(MAX_U64, [], [], access_controls, tx_params=TX_PARAMS1) + wait_until(lambda: is_access_control_permission_denied( + self.kv_nodes[0].kv_get_trasanction_result(self.next_tx_seq))) + self.next_tx_seq += 1 + # commit + self.submit(MAX_U64, [], [], access_controls) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + self.next_tx_seq += 1 + # role check + assert_equal(self.kv_nodes[0].kv_is_special_key( + stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + assert_equal(self.kv_nodes[0].kv_is_writer_of_key( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + # set key to normal + access_controls = [] + access_controls.append( + AccessControlOps.set_key_to_normal(stream_id, special_key)) + self.submit(MAX_U64, [], [], access_controls) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + self.next_tx_seq += 1 + # check + assert_equal(self.kv_nodes[0].kv_is_special_key( + stream_id, special_key), False) + assert_equal(self.kv_nodes[0].kv_has_write_permission( + GENESIS_ACCOUNT1.address, stream_id, special_key), False) + assert_equal(self.kv_nodes[0].kv_is_writer_of_key( + GENESIS_ACCOUNT1.address, stream_id, special_key), True) + + +if __name__ == "__main__": + KVAccessControlTest(blockchain_node_configs=dict( + [(0, dict(mode="dev", dev_block_interval_ms=50))])).main() diff --git a/tests/kv_data_fetcher_test.py b/tests/kv_data_fetcher_test.py new file mode 100644 index 0000000..ddd69b2 --- /dev/null +++ b/tests/kv_data_fetcher_test.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +import random +from test_framework.test_framework import TestFramework +from utility.kv import ( + MAX_U64, + 
MAX_STREAM_ID, + to_stream_id, + create_kv_data, + rand_write, +) +from utility.submission import submit_data +from utility.submission import create_submission +from utility.utils import ( + assert_equal, + rpc_port, + wait_until, +) +from config.node_config import TX_PARAMS, TX_PARAMS1, GENESIS_ACCOUNT, GENESIS_ACCOUNT1 + + +class DataFetcherTest(TestFramework): + def setup_params(self): + self.num_blockchain_nodes = 1 + self.num_nodes = 2 + + def run_test(self): + # setup kv node, watch stream with id [0,100) + self.stream_ids = [to_stream_id(i) for i in range(MAX_STREAM_ID)] + self.stream_ids.reverse() + + updated_config = { + "zgs_node_urls": f"http://127.0.0.1:{rpc_port(0)},http://127.0.0.1:{rpc_port(1)}" + } + + self.setup_kv_node(0, self.stream_ids, updated_config) + self.stream_ids.reverse() + assert_equal( + [x[2:] for x in self.kv_nodes[0].kv_get_holding_stream_ids()], + self.stream_ids, + ) + + # tx_seq and data mapping + self.next_tx_seq = 0 + self.data = {} + # write empty stream + self.write_streams() + + def submit( + self, + version, + reads, + writes, + access_controls, + tx_params=TX_PARAMS, + given_tags=None, + trunc=False, + ): + chunk_data, tags = create_kv_data( + version, reads, writes, access_controls) + if trunc: + chunk_data = chunk_data[ + : random.randrange(len(chunk_data) // 2, len(chunk_data)) + ] + print(len(chunk_data)) + submissions, data_root = create_submission( + chunk_data, tags if given_tags is None else given_tags + ) + self.log.info("data root: %s, submissions: %s", data_root, submissions) + self.contract.submit(submissions, tx_params=tx_params) + wait_until(lambda: self.contract.num_submissions() + == self.next_tx_seq + 1) + + client = self.nodes[0] + wait_until(lambda: client.zgs_get_file_info(data_root) is not None) + + segments = submit_data(client, chunk_data) + self.log.info( + "segments: %s", [(s["root"], s["index"], s["proof"]) + for s in segments] + ) + wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"], timeout=60) + + def update_data(self, writes): + for write in writes: + self.data[",".join([write[0], write[1]])] = write[3] + + def write_streams(self): + # first put + writes = [rand_write() for i in range(20)] + self.submit(MAX_U64, [], writes, []) + wait_until( + lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) + == "Commit", + ) + first_version = self.next_tx_seq + self.next_tx_seq += 1 + + # check data and admin role + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(",") + self.kv_nodes[0].check_equal(stream_id, key, value) + assert_equal( + self.kv_nodes[0].kv_is_admin( + GENESIS_ACCOUNT.address, stream_id), True + ) + + # stop one node, download should also successful + self.stop_storage_node(1) + + writes = [] + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(",") + writes.append(rand_write(stream_id, key)) + self.submit(first_version, [], writes, []) + wait_until( + lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) + == "Commit", + timeout=180, + ) + second_version = self.next_tx_seq + self.next_tx_seq += 1 + + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(",") + self.kv_nodes[0].check_equal(stream_id, key, value, first_version) + + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(",") + self.kv_nodes[0].check_equal(stream_id, key, value, second_version) + + +if 
__name__ == "__main__": + DataFetcherTest(blockchain_node_configs=dict( + [(0, dict(mode="dev", dev_block_interval_ms=50))])).main() diff --git a/tests/kv_iterator_test.py b/tests/kv_iterator_test.py new file mode 100644 index 0000000..cd9197f --- /dev/null +++ b/tests/kv_iterator_test.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +import base64 +from this import d +import time +from os import access +import random +from test_framework.test_framework import TestFramework +from utility.kv import (MAX_U64, op_with_address, op_with_key, STREAM_DOMAIN, with_prefix, is_write_permission_denied, + MAX_STREAM_ID, pad, to_key_with_size, to_stream_id, create_kv_data, AccessControlOps, rand_key, rand_write) +from utility.submission import submit_data +from utility.submission import create_submission +from utility.utils import ( + assert_equal, + wait_until, +) +from config.node_config import TX_PARAMS, TX_PARAMS1, GENESIS_ACCOUNT, GENESIS_ACCOUNT1 + + +class KVPutGetTest(TestFramework): + def setup_params(self): + self.num_blockchain_nodes = 1 + self.num_nodes = 1 + + def run_test(self): + # setup kv node, watch stream with id [0,100) + self.stream_ids = [to_stream_id(i) for i in range(MAX_STREAM_ID)] + self.stream_ids.reverse() + self.setup_kv_node(0, self.stream_ids) + self.stream_ids.reverse() + assert_equal( + [x[2:] for x in self.kv_nodes[0].kv_get_holding_stream_ids()], self.stream_ids) + + # tx_seq and data mapping + self.next_tx_seq = 0 + self.data = {} + # write empty stream + self.write_streams() + + def submit(self, version, reads, writes, access_controls, tx_params=TX_PARAMS, given_tags=None, trunc=False): + chunk_data, tags = create_kv_data( + version, reads, writes, access_controls) + if trunc: + chunk_data = chunk_data[:random.randrange( + len(chunk_data) // 2, len(chunk_data))] + submissions, data_root = create_submission( + chunk_data, tags if given_tags is None else given_tags) + self.log.info("data root: %s, submissions: %s", data_root, submissions) + self.contract.submit(submissions, tx_params=tx_params) + wait_until(lambda: self.contract.num_submissions() + == self.next_tx_seq + 1) + + client = self.nodes[0] + wait_until(lambda: client.zgs_get_file_info(data_root) is not None) + + segments = submit_data(client, chunk_data) + self.log.info("segments: %s", [ + (s["root"], s["index"], s["proof"]) for s in segments]) + wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"]) + + def update_data(self, writes): + for write in writes: + self.data[','.join([write[0], write[1]])] = write[3] if len( + write[3]) > 0 else None + + def write_streams(self): + # first put + stream_id = to_stream_id(1) + writes = [rand_write(stream_id) for i in range(20)] + self.submit(MAX_U64, [], writes, []) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + first_version = self.next_tx_seq + self.next_tx_seq += 1 + + # check data and admin role + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value) + assert_equal(self.kv_nodes[0].kv_is_admin( + GENESIS_ACCOUNT.address, stream_id), True) + + # iterate + pair = self.kv_nodes[0].seek_to_first(stream_id) + current_key = None + cnt = 0 + deleted = 0 + writes = [] + while pair != None: + cnt += 1 + if current_key is not None: + assert current_key < pair['key'] + current_key = pair['key'] + tmp = ','.join([stream_id, pair['key']]) + assert tmp in self.data + value = self.data[tmp] + 
assert_equal(value, pair['data']) + if cnt % 3 != 0: + writes.append(rand_write(stream_id, pair['key'], 0)) + deleted += 1 + + pair = self.kv_nodes[0].next(stream_id, current_key) + assert cnt == len(self.data.items()) + + # iterate(reverse) + pair = self.kv_nodes[0].seek_to_last(stream_id) + current_key = None + cnt = 0 + while pair != None: + cnt += 1 + if current_key is not None: + assert current_key > pair['key'] + current_key = pair['key'] + tmp = ','.join([stream_id, pair['key']]) + assert tmp in self.data + value = self.data[tmp] + assert_equal(value, pair['data']) + + pair = self.kv_nodes[0].prev(stream_id, current_key) + assert cnt == len(self.data.items()) + + # delete + self.submit(MAX_U64, [], writes, []) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + second_version = self.next_tx_seq + self.next_tx_seq += 1 + + # iterate at first version + pair = self.kv_nodes[0].seek_to_first(stream_id, first_version) + current_key = None + cnt = 0 + while pair != None: + cnt += 1 + if current_key is not None: + assert current_key < pair['key'] + current_key = pair['key'] + tmp = ','.join([stream_id, pair['key']]) + assert tmp in self.data + value = self.data[tmp] + assert_equal(value, pair['data']) + + pair = self.kv_nodes[0].next(stream_id, current_key, first_version) + assert cnt == len(self.data.items()) + + pair = self.kv_nodes[0].seek_to_last(stream_id, first_version) + current_key = None + cnt = 0 + while pair != None: + cnt += 1 + if current_key is not None: + assert current_key > pair['key'] + current_key = pair['key'] + tmp = ','.join([stream_id, pair['key']]) + assert tmp in self.data + value = self.data[tmp] + assert_equal(value, pair['data']) + + pair = self.kv_nodes[0].prev(stream_id, current_key, first_version) + assert cnt == len(self.data.items()) + + # iterate at second version + pair = self.kv_nodes[0].seek_to_first(stream_id, second_version) + current_key = None + cnt = 0 + while pair != None: + cnt += 1 + if current_key is not None: + assert current_key < pair['key'] + current_key = pair['key'] + tmp = ','.join([stream_id, pair['key']]) + assert tmp in self.data + value = self.data[tmp] + assert_equal(value, pair['data']) + + pair = self.kv_nodes[0].next( + stream_id, current_key, second_version) + assert cnt == len(self.data.items()) - deleted + + pair = self.kv_nodes[0].seek_to_last(stream_id, second_version) + current_key = None + cnt = 0 + while pair != None: + cnt += 1 + if current_key is not None: + assert current_key > pair['key'] + current_key = pair['key'] + tmp = ','.join([stream_id, pair['key']]) + assert tmp in self.data + value = self.data[tmp] + assert_equal(value, pair['data']) + + pair = self.kv_nodes[0].prev( + stream_id, current_key, second_version) + assert cnt == len(self.data.items()) - deleted + + +if __name__ == "__main__": + KVPutGetTest(blockchain_node_configs=dict( + [(0, dict(mode="dev", dev_block_interval_ms=50))])).main() diff --git a/tests/kv_put_get_test.py b/tests/kv_put_get_test.py new file mode 100644 index 0000000..4ba383e --- /dev/null +++ b/tests/kv_put_get_test.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python3 +import base64 +from this import d +import time +from os import access +import random +from test_framework.test_framework import TestFramework +from utility.kv import (MAX_U64, op_with_address, op_with_key, STREAM_DOMAIN, with_prefix, is_write_permission_denied, + MAX_STREAM_ID, pad, to_key_with_size, to_stream_id, create_kv_data, AccessControlOps, rand_key, rand_write) +from 
utility.submission import submit_data +from utility.submission import create_submission +from utility.utils import ( + assert_equal, + wait_until, +) +from config.node_config import TX_PARAMS, TX_PARAMS1, GENESIS_ACCOUNT, GENESIS_ACCOUNT1 + + +class KVPutGetTest(TestFramework): + def setup_params(self): + self.num_blockchain_nodes = 1 + self.num_nodes = 1 + + def run_test(self): + # setup kv node, watch stream with id [0,100) + self.stream_ids = [to_stream_id(i) for i in range(MAX_STREAM_ID)] + self.stream_ids.reverse() + self.setup_kv_node(0, self.stream_ids) + self.stream_ids.reverse() + assert_equal( + [x[2:] for x in self.kv_nodes[0].kv_get_holding_stream_ids()], self.stream_ids) + + # tx_seq and data mapping + self.next_tx_seq = 0 + self.data = {} + # write empty stream + self.write_streams() + + def submit(self, version, reads, writes, access_controls, tx_params=TX_PARAMS, given_tags=None, trunc=False): + chunk_data, tags = create_kv_data( + version, reads, writes, access_controls) + if trunc: + chunk_data = chunk_data[:random.randrange( + len(chunk_data) // 2, len(chunk_data))] + submissions, data_root = create_submission( + chunk_data, tags if given_tags is None else given_tags) + self.log.info("data root: %s, submissions: %s", data_root, submissions) + self.contract.submit(submissions, tx_params=tx_params) + wait_until(lambda: self.contract.num_submissions() + == self.next_tx_seq + 1) + + client = self.nodes[0] + wait_until(lambda: client.zgs_get_file_info(data_root) is not None) + + segments = submit_data(client, chunk_data) + self.log.info("segments: %s", [ + (s["root"], s["index"], s["proof"]) for s in segments]) + wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"]) + + def update_data(self, writes): + for write in writes: + self.data[','.join([write[0], write[1]])] = write[3] if len( + write[3]) > 0 else None + + def write_streams(self): + # first put + writes = [rand_write() for i in range(20)] + self.submit(MAX_U64, [], writes, []) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + first_version = self.next_tx_seq + self.next_tx_seq += 1 + + # check data and admin role + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value) + assert_equal(self.kv_nodes[0].kv_is_admin( + GENESIS_ACCOUNT.address, stream_id), True) + + # overwrite + writes = [] + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + writes.append(rand_write(stream_id, key)) + self.submit(first_version, [], writes, []) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + second_version = self.next_tx_seq + self.next_tx_seq += 1 + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value, first_version) + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value, second_version) + + # write but conflict + writes = [] + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + writes.append(rand_write(stream_id, key)) + self.submit(first_version, [], writes, []) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "VersionConfliction") + self.next_tx_seq += 1 + + writes = writes[:1] + 
reads = [] + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + reads.append([stream_id, key]) + self.submit(first_version, reads, writes, []) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "VersionConfliction") + self.next_tx_seq += 1 + + # write but invalid format + writes = [] + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + writes.append(rand_write(stream_id, key)) + self.submit(MAX_U64, [], writes, [], trunc=True) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "DataParseError: Invalid stream data") + self.next_tx_seq += 1 + + # write but permission denied + self.submit(MAX_U64, [], writes, [], tx_params=TX_PARAMS1) + wait_until(lambda: is_write_permission_denied( + self.kv_nodes[0].kv_get_trasanction_result(self.next_tx_seq))) + self.next_tx_seq += 1 + + # check data + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value) + + # overwrite, write same key multiple times in one tx + writes = [] + reads = [] + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + for _ in range(random.randrange(3, 10)): + writes.append(rand_write(stream_id, key)) + reads.append([stream_id, key]) + random.shuffle(writes) + self.submit(second_version, [], writes, []) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + third_version = self.next_tx_seq + self.next_tx_seq += 1 + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal( + stream_id, key, value, third_version - 1) + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value) + + # delete + writes = [] + reads = [] + flag = 0 + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + if flag % 2 == 0: + writes.append(rand_write(stream_id, key, 0)) + self.submit(MAX_U64, [], writes, []) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + fourth_version = self.next_tx_seq + self.next_tx_seq += 1 + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal( + stream_id, key, value, fourth_version - 1) + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value) + + +if __name__ == "__main__": + KVPutGetTest(blockchain_node_configs=dict( + [(0, dict(mode="dev", dev_block_interval_ms=50))])).main() diff --git a/tests/kv_recovery_test.py b/tests/kv_recovery_test.py new file mode 100644 index 0000000..27907c2 --- /dev/null +++ b/tests/kv_recovery_test.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +import base64 +from this import d +import time +from os import access +import random +from test_framework.test_framework import TestFramework +from utility.kv import (MAX_U64, op_with_address, op_with_key, STREAM_DOMAIN, with_prefix, is_write_permission_denied, + MAX_STREAM_ID, pad, to_key_with_size, to_stream_id, create_kv_data, AccessControlOps, rand_key, rand_write) +from utility.submission import submit_data +from utility.submission import create_submission +from utility.utils import ( + 
assert_equal, + wait_until, +) +from config.node_config import TX_PARAMS, TX_PARAMS1, GENESIS_ACCOUNT, GENESIS_ACCOUNT1 + + +class KVRecoveryTest(TestFramework): + def setup_params(self): + self.num_blockchain_nodes = 1 + self.num_nodes = 1 + + def run_test(self): + # setup kv node, watch stream with id [0,100) + self.stream_ids = [to_stream_id(i) for i in range(MAX_STREAM_ID)] + self.stream_ids.reverse() + self.setup_kv_node(0, self.stream_ids) + self.stream_ids.reverse() + assert_equal( + [x[2:] for x in self.kv_nodes[0].kv_get_holding_stream_ids()], self.stream_ids) + + # tx_seq and data mapping + self.next_tx_seq = 0 + self.data = {} + # write empty stream + self.write_streams() + + def submit(self, version, reads, writes, access_controls, tx_params=TX_PARAMS, given_tags=None, trunc=False): + chunk_data, tags = create_kv_data( + version, reads, writes, access_controls) + if trunc: + chunk_data = chunk_data[:random.randrange( + len(chunk_data) // 2, len(chunk_data))] + submissions, data_root = create_submission( + chunk_data, tags if given_tags is None else given_tags) + self.log.info("data root: %s, submissions: %s", data_root, submissions) + self.contract.submit(submissions, tx_params=tx_params) + wait_until(lambda: self.contract.num_submissions() + == self.next_tx_seq + 1) + + client = self.nodes[0] + wait_until(lambda: client.zgs_get_file_info(data_root) is not None) + + segments = submit_data(client, chunk_data) + self.log.info("segments: %s", [ + (s["root"], s["index"], s["proof"]) for s in segments]) + wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"]) + + def update_data(self, writes): + for write in writes: + self.data[','.join([write[0], write[1]])] = write[3] + + def write_streams(self): + # first put + writes = [rand_write() for i in range(20)] + self.submit(MAX_U64, [], writes, []) + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + first_version = self.next_tx_seq + self.next_tx_seq += 1 + + self.update_data(writes) + # stop node + self.kv_nodes[0].stop() + + # overwrite + writes = [] + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + writes.append(rand_write(stream_id, key)) + self.submit(first_version, [], writes, []) + + # restart node + self.kv_nodes[0].start() + self.kv_nodes[0].wait_for_rpc_connection() + wait_until(lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) == "Commit") + second_version = self.next_tx_seq + self.next_tx_seq += 1 + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value, first_version) + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(',') + self.kv_nodes[0].check_equal(stream_id, key, value, second_version) + + +if __name__ == "__main__": + KVRecoveryTest(blockchain_node_configs=dict( + [(0, dict(mode="dev", dev_block_interval_ms=50))])).main() diff --git a/tests/kv_reorg_test.py b/tests/kv_reorg_test.py new file mode 100644 index 0000000..f5319dd --- /dev/null +++ b/tests/kv_reorg_test.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +import random +from test_framework.test_framework import TestFramework +from test_framework.conflux_node import connect_nodes, disconnect_nodes, sync_blocks +from test_framework.blockchain_node import BlockChainNodeType +from utility.kv import ( + MAX_U64, + MAX_STREAM_ID, + to_stream_id, + create_kv_data, + rand_write, +) +from 
utility.submission import submit_data +from utility.submission import create_submission +from utility.utils import ( + assert_equal, + wait_until, +) +from config.node_config import TX_PARAMS, GENESIS_ACCOUNT + + +class ReorgTest(TestFramework): + def setup_params(self): + self.num_blockchain_nodes = 2 + self.num_nodes = 1 + + def run_test(self): + # setup kv node, watch stream with id [0,100) + self.stream_ids = [to_stream_id(i) for i in range(MAX_STREAM_ID)] + self.stream_ids.reverse() + + self.setup_kv_node(0, self.stream_ids) + self.stream_ids.reverse() + assert_equal( + [x[2:] for x in self.kv_nodes[0].kv_get_holding_stream_ids()], + self.stream_ids, + ) + + # tx_seq and data mapping + self.next_tx_seq = 0 + self.data = {} + # write empty stream + self.write_streams() + + def submit( + self, + version, + reads, + writes, + access_controls, + tx_params=TX_PARAMS, + given_tags=None, + trunc=False, + node_index=0, + ): + chunk_data, tags = create_kv_data( + version, reads, writes, access_controls) + if trunc: + chunk_data = chunk_data[ + : random.randrange(len(chunk_data) // 2, len(chunk_data)) + ] + submissions, data_root = create_submission( + chunk_data, tags if given_tags is None else given_tags + ) + self.log.info("data root: %s, submissions: %s", data_root, submissions) + self.contract.submit( + submissions, tx_params=tx_params, node_idx=node_index) + wait_until( + lambda: self.contract.num_submissions( + node_index) == self.next_tx_seq + 1 + ) + + return data_root, chunk_data + + def submit_data(self, data_root, chunk_data): + client = self.nodes[0] + wait_until(lambda: client.zgs_get_file_info(data_root) is not None) + + segments = submit_data(client, chunk_data) + self.log.info( + "segments: %s", [(s["root"], s["index"], s["proof"]) + for s in segments] + ) + wait_until(lambda: client.zgs_get_file_info(data_root)["finalized"]) + + def update_data(self, writes): + for write in writes: + self.data[",".join([write[0], write[1]])] = write[3] + + def write_streams(self): + disconnect_nodes(self.blockchain_nodes, 0, 1) + self.blockchain_nodes[0].generate_empty_blocks(5) + + # first put + writes = [rand_write() for i in range(20)] + data_root, chunk_data = self.submit(MAX_U64, [], writes, []) + client = self.nodes[0] + assert client.zgs_get_file_info(data_root) is None + self.blockchain_nodes[0].generate_empty_blocks(12) + self.submit_data(data_root, chunk_data) + wait_until( + lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) + == "Commit", + ) + + # check data and admin role + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(",") + self.kv_nodes[0].check_equal(stream_id, key, value) + assert_equal( + self.kv_nodes[0].kv_is_admin( + GENESIS_ACCOUNT.address, stream_id), True + ) + + # reorg put + writes = [rand_write() for i in range(20)] + data_root, chunk_data = self.submit( + MAX_U64, [], writes, [], node_index=1) + + self.blockchain_nodes[1].generate_empty_blocks(30) + connect_nodes(self.blockchain_nodes, 0, 1) + sync_blocks(self.blockchain_nodes[0:2]) + + self.submit_data(data_root, chunk_data) + + wait_until( + lambda: self.kv_nodes[0].kv_get_trasanction_result( + self.next_tx_seq) + == "Commit", + ) + + self.data = {} + self.update_data(writes) + for stream_id_key, value in self.data.items(): + stream_id, key = stream_id_key.split(",") + self.kv_nodes[0].check_equal(stream_id, key, value) + + +if __name__ == "__main__": + ReorgTest(blockchain_node_type=BlockChainNodeType.Conflux).main() diff 
--git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 0000000..c7078b4 --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1,8 @@ +jsonrpcclient +pyyaml +pysha3 +coincurve +eth-utils==1.10.0 +py-ecc==5.2.0 +web3 +eth_tester \ No newline at end of file diff --git a/tests/test_all.py b/tests/test_all.py new file mode 100644 index 0000000..83b38fb --- /dev/null +++ b/tests/test_all.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python3 +import argparse +import os +import shutil +import stat +import subprocess +import sys + +from concurrent.futures import ProcessPoolExecutor + +from utility.utils import is_windows_platform + +PORT_MIN = 11000 +PORT_MAX = 65535 +PORT_RANGE = 600 + +__file_path__ = os.path.dirname(os.path.realpath(__file__)) + + +def run_single_test(py, script, test_dir, index, port_min, port_max): + try: + # Make sure python thinks it can write unicode to its stdout + "\u2713".encode("utf_8").decode(sys.stdout.encoding) + TICK = "✓ " + CROSS = "✖ " + CIRCLE = "○ " + except UnicodeDecodeError: + TICK = "P " + CROSS = "x " + CIRCLE = "o " + + BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") + if os.name == "posix": + # primitive formatting on supported + # terminal via ANSI escape sequences: + BOLD = ("\033[0m", "\033[1m") + BLUE = ("\033[0m", "\033[0;34m") + RED = ("\033[0m", "\033[0;31m") + GREY = ("\033[0m", "\033[1;30m") + print("Running " + script) + port_min = port_min + (index * PORT_RANGE) % (port_max - port_min) + color = BLUE + glyph = TICK + try: + subprocess.check_output( + args=[py, script, "--randomseed=1", f"--port-min={port_min}"], + stdin=None, + cwd=test_dir, + ) + except subprocess.CalledProcessError as err: + color = RED + glyph = CROSS + print(color[1] + glyph + " Testcase " + script + color[0]) + print("Output of " + script + "\n" + err.output.decode("utf-8")) + raise err + print(color[1] + glyph + " Testcase " + script + color[0]) + + +def run(): + dir_name = os.path.join(__file_path__, "utility") + if not os.path.exists(dir_name): + os.makedirs(dir_name, exist_ok=True) + + conflux_path = os.path.join(dir_name, "conflux") + if not os.path.exists(conflux_path): + build_conflux(conflux_path) + + parser = argparse.ArgumentParser(usage="%(prog)s [options]") + parser.add_argument( + "--max-workers", + dest="max_workers", + default=5, + type=int, + ) + parser.add_argument( + "--port-max", + dest="port_max", + default=PORT_MAX, + type=int, + ) + parser.add_argument( + "--port-min", + dest="port_min", + default=PORT_MIN, + type=int, + ) + + options = parser.parse_args() + + TEST_SCRIPTS = [] + + test_dir = os.path.dirname(os.path.realpath(__file__)) + test_subdirs = [ + "", # include test_dir itself + ] + + slow_tests = {} + + for subdir in test_subdirs: + subdir_path = os.path.join(test_dir, subdir) + for file in os.listdir(subdir_path): + if file.endswith("_test.py"): + rel_path = os.path.join(subdir, file) + if rel_path not in slow_tests: + TEST_SCRIPTS.append(rel_path) + + executor = ProcessPoolExecutor(max_workers=options.max_workers) + test_results = [] + + py = "python" + if hasattr(sys, "getwindowsversion"): + py = "python" + + i = 0 + # Start slow tests first to avoid waiting for long-tail jobs + for script in slow_tests: + f = executor.submit( + run_single_test, py, script, test_dir, i, options.port_min, options.port_max + ) + test_results.append((script, f)) + i += 1 + for script in TEST_SCRIPTS: + f = executor.submit( + run_single_test, py, script, test_dir, i, options.port_min, options.port_max + ) + 
test_results.append((script, f)) + i += 1 + + failed = set() + for script, f in test_results: + try: + f.result() + except subprocess.CalledProcessError as err: + print("CalledProcessError " + repr(err)) + failed.add(script) + + if len(failed) > 0: + print("The following test fails: ") + for c in failed: + print(c) + sys.exit(1) + + +def build_conflux(conflux_path): + destination_path = os.path.join(__file_path__, "tmp", "conflux_tmp") + if os.path.exists(destination_path): + shutil.rmtree(destination_path) + + clone_command = "git clone https://github.com/Conflux-Chain/conflux-rust.git" + clone_with_path = clone_command + " " + destination_path + os.system(clone_with_path) + + origin_path = os.getcwd() + os.chdir(destination_path) + os.system("cargo build --release --bin conflux") + + path = os.path.join(destination_path, "target", "release", "conflux") + shutil.copyfile(path, conflux_path) + + if not is_windows_platform(): + st = os.stat(conflux_path) + os.chmod(conflux_path, st.st_mode | stat.S_IEXEC) + + os.chdir(origin_path) + + +if __name__ == "__main__": + run() diff --git a/tests/test_framework/blockchain_node.py b/tests/test_framework/blockchain_node.py new file mode 100644 index 0000000..10cbcff --- /dev/null +++ b/tests/test_framework/blockchain_node.py @@ -0,0 +1,326 @@ +import json +import os +import subprocess +import tempfile +import time + +from web3 import Web3, HTTPProvider +from web3.middleware import construct_sign_and_send_raw_middleware +from enum import Enum, unique +from config.node_config import ( + GENESIS_PRIV_KEY, + GENESIS_PRIV_KEY1, + TX_PARAMS, + MINER_ID, + NO_MERKLE_PROOF_FLAG, + NO_SEAL_FLAG, + TX_PARAMS1, +) +from utility.simple_rpc_proxy import SimpleRpcProxy +from utility.utils import ( + initialize_config, + wait_until, +) + + +@unique +class BlockChainNodeType(Enum): + Conflux = 0 + BSC = 1 + + +@unique +class NodeType(Enum): + BlockChain = 0 + Zgs = 1 + KV = 2 + + +class FailedToStartError(Exception): + """Raised when a node fails to start correctly.""" + + +class TestNode: + def __init__( + self, node_type, index, data_dir, rpc_url, binary, config, log, rpc_timeout=10 + ): + self.node_type = node_type + self.index = index + self.data_dir = data_dir + self.rpc_url = rpc_url + self.config = config + self.rpc_timeout = rpc_timeout + self.process = None + self.stdout = None + self.stderr = None + self.config_file = os.path.join(self.data_dir, "config.toml") + self.args = [binary, "--config", self.config_file] + self.running = False + self.rpc_connected = False + self.rpc = None + self.log = log + + def __del__(self): + if self.process: + self.process.terminate() + + def __getattr__(self, name): + """Dispatches any unrecognised messages to the RPC connection.""" + assert self.rpc_connected and self.rpc is not None, self._node_msg( + "Error: no RPC connection" + ) + return getattr(self.rpc, name) + + def _node_msg(self, msg: str) -> str: + """Return a modified msg that identifies this node by its index as a debugging aid.""" + return "[node %s %d] %s" % (self.node_type, self.index, msg) + + def _raise_assertion_error(self, msg: str): + """Raise an AssertionError with msg modified to identify this node.""" + raise AssertionError(self._node_msg(msg)) + + def setup_config(self): + os.mkdir(self.data_dir) + initialize_config(self.config_file, self.config) + + def start(self, redirect_stderr=False): + my_env = os.environ.copy() + if self.stdout is None: + self.stdout = tempfile.NamedTemporaryFile( + dir=self.data_dir, prefix="stdout", delete=False + ) + if 
self.stderr is None: + self.stderr = tempfile.NamedTemporaryFile( + dir=self.data_dir, prefix="stderr", delete=False + ) + + if redirect_stderr: + self.process = subprocess.Popen( + self.args, + stdout=self.stdout, + stderr=self.stdout, + cwd=self.data_dir, + env=my_env, + ) + else: + self.process = subprocess.Popen( + self.args, + stdout=self.stdout, + stderr=self.stderr, + cwd=self.data_dir, + env=my_env, + ) + self.running = True + + def wait_for_rpc_connection(self): + raise NotImplementedError + + def _wait_for_rpc_connection(self, check): + """Sets up an RPC connection to the node process. Returns False if unable to connect.""" + # Poll at a rate of four times per second + poll_per_s = 4 + for _ in range(poll_per_s * self.rpc_timeout): + if self.process.poll() is not None: + raise FailedToStartError( + self._node_msg( + "exited with status {} during initialization".format( + self.process.returncode + ) + ) + ) + rpc = SimpleRpcProxy(self.rpc_url, timeout=self.rpc_timeout) + if check(rpc): + self.rpc_connected = True + self.rpc = rpc + return + time.sleep(1.0 / poll_per_s) + self._raise_assertion_error( + "failed to get RPC proxy: index = {}, rpc_url = {}".format( + self.index, self.rpc_url + ) + ) + + def stop(self, expected_stderr="", kill=False, wait=True): + """Stop the node.""" + if not self.running: + return + if kill: + self.process.kill() + else: + self.process.terminate() + if wait: + self.wait_until_stopped() + # Check that stderr is as expected + self.stderr.seek(0) + stderr = self.stderr.read().decode("utf-8").strip() + # TODO: Check how to avoid `pthread lock: Invalid argument`. + if stderr != expected_stderr and stderr != "pthread lock: Invalid argument": + # print process status for debug + if self.return_code is None: + self.log.info("Process is still running") + else: + self.log.info( + "Process has terminated with code {}".format( + self.return_code) + ) + + raise AssertionError( + "Unexpected stderr {} != {} from node={}{}".format( + stderr, expected_stderr, self.node_type, self.index + ) + ) + + self.stdout.close() + self.stderr.close() + self.stdout = None + self.stderr = None + + def is_node_stopped(self): + """Checks whether the node has stopped. + + Returns True if the node has stopped. False otherwise. + This method is responsible for freeing resources (self.process).""" + if not self.running: + return True + return_code = self.process.poll() + if return_code is None: + return False + + # process has stopped. Assert that it didn't return an error code. 
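+ # A non-zero exit code here means the node crashed or errored out instead of shutting down cleanly.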
+ assert return_code == 0, self._node_msg( + "Node returned non-zero exit code (%d) when stopping" % return_code + ) + self.running = False + self.process = None + self.rpc = None + self.return_code = return_code + return True + + def wait_until_stopped(self, timeout=20): + wait_until(self.is_node_stopped, timeout=timeout) + + +class BlockchainNode(TestNode): + def __init__( + self, + index, + data_dir, + rpc_url, + binary, + local_conf, + contract_path, + token_contract_path, + mine_contract_path, + log, + blockchain_node_type, + rpc_timeout=10, + ): + self.contract_path = contract_path + self.token_contract_path = token_contract_path + self.mine_contract_path = mine_contract_path + + self.blockchain_node_type = blockchain_node_type + + super().__init__( + NodeType.BlockChain, + index, + data_dir, + rpc_url, + binary, + local_conf, + log, + rpc_timeout, + ) + + def wait_for_rpc_connection(self): + self._wait_for_rpc_connection(lambda rpc: rpc.eth_syncing() is False) + + def wait_for_start_mining(self): + self._wait_for_rpc_connection( + lambda rpc: int(rpc.eth_blockNumber(), 16) > 0) + + def wait_for_transaction_receipt(self, w3, tx_hash, timeout=120, parent_hash=None): + return w3.eth.wait_for_transaction_receipt(tx_hash, timeout) + + def setup_contract(self): + w3 = Web3(HTTPProvider(self.rpc_url)) + + account1 = w3.eth.account.from_key(GENESIS_PRIV_KEY) + account2 = w3.eth.account.from_key(GENESIS_PRIV_KEY1) + w3.middleware_onion.add( + construct_sign_and_send_raw_middleware([account1, account2]) + ) + # account = w3.eth.account.from_key(GENESIS_PRIV_KEY1) + # w3.middleware_onion.add(construct_sign_and_send_raw_middleware(account)) + + def deploy_contract(path, args=None): + if args is None: + args = [] + contract_interface = json.load(open(path, "r")) + contract = w3.eth.contract( + abi=contract_interface["abi"], + bytecode=contract_interface["bytecode"], + ) + tx_hash = contract.constructor(*args).transact(TX_PARAMS) + tx_receipt = self.wait_for_transaction_receipt(w3, tx_hash) + contract = w3.eth.contract( + address=tx_receipt.contractAddress, + abi=contract_interface["abi"], + ) + return contract, tx_hash + + self.log.debug("Start deploy contracts") + token_contract, _ = deploy_contract(self.token_contract_path) + self.log.debug("ERC20 deployed") + flow_contract, flow_contract_hash = deploy_contract( + self.contract_path, [token_contract.address] + ) + self.log.debug("Flow deployed") + mine_contract, _ = deploy_contract( + self.mine_contract_path, + [flow_contract.address, NO_SEAL_FLAG], + ) + self.log.debug("Mine deployed") + self.log.info("All contracts deployed") + + tx_hash = token_contract.functions.approve( + flow_contract.address, int(1e9) + ).transact(TX_PARAMS) + self.wait_for_transaction_receipt(w3, tx_hash) + + # setup second account + amount = int(1e8) + tx_hash = token_contract.functions.transfer(account2.address, amount).transact( + TX_PARAMS + ) + self.wait_for_transaction_receipt(w3, tx_hash) + + tx_hash = token_contract.functions.approve( + flow_contract.address, amount + ).transact(TX_PARAMS1) + self.wait_for_transaction_receipt(w3, tx_hash) + + tx_hash = mine_contract.functions.setMiner( + MINER_ID).transact(TX_PARAMS) + self.wait_for_transaction_receipt(w3, tx_hash) + + return flow_contract, flow_contract_hash, mine_contract + + def get_contract(self, contract_address): + w3 = Web3(HTTPProvider(self.rpc_url)) + + account1 = w3.eth.account.from_key(GENESIS_PRIV_KEY) + account2 = w3.eth.account.from_key(GENESIS_PRIV_KEY1) + w3.middleware_onion.add( + 
construct_sign_and_send_raw_middleware([account1, account2]) + ) + + contract_interface = json.load(open(self.contract_path, "r")) + return w3.eth.contract(address=contract_address, abi=contract_interface["abi"]) + + def wait_for_transaction(self, tx_hash): + w3 = Web3(HTTPProvider(self.rpc_url)) + w3.eth.wait_for_transaction_receipt(tx_hash) + + def start(self): + super().start(self.blockchain_node_type == BlockChainNodeType.BSC) diff --git a/tests/test_framework/bsc_node.py b/tests/test_framework/bsc_node.py new file mode 100644 index 0000000..17b4682 --- /dev/null +++ b/tests/test_framework/bsc_node.py @@ -0,0 +1,162 @@ +import os +import platform +import requests +import shutil +import stat + +from config.node_config import BSC_CONFIG +from eth_utils import encode_hex +from utility.signature_utils import ec_random_keys, priv_to_addr +from test_framework.blockchain_node import BlockChainNodeType, BlockchainNode +from utility.utils import ( + blockchain_p2p_port, + blockchain_rpc_port, + is_windows_platform, + wait_until, +) + +__file_path__ = os.path.dirname(os.path.realpath(__file__)) + + +class BSCNode(BlockchainNode): + def __init__( + self, + index, + root_dir, + binary, + updated_config, + contract_path, + token_contract_path, + mine_contract_path, + log, + rpc_timeout=10, + ): + local_conf = BSC_CONFIG.copy() + indexed_config = { + "HTTPPort": blockchain_rpc_port(index), + "Port": blockchain_p2p_port(index), + } + # Set configs for this specific node. + local_conf.update(indexed_config) + # Overwrite with personalized configs. + local_conf.update(updated_config) + data_dir = os.path.join(root_dir, "blockchain_node" + str(index)) + rpc_url = "http://" + \ + local_conf["HTTPHost"] + ":" + str(local_conf["HTTPPort"]) + + self.genesis_config = os.path.join( + __file_path__, "..", "config", "genesis.json" + ) + self.binary = binary + + if not os.path.exists(self.binary): + log.info("binary does not exist") + try: + with open(f"{self.binary}", "xb") as f: + self.__try_download_node(f, log) + except FileExistsError: + log.info("Binary is alrady under downloading") + + wait_until(lambda: os.access(f"{self.binary}", os.X_OK), timeout=120) + + self.node_id = encode_hex(priv_to_addr(ec_random_keys()[0])) + + super().__init__( + index, + data_dir, + rpc_url, + binary, + local_conf, + contract_path, + token_contract_path, + mine_contract_path, + log, + BlockChainNodeType.BSC, + rpc_timeout, + ) + + def __try_download_node(self, f, log): + url = "https://api.github.com/repos/{}/{}/releases/latest".format( + "bnb-chain", "bsc" + ) + req = requests.get(url) + if req.ok: + asset_name = self.__get_asset_name() + + url = "" + for asset in req.json()["assets"]: + if asset["name"].lower() == asset_name: + url = asset["browser_download_url"] + break + + if url: + log.info("Try to download geth from %s", url) + f.write(requests.get(url).content) + f.close() + + if not is_windows_platform(): + st = os.stat(self.binary) + os.chmod(self.binary, st.st_mode | stat.S_IEXEC) + else: + log.info("Request failed with %s", req) + + def __get_asset_name(self): + sys = platform.system().lower() + if sys == "linux": + return "geth_linux" + elif sys == "windows": + return "geth_windows.exe" + elif sys == "darwin": + return "geth_mac" + else: + raise RuntimeError("Unable to recognize platform") + + def start(self): + self.args = [ + self.binary, + "--datadir", + self.data_dir, + "init", + self.genesis_config, + ] + super().start() + + wait_until(lambda: self.process.poll() is not None) + ret = self.process.poll() + 
assert ret == 0, "BSC init should be successful" + + self.log.info( + "BSC node%d init finished with return code %d", self.index, ret) + + config_file = os.path.join(__file_path__, "..", "config", "bsc.toml") + target = os.path.join(self.data_dir, "bsc.toml") + shutil.copyfile(config_file, target) + + self.args = [ + self.binary, + "--datadir", + self.data_dir, + "--port", + str(self.config["Port"]), + "--http", + "--http.api", + "personal,eth,net,web3,admin,txpool,miner", + "--http.port", + str(self.config["HTTPPort"]), + # "--syncmode", + # "full", + # "--mine", + "--miner.threads", + "1", + "--miner.etherbase", + self.node_id, + "--networkid", + str(self.config["NetworkId"]), + "--verbosity", + str(self.config["Verbosity"]), + "--config", + "bsc.toml", + ] + + self.log.info("Start BSC node %d", self.index) + super().start() diff --git a/tests/test_framework/conflux_node.py b/tests/test_framework/conflux_node.py new file mode 100644 index 0000000..f61983f --- /dev/null +++ b/tests/test_framework/conflux_node.py @@ -0,0 +1,337 @@ +import os +import random +import threading +import time + +from config.node_config import BLOCK_SIZE_LIMIT, CONFLUX_CONFIG +import eth_utils +from test_framework.blockchain_node import BlockChainNodeType, BlockchainNode +from utility.signature_utils import ( + encode_int32, + get_nodeid, + sha3, +) +from utility.simple_rpc_proxy import SimpleRpcProxy +from utility.utils import ( + blockchain_p2p_port, + blockchain_rpc_port, + blockchain_rpc_port_core, + wait_until, +) +from web3.exceptions import TransactionNotFound + + +class ConfluxNode(BlockchainNode): + def __init__( + self, + index, + root_dir, + binary, + updated_config, + contract_path, + token_contract_path, + mine_contract_path, + log, + rpc_timeout=10, + ): + local_conf = CONFLUX_CONFIG.copy() + indexed_config = { + "jsonrpc_http_eth_port": blockchain_rpc_port(index), + "jsonrpc_local_http_port": blockchain_rpc_port_core(index), + "tcp_port": blockchain_p2p_port(index), + } + # Set configs for this specific node. + local_conf.update(indexed_config) + # Overwrite with personalized configs. 
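+ # Entries in updated_config take precedence over both the per-index port settings above and the CONFLUX_CONFIG defaults.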
+ local_conf.update(updated_config) + data_dir = os.path.join(root_dir, "blockchain_node" + str(index)) + rpc_url = ( + "http://" + + local_conf["public_address"] + + ":" + + str(local_conf["jsonrpc_http_eth_port"]) + ) + self.ip = local_conf["public_address"] + self.port = str(local_conf["tcp_port"]) + + if "dev_block_interval_ms" in local_conf: + self.auto_mining = True + else: + self.auto_mining = False + + # setup core space rpc + core_space_rpc_url = ( + "http://" + + local_conf["public_address"] + + ":" + + str(local_conf["jsonrpc_local_http_port"]) + ) + + self.core_space_rpc = SimpleRpcProxy( + core_space_rpc_url, timeout=rpc_timeout) + + super().__init__( + index, + data_dir, + rpc_url, + binary, + local_conf, + contract_path, + token_contract_path, + mine_contract_path, + log, + BlockChainNodeType.Conflux, + rpc_timeout, + ) + + def __getattr__(self, name): + """Dispatches any unrecognised messages to the RPC connection.""" + assert self.rpc_connected and self.rpc is not None, self._node_msg( + "Error: no RPC connection" + ) + if name.startswith("eth_") or name.startswith("parity_"): + return getattr(self.rpc, name) + else: + return getattr(self.core_space_rpc, name) + + def wait_for_transaction_receipt(self, w3, tx_hash, timeout=120, parent_hash=None): + if self.auto_mining: + return super().wait_for_transaction_receipt(w3, tx_hash, timeout) + else: + time_end = time.time() + timeout + while time.time() < time_end: + try: + tx_receipt = w3.eth.get_transaction_receipt(tx_hash) + except TransactionNotFound: + tx_receipt = None + if parent_hash: + parent_hash = self.generatefixedblock( + parent_hash, [], 1, False, None, None + ) + else: + self.generateoneblock(1, BLOCK_SIZE_LIMIT) + time.sleep(0.5) + + if tx_receipt is not None: + return tx_receipt + + raise TransactionNotFound + + def wait_for_nodeid(self): + pubkey, x, y = get_nodeid(self) + self.key = eth_utils.encode_hex(pubkey) + addr_tmp = bytearray(sha3(encode_int32(x) + encode_int32(y))[12:]) + addr_tmp[0] &= 0x0F + addr_tmp[0] |= 0x10 + self.addr = addr_tmp + self.log.debug("Get node {} nodeid {}".format(self.index, self.key)) + + def best_block_hash(self): + return self.core_space_rpc.cfx_getBestBlockHash() + + def cfx_epochNumber(self, epoch_number=None): + return self.core_space_rpc.cfx_epochNumber([epoch_number]) + + def getblockcount(self): + return self.core_space_rpc.getblockcount() + + def addnode(self, key, peer_addr): + return self.core_space_rpc.addnode([key, peer_addr]) + + def removenode(self, key, peer_addr): + return self.core_space_rpc.removenode([key, peer_addr]) + + def addlatency(self, node_id, latency_ms): + return self.core_space_rpc.addlatency([node_id, latency_ms]) + + def getnodeid(self, challenge): + return self.core_space_rpc.getnodeid([challenge]) + + def generate_empty_blocks(self, num_blocks): + return self.core_space_rpc.generate_empty_blocks([num_blocks]) + + def generateoneblock(self, num_txs, block_size_limit): + return self.core_space_rpc.generateoneblock([num_txs, block_size_limit]) + + def generatefixedblock( + self, parent_hash, referee, num_txs, adaptive, difficulty, pos_reference + ): + return self.core_space_rpc.generatefixedblock( + [parent_hash, referee, num_txs, adaptive, difficulty, pos_reference] + ) + + +def check_handshake(from_connection, target_node_id): + """ + Check whether node 'from_connection' has already + added node 'target_node_id' into its peer set. 
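+ A peer is only counted once at least one protocol handshake has completed.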
+ """ + peers = from_connection.getpeerinfo() + for peer in peers: + if peer["nodeid"] == target_node_id and len(peer["protocols"]) > 0: + return True + return False + + +def get_peer_addr(connection): + return "{}:{}".format(connection.ip, connection.port) + + +def connect_nodes(nodes, a, node_num, timeout=60): + """ + Let node[a] connect to node[node_num] + """ + from_connection = nodes[a] + to_connection = nodes[node_num] + key = nodes[node_num].key + peer_addr = get_peer_addr(to_connection) + from_connection.addnode(key, peer_addr) + # poll until hello handshake complete to avoid race conditions + # with transaction relaying + wait_until( + lambda: check_handshake(from_connection, to_connection.key), timeout=timeout + ) + + +def sync_blocks(rpc_connections, *, sync_count=True, wait=1, timeout=60): + """ + Wait until everybody has the same tip. + + sync_blocks needs to be called with an rpc_connections set that has least + one node already synced to the latest, stable tip, otherwise there's a + chance it might return before all nodes are stably synced. + """ + stop_time = time.time() + timeout + while time.time() <= stop_time: + best_hash = [x.best_block_hash() for x in rpc_connections] + block_count = [x.getblockcount() for x in rpc_connections] + if best_hash.count(best_hash[0]) == len(rpc_connections) and ( + not sync_count or block_count.count( + block_count[0]) == len(rpc_connections) + ): + return + time.sleep(wait) + raise AssertionError( + "Block sync timed out:{}".format( + "".join("\n {!r}".format(b) for b in best_hash + block_count) + ) + ) + + +def disconnect_nodes(nodes, from_connection, node_num): + try: + nodes[from_connection].removenode( + nodes[node_num].key, get_peer_addr(nodes[node_num]) + ) + nodes[node_num].removenode( + nodes[from_connection].key, get_peer_addr(nodes[from_connection]) + ) + except Exception as e: + # If this node is disconnected between calculating the peer id + # and issuing the disconnect, don't worry about it. + # This avoids a race condition if we're mass-disconnecting peers. + if e.error["code"] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED + raise + + # wait to disconnect + wait_until( + lambda: [ + peer + for peer in nodes[from_connection].getpeerinfo() + if peer["nodeid"] == nodes[node_num].key + ] + == [], + timeout=5, + ) + wait_until( + lambda: [ + peer + for peer in nodes[node_num].getpeerinfo() + if peer["nodeid"] == nodes[from_connection].key + ] + == [], + timeout=5, + ) + + +def connect_sample_nodes( + nodes, log, sample=3, latency_min=0, latency_max=300, timeout=30 +): + """ + Establish connections among nodes with each node having 'sample' outgoing peers. + It first lets all the nodes link as a loop, then randomly pick 'sample-1' + outgoing peers for each node. 
+ """ + peer = [[] for _ in nodes] + latencies = [{} for _ in nodes] + threads = [] + num_nodes = len(nodes) + sample = min(num_nodes - 1, sample) + + for i in range(num_nodes): + # make sure all nodes are reachable + next = (i + 1) % num_nodes + peer[i].append(next) + lat = random.randint(latency_min, latency_max) + latencies[i][next] = lat + latencies[next][i] = lat + + for _ in range(sample - 1): + while True: + p = random.randint(0, num_nodes - 1) + if p not in peer[i] and not p == i: + peer[i].append(p) + lat = random.randint(latency_min, latency_max) + latencies[i][p] = lat + latencies[p][i] = lat + break + + for i in range(num_nodes): + t = ConnectThread(nodes, i, peer[i], latencies, log, min_peers=sample) + t.start() + threads.append(t) + + for t in threads: + t.join(timeout) + assert ( + not t.is_alive() + ), "Node[{}] connect to other nodes timeout in {} seconds".format(t.a, timeout) + assert not t.failed, "connect_sample_nodes failed." + + +class ConnectThread(threading.Thread): + def __init__(self, nodes, a, peers, latencies, log, min_peers=3, daemon=True): + threading.Thread.__init__(self, daemon=daemon) + self.nodes = nodes + self.a = a + self.peers = peers + self.latencies = latencies + self.log = log + self.min_peers = min_peers + self.failed = False + + def run(self): + try: + while True: + for i in range(len(self.peers)): + p = self.peers[i] + connect_nodes(self.nodes, self.a, p) + for p in self.latencies[self.a]: + self.nodes[self.a].addlatency( + self.nodes[p].key, self.latencies[self.a][p] + ) + if len(self.nodes[self.a].getpeerinfo()) >= self.min_peers: + break + else: + time.sleep(1) + except Exception as e: + node = self.nodes[self.a] + self.log.error( + "Node " + + str(self.a) + + " fails to be connected to " + + str(self.peers) + + ", ip={}, index={}".format(node.ip, node.index) + ) + self.log.error(e) + self.failed = True diff --git a/tests/test_framework/contract_proxy.py b/tests/test_framework/contract_proxy.py new file mode 100644 index 0000000..41157b0 --- /dev/null +++ b/tests/test_framework/contract_proxy.py @@ -0,0 +1,65 @@ +from gettext import npgettext +from config.node_config import TX_PARAMS +from utility.utils import assert_equal + + +class ContractProxy: + def __init__(self, contract, blockchain_nodes): + self.contract = contract + self.contract_address = contract.address + self.blockchain_nodes = blockchain_nodes + + def _get_contract(self, node_idx=0): + return ( + self.contract + if node_idx == 0 + else self.blockchain_nodes[node_idx].get_contract(self.contract_address) + ) + + def _call(self, fn_name, node_idx, **args): + assert node_idx < len(self.blockchain_nodes) + + contract = self._get_contract(node_idx) + return getattr(contract.functions, fn_name)(**args).call() + + def _send(self, fn_name, node_idx, **args): + assert node_idx < len(self.blockchain_nodes) + + contract = self._get_contract(node_idx) + return getattr(contract.functions, fn_name)(**args).transact(TX_PARAMS) + + def address(self): + return self.contract_address + + +class FlowContractProxy(ContractProxy): + def submit( + self, submission_nodes, node_idx=0, tx_params=TX_PARAMS, parent_hash=None + ): + assert node_idx < len(self.blockchain_nodes) + + contract = self._get_contract(node_idx) + tx_hash = contract.functions.submit( + submission_nodes).transact(tx_params) + receipt = self.blockchain_nodes[node_idx].wait_for_transaction_receipt( + contract.web3, tx_hash, parent_hash=parent_hash + ) + assert_equal(receipt["status"], 1) + return tx_hash + + def num_submissions(self, 
node_idx=0): + return self._call("numSubmissions", node_idx) + + def first_block(self, node_idx=0): + return self._call("firstBlock", node_idx) + + def epoch(self, node_idx=0): + return self._call("epoch", node_idx) + + +class MineContractProxy(ContractProxy): + def last_mined_epoch(self, node_idx=0): + return self._call("lastMinedEpoch", node_idx) + + def set_quality(self, quality, node_idx=0): + return self._send("setQuality", node_idx, _targetQuality=quality) diff --git a/tests/test_framework/kv_node.py b/tests/test_framework/kv_node.py new file mode 100644 index 0000000..07d1204 --- /dev/null +++ b/tests/test_framework/kv_node.py @@ -0,0 +1,205 @@ +import base64 +import os + +from config.node_config import KV_CONFIG +from test_framework.blockchain_node import NodeType, TestNode +from utility.utils import ( + initialize_config, + rpc_port, + kv_rpc_port, + blockchain_rpc_port, + assert_equal +) + +bytes_per_query = 1024 * 256 + + +class KVNode(TestNode): + def __init__( + self, + index, + root_dir, + binary, + updated_config, + log_contract_address, + log, + rpc_timeout=10, + stream_ids=[] + ): + local_conf = KV_CONFIG.copy() + + indexed_config = { + "stream_ids": stream_ids, + "rpc_listen_address": f"127.0.0.1:{kv_rpc_port(index)}", + "log_contract_address": log_contract_address, + "blockchain_rpc_endpoint": f"http://127.0.0.1:{blockchain_rpc_port(0)}", + "zgs_node_urls": f"http://127.0.0.1:{rpc_port(0)}" + } + # Set configs for this specific node. + local_conf.update(indexed_config) + # Overwrite with personalized configs. + local_conf.update(updated_config) + data_dir = os.path.join(root_dir, "zgs_kv" + str(index)) + rpc_url = "http://" + local_conf["rpc_listen_address"] + super().__init__( + NodeType.KV, + index, + data_dir, + rpc_url, + binary, + local_conf, + log, + rpc_timeout, + ) + self.rpc_cnt = 0 + + def setup_config(self): + os.mkdir(self.data_dir) + log_config_path = os.path.join( + self.data_dir, self.config["log_config_file"]) + with open(log_config_path, "w") as f: + f.write("info") + initialize_config(self.config_file, self.config) + + def wait_for_rpc_connection(self): + self._wait_for_rpc_connection( + lambda rpc: rpc.kv_getStatus() is not None) + + def start(self): + self.log.info("Start kv node %d", self.index) + super().start() + + def check_equal(self, stream_id, key, value, version=None): + global bytes_per_query + i = 0 + if value is None: + self.rpc_cnt += 1 + res = self.kv_get_value(stream_id, key, 0, 1, version) + assert_equal(b'', base64.b64decode(res['data'].encode("utf-8"))) + assert res['size'] == 0 + return + while i < len(value): + self.rpc_cnt += 1 + res = self.kv_get_value( + stream_id, key, i, bytes_per_query, version) + if i + bytes_per_query < len(value): + assert_equal(base64.b64decode( + res['data'].encode("utf-8") + ), value[i: i + bytes_per_query]) + else: + assert_equal(base64.b64decode( + res['data'].encode("utf-8") + ), value[i:]) + i += bytes_per_query + + def next(self, stream_id, key, version=None): + global bytes_per_query + start_index = 0 + ans = { + 'data': b'' + } + while True: + res = self.kv_get_next( + stream_id, key, start_index, bytes_per_query, version) + if res is None: + return None + ans['size'] = res['size'] + ans['key'] = base64.b64decode(res['key'].encode("utf-8")).hex() + ans['data'] += base64.b64decode(res['data'].encode("utf-8")) + if len(ans['data']) == ans['size']: + return ans + start_index += bytes_per_query + + def prev(self, stream_id, key, version=None): + global bytes_per_query + start_index = 0 + ans = { + 
'data': b'' + } + while True: + res = self.kv_get_prev( + stream_id, key, start_index, bytes_per_query, version) + if res is None: + return None + ans['size'] = res['size'] + ans['key'] = base64.b64decode(res['key'].encode("utf-8")).hex() + ans['data'] += base64.b64decode(res['data'].encode("utf-8")) + if len(ans['data']) == ans['size']: + return ans + start_index += bytes_per_query + + def seek_to_first(self, stream_id, version=None): + global bytes_per_query + start_index = 0 + ans = { + 'data': b'' + } + while True: + res = self.kv_get_first( + stream_id, start_index, bytes_per_query, version) + if res is None: + return None + ans['size'] = res['size'] + ans['key'] = base64.b64decode(res['key'].encode("utf-8")).hex() + ans['data'] += base64.b64decode(res['data'].encode("utf-8")) + if len(ans['data']) == ans['size']: + return ans + start_index += bytes_per_query + + def seek_to_last(self, stream_id, version=None): + global bytes_per_query + start_index = 0 + ans = { + 'data': b'' + } + while True: + res = self.kv_get_last( + stream_id, start_index, bytes_per_query, version) + if res is None: + return None + ans['size'] = res['size'] + ans['key'] = base64.b64decode(res['key'].encode("utf-8")).hex() + ans['data'] += base64.b64decode(res['data'].encode("utf-8")) + if len(ans['data']) == ans['size']: + return ans + start_index += bytes_per_query + + def hex_to_segment(self, x): + return base64.b64encode(bytes.fromhex(x)).decode("utf-8") + + # rpc + def kv_get_value(self, stream_id, key, start_index, size, version=None): + return self.rpc.kv_getValue([stream_id, self.hex_to_segment(key), start_index, size, version]) + + def kv_get_next(self, stream_id, key, start_index, size, version=None): + return self.rpc.kv_getNext([stream_id, self.hex_to_segment(key), start_index, size, False, version]) + + def kv_get_prev(self, stream_id, key, start_index, size, version=None): + return self.rpc.kv_getPrev([stream_id, self.hex_to_segment(key), start_index, size, False, version]) + + def kv_get_first(self, stream_id, start_index, size, version=None): + return self.rpc.kv_getFirst([stream_id, start_index, size, version]) + + def kv_get_last(self, stream_id, start_index, size, version=None): + return self.rpc.kv_getLast([stream_id, start_index, size, version]) + + def kv_get_trasanction_result(self, tx_seq): + return self.rpc.kv_getTransactionResult([tx_seq]) + + def kv_get_holding_stream_ids(self): + return self.rpc.kv_getHoldingStreamIds() + + def kv_has_write_permission(self, account, stream_id, key, version=None): + return self.rpc.kv_hasWritePermission([account, stream_id, self.hex_to_segment(key), version]) + + def kv_is_admin(self, account, stream_id, version=None): + return self.rpc.kv_isAdmin([account, stream_id, version]) + + def kv_is_special_key(self, stream_id, key, version=None): + return self.rpc.kv_isSpecialKey([stream_id, self.hex_to_segment(key), version]) + + def kv_is_writer_of_key(self, account, stream_id, key, version=None): + return self.rpc.kv_isWriterOfKey([account, stream_id, self.hex_to_segment(key), version]) + + def kv_is_writer_of_stream(self, account, stream_id, version=None): + return self.rpc.kv_isWriterOfStream([account, stream_id, version]) diff --git a/tests/test_framework/test_framework.py b/tests/test_framework/test_framework.py new file mode 100644 index 0000000..a098a19 --- /dev/null +++ b/tests/test_framework/test_framework.py @@ -0,0 +1,505 @@ +import argparse +from enum import Enum +import logging +import os +import pdb +import random +import shutil +import 
subprocess +import sys +import tempfile +import time +import traceback + +from eth_utils import encode_hex +from test_framework.bsc_node import BSCNode +from test_framework.contract_proxy import FlowContractProxy, MineContractProxy +from test_framework.zgs_node import ZgsNode +from test_framework.blockchain_node import BlockChainNodeType +from test_framework.conflux_node import ConfluxNode, connect_sample_nodes, sync_blocks +from test_framework.kv_node import KVNode + +from utility.utils import PortMin, is_windows_platform, wait_until + +__file_path__ = os.path.dirname(os.path.realpath(__file__)) + + +class TestStatus(Enum): + PASSED = 1 + FAILED = 2 + + +TEST_EXIT_PASSED = 0 +TEST_EXIT_FAILED = 1 + + +class TestFramework: + def __init__(self, blockchain_node_type=BlockChainNodeType.Conflux, blockchain_node_configs={}): + self.num_blockchain_nodes = None + self.num_nodes = None + self.blockchain_nodes = [] + self.nodes = [] + self.kv_nodes = [] + self.contract = None + self.blockchain_node_configs = blockchain_node_configs + self.zgs_node_configs = {} + self.blockchain_node_type = blockchain_node_type + + def __setup_blockchain_node(self): + for i in range(self.num_blockchain_nodes): + if i in self.blockchain_node_configs: + updated_config = self.blockchain_node_configs[i] + else: + updated_config = {} + + node = None + if self.blockchain_node_type == BlockChainNodeType.BSC: + node = BSCNode( + i, + self.root_dir, + self.blockchain_binary, + updated_config, + self.contract_path, + self.token_contract_path, + self.mine_contract_path, + self.log, + 60, + ) + elif self.blockchain_node_type == BlockChainNodeType.Conflux: + node = ConfluxNode( + i, + self.root_dir, + self.blockchain_binary, + updated_config, + self.contract_path, + self.token_contract_path, + self.mine_contract_path, + self.log, + ) + else: + raise NotImplementedError + + self.blockchain_nodes.append(node) + node.setup_config() + node.start() + + # wait node to start to avoid NewConnectionError + time.sleep(1) + for node in self.blockchain_nodes: + node.wait_for_rpc_connection() + + if self.blockchain_node_type == BlockChainNodeType.BSC: + enodes = set( + [node.admin_nodeInfo()["enode"] + for node in self.blockchain_nodes[1:]] + ) + for enode in enodes: + self.blockchain_nodes[0].admin_addPeer([enode]) + + # mine + self.blockchain_nodes[0].miner_start([1]) + + def wait_for_peer(): + peers = self.blockchain_nodes[0].admin_peers() + for peer in peers: + if peer["enode"] in enodes: + enodes.remove(peer["enode"]) + + if enodes: + for enode in enodes: + self.blockchain_nodes[0].admin_addPeer([enode]) + return False + + return True + + wait_until(lambda: wait_for_peer()) + + for node in self.blockchain_nodes: + node.wait_for_start_mining() + elif self.blockchain_node_type == BlockChainNodeType.Conflux: + for node in self.blockchain_nodes: + node.wait_for_nodeid() + + # make nodes full connected + if self.num_blockchain_nodes > 1: + connect_sample_nodes(self.blockchain_nodes, self.log) + sync_blocks(self.blockchain_nodes) + + contract, tx_hash, mine_contract = self.blockchain_nodes[0].setup_contract( + ) + self.contract = FlowContractProxy(contract, self.blockchain_nodes) + self.mine_contract = MineContractProxy( + mine_contract, self.blockchain_nodes) + + for node in self.blockchain_nodes[1:]: + node.wait_for_transaction(tx_hash) + + def __setup_zgs_node(self): + for i in range(self.num_nodes): + if i in self.zgs_node_configs: + updated_config = self.zgs_node_configs[i] + else: + updated_config = {} + + assert 
os.path.exists(self.zgs_binary), ( + "%s should be exist" % self.zgs_binary + ) + node = ZgsNode( + i, + self.root_dir, + self.zgs_binary, + updated_config, + self.contract.address(), + self.mine_contract.address(), + self.log, + ) + self.nodes.append(node) + node.setup_config() + # wait first node start for connection + if i > 0: + time.sleep(1) + node.start() + + time.sleep(1) + for node in self.nodes: + node.wait_for_rpc_connection() + + def __parse_arguments(self): + parser = argparse.ArgumentParser(usage="%(prog)s [options]") + + parser.add_argument( + "--conflux-binary", + dest="conflux", + default=os.path.join( + __file_path__, + "../utility/conflux" + + (".exe" if is_windows_platform() else ""), + ), + type=str, + ) + + parser.add_argument( + "--bsc-binary", + dest="bsc", + default=os.path.join( + __file_path__, + "../utility/geth" + (".exe" if is_windows_platform() else ""), + ), + type=str, + ) + + parser.add_argument( + "--zgs-binary", + dest="zgs", + default=os.getenv( + "ZGS", + default=os.path.join( + __file_path__, + "../../zerog-storage-rust/target/release/zgs_node" + + (".exe" if is_windows_platform() else ""), + ), + ), + type=str, + ) + + parser.add_argument( + "--zgs-client", + dest="cli", + default=os.path.join( + __file_path__, + "../../zerog-storage-rust/target/zgs-client" + + (".exe" if is_windows_platform() else ""), + ), + type=str, + ) + + parser.add_argument( + "--zgs-kv", + dest="zgs_kv", + default=os.path.join( + __file_path__, + "../../target/release/zgs_kv" + + (".exe" if is_windows_platform() else ""), + ), + type=str, + ) + + parser.add_argument( + "--contract-path", + dest="contract", + default=os.path.join( + __file_path__, + "../../zerog-storage-rust/zerog-storage-contracts/artifacts/contracts/dataFlow/Flow.sol/Flow.json", + ), + type=str, + ) + + parser.add_argument( + "--token-contract-path", + dest="token_contract", + default=os.path.join( + __file_path__, + "../config/MockToken.json", + ), + type=str, + ) + + parser.add_argument( + "--mine-contract-path", + dest="mine_contract", + default=os.path.join( + __file_path__, + "../../zerog-storage-rust/zerog-storage-contracts/artifacts/contracts/test/PoraMineTest.sol/PoraMineTest.json", + ), + type=str, + ) + + parser.add_argument( + "-l", + "--loglevel", + dest="loglevel", + default="INFO", + help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.", + ) + + parser.add_argument( + "--tmpdir", dest="tmpdir", help="Root directory for datadirs" + ) + + parser.add_argument( + "--randomseed", dest="random_seed", type=int, help="Set a random seed" + ) + + parser.add_argument("--port-min", dest="port_min", + default=11000, type=int) + + parser.add_argument( + "--pdbonfailure", + dest="pdbonfailure", + default=False, + action="store_true", + help="Attach a python debugger if test fails", + ) + + self.options = parser.parse_args() + + def __start_logging(self): + # Add logger and logging handlers + self.log = logging.getLogger("TestFramework") + self.log.setLevel(logging.DEBUG) + + # Create file handler to log all messages + fh = logging.FileHandler( + self.options.tmpdir + "/test_framework.log", encoding="utf-8" + ) + fh.setLevel(logging.DEBUG) + + # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel. 
+ ch = logging.StreamHandler(sys.stdout) + # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int + ll = ( + int(self.options.loglevel) + if self.options.loglevel.isdigit() + else self.options.loglevel.upper() + ) + ch.setLevel(ll) + + # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) + formatter = logging.Formatter( + fmt="%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s", + datefmt="%Y-%m-%dT%H:%M:%S", + ) + formatter.converter = time.gmtime + fh.setFormatter(formatter) + ch.setFormatter(formatter) + + # add the handlers to the logger + self.log.addHandler(fh) + self.log.addHandler(ch) + + def _upload_file_use_cli( + self, + blockchain_node_rpc_url, + contract_address, + key, + ionion_node_rpc_url, + file_to_upload, + ): + assert os.path.exists( + self.cli_binary), "%s should be exist" % self.cli_binary + upload_args = [ + self.cli_binary, + "upload", + "--url", + blockchain_node_rpc_url, + "--contract", + contract_address, + "--key", + encode_hex(key), + "--node", + ionion_node_rpc_url, + "--log-level", + "debug", + "--file", + ] + + proc = subprocess.Popen( + upload_args + [file_to_upload.name], + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + proc.wait() + + root = None + for line in proc.stdout.readlines(): + self.log.debug("line: %s", line) + if "root" in line: + index = line.find("root=") + root = line[index + 5: -1] + self.log.info("root: %s", root) + + assert proc.returncode == 0, "%s upload file failed" % self.cli_binary + self.log.info("file uploaded") + + return root + + def setup_params(self): + self.num_blockchain_nodes = 1 + self.num_nodes = 1 + + def setup_nodes(self): + self.__setup_blockchain_node() + self.__setup_zgs_node() + + def setup_kv_node(self, index, stream_ids, updated_config={}): + assert os.path.exists(self.kv_binary), ( + "%s should be exist" % self.kv_binary + ) + node = KVNode( + index, + self.root_dir, + self.kv_binary, + updated_config, + self.contract.address(), + self.log, + stream_ids=stream_ids, + ) + self.kv_nodes.append(node) + node.setup_config() + node.start() + + time.sleep(1) + node.wait_for_rpc_connection() + + def stop_nodes(self): + for node in self.kv_nodes: + node.stop() + + # stop ionion nodes first + for node in self.nodes: + node.stop() + + for node in self.blockchain_nodes: + node.stop() + + def stop_kv_node(self, index): + self.kv_nodes[index].stop() + + def stop_storage_node(self, index): + self.nodes[index].stop() + + def start_kv_node(self, index): + self.kv_nodes[index].start() + + def start_storage_node(self, index): + self.nodes[index].start() + + def run_test(self): + raise NotImplementedError + + def main(self): + self.__parse_arguments() + PortMin.n = self.options.port_min + + # Set up temp directory and start logging + if self.options.tmpdir: + self.options.tmpdir = os.path.abspath(self.options.tmpdir) + os.makedirs(self.options.tmpdir, exist_ok=True) + else: + self.options.tmpdir = os.getenv( + "ZGS_TESTS_LOG_DIR", default=tempfile.mkdtemp(prefix="zgs_test_") + ) + + self.root_dir = self.options.tmpdir + + self.__start_logging() + self.log.info("Root dir: %s", self.root_dir) + + if self.blockchain_node_type == BlockChainNodeType.Conflux: + self.blockchain_binary = self.options.conflux + else: + self.blockchain_binary = self.options.bsc + + self.zgs_binary = self.options.zgs + self.cli_binary = self.options.cli + self.kv_binary = self.options.zgs_kv + 
self.contract_path = self.options.contract + self.token_contract_path = self.options.token_contract + self.mine_contract_path = self.options.mine_contract + + assert os.path.exists(self.contract_path), ( + "%s should be exist" % self.contract_path + ) + assert os.path.exists(self.token_contract_path), ( + "%s should be exist" % self.token_contract_path + ) + assert os.path.exists(self.mine_contract_path), ( + "%s should be exist" % self.mine_contract_path + ) + + if self.options.random_seed is not None: + random.seed(self.options.random_seed) + + success = TestStatus.FAILED + try: + self.setup_params() + self.setup_nodes() + self.run_test() + success = TestStatus.PASSED + except AssertionError as e: + self.log.exception("Assertion failed %s", repr(e)) + except KeyboardInterrupt as e: + self.log.warning("Exiting after keyboard interrupt %s", repr(e)) + except Exception as e: + self.log.error("Test exception %s %s", + repr(e), traceback.format_exc()) + self.log.error(f"Test data are not deleted: {self.root_dir}") + + if success == TestStatus.FAILED and self.options.pdbonfailure: + print("Testcase failed. Attaching python debugger. Enter ? for help") + pdb.set_trace() + + if success == TestStatus.PASSED: + self.log.info("Tests successful") + exit_code = TEST_EXIT_PASSED + else: + self.log.error( + "Test failed. Test logging available at %s/test_framework.log", + self.options.tmpdir, + ) + exit_code = TEST_EXIT_FAILED + + self.stop_nodes() + + if success == TestStatus.PASSED: + self.log.info("Test success") + shutil.rmtree(self.root_dir) + + handlers = self.log.handlers[:] + for handler in handlers: + self.log.removeHandler(handler) + handler.close() + logging.shutdown() + + sys.exit(exit_code) diff --git a/tests/test_framework/zgs_node.py b/tests/test_framework/zgs_node.py new file mode 100644 index 0000000..26ac4e5 --- /dev/null +++ b/tests/test_framework/zgs_node.py @@ -0,0 +1,103 @@ +import os + +from config.node_config import ZGS_CONFIG +from test_framework.blockchain_node import NodeType, TestNode +from config.node_config import MINER_ID +from utility.utils import ( + initialize_config, + p2p_port, + rpc_port, + blockchain_rpc_port, +) + + +class ZgsNode(TestNode): + def __init__( + self, + index, + root_dir, + binary, + updated_config, + log_contract_address, + mine_contract_address, + log, + rpc_timeout=10, + libp2p_nodes=None, + ): + local_conf = ZGS_CONFIG.copy() + if libp2p_nodes is None: + if index == 0: + libp2p_nodes = [] + else: + libp2p_nodes = [] + for i in range(index): + libp2p_nodes.append(f"/ip4/127.0.0.1/tcp/{p2p_port(i)}") + + indexed_config = { + "network_libp2p_port": p2p_port(index), + "network_discovery_port": p2p_port(index), + "rpc_listen_address": f"127.0.0.1:{rpc_port(index)}", + "network_libp2p_nodes": libp2p_nodes, + "log_contract_address": log_contract_address, + "mine_contract_address": mine_contract_address, + "blockchain_rpc_endpoint": f"http://127.0.0.1:{blockchain_rpc_port(0)}", + } + # Set configs for this specific node. + local_conf.update(indexed_config) + # Overwrite with personalized configs. 
+ local_conf.update(updated_config) + data_dir = os.path.join(root_dir, "zgs_node" + str(index)) + rpc_url = "http://" + local_conf["rpc_listen_address"] + super().__init__( + NodeType.Zgs, + index, + data_dir, + rpc_url, + binary, + local_conf, + log, + rpc_timeout, + ) + + def setup_config(self): + os.mkdir(self.data_dir) + log_config_path = os.path.join( + self.data_dir, self.config["log_config_file"]) + with open(log_config_path, "w") as f: + f.write("trace") + initialize_config(self.config_file, self.config) + + def wait_for_rpc_connection(self): + self._wait_for_rpc_connection( + lambda rpc: rpc.zgs_getStatus() is not None) + + def start(self): + self.log.info("Start zgs node %d", self.index) + super().start() + + # rpc + def zgs_get_status(self): + return self.rpc.zgs_getStatus()["connectedPeers"] + + def zgs_upload_segment(self, segment): + return self.rpc.zgs_uploadSegment([segment]) + + def zgs_download_segment(self, data_root, start_index, end_index): + return self.rpc.zgs_downloadSegment([data_root, start_index, end_index]) + + def zgs_get_file_info(self, data_root): + return self.rpc.zgs_getFileInfo([data_root]) + + def shutdown(self): + self.rpc.admin_shutdown() + self.wait_until_stopped() + + def admin_start_sync_file(self, tx_seq): + return self.rpc.admin_startSyncFile([tx_seq]) + + def admin_get_sync_status(self, tx_seq): + return self.rpc.admin_getSyncStatus([tx_seq]) + + def sycn_status_is_completed_or_unknown(self, tx_seq): + status = self.rpc.admin_getSyncStatus([tx_seq]) + return status == "Completed" or status == "unknown" diff --git a/tests/utility/kv.py b/tests/utility/kv.py new file mode 100644 index 0000000..ecb01c5 --- /dev/null +++ b/tests/utility/kv.py @@ -0,0 +1,183 @@ +from enum import Enum +import random + + +class AccessControlOps(Enum): + GRANT_ADMIN_ROLE = 0x00 + RENOUNCE_ADMIN_ROLE = 0x01 + SET_KEY_TO_SPECIAL = 0x10 + SET_KEY_TO_NORMAL = 0x11 + GRANT_WRITER_ROLE = 0x20 + REVOKE_WRITER_ROLE = 0x21 + RENOUNCE_WRITER_ROLE = 0x22 + GRANT_SPECIAL_WRITER_ROLE = 0x30 + REVOKE_SPECIAL_WRITER_ROLE = 0x31 + RENOUNCE_SPECIAL_WRITER_ROLE = 0x32 + + @staticmethod + def grant_admin_role(stream_id, address): + return [AccessControlOps.GRANT_ADMIN_ROLE, stream_id, to_address(address)] + + @staticmethod + def renounce_admin_role(stream_id): + return [AccessControlOps.RENOUNCE_ADMIN_ROLE, stream_id] + + @staticmethod + def set_key_to_special(stream_id, key): + return [AccessControlOps.SET_KEY_TO_SPECIAL, stream_id, key] + + @staticmethod + def set_key_to_normal(stream_id, key): + return [AccessControlOps.SET_KEY_TO_NORMAL, stream_id, key] + + @staticmethod + def grant_writer_role(stream_id, address): + return [AccessControlOps.GRANT_WRITER_ROLE, stream_id, to_address(address)] + + @staticmethod + def revoke_writer_role(stream_id, address): + return [AccessControlOps.REVOKE_WRITER_ROLE, stream_id, to_address(address)] + + @staticmethod + def renounce_writer_role(stream_id): + return [AccessControlOps.RENOUNCE_WRITER_ROLE, stream_id] + + @staticmethod + def grant_special_writer_role(stream_id, key, address): + return [AccessControlOps.GRANT_SPECIAL_WRITER_ROLE, stream_id, key, to_address(address)] + + @staticmethod + def revoke_special_writer_role(stream_id, key, address): + return [AccessControlOps.REVOKE_SPECIAL_WRITER_ROLE, stream_id, key, to_address(address)] + + @staticmethod + def renounce_special_writer_role(stream_id, key): + return [AccessControlOps.RENOUNCE_SPECIAL_WRITER_ROLE, stream_id, key] + + +op_with_key = [ + AccessControlOps.SET_KEY_TO_SPECIAL, + 
AccessControlOps.SET_KEY_TO_NORMAL, + AccessControlOps.GRANT_SPECIAL_WRITER_ROLE, + AccessControlOps.REVOKE_SPECIAL_WRITER_ROLE, + AccessControlOps.RENOUNCE_SPECIAL_WRITER_ROLE] + +op_with_address = [ + AccessControlOps.GRANT_ADMIN_ROLE, + AccessControlOps.GRANT_WRITER_ROLE, + AccessControlOps.REVOKE_WRITER_ROLE, + AccessControlOps.GRANT_SPECIAL_WRITER_ROLE, + AccessControlOps.REVOKE_SPECIAL_WRITER_ROLE] + + +MAX_STREAM_ID = 100 +MAX_DATA_LENGTH = 256 * 1024 * 4 +MIN_DATA_LENGTH = 10 +MAX_U64 = (1 << 64) - 1 +MAX_KEY_LEN = 2000 + +STREAM_DOMAIN = bytes.fromhex( + "df2ff3bb0af36c6384e6206552a4ed807f6f6a26e7d0aa6bff772ddc9d4307aa") + + +def with_prefix(x): + x = x.lower() + if not x.startswith('0x'): + x = '0x' + x + return x + + +def pad(x, l): + ans = hex(x)[2:] + return '0' * (l - len(ans)) + ans + + +def to_address(x): + if x.startswith('0x'): + return x[2:] + return x + + +def to_stream_id(x): + return pad(x, 64) + + +def to_key_with_size(x): + size = pad(len(x) // 2, 6) + return size + x + + +def rand_key(): + len = random.randrange(1, MAX_KEY_LEN) + if len % 2 == 1: + len += 1 + return ''.join([hex(random.randrange(16))[2:] for i in range(len)]) + + +def rand_write(stream_id=None, key=None, size=None): + return [to_stream_id(random.randrange(0, MAX_STREAM_ID)) if stream_id is None else stream_id, + rand_key() if key is None else key, + random.randrange(MIN_DATA_LENGTH, MAX_DATA_LENGTH) if size is None else size] + + +def is_access_control_permission_denied(x): + if x is None: + return False + return x.startswith("AccessControlPermissionDenied") + + +def is_write_permission_denied(x): + if x is None: + return False + return x.startswith("WritePermissionDenied") + +# reads: array of [stream_id, key] +# writes: array of [stream_id, key, data_length] + + +def create_kv_data(version, reads, writes, access_controls): + # version + data = bytes.fromhex(pad(version, 16)) + tags = [] + # read set + data += bytes.fromhex(pad(len(reads), 8)) + for read in reads: + data += bytes.fromhex(read[0]) + data += bytes.fromhex(to_key_with_size(read[1])) + # write set + data += bytes.fromhex(pad(len(writes), 8)) + # write set meta + for write in writes: + data += bytes.fromhex(write[0]) + data += bytes.fromhex(to_key_with_size(write[1])) + data += bytes.fromhex(pad(write[2], 16)) + tags.append(write[0]) + if len(write) == 3: + write_data = random.randbytes(write[2]) + write.append(write_data) + # write data + for write in writes: + data += write[3] + # access controls + data += bytes.fromhex(pad(len(access_controls), 8)) + for ac in access_controls: + k = 0 + # type + data += bytes.fromhex(pad(ac[k].value, 2)) + k += 1 + # stream_id + tags.append(ac[k]) + data += bytes.fromhex(ac[k]) + k += 1 + # key + if ac[0] in op_with_key: + data += bytes.fromhex(to_key_with_size(ac[k])) + k += 1 + # address + if ac[0] in op_with_address: + data += bytes.fromhex(ac[k]) + k += 1 + tags = list(set(tags)) + tags = sorted(tags) + tags = STREAM_DOMAIN + bytes.fromhex(''.join(tags)) + return data, tags diff --git a/tests/utility/merkle_tree.py b/tests/utility/merkle_tree.py new file mode 100644 index 0000000..01d35ae --- /dev/null +++ b/tests/utility/merkle_tree.py @@ -0,0 +1,242 @@ +import sha3 + +from math import log2 + + +def decompose(num): + powers = [] + while num > 0: + power = int(log2(num)) + powers += [power] + num -= 1 << power + return powers + + +def add_0x_prefix(val): + return "0x" + val + + +class Hasher: + def __init__(self, algorithm="keccak_256", + encoding="utf-8", security=False): + self.algorithm = 
algorithm + self.security = security + self.encoding = encoding + + if security: + self.prefix00 = "\x00".encode(encoding) + self.prefix01 = "\x01".encode(encoding) + else: + self.prefix00 = bytes() + self.prefix01 = bytes() + + def _hasher(self): + if self.algorithm == "keccak_256": + return sha3.keccak_256() + else: + raise NotImplementedError + + def hash_data(self, data): + buff = self.prefix00 + ( + data if isinstance(data, bytes) else data.encode(self.encoding) + ) + + hasher = self._hasher() + hasher.update(buff) + return hasher.hexdigest().encode(self.encoding) + + def hash_pair(self, left, right): + buff = ( + self.prefix01 + + bytes.fromhex(left.decode("utf-8")) + + bytes.fromhex(right.decode("utf-8")) + ) + + hasher = self._hasher() + hasher.update(buff) + return hasher.hexdigest().encode(self.encoding) + + +class Node: + + __slots__ = ("__value", "__parent", "__left", "__right") + + def __init__(self, value, parent=None, left=None, right=None): + self.__value = value + self.__parent = parent + self.__left = left + self.__right = right + + if left: + left.__parent = self + if right: + right.__parent = self + + @property + def value(self): + return self.__value + + @property + def left(self): + return self.__left + + @property + def right(self): + return self.__right + + @property + def parent(self): + return self.__parent + + def set_left(self, node): + self.__left = node + + def set_right(self, node): + self.__right = node + + def set_parent(self, node): + self.__parent = node + + def is_left_child(self): + parent = self.__parent + if not parent: + return False + + return self == parent.left + + def is_right_child(self): + parent = self.__parent + if not parent: + return False + + return self == parent.right + + def is_leaf(self): + return isinstance(self, Leaf) + + @classmethod + def from_children(cls, left, right, hasher): + digest = hasher.hash_pair(left.__value, right.__value) + return cls(value=digest, left=left, right=right, parent=None) + + def ancestor(self, degree): + if degree == 0: + return self + + if not self.__parent: + return + + return self.__parent.ancestor(degree - 1) + + def recalculate_hash(self, hasher): + self.__value = hasher.hash_pair(self.left.value, self.right.value) + + +class Leaf(Node): + def __init__(self, value, leaf=None): + super().__init__(value) + + @classmethod + def from_data(cls, data, hasher): + return cls(hasher.hash_data(data), leaf=None) + + +class MerkleTree: + def __init__(self, encoding="utf-8"): + self.__root = None + self.__leaves = [] + self.encoding = encoding + self.hasher = Hasher(encoding=encoding) + + def __bool__(self): + return len(self.__leaves) != 0 + + def encrypt(self, data): + leaf = Leaf.from_data(data, self.hasher) + self.add_leaf(leaf) + + def add_leaf(self, leaf): + if self: + subroot = self.get_last_subroot() + self._append_leaf(leaf) + + if not subroot.parent: + # Increase height by one + self.__root = Node.from_children(subroot, leaf, self.hasher) + else: + parent = subroot.parent + + # Create bifurcation node + new_node = Node.from_children(subroot, leaf, self.hasher) + + # Interject bifurcation node + parent.set_right(new_node) + new_node.set_parent(parent) + + # Recalculate hashes only at the rightmost branch of the tree + curr = parent + while curr: + curr.recalculate_hash(self.hasher) + curr = curr.parent + else: + self._append_leaf(leaf) + self.__root = leaf + + def get_last_subroot(self): + if not self.__leaves: + raise ValueError + + last_power = decompose(len(self.__leaves))[-1] + return 
self.get_tail().ancestor(degree=last_power) + + def get_tail(self): + return self.__leaves[-1] + + def _append_leaf(self, leaf): + self.__leaves.append(leaf) + + def get_root_hash(self): + if not self.__root: + return + + return self.__root.value + + def decode_value(self, val): + return val.decode(self.encoding) + + def proof_at(self, i): + if i < 0 or i >= len(self.__leaves): + raise IndexError + + if len(self.__leaves) == 1: + return { + "lemma": [add_0x_prefix(self.decode_value(self.get_root_hash()))], + "path": [], + } + + proof = {"lemma": [], "path": []} + proof["lemma"].append(add_0x_prefix( + self.decode_value(self.__leaves[i].value))) + + current = self.__leaves[i] + while current != self.__root: + if current.parent is not None and current.parent.left == current: + # add right + proof["lemma"].append( + add_0x_prefix(self.decode_value( + current.parent.right.value)) + ) + proof["path"].append(True) + else: + # add left + proof["lemma"].append( + add_0x_prefix(self.decode_value(current.parent.left.value)) + ) + proof["path"].append(False) + + current = current.parent + + # add root + proof["lemma"].append(add_0x_prefix( + self.decode_value(self.get_root_hash()))) + return proof diff --git a/tests/utility/signature_utils.py b/tests/utility/signature_utils.py new file mode 100644 index 0000000..149fcc0 --- /dev/null +++ b/tests/utility/signature_utils.py @@ -0,0 +1,616 @@ +import coincurve +import random +import rlp +import sha3 as _sha3 + +from eth_utils import decode_hex, int_to_big_endian, big_endian_to_int +from eth_utils import encode_hex as encode_hex_0x +from py_ecc.secp256k1 import privtopub, ecdsa_raw_sign, ecdsa_raw_recover +from rlp.sedes import big_endian_int, BigEndianInt, Binary +from rlp.utils import ALL_BYTES + + +def sha3_256(x): + return _sha3.keccak_256(x).digest() + + +class Memoize: + def __init__(self, fn): + self.fn = fn + self.memo = {} + + def __call__(self, *args): + if args not in self.memo: + self.memo[args] = self.fn(*args) + return self.memo[args] + + +TT256 = 2**256 +TT256M1 = 2**256 - 1 +TT255 = 2**255 +SECP256K1P = 2**256 - 4294968273 + + +def is_numeric(x): + return isinstance(x, int) + + +def is_string(x): + return isinstance(x, bytes) + + +def to_string(value): + if isinstance(value, bytes): + return value + if isinstance(value, str): + return bytes(value, "utf-8") + if isinstance(value, int): + return bytes(str(value), "utf-8") + + +def int_to_bytes(value): + if isinstance(value, bytes): + return value + return int_to_big_endian(value) + + +def to_string_for_regexp(value): + return str(to_string(value), "utf-8") + + +unicode = str + + +def bytearray_to_bytestr(value): + return bytes(value) + + +def encode_int32(v): + return v.to_bytes(32, byteorder="big") + + +def bytes_to_int(value): + return int.from_bytes(value, byteorder="big") + + +def str_to_bytes(value): + if isinstance(value, bytearray): + value = bytes(value) + if isinstance(value, bytes): + return value + return bytes(value, "utf-8") + + +def ascii_chr(n): + return ALL_BYTES[n] + + +def encode_hex(n): + if isinstance(n, str): + return encode_hex(n.encode("ascii")) + return encode_hex_0x(n)[2:] + + +def ecrecover_to_pub(rawhash, v, r, s): + if coincurve and hasattr(coincurve, "PublicKey"): + try: + pk = coincurve.PublicKey.from_signature_and_message( + zpad(bytearray_to_bytestr(int_to_32bytearray(r)), 32) + + zpad(bytearray_to_bytestr(int_to_32bytearray(s)), 32) + + ascii_chr(v - 27), + rawhash, + hasher=None, + ) + pub = pk.format(compressed=False)[1:] + x, y = pk.point() + except 
BaseException: + x, y = 0, 0 + pub = b"\x00" * 64 + else: + result = ecdsa_raw_recover(rawhash, (v, r, s)) + if result: + x, y = result + pub = encode_int32(x) + encode_int32(y) + else: + raise ValueError("Invalid VRS") + assert len(pub) == 64 + return pub, x, y + + +def ecsign(rawhash, key): + if coincurve and hasattr(coincurve, "PrivateKey"): + pk = coincurve.PrivateKey(key) + signature = pk.sign_recoverable(rawhash, hasher=None) + v = safe_ord(signature[64]) + 27 + r = big_endian_to_int(signature[0:32]) + s = big_endian_to_int(signature[32:64]) + else: + v, r, s = ecdsa_raw_sign(rawhash, key) + return v, r, s + + +def ec_random_keys(): + priv_key = random.randint(0, 2**256).to_bytes(32, "big") + pub_key = privtopub(priv_key) + return priv_key, pub_key + + +def convert_to_nodeid(signature, challenge): + r = big_endian_to_int(signature[:32]) + s = big_endian_to_int(signature[32:64]) + v = big_endian_to_int(signature[64:]) + 27 + signed = int_to_bytes(challenge) + h_signed = sha3_256(signed) + return ecrecover_to_pub(h_signed, v, r, s) + + +def get_nodeid(node): + challenge = random.randint(0, 2**32 - 1) + signature = node.getnodeid(list(int_to_bytes(challenge))) + return convert_to_nodeid(signature, challenge) + + +def mk_contract_address(sender, nonce): + return sha3(rlp.encode([normalize_address(sender), nonce]))[12:] + + +def mk_metropolis_contract_address(sender, initcode): + return sha3(normalize_address(sender) + initcode)[12:] + + +def safe_ord(value): + if isinstance(value, int): + return value + else: + return ord(value) + + +# decorator + + +def debug(label): + def deb(f): + def inner(*args, **kwargs): + i = random.randrange(1000000) + print(label, i, "start", args) + x = f(*args, **kwargs) + print(label, i, "end", x) + return x + + return inner + + return deb + + +def flatten(li): + o = [] + for l in li: + o.extend(l) + return o + + +def bytearray_to_int(arr): + o = 0 + for a in arr: + o = (o << 8) + a + return o + + +def int_to_32bytearray(i): + o = [0] * 32 + for x in range(32): + o[31 - x] = i & 0xFF + i >>= 8 + return o + + +# sha3_count = [0] + + +def sha3(seed): + return sha3_256(to_string(seed)) + + +assert ( + encode_hex(sha3(b"")) + == "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" +) +assert ( + encode_hex(sha3(b"\x00" * 256)) + == "d397b3b043d87fcd6fad1291ff0bfd16401c274896d8c63a923727f077b8e0b5" +) + + +@Memoize +def priv_to_addr(k): + k = normalize_key(k) + x, y = privtopub(k) + addr = bytearray(sha3(encode_int32(x) + encode_int32(y))[12:]) + addr[0] &= 0x0F + addr[0] |= 0x10 + return bytes(addr) + + +def priv_to_pub(k): + k = normalize_key(k) + x, y = privtopub(k) + return bytes(encode_int32(x) + encode_int32(y)) + + +def pub_to_addr(k): + x = big_endian_to_int(decode_hex(k[2:34])) + y = big_endian_to_int(decode_hex(k[34:66])) + addr = sha3(encode_int32(x) + encode_int32(y))[12:] + addr[0] &= 0x0F + addr[0] |= 0x10 + return bytes(addr) + + +def checksum_encode(addr): # Takes a 20-byte binary address as input + addr = normalize_address(addr) + o = "" + v = big_endian_to_int(sha3(encode_hex(addr))) + for i, c in enumerate(encode_hex(addr)): + if c in "0123456789": + o += c + else: + o += c.upper() if (v & (2 ** (255 - 4 * i))) else c.lower() + return "0x" + o + + +def check_checksum(addr): + return checksum_encode(normalize_address(addr)) == addr + + +def normalize_address(x, allow_blank=False): + if is_numeric(x): + return int_to_addr(x) + if allow_blank and x in {"", b""}: + return b"" + if len(x) in (42, 50) and x[:2] in {"0x", b"0x"}: + x = 
x[2:] + if len(x) in (40, 48): + x = decode_hex(x) + if len(x) == 24: + assert len(x) == 24 and sha3(x[:20])[:4] == x[-4:] + x = x[:20] + if len(x) != 20: + raise Exception("Invalid address format: %r" % x) + return x + + +def normalize_key(key): + if is_numeric(key): + o = encode_int32(key) + elif len(key) == 32: + o = key + elif len(key) == 64: + o = decode_hex(key) + elif len(key) == 66 and key[:2] == "0x": + o = decode_hex(key[2:]) + else: + raise Exception("Invalid key format: %r" % key) + if o == b"\x00" * 32: + raise Exception("Zero privkey invalid") + return o + + +def zpad(x, l): + """Left zero pad value `x` at least to length `l`. + + >>> zpad('', 1) + '\x00' + >>> zpad('\xca\xfe', 4) + '\x00\x00\xca\xfe' + >>> zpad('\xff', 1) + '\xff' + >>> zpad('\xca\xfe', 2) + '\xca\xfe' + """ + return b"\x00" * max(0, l - len(x)) + x + + +def rzpad(value, total_length): + """Right zero pad value `x` at least to length `l`. + + >>> zpad('', 1) + '\x00' + >>> zpad('\xca\xfe', 4) + '\xca\xfe\x00\x00' + >>> zpad('\xff', 1) + '\xff' + >>> zpad('\xca\xfe', 2) + '\xca\xfe' + """ + return value + b"\x00" * max(0, total_length - len(value)) + + +def int_to_addr(x): + o = [b""] * 20 + for i in range(20): + o[19 - i] = ascii_chr(x & 0xFF) + x >>= 8 + return b"".join(o) + + +def coerce_addr_to_bin(x): + if is_numeric(x): + return encode_hex(zpad(big_endian_int.serialize(x), 20)) + elif len(x) == 40 or len(x) == 0: + return decode_hex(x) + else: + return zpad(x, 20)[-20:] + + +def coerce_addr_to_hex(x): + if is_numeric(x): + return encode_hex(zpad(big_endian_int.serialize(x), 20)) + elif len(x) == 40 or len(x) == 0: + return x + else: + return encode_hex(zpad(x, 20)[-20:]) + + +def coerce_to_int(x): + if is_numeric(x): + return x + elif len(x) == 40: + return big_endian_to_int(decode_hex(x)) + else: + return big_endian_to_int(x) + + +def coerce_to_bytes(x): + if is_numeric(x): + return big_endian_int.serialize(x) + elif len(x) == 40: + return decode_hex(x) + else: + return x + + +def parse_int_or_hex(s): + if is_numeric(s): + return s + elif s[:2] in (b"0x", "0x"): + s = to_string(s) + tail = (b"0" if len(s) % 2 else b"") + s[2:] + return big_endian_to_int(decode_hex(tail)) + else: + return int(s) + + +def ceil32(x): + return x if x % 32 == 0 else x + 32 - (x % 32) + + +def to_signed(i): + return i if i < TT255 else i - TT256 + + +def sha3rlp(x): + return sha3(rlp.encode(x)) + + +# Format encoders/decoders for bin, addr, int + + +def decode_bin(v): + """decodes a bytearray from serialization""" + if not is_string(v): + raise Exception("Value must be binary, not RLP array") + return v + + +def decode_addr(v): + """decodes an address from serialization""" + if len(v) not in [0, 20]: + raise Exception("Serialized addresses must be empty or 20 bytes long!") + return encode_hex(v) + + +def decode_int(v): + """decodes and integer from serialization""" + if len(v) > 0 and (v[0] == b"\x00" or v[0] == 0): + raise Exception("No leading zero bytes allowed for integers") + return big_endian_to_int(v) + + +def decode_int256(v): + return big_endian_to_int(v) + + +def encode_bin(v): + """encodes a bytearray into serialization""" + return v + + +def encode_root(v): + """encodes a trie root into serialization""" + return v + + +def encode_int(v): + """encodes an integer into serialization""" + if not is_numeric(v) or v < 0 or v >= TT256: + raise Exception("Integer invalid or out of range: %r" % v) + return int_to_big_endian(v) + + +def encode_int256(v): + return zpad(int_to_big_endian(v), 256) + + +def scan_bin(v): + if 
v[:2] in ("0x", b"0x"): + return decode_hex(v[2:]) + else: + return decode_hex(v) + + +def scan_int(v): + if v[:2] in ("0x", b"0x"): + return big_endian_to_int(decode_hex(v[2:])) + else: + return int(v) + + +# Decoding from RLP serialization +decoders = { + "bin": decode_bin, + "addr": decode_addr, + "int": decode_int, + "int256b": decode_int256, +} + +# Encoding to RLP serialization +encoders = { + "bin": encode_bin, + "int": encode_int, + "trie_root": encode_root, + "int256b": encode_int256, +} + +# Encoding to printable format +printers = { + "bin": lambda v: "0x" + encode_hex(v), + "addr": lambda v: v, + "int": lambda v: to_string(v), + "trie_root": lambda v: encode_hex(v), + "int256b": lambda x: encode_hex(zpad(encode_int256(x), 256)), +} + +# Decoding from printable format +scanners = { + "bin": scan_bin, + "addr": lambda x: x[2:] if x[:2] in (b"0x", "0x") else x, + "int": scan_int, + "trie_root": lambda x: scan_bin, + "int256b": lambda x: big_endian_to_int(decode_hex(x)), +} + + +def int_to_hex(x): + o = encode_hex(encode_int(x)) + return "0x" + (o[1:] if (len(o) > 0 and o[0] == b"0") else o) + + +def remove_0x_head(s): + return s[2:] if s[:2] in (b"0x", "0x") else s + + +def parse_as_bin(s): + return decode_hex(s[2:] if s[:2] == "0x" else s) + + +def parse_as_int(s): + return s if is_numeric(s) else int( + "0" + s[2:], 16) if s[:2] == "0x" else int(s) + + +def print_func_call(ignore_first_arg=False, max_call_number=100): + """utility function to facilitate debug, it will print input args before + function call, and print return value after function call + + usage: + + @print_func_call + def some_func_to_be_debu(): + pass + + :param ignore_first_arg: whether print the first arg or not. + useful when ignore the `self` parameter of an object method call + """ + from functools import wraps + + def display(x): + x = to_string(x) + try: + x.decode("ascii") + except BaseException: + return "NON_PRINTABLE" + return x + + local = {"call_number": 0} + + def inner(f): + @wraps(f) + def wrapper(*args, **kwargs): + local["call_number"] += 1 + tmp_args = args[1:] if ignore_first_arg and len(args) else args + this_call_number = local["call_number"] + print( + ( + "{0}#{1} args: {2}, {3}".format( + f.__name__, + this_call_number, + ", ".join([display(x) for x in tmp_args]), + ", ".join( + display(key) + "=" + to_string(value) + for key, value in kwargs.items() + ), + ) + ) + ) + res = f(*args, **kwargs) + print( + ( + "{0}#{1} return: {2}".format( + f.__name__, this_call_number, display(res) + ) + ) + ) + + if local["call_number"] > 100: + raise Exception("Touch max call number!") + return res + + return wrapper + + return inner + + +def dump_state(trie): + res = "" + for k, v in list(trie.to_dict().items()): + res += "%r:%r\n" % (encode_hex(k), encode_hex(v)) + return res + + +class Denoms: + def __init__(self): + self.wei = 1 + self.babbage = 10**3 + self.ada = 10**3 + self.kwei = 10**3 + self.lovelace = 10**6 + self.mwei = 10**6 + self.shannon = 10**9 + self.gwei = 10**9 + self.szabo = 10**12 + self.finney = 10**15 + self.mether = 10**15 + self.ether = 10**18 + self.turing = 2**256 - 1 + + +denoms = Denoms() + +address = Binary.fixed_length(20, allow_empty=True) +int20 = BigEndianInt(20) +int32 = BigEndianInt(32) +int256 = BigEndianInt(256) +hash32 = Binary.fixed_length(32) +hash20 = Binary.fixed_length(20) +trie_root = Binary.fixed_length(32, allow_empty=True) + + +class bcolors: + HEADER = "\033[95m" + OKBLUE = "\033[94m" + OKGREEN = "\033[92m" + WARNING = "\033[91m" + FAIL = "\033[91m" + 
ENDC = "\033[0m" + BOLD = "\033[1m" + UNDERLINE = "\033[4m" diff --git a/tests/utility/simple_rpc_proxy.py b/tests/utility/simple_rpc_proxy.py new file mode 100644 index 0000000..26db293 --- /dev/null +++ b/tests/utility/simple_rpc_proxy.py @@ -0,0 +1,31 @@ +from jsonrpcclient import request, parse, Ok +import requests + + +class SimpleRpcProxy: + def __init__(self, url, timeout=60): + self.url = url + self.timeout = timeout + + def __getattr__(self, name): + return RpcCaller(self.url, name, self.timeout) + + +class RpcCaller: + def __init__(self, url, method, timeout): + self.url = url + self.method = method + self.timeout = timeout + + def __call__(self, *args, **argsn): + r = request(self.method, *args) + try: + response = requests.post(self.url, json=r, timeout=self.timeout) + parsed = parse(response.json()) + if isinstance(parsed, Ok): + return parsed.result + else: + print(parsed) + except Exception as ex: + print(ex) + return None diff --git a/tests/utility/submission.py b/tests/utility/submission.py new file mode 100644 index 0000000..4a6b81c --- /dev/null +++ b/tests/utility/submission.py @@ -0,0 +1,226 @@ +import base64 +from math import log2 +from utility.merkle_tree import add_0x_prefix, Leaf, MerkleTree + + +ENTRY_SIZE = 256 +PORA_CHUNK_SIZE = 1024 + + +def log2_pow2(n): + return int(log2(((n ^ (n - 1)) >> 1) + 1)) + + +def next_pow2(input): + x = input + x -= 1 + x |= x >> 16 + x |= x >> 8 + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + x += 1 + + return x + + +def bytes_to_entries(size_bytes): + if size_bytes % ENTRY_SIZE == 0: + return size_bytes // ENTRY_SIZE + else: + return size_bytes // ENTRY_SIZE + 1 + + +def create_submission(data, tags=b""): + submission = [] + submission.append(len(data)) + submission.append(tags) + submission.append([]) + + offset = 0 + nodes = [] + for chunks in split_nodes(len(data)): + node_hash = create_node(data, offset, chunks) + nodes.append(node_hash) + + height = int(log2(chunks)) + submission[2].append( + [add_0x_prefix(node_hash.decode("utf-8")), height]) + offset += chunks * ENTRY_SIZE + + root_hash = nodes[-1] + for i in range(len(nodes) - 2, -1, -1): + tree = MerkleTree() + tree.add_leaf(Leaf(nodes[i])) + tree.add_leaf(Leaf(root_hash)) + root_hash = tree.get_root_hash() + + return submission, add_0x_prefix(root_hash.decode("utf-8")) + + +def split_nodes(data_len): + nodes = [] + + chunks = bytes_to_entries(data_len) + padded_chunks, chunks_next_pow2 = compute_padded_size(chunks) + next_chunk_size = chunks_next_pow2 + + while padded_chunks > 0: + if padded_chunks >= next_chunk_size: + padded_chunks -= next_chunk_size + nodes.append(next_chunk_size) + + next_chunk_size >>= 1 + + return nodes + + +def compute_padded_size(chunk_len): + chunks_next_pow2 = next_pow2(chunk_len) + + if chunks_next_pow2 == chunk_len: + return chunks_next_pow2, chunks_next_pow2 + + min_chunk = 1 if chunks_next_pow2 < 16 else chunks_next_pow2 // 16 + padded_chunks = ((chunk_len - 1) // min_chunk + 1) * min_chunk + + return padded_chunks, chunks_next_pow2 + + +def create_node(data, offset, chunks): + batch = chunks + if chunks > PORA_CHUNK_SIZE: + batch = PORA_CHUNK_SIZE + + return create_segment_node( + data, offset, ENTRY_SIZE * batch, ENTRY_SIZE * chunks) + + +def create_segment_node(data, offset, batch, size): + tree = MerkleTree() + i = offset + n = len(data) + while i < offset + size: + start = i + end = min(offset + size, i + batch) + + if start >= n: + tree.add_leaf(Leaf(segment_root(b"\x00" * (end - start)))) + elif end > n: + tree.add_leaf( + 
Leaf(segment_root(data[start:] + b"\x00" * (end - n)))) + else: + tree.add_leaf(Leaf(segment_root(data[start:end]))) + + i += batch + + return tree.get_root_hash() + + +def segment_root(chunks): + data_len = len(chunks) + if data_len == 0: + return b"\x00" * 32 + + tree = MerkleTree() + for i in range(0, data_len, ENTRY_SIZE): + tree.encrypt(chunks[i: i + ENTRY_SIZE]) + + return tree.get_root_hash() + + +def generate_merkle_tree(data): + chunks = bytes_to_entries(len(data)) + padded_chunks, _ = compute_padded_size(chunks) + + tree = MerkleTree() + for i in range(padded_chunks): + if i * ENTRY_SIZE > len(data): + tree.encrypt(b"\x00" * ENTRY_SIZE) + elif (i + 1) * ENTRY_SIZE > len(data): + tree.encrypt( + data[i * ENTRY_SIZE:] + b"\x00" * + ((i + 1) * ENTRY_SIZE - len(data)) + ) + else: + tree.encrypt(data[i * ENTRY_SIZE: (i + 1) * ENTRY_SIZE]) + + return tree + + +def generate_merkle_tree_by_batch(data): + chunks = bytes_to_entries(len(data)) + padded_chunks, _ = compute_padded_size(chunks) + + tree = MerkleTree() + for i in range(0, padded_chunks, PORA_CHUNK_SIZE): + if i * ENTRY_SIZE >= len(data): + tree.add_leaf( + Leaf( + segment_root( + b"\x00" * ENTRY_SIZE * + min(PORA_CHUNK_SIZE, padded_chunks - i) + ) + ) + ) + elif (i + PORA_CHUNK_SIZE) * ENTRY_SIZE > len(data): + tree.add_leaf( + Leaf( + segment_root( + data[i * ENTRY_SIZE:] + + b"\x00" + * ( + min(padded_chunks, i + PORA_CHUNK_SIZE) * ENTRY_SIZE + - len(data) + ) + ) + ) + ) + else: + tree.add_leaf( + Leaf( + segment_root( + data[i * + ENTRY_SIZE: (i + PORA_CHUNK_SIZE) * ENTRY_SIZE] + ) + ) + ) + + return tree, add_0x_prefix(tree.decode_value(tree.get_root_hash())) + + +def submit_data(client, data): + tree, root_hash = generate_merkle_tree_by_batch(data) + chunks = bytes_to_entries(len(data)) + + segments = [] + idx = 0 + while idx * PORA_CHUNK_SIZE < chunks: + proof = tree.proof_at(idx) + + tmp = ( + data[ + idx + * ENTRY_SIZE + * PORA_CHUNK_SIZE: (idx + 1) + * ENTRY_SIZE + * PORA_CHUNK_SIZE + ] + if len(data) >= (idx + 1) * PORA_CHUNK_SIZE * ENTRY_SIZE + else data[idx * ENTRY_SIZE * PORA_CHUNK_SIZE:] + + b"\x00" * (chunks * ENTRY_SIZE - len(data)) + ) + + segment = { + "root": root_hash, + "data": base64.b64encode(tmp).decode("utf-8"), + "index": idx, + "proof": proof, + "fileSize": len(data), + } + + client.zgs_upload_segment(segment) + segments.append(segment) + idx += 1 + + return segments diff --git a/tests/utility/utils.py b/tests/utility/utils.py new file mode 100644 index 0000000..99c82a1 --- /dev/null +++ b/tests/utility/utils.py @@ -0,0 +1,143 @@ +import base64 +import inspect +import os +import platform +import sha3 +import time + +from config.node_config import ZGS_CONFIG +from eth_utils import encode_hex + + +class PortMin: + # Must be initialized with a unique integer for each process + n = 11000 + + +MAX_NODES = 100 + + +def p2p_port(n): + assert n <= MAX_NODES + return PortMin.n + n + + +def rpc_port(n): + return PortMin.n + MAX_NODES + n + + +def blockchain_p2p_port(n): + return PortMin.n + 2 * MAX_NODES + n + + +def blockchain_rpc_port(n): + return PortMin.n + 3 * MAX_NODES + n + + +def blockchain_rpc_port_core(n): + return PortMin.n + 4 * MAX_NODES + n + + +def kv_rpc_port(n): + return PortMin.n + 5 * MAX_NODES + n + + +def wait_until(predicate, *, attempts=float("inf"), + timeout=float("inf"), lock=None): + if attempts == float("inf") and timeout == float("inf"): + timeout = 60 + attempt = 0 + time_end = time.time() + timeout + + while attempt < attempts and time.time() < time_end: + if lock: + with lock: + 
if predicate(): + return + else: + if predicate(): + return + attempt += 1 + time.sleep(0.5) + + # Print the cause of the timeout + predicate_source = inspect.getsourcelines(predicate) + if attempt >= attempts: + raise AssertionError( + "Predicate {} not true after {} attempts".format( + predicate_source, attempts) + ) + elif time.time() >= time_end: + raise AssertionError( + "Predicate {} not true after {} seconds".format( + predicate_source, timeout) + ) + raise RuntimeError("Unreachable") + + +def is_windows_platform(): + return platform.system().lower() == "windows" + + +def initialize_config(config_path, config_parameters): + with open(config_path, "w") as f: + for k in config_parameters: + value = config_parameters[k] + if isinstance(value, str) and not ( + value.startswith('"') or value.startswith("'") + ): + if value == "true" or value == "false": + value = f"{value}" + else: + value = f'"{value}"' + + f.write(f"{k}={value}\n") + + +def initialize_zgs_config(data_dir, config_parameters): + config_path = os.path.join(data_dir, "config.toml") + log_config_path = os.path.join(data_dir, "log_config") + local_conf = ZGS_CONFIG.copy() + local_conf.update(config_parameters) + initialize_config(config_path, local_conf) + with open(log_config_path, "w") as f: + f.write("trace") + + +def create_proof_and_segment(chunk_data, data_root, index=0): + proof = { + "lemma": [data_root], + "path": [], + } + + segment = { + "root": data_root, + "data": base64.b64encode(chunk_data).decode("utf-8"), + "index": index, + "proof": proof, + } + + return proof, segment + + +def assert_equal(thing1, thing2, *args): + if thing1 != thing2 or any(thing1 != arg for arg in args): + raise AssertionError( + "not(%s)" % " == ".join(str(arg) + for arg in (thing1, thing2) + args) + ) + + +def assert_ne(thing1, thing2): + if thing1 == thing2: + raise AssertionError("not(%s)" % " != ".join([thing1, thing2])) + + +def assert_greater_than(thing1, thing2): + if thing1 <= thing2: + raise AssertionError("%s <= %s" % (str(thing1), str(thing2))) + + +def assert_greater_than_or_equal(thing1, thing2): + if thing1 < thing2: + raise AssertionError("%s < %s" % (str(thing1), str(thing2))) diff --git a/version-meld/eth2_ssz/.cargo-ok b/version-meld/eth2_ssz/.cargo-ok new file mode 100644 index 0000000..b5754e2 --- /dev/null +++ b/version-meld/eth2_ssz/.cargo-ok @@ -0,0 +1 @@ +ok \ No newline at end of file diff --git a/version-meld/eth2_ssz/.cargo_vcs_info.json b/version-meld/eth2_ssz/.cargo_vcs_info.json new file mode 100644 index 0000000..6292b69 --- /dev/null +++ b/version-meld/eth2_ssz/.cargo_vcs_info.json @@ -0,0 +1,5 @@ +{ + "git": { + "sha1": "0b319d492695daf11cd8fc0712b602b63ee5ed50" + } +} diff --git a/version-meld/eth2_ssz/Cargo.lock b/version-meld/eth2_ssz/Cargo.lock new file mode 100644 index 0000000..f94addf --- /dev/null +++ b/version-meld/eth2_ssz/Cargo.lock @@ -0,0 +1,473 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "arbitrary" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db55d72333851e17d572bec876e390cd3b11eb1ef53ae821dd9f3b653d2b4569" + +[[package]] +name = "arbitrary" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "510c76ecefdceada737ea728f4f9a84bd2e1ef29f1ba555e560940fe279954de" + +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + +[[package]] +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "byte-slice-cast" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c751592b77c499e7bce34d99d67c2c11bdc0574e9a488ddade14150a4698" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "bytes" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "darling" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "757c0ded2af11d8e739c4daea1ac623dd1624b06c844cf3f5a39f1bdbd99bb12" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c34d8efb62d0c2d7f60ece80f75e5c63c1588ba68032740494b0b9a996466e3" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade7bff147130fe5e6d39f089c6bd49ec0250f35d70b2eebf72afdfc919f15cc" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "eth2_ssz" +version = "0.4.0" +dependencies = [ + "eth2_ssz_derive", + "ethereum-types", + "smallvec", +] + +[[package]] +name = "eth2_ssz_derive" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "635b86d2c941bb71e7419a571e1763d65c93e51a1bafc400352e3bef6ff59fc9" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ethbloom" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8" +dependencies = [ + "crunchy", + "fixed-hash", + "impl-rlp", + "impl-serde", + "tiny-keccak", +] + +[[package]] +name = "ethereum-types" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" +dependencies = [ + "ethbloom", + "fixed-hash", + "impl-rlp", + "impl-serde", + "primitive-types", + "uint", +] + +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "arbitrary 0.4.7", + "byteorder", + "rand", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + +[[package]] +name = "getrandom" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "impl-codec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-rlp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" +dependencies = [ + "rlp", +] + +[[package]] +name = "impl-serde" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5dacb10c5b3bb92d46ba347505a9041e676bb20ad220101326bffb0c93031ee" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "libc" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8521a1b57e76b1ec69af7599e75e38e7b7fad6610f037db8c79b127201b5d119" + +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" + +[[package]] +name = 
"primitive-types" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-rlp", + "impl-serde", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" +dependencies = [ + "thiserror", + "toml", +] + +[[package]] +name = "proc-macro2" +version = "1.0.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + +[[package]] +name = "rand" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rlp" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "999508abb0ae792aabed2460c45b89106d97fe4adac593bdaef433c2605847b5" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "serde" +version = "1.0.130" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" + +[[package]] +name = "smallvec" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "syn" +version = "1.0.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "thiserror" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "toml" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +dependencies = [ + "serde", +] + +[[package]] +name = "uint" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6470ab50f482bde894a037a57064480a246dbfdd5960bd65a44824693f08da5f" +dependencies = [ + "arbitrary 1.0.3", + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + +[[package]] +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" + +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" diff --git a/version-meld/eth2_ssz/Cargo.toml b/version-meld/eth2_ssz/Cargo.toml new file mode 100644 index 0000000..06596b9 --- /dev/null +++ b/version-meld/eth2_ssz/Cargo.toml @@ -0,0 +1,31 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +name = "eth2_ssz" +version = "0.4.0" +authors = ["Paul Hauner "] +description = "SimpleSerialize (SSZ) as used in Ethereum 2.0" +license = "Apache-2.0" + +[lib] +name = "ssz" +[dependencies.ethereum-types] +version = "0.14.1" + +[dependencies.smallvec] +version = "1.6.1" +[dev-dependencies.eth2_ssz_derive] +version = "0.3.0" + +[features] +arbitrary = ["ethereum-types/arbitrary"] diff --git a/version-meld/eth2_ssz/Cargo.toml.orig b/version-meld/eth2_ssz/Cargo.toml.orig new file mode 100644 index 0000000..853fd72 --- /dev/null +++ b/version-meld/eth2_ssz/Cargo.toml.orig @@ -0,0 +1,20 @@ +[package] +name = "eth2_ssz" +version = "0.4.0" +authors = ["Paul Hauner "] +edition = "2018" +description = "SimpleSerialize (SSZ) as used in Ethereum 2.0" +license = "Apache-2.0" + +[lib] +name = "ssz" + +[dev-dependencies] +eth2_ssz_derive = "0.3.0" + +[dependencies] +ethereum-types = "0.12.1" +smallvec = "1.6.1" + +[features] +arbitrary = ["ethereum-types/arbitrary"] diff --git a/version-meld/eth2_ssz/README.md b/version-meld/eth2_ssz/README.md new file mode 100644 index 0000000..04603cd --- /dev/null +++ b/version-meld/eth2_ssz/README.md @@ -0,0 +1,3 @@ +# simpleserialize (ssz) + +[](https://crates.io/crates/eth2_ssz) diff --git a/version-meld/eth2_ssz/examples/large_list.rs b/version-meld/eth2_ssz/examples/large_list.rs new file mode 100644 index 0000000..a1b10ab --- /dev/null +++ b/version-meld/eth2_ssz/examples/large_list.rs @@ -0,0 +1,15 @@ +//! Encode and decode a list many times. +//! +//! Useful for `cargo flamegraph`. + +use ssz::{Decode, Encode}; + +fn main() { + let vec: Vec = vec![4242; 8196]; + + let output: Vec> = (0..40_000) + .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) + .collect(); + + println!("{}", output.len()); +} diff --git a/version-meld/eth2_ssz/examples/large_list_of_structs.rs b/version-meld/eth2_ssz/examples/large_list_of_structs.rs new file mode 100644 index 0000000..2aaaf9b --- /dev/null +++ b/version-meld/eth2_ssz/examples/large_list_of_structs.rs @@ -0,0 +1,31 @@ +//! Encode and decode a list many times. +//! +//! Useful for `cargo flamegraph`. 
+ +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; + +#[derive(Clone, Copy, Encode, Decode)] +pub struct FixedLen { + a: u64, + b: u64, + c: u64, + d: u64, +} + +fn main() { + let fixed_len = FixedLen { + a: 42, + b: 42, + c: 42, + d: 42, + }; + + let vec: Vec = vec![fixed_len; 8196]; + + let output: Vec> = (0..40_000) + .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) + .collect(); + + println!("{}", output.len()); +} diff --git a/version-meld/eth2_ssz/examples/struct_definition.rs b/version-meld/eth2_ssz/examples/struct_definition.rs new file mode 100644 index 0000000..123da12 --- /dev/null +++ b/version-meld/eth2_ssz/examples/struct_definition.rs @@ -0,0 +1,73 @@ +use ssz::{Decode, DecodeError, Encode, SszDecoderBuilder, SszEncoder}; + +#[derive(Debug, PartialEq)] +pub struct Foo { + a: u16, + b: Vec, + c: u16, +} + +impl Encode for Foo { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() && as Encode>::is_ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + ::ssz_fixed_len() + + ssz::BYTES_PER_LENGTH_OFFSET + + ::ssz_fixed_len() + + self.b.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + let offset = ::ssz_fixed_len() + + as Encode>::ssz_fixed_len() + + ::ssz_fixed_len(); + + let mut encoder = SszEncoder::container(buf, offset); + + encoder.append(&self.a); + encoder.append(&self.b); + encoder.append(&self.c); + + encoder.finalize(); + } +} + +impl Decode for Foo { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() && as Decode>::is_ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let mut builder = SszDecoderBuilder::new(bytes); + + builder.register_type::()?; + builder.register_type::>()?; + builder.register_type::()?; + + let mut decoder = builder.build()?; + + Ok(Self { + a: decoder.decode_next()?, + b: decoder.decode_next()?, + c: decoder.decode_next()?, + }) + } +} + +fn main() { + let my_foo = Foo { + a: 42, + b: vec![0, 1, 2, 3], + c: 11, + }; + + let bytes = vec![42, 0, 8, 0, 0, 0, 11, 0, 0, 1, 2, 3]; + + assert_eq!(my_foo.as_ssz_bytes(), bytes); + + let decoded_foo = Foo::from_ssz_bytes(&bytes).unwrap(); + + assert_eq!(my_foo, decoded_foo); +} diff --git a/version-meld/eth2_ssz/src/decode.rs b/version-meld/eth2_ssz/src/decode.rs new file mode 100644 index 0000000..1c4c04f --- /dev/null +++ b/version-meld/eth2_ssz/src/decode.rs @@ -0,0 +1,372 @@ +use super::*; +use smallvec::{smallvec, SmallVec}; +use std::cmp::Ordering; + +type SmallVec8 = SmallVec<[T; 8]>; + +pub mod impls; + +/// Returned when SSZ decoding fails. +#[derive(Debug, PartialEq, Clone)] +pub enum DecodeError { + /// The bytes supplied were too short to be decoded into the specified type. + InvalidByteLength { len: usize, expected: usize }, + /// The given bytes were too short to be read as a length prefix. + InvalidLengthPrefix { len: usize, expected: usize }, + /// A length offset pointed to a byte that was out-of-bounds (OOB). + /// + /// A bytes may be OOB for the following reasons: + /// + /// - It is `>= bytes.len()`. + /// - When decoding variable length items, the 1st offset points "backwards" into the fixed + /// length items (i.e., `length[0] < BYTES_PER_LENGTH_OFFSET`). + /// - When decoding variable-length items, the `n`'th offset was less than the `n-1`'th offset. + OutOfBoundsByte { i: usize }, + /// An offset points “backwards” into the fixed-bytes portion of the message, essentially + /// double-decoding bytes that will also be decoded as fixed-length. 
+ /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#1-Offset-into-fixed-portion + OffsetIntoFixedPortion(usize), + /// The first offset does not point to the byte that follows the fixed byte portion, + /// essentially skipping a variable-length byte. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#2-Skip-first-variable-byte + OffsetSkipsVariableBytes(usize), + /// An offset points to bytes prior to the previous offset. Depending on how you look at it, + /// this either double-decodes bytes or makes the first offset a negative-length. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#3-Offsets-are-decreasing + OffsetsAreDecreasing(usize), + /// An offset references byte indices that do not exist in the source bytes. + /// + /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#4-Offsets-are-out-of-bounds + OffsetOutOfBounds(usize), + /// A variable-length list does not have a fixed portion that is cleanly divisible by + /// `BYTES_PER_LENGTH_OFFSET`. + InvalidListFixedBytesLen(usize), + /// Some item has a `ssz_fixed_len` of zero. This is illegal. + ZeroLengthItem, + /// The given bytes were invalid for some application-level reason. + BytesInvalid(String), + /// The given union selector is out of bounds. + UnionSelectorInvalid(u8), +} + +/// Performs checks on the `offset` based upon the other parameters provided. +/// +/// ## Detail +/// +/// - `offset`: the offset bytes (e.g., result of `read_offset(..)`). +/// - `previous_offset`: unless this is the first offset in the SSZ object, the value of the +/// previously-read offset. Used to ensure offsets are not decreasing. +/// - `num_bytes`: the total number of bytes in the SSZ object. Used to ensure the offset is not +/// out of bounds. +/// - `num_fixed_bytes`: the number of fixed-bytes in the struct, if it is known. Used to ensure +/// that the first offset doesn't skip any variable bytes. +/// +/// ## References +/// +/// The checks here are derived from this document: +/// +/// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view +pub fn sanitize_offset( + offset: usize, + previous_offset: Option, + num_bytes: usize, + num_fixed_bytes: Option, +) -> Result { + if num_fixed_bytes.map_or(false, |fixed_bytes| offset < fixed_bytes) { + Err(DecodeError::OffsetIntoFixedPortion(offset)) + } else if previous_offset.is_none() + && num_fixed_bytes.map_or(false, |fixed_bytes| offset != fixed_bytes) + { + Err(DecodeError::OffsetSkipsVariableBytes(offset)) + } else if offset > num_bytes { + Err(DecodeError::OffsetOutOfBounds(offset)) + } else if previous_offset.map_or(false, |prev| prev > offset) { + Err(DecodeError::OffsetsAreDecreasing(offset)) + } else { + Ok(offset) + } +} + +/// Provides SSZ decoding (de-serialization) via the `from_ssz_bytes(&bytes)` method. +/// +/// See `examples/` for manual implementations or the crate root for implementations using +/// `#[derive(Decode)]`. +pub trait Decode: Sized { + /// Returns `true` if this object has a fixed-length. + /// + /// I.e., there are no variable length items in this object or any of it's contained objects. + fn is_ssz_fixed_len() -> bool; + + /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. + /// + /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length + /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which + /// represents their length. 
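+    ///
+    /// For example, `u64` is fixed-length and returns `8`, whereas `Vec<u64>` is variable-length
+    /// and keeps this default of `BYTES_PER_LENGTH_OFFSET` (4), since only its offset occupies
+    /// the fixed-length portion.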
+ fn ssz_fixed_len() -> usize { + BYTES_PER_LENGTH_OFFSET + } + + /// Attempts to decode `Self` from `bytes`, returning a `DecodeError` on failure. + /// + /// The supplied bytes must be the exact length required to decode `Self`, excess bytes will + /// result in an error. + fn from_ssz_bytes(bytes: &[u8]) -> Result; +} + +#[derive(Copy, Clone, Debug)] +pub struct Offset { + position: usize, + offset: usize, +} + +/// Builds an `SszDecoder`. +/// +/// The purpose of this struct is to split some SSZ bytes into individual slices. The builder is +/// then converted into a `SszDecoder` which decodes those values into object instances. +/// +/// See [`SszDecoder`](struct.SszDecoder.html) for usage examples. +pub struct SszDecoderBuilder<'a> { + bytes: &'a [u8], + items: SmallVec8<&'a [u8]>, + offsets: SmallVec8, + items_index: usize, +} + +impl<'a> SszDecoderBuilder<'a> { + /// Instantiate a new builder that should build a `SszDecoder` over the given `bytes` which + /// are assumed to be the SSZ encoding of some object. + pub fn new(bytes: &'a [u8]) -> Self { + Self { + bytes, + items: smallvec![], + offsets: smallvec![], + items_index: 0, + } + } + + /// Registers a variable-length object as the next item in `bytes`, without specifying the + /// actual type. + /// + /// ## Notes + /// + /// Use of this function is generally discouraged since it cannot detect if some type changes + /// from variable to fixed length. + /// + /// Use `Self::register_type` wherever possible. + pub fn register_anonymous_variable_length_item(&mut self) -> Result<(), DecodeError> { + struct Anonymous; + + impl Decode for Anonymous { + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(_bytes: &[u8]) -> Result { + unreachable!("Anonymous should never be decoded") + } + } + + self.register_type::() + } + + /// Declares that some type `T` is the next item in `bytes`. + pub fn register_type(&mut self) -> Result<(), DecodeError> { + self.register_type_parameterized(T::is_ssz_fixed_len(), T::ssz_fixed_len()) + } + + /// Declares that a type with the given parameters is the next item in `bytes`. + pub fn register_type_parameterized( + &mut self, + is_ssz_fixed_len: bool, + ssz_fixed_len: usize, + ) -> Result<(), DecodeError> { + if is_ssz_fixed_len { + let start = self.items_index; + self.items_index += ssz_fixed_len; + + let slice = self.bytes.get(start..self.items_index).ok_or_else(|| { + DecodeError::InvalidByteLength { + len: self.bytes.len(), + expected: self.items_index, + } + })?; + + self.items.push(slice); + } else { + self.offsets.push(Offset { + position: self.items.len(), + offset: sanitize_offset( + read_offset(&self.bytes[self.items_index..])?, + self.offsets.last().map(|o| o.offset), + self.bytes.len(), + None, + )?, + }); + + // Push an empty slice into items; it will be replaced later. + self.items.push(&[]); + + self.items_index += BYTES_PER_LENGTH_OFFSET; + } + + Ok(()) + } + + fn finalize(&mut self) -> Result<(), DecodeError> { + if let Some(first_offset) = self.offsets.first().map(|o| o.offset) { + // Check to ensure the first offset points to the byte immediately following the + // fixed-length bytes. + match first_offset.cmp(&self.items_index) { + Ordering::Less => return Err(DecodeError::OffsetIntoFixedPortion(first_offset)), + Ordering::Greater => { + return Err(DecodeError::OffsetSkipsVariableBytes(first_offset)) + } + Ordering::Equal => (), + } + + // Iterate through each pair of offsets, grabbing the slice between each of the offsets. 
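+            // E.g., with offsets `[8, 12]` over 14 bytes, the variable items become
+            // `bytes[8..12]` and `bytes[12..]`.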
+ for pair in self.offsets.windows(2) { + let a = pair[0]; + let b = pair[1]; + + self.items[a.position] = &self.bytes[a.offset..b.offset]; + } + + // Handle the last offset, pushing a slice from it's start through to the end of + // `self.bytes`. + if let Some(last) = self.offsets.last() { + self.items[last.position] = &self.bytes[last.offset..] + } + } else { + // If the container is fixed-length, ensure there are no excess bytes. + if self.items_index != self.bytes.len() { + return Err(DecodeError::InvalidByteLength { + len: self.bytes.len(), + expected: self.items_index, + }); + } + } + + Ok(()) + } + + /// Finalizes the builder, returning a `SszDecoder` that may be used to instantiate objects. + pub fn build(mut self) -> Result, DecodeError> { + self.finalize()?; + + Ok(SszDecoder { items: self.items }) + } +} + +/// Decodes some slices of SSZ into object instances. Should be instantiated using +/// [`SszDecoderBuilder`](struct.SszDecoderBuilder.html). +/// +/// ## Example +/// +/// ```rust +/// use ssz_derive::{Encode, Decode}; +/// use ssz::{Decode, Encode, SszDecoder, SszDecoderBuilder}; +/// +/// #[derive(PartialEq, Debug, Encode, Decode)] +/// struct Foo { +/// a: u64, +/// b: Vec, +/// } +/// +/// fn ssz_decoding_example() { +/// let foo = Foo { +/// a: 42, +/// b: vec![1, 3, 3, 7] +/// }; +/// +/// let bytes = foo.as_ssz_bytes(); +/// +/// let mut builder = SszDecoderBuilder::new(&bytes); +/// +/// builder.register_type::().unwrap(); +/// builder.register_type::>().unwrap(); +/// +/// let mut decoder = builder.build().unwrap(); +/// +/// let decoded_foo = Foo { +/// a: decoder.decode_next().unwrap(), +/// b: decoder.decode_next().unwrap(), +/// }; +/// +/// assert_eq!(foo, decoded_foo); +/// } +/// +/// ``` +pub struct SszDecoder<'a> { + items: SmallVec8<&'a [u8]>, +} + +impl<'a> SszDecoder<'a> { + /// Decodes the next item. + /// + /// # Panics + /// + /// Panics when attempting to decode more items than actually exist. + pub fn decode_next(&mut self) -> Result { + self.decode_next_with(|slice| T::from_ssz_bytes(slice)) + } + + /// Decodes the next item using the provided function. + pub fn decode_next_with(&mut self, f: F) -> Result + where + F: FnOnce(&'a [u8]) -> Result, + { + f(self.items.remove(0)) + } +} + +/// Takes `bytes`, assuming it is the encoding for a SSZ union, and returns the union-selector and +/// the body (trailing bytes). +/// +/// ## Errors +/// +/// Returns an error if: +/// +/// - `bytes` is empty. +/// - the union selector is not a valid value (i.e., larger than the maximum number of variants. +pub fn split_union_bytes(bytes: &[u8]) -> Result<(UnionSelector, &[u8]), DecodeError> { + let selector = bytes + .first() + .copied() + .ok_or(DecodeError::OutOfBoundsByte { i: 0 }) + .and_then(UnionSelector::new)?; + let body = bytes + .get(1..) + .ok_or(DecodeError::OutOfBoundsByte { i: 1 })?; + Ok((selector, body)) +} + +/// Reads a `BYTES_PER_LENGTH_OFFSET`-byte length from `bytes`, where `bytes.len() >= +/// BYTES_PER_LENGTH_OFFSET`. +pub fn read_offset(bytes: &[u8]) -> Result { + decode_offset(bytes.get(0..BYTES_PER_LENGTH_OFFSET).ok_or_else(|| { + DecodeError::InvalidLengthPrefix { + len: bytes.len(), + expected: BYTES_PER_LENGTH_OFFSET, + } + })?) +} + +/// Decode bytes as a little-endian usize, returning an `Err` if `bytes.len() != +/// BYTES_PER_LENGTH_OFFSET`. 
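+///
+/// For example, `&[8, 0, 0, 0]` decodes to `8`.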
+fn decode_offset(bytes: &[u8]) -> Result { + let len = bytes.len(); + let expected = BYTES_PER_LENGTH_OFFSET; + + if len != expected { + Err(DecodeError::InvalidLengthPrefix { len, expected }) + } else { + let mut array: [u8; BYTES_PER_LENGTH_OFFSET] = std::default::Default::default(); + array.clone_from_slice(bytes); + + Ok(u32::from_le_bytes(array) as usize) + } +} diff --git a/version-meld/eth2_ssz/src/decode/impls.rs b/version-meld/eth2_ssz/src/decode/impls.rs new file mode 100644 index 0000000..417d981 --- /dev/null +++ b/version-meld/eth2_ssz/src/decode/impls.rs @@ -0,0 +1,718 @@ +use super::*; +use core::num::NonZeroUsize; +use ethereum_types::{H256, U128, U256, H160}; +use smallvec::SmallVec; +use std::sync::Arc; + +macro_rules! impl_decodable_for_uint { + ($type: ident, $bit_size: expr) => { + impl Decode for $type { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + $bit_size / 8 + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let expected = ::ssz_fixed_len(); + + if len != expected { + Err(DecodeError::InvalidByteLength { len, expected }) + } else { + let mut array: [u8; $bit_size / 8] = std::default::Default::default(); + array.clone_from_slice(bytes); + + Ok(Self::from_le_bytes(array)) + } + } + } + }; +} + +impl_decodable_for_uint!(u8, 8); +impl_decodable_for_uint!(u16, 16); +impl_decodable_for_uint!(u32, 32); +impl_decodable_for_uint!(u64, 64); + +#[cfg(target_pointer_width = "32")] +impl_decodable_for_uint!(usize, 32); + +#[cfg(target_pointer_width = "64")] +impl_decodable_for_uint!(usize, 64); + +macro_rules! impl_decode_for_tuples { + ($( + $Tuple:ident { + $(($idx:tt) -> $T:ident)+ + } + )+) => { + $( + impl<$($T: Decode),+> Decode for ($($T,)+) { + fn is_ssz_fixed_len() -> bool { + $( + <$T as Decode>::is_ssz_fixed_len() && + )* + true + } + + fn ssz_fixed_len() -> usize { + if ::is_ssz_fixed_len() { + $( + <$T as Decode>::ssz_fixed_len() + + )* + 0 + } else { + BYTES_PER_LENGTH_OFFSET + } + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let mut builder = SszDecoderBuilder::new(bytes); + + $( + builder.register_type::<$T>()?; + )* + + let mut decoder = builder.build()?; + + Ok(($( + decoder.decode_next::<$T>()?, + )* + )) + } + } + )+ + } +} + +impl_decode_for_tuples! 
{ + Tuple2 { + (0) -> A + (1) -> B + } + Tuple3 { + (0) -> A + (1) -> B + (2) -> C + } + Tuple4 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + } + Tuple5 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + } + Tuple6 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + } + Tuple7 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + } + Tuple8 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + } + Tuple9 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + (8) -> I + } + Tuple10 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + (8) -> I + (9) -> J + } + Tuple11 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + (8) -> I + (9) -> J + (10) -> K + } + Tuple12 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + (8) -> I + (9) -> J + (10) -> K + (11) -> L + } +} + +impl Decode for bool { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 1 + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let expected = ::ssz_fixed_len(); + + if len != expected { + Err(DecodeError::InvalidByteLength { len, expected }) + } else { + match bytes[0] { + 0b0000_0000 => Ok(false), + 0b0000_0001 => Ok(true), + _ => Err(DecodeError::BytesInvalid(format!( + "Out-of-range for boolean: {}", + bytes[0] + ))), + } + } + } +} + +impl Decode for NonZeroUsize { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let x = usize::from_ssz_bytes(bytes)?; + + if x == 0 { + Err(DecodeError::BytesInvalid( + "NonZeroUsize cannot be zero.".to_string(), + )) + } else { + // `unwrap` is safe here as `NonZeroUsize::new()` succeeds if `x > 0` and this path + // never executes when `x == 0`. 
+ Ok(NonZeroUsize::new(x).unwrap()) + } + } +} + +impl Decode for Arc { + fn is_ssz_fixed_len() -> bool { + T::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + T::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + T::from_ssz_bytes(bytes).map(Arc::new) + } +} + +impl Decode for H256 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 32 + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let expected = ::ssz_fixed_len(); + + if len != expected { + Err(DecodeError::InvalidByteLength { len, expected }) + } else { + Ok(H256::from_slice(bytes)) + } + } +} + +impl Decode for H160 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 20 + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let expected = ::ssz_fixed_len(); + + if len != expected { + Err(DecodeError::InvalidByteLength { len, expected }) + } else { + Ok(H160::from_slice(bytes)) + } + } +} + +impl Decode for U256 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 32 + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let expected = ::ssz_fixed_len(); + + if len != expected { + Err(DecodeError::InvalidByteLength { len, expected }) + } else { + Ok(U256::from_little_endian(bytes)) + } + } +} + +impl Decode for U128 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 16 + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let expected = ::ssz_fixed_len(); + + if len != expected { + Err(DecodeError::InvalidByteLength { len, expected }) + } else { + Ok(U128::from_little_endian(bytes)) + } + } +} + +macro_rules! impl_decodable_for_u8_array { + ($len: expr) => { + impl Decode for [u8; $len] { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + $len + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let len = bytes.len(); + let expected = ::ssz_fixed_len(); + + if len != expected { + Err(DecodeError::InvalidByteLength { len, expected }) + } else { + let mut array: [u8; $len] = [0; $len]; + array.copy_from_slice(bytes); + + Ok(array) + } + } + } + }; +} + +impl_decodable_for_u8_array!(4); +impl_decodable_for_u8_array!(32); + +macro_rules! impl_for_vec { + ($type: ty, $max_len: expr) => { + impl Decode for $type { + fn is_ssz_fixed_len() -> bool { + false + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + if bytes.is_empty() { + Ok(vec![].into()) + } else if T::is_ssz_fixed_len() { + bytes + .chunks(T::ssz_fixed_len()) + .map(|chunk| T::from_ssz_bytes(chunk)) + .collect() + } else { + decode_list_of_variable_length_items(bytes, $max_len).map(|vec| vec.into()) + } + } + } + }; +} + +impl_for_vec!(Vec, None); +impl_for_vec!(SmallVec<[T; 1]>, Some(1)); +impl_for_vec!(SmallVec<[T; 2]>, Some(2)); +impl_for_vec!(SmallVec<[T; 3]>, Some(3)); +impl_for_vec!(SmallVec<[T; 4]>, Some(4)); +impl_for_vec!(SmallVec<[T; 5]>, Some(5)); +impl_for_vec!(SmallVec<[T; 6]>, Some(6)); +impl_for_vec!(SmallVec<[T; 7]>, Some(7)); +impl_for_vec!(SmallVec<[T; 8]>, Some(8)); + +/// Decodes `bytes` as if it were a list of variable-length items. +/// +/// The `ssz::SszDecoder` can also perform this functionality, however it it significantly faster +/// as it is optimized to read same-typed items whilst `ssz::SszDecoder` supports reading items of +/// differing types. 
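+///
+/// For example, `&[8, 0, 0, 0, 9, 0, 0, 0, 1, 2]` holds two offsets (8 and 9) followed by two
+/// single-byte payloads, and decodes as `vec![vec![1], vec![2]]` when the item type is `Vec<u8>`.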
+pub fn decode_list_of_variable_length_items( + bytes: &[u8], + max_len: Option, +) -> Result, DecodeError> { + if bytes.is_empty() { + return Ok(vec![]); + } + + let first_offset = read_offset(bytes)?; + sanitize_offset(first_offset, None, bytes.len(), Some(first_offset))?; + + if first_offset % BYTES_PER_LENGTH_OFFSET != 0 || first_offset < BYTES_PER_LENGTH_OFFSET { + return Err(DecodeError::InvalidListFixedBytesLen(first_offset)); + } + + let num_items = first_offset / BYTES_PER_LENGTH_OFFSET; + + if max_len.map_or(false, |max| num_items > max) { + return Err(DecodeError::BytesInvalid(format!( + "Variable length list of {} items exceeds maximum of {:?}", + num_items, max_len + ))); + } + + // Only initialize the vec with a capacity if a maximum length is provided. + // + // We assume that if a max length is provided then the application is able to handle an + // allocation of this size. + let mut values = if max_len.is_some() { + Vec::with_capacity(num_items) + } else { + vec![] + }; + + let mut offset = first_offset; + for i in 1..=num_items { + let slice_option = if i == num_items { + bytes.get(offset..) + } else { + let start = offset; + + let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; + offset = sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; + + bytes.get(start..offset) + }; + + let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?; + + values.push(T::from_ssz_bytes(slice)?); + } + + Ok(values) +} + +#[cfg(test)] +mod tests { + use super::*; + + // Note: decoding of valid bytes is generally tested "indirectly" in the `/tests` dir, by + // encoding then decoding the element. + + #[test] + fn invalid_u8_array_4() { + assert_eq!( + <[u8; 4]>::from_ssz_bytes(&[0; 3]), + Err(DecodeError::InvalidByteLength { + len: 3, + expected: 4 + }) + ); + + assert_eq!( + <[u8; 4]>::from_ssz_bytes(&[0; 5]), + Err(DecodeError::InvalidByteLength { + len: 5, + expected: 4 + }) + ); + } + + #[test] + fn invalid_bool() { + assert_eq!( + bool::from_ssz_bytes(&[0; 2]), + Err(DecodeError::InvalidByteLength { + len: 2, + expected: 1 + }) + ); + + assert_eq!( + bool::from_ssz_bytes(&[]), + Err(DecodeError::InvalidByteLength { + len: 0, + expected: 1 + }) + ); + + if let Err(DecodeError::BytesInvalid(_)) = bool::from_ssz_bytes(&[2]) { + // Success. 
+ } else { + panic!("Did not return error on invalid bool val") + } + } + + #[test] + fn invalid_h256() { + assert_eq!( + H256::from_ssz_bytes(&[0; 33]), + Err(DecodeError::InvalidByteLength { + len: 33, + expected: 32 + }) + ); + + assert_eq!( + H256::from_ssz_bytes(&[0; 31]), + Err(DecodeError::InvalidByteLength { + len: 31, + expected: 32 + }) + ); + } + + #[test] + fn empty_list() { + let vec: Vec> = vec![]; + let bytes = vec.as_ssz_bytes(); + assert!(bytes.is_empty()); + assert_eq!(Vec::from_ssz_bytes(&bytes), Ok(vec),); + } + + #[test] + fn first_length_points_backwards() { + assert_eq!( + >>::from_ssz_bytes(&[0, 0, 0, 0]), + Err(DecodeError::InvalidListFixedBytesLen(0)) + ); + + assert_eq!( + >>::from_ssz_bytes(&[1, 0, 0, 0]), + Err(DecodeError::InvalidListFixedBytesLen(1)) + ); + + assert_eq!( + >>::from_ssz_bytes(&[2, 0, 0, 0]), + Err(DecodeError::InvalidListFixedBytesLen(2)) + ); + + assert_eq!( + >>::from_ssz_bytes(&[3, 0, 0, 0]), + Err(DecodeError::InvalidListFixedBytesLen(3)) + ); + } + + #[test] + fn lengths_are_decreasing() { + assert_eq!( + >>::from_ssz_bytes(&[12, 0, 0, 0, 14, 0, 0, 0, 12, 0, 0, 0, 1, 0, 1, 0]), + Err(DecodeError::OffsetsAreDecreasing(12)) + ); + } + + #[test] + fn awkward_fixed_length_portion() { + assert_eq!( + >>::from_ssz_bytes(&[10, 0, 0, 0, 10, 0, 0, 0, 0, 0]), + Err(DecodeError::InvalidListFixedBytesLen(10)) + ); + } + + #[test] + fn length_out_of_bounds() { + assert_eq!( + >>::from_ssz_bytes(&[5, 0, 0, 0]), + Err(DecodeError::OffsetOutOfBounds(5)) + ); + assert_eq!( + >>::from_ssz_bytes(&[8, 0, 0, 0, 9, 0, 0, 0]), + Err(DecodeError::OffsetOutOfBounds(9)) + ); + assert_eq!( + >>::from_ssz_bytes(&[8, 0, 0, 0, 16, 0, 0, 0]), + Err(DecodeError::OffsetOutOfBounds(16)) + ); + } + + #[test] + fn vec_of_vec_of_u16() { + assert_eq!( + >>::from_ssz_bytes(&[4, 0, 0, 0]), + Ok(vec![vec![]]) + ); + + assert_eq!( + >::from_ssz_bytes(&[0, 0, 1, 0, 2, 0, 3, 0]), + Ok(vec![0, 1, 2, 3]) + ); + assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); + assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); + assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); + + assert_eq!( + ::from_ssz_bytes(&[255]), + Err(DecodeError::InvalidByteLength { + len: 1, + expected: 2 + }) + ); + + assert_eq!( + ::from_ssz_bytes(&[]), + Err(DecodeError::InvalidByteLength { + len: 0, + expected: 2 + }) + ); + + assert_eq!( + ::from_ssz_bytes(&[0, 1, 2]), + Err(DecodeError::InvalidByteLength { + len: 3, + expected: 2 + }) + ); + } + + #[test] + fn vec_of_u16() { + assert_eq!(>::from_ssz_bytes(&[0, 0, 0, 0]), Ok(vec![0, 0])); + assert_eq!( + >::from_ssz_bytes(&[0, 0, 1, 0, 2, 0, 3, 0]), + Ok(vec![0, 1, 2, 3]) + ); + assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); + assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); + assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); + + assert_eq!( + ::from_ssz_bytes(&[255]), + Err(DecodeError::InvalidByteLength { + len: 1, + expected: 2 + }) + ); + + assert_eq!( + ::from_ssz_bytes(&[]), + Err(DecodeError::InvalidByteLength { + len: 0, + expected: 2 + }) + ); + + assert_eq!( + ::from_ssz_bytes(&[0, 1, 2]), + Err(DecodeError::InvalidByteLength { + len: 3, + expected: 2 + }) + ); + } + + #[test] + fn u16() { + assert_eq!(::from_ssz_bytes(&[0, 0]), Ok(0)); + assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); + assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); + assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); + + assert_eq!( + ::from_ssz_bytes(&[255]), + Err(DecodeError::InvalidByteLength { + len: 1, + expected: 2 + }) + ); + + assert_eq!( + ::from_ssz_bytes(&[]), + 
Err(DecodeError::InvalidByteLength { + len: 0, + expected: 2 + }) + ); + + assert_eq!( + ::from_ssz_bytes(&[0, 1, 2]), + Err(DecodeError::InvalidByteLength { + len: 3, + expected: 2 + }) + ); + } + + #[test] + fn tuple() { + assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 0, 0, 0]), Ok((0, 0))); + assert_eq!(<(u16, u16)>::from_ssz_bytes(&[16, 0, 17, 0]), Ok((16, 17))); + assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 1, 2, 0]), Ok((256, 2))); + assert_eq!( + <(u16, u16)>::from_ssz_bytes(&[255, 255, 0, 0]), + Ok((65535, 0)) + ); + } +} diff --git a/version-meld/eth2_ssz/src/encode.rs b/version-meld/eth2_ssz/src/encode.rs new file mode 100644 index 0000000..cecd615 --- /dev/null +++ b/version-meld/eth2_ssz/src/encode.rs @@ -0,0 +1,196 @@ +use super::*; + +mod impls; + +/// Provides SSZ encoding (serialization) via the `as_ssz_bytes(&self)` method. +/// +/// See `examples/` for manual implementations or the crate root for implementations using +/// `#[derive(Encode)]`. +pub trait Encode { + /// Returns `true` if this object has a fixed-length. + /// + /// I.e., there are no variable length items in this object or any of it's contained objects. + fn is_ssz_fixed_len() -> bool; + + /// Append the encoding `self` to `buf`. + /// + /// Note, variable length objects need only to append their "variable length" portion, they do + /// not need to provide their offset. + fn ssz_append(&self, buf: &mut Vec); + + /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. + /// + /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length + /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which + /// represents their length. + fn ssz_fixed_len() -> usize { + BYTES_PER_LENGTH_OFFSET + } + + /// Returns the size (in bytes) when `self` is serialized. + /// + /// Returns the same value as `self.as_ssz_bytes().len()` but this method is significantly more + /// efficient. + fn ssz_bytes_len(&self) -> usize; + + /// Returns the full-form encoding of this object. + /// + /// The default implementation of this method should suffice for most cases. + fn as_ssz_bytes(&self) -> Vec { + let mut buf = vec![]; + + self.ssz_append(&mut buf); + + buf + } +} + +/// Allow for encoding an ordered series of distinct or indistinct objects as SSZ bytes. +/// +/// **You must call `finalize(..)` after the final `append(..)` call** to ensure the bytes are +/// written to `buf`. +/// +/// ## Example +/// +/// Use `SszEncoder` to produce identical output to `foo.as_ssz_bytes()`: +/// +/// ```rust +/// use ssz_derive::{Encode, Decode}; +/// use ssz::{Decode, Encode, SszEncoder}; +/// +/// #[derive(PartialEq, Debug, Encode, Decode)] +/// struct Foo { +/// a: u64, +/// b: Vec, +/// } +/// +/// fn ssz_encode_example() { +/// let foo = Foo { +/// a: 42, +/// b: vec![1, 3, 3, 7] +/// }; +/// +/// let mut buf: Vec = vec![]; +/// let offset = ::ssz_fixed_len() + as Encode>::ssz_fixed_len(); +/// +/// let mut encoder = SszEncoder::container(&mut buf, offset); +/// +/// encoder.append(&foo.a); +/// encoder.append(&foo.b); +/// +/// encoder.finalize(); +/// +/// assert_eq!(foo.as_ssz_bytes(), buf); +/// } +/// +/// ``` +pub struct SszEncoder<'a> { + offset: usize, + buf: &'a mut Vec, + variable_bytes: Vec, +} + +impl<'a> SszEncoder<'a> { + /// Instantiate a new encoder for encoding a SSZ container. 
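+    ///
+    /// `num_fixed_bytes` should be the sum of `ssz_fixed_len()` over all items that will be
+    /// appended, as in the `SszEncoder` example above.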
+ pub fn container(buf: &'a mut Vec, num_fixed_bytes: usize) -> Self { + buf.reserve(num_fixed_bytes); + + Self { + offset: num_fixed_bytes, + buf, + variable_bytes: vec![], + } + } + + /// Append some `item` to the SSZ bytes. + pub fn append(&mut self, item: &T) { + self.append_parameterized(T::is_ssz_fixed_len(), |buf| item.ssz_append(buf)) + } + + /// Uses `ssz_append` to append the encoding of some item to the SSZ bytes. + pub fn append_parameterized(&mut self, is_ssz_fixed_len: bool, ssz_append: F) + where + F: Fn(&mut Vec), + { + if is_ssz_fixed_len { + ssz_append(&mut self.buf); + } else { + self.buf + .extend_from_slice(&encode_length(self.offset + self.variable_bytes.len())); + + ssz_append(&mut self.variable_bytes); + } + } + + /// Write the variable bytes to `self.bytes`. + /// + /// This method must be called after the final `append(..)` call when serializing + /// variable-length items. + pub fn finalize(&mut self) -> &mut Vec { + self.buf.append(&mut self.variable_bytes); + + &mut self.buf + } +} + +/// Encode `len` as a little-endian byte array of `BYTES_PER_LENGTH_OFFSET` length. +/// +/// If `len` is larger than `2 ^ BYTES_PER_LENGTH_OFFSET`, a `debug_assert` is raised. +pub fn encode_length(len: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] { + // Note: it is possible for `len` to be larger than what can be encoded in + // `BYTES_PER_LENGTH_OFFSET` bytes, triggering this debug assertion. + // + // These are the alternatives to using a `debug_assert` here: + // + // 1. Use `assert`. + // 2. Push an error to the caller (e.g., `Option` or `Result`). + // 3. Ignore it completely. + // + // I have avoided (1) because it's basically a choice between "produce invalid SSZ" or "kill + // the entire program". I figure it may be possible for an attacker to trigger this assert and + // take the program down -- I think producing invalid SSZ is a better option than this. + // + // I have avoided (2) because this error will need to be propagated upstream, making encoding a + // function which may fail. I don't think this is ergonomic and the upsides don't outweigh the + // downsides. + // + // I figure a `debug_assertion` is better than (3) as it will give us a change to detect the + // error during testing. + // + // If you have a different opinion, feel free to start an issue and tag @paulhauner. + debug_assert!(len <= MAX_LENGTH_VALUE); + + let mut bytes = [0; BYTES_PER_LENGTH_OFFSET]; + bytes.copy_from_slice(&len.to_le_bytes()[0..BYTES_PER_LENGTH_OFFSET]); + bytes +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_encode_length() { + assert_eq!(encode_length(0), [0; 4]); + + assert_eq!(encode_length(1), [1, 0, 0, 0]); + + assert_eq!( + encode_length(MAX_LENGTH_VALUE), + [255; BYTES_PER_LENGTH_OFFSET] + ); + } + + #[test] + #[should_panic] + #[cfg(debug_assertions)] + fn test_encode_length_above_max_debug_panics() { + encode_length(MAX_LENGTH_VALUE + 1); + } + + #[test] + #[cfg(not(debug_assertions))] + fn test_encode_length_above_max_not_debug_does_not_panic() { + assert_eq!(&encode_length(MAX_LENGTH_VALUE + 1)[..], &[0; 4]); + } +} diff --git a/version-meld/eth2_ssz/src/encode/impls.rs b/version-meld/eth2_ssz/src/encode/impls.rs new file mode 100644 index 0000000..886914e --- /dev/null +++ b/version-meld/eth2_ssz/src/encode/impls.rs @@ -0,0 +1,522 @@ +use super::*; +use core::num::NonZeroUsize; +use ethereum_types::{H256, U128, U256, H160}; +use smallvec::SmallVec; +use std::sync::Arc; + +macro_rules! 
impl_encodable_for_uint { + ($type: ident, $bit_size: expr) => { + impl Encode for $type { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + $bit_size / 8 + } + + fn ssz_bytes_len(&self) -> usize { + $bit_size / 8 + } + + fn ssz_append(&self, buf: &mut Vec) { + buf.extend_from_slice(&self.to_le_bytes()); + } + } + }; +} + +impl_encodable_for_uint!(u8, 8); +impl_encodable_for_uint!(u16, 16); +impl_encodable_for_uint!(u32, 32); +impl_encodable_for_uint!(u64, 64); + +#[cfg(target_pointer_width = "32")] +impl_encodable_for_uint!(usize, 32); + +#[cfg(target_pointer_width = "64")] +impl_encodable_for_uint!(usize, 64); + +// Based on the `tuple_impls` macro from the standard library. +macro_rules! impl_encode_for_tuples { + ($( + $Tuple:ident { + $(($idx:tt) -> $T:ident)+ + } + )+) => { + $( + impl<$($T: Encode),+> Encode for ($($T,)+) { + fn is_ssz_fixed_len() -> bool { + $( + <$T as Encode>::is_ssz_fixed_len() && + )* + true + } + + fn ssz_fixed_len() -> usize { + if ::is_ssz_fixed_len() { + $( + <$T as Encode>::ssz_fixed_len() + + )* + 0 + } else { + BYTES_PER_LENGTH_OFFSET + } + } + + fn ssz_bytes_len(&self) -> usize { + if ::is_ssz_fixed_len() { + ::ssz_fixed_len() + } else { + let mut len = 0; + $( + len += if <$T as Encode>::is_ssz_fixed_len() { + <$T as Encode>::ssz_fixed_len() + } else { + BYTES_PER_LENGTH_OFFSET + + self.$idx.ssz_bytes_len() + }; + )* + len + } + } + + fn ssz_append(&self, buf: &mut Vec) { + let offset = $( + <$T as Encode>::ssz_fixed_len() + + )* + 0; + + let mut encoder = SszEncoder::container(buf, offset); + + $( + encoder.append(&self.$idx); + )* + + encoder.finalize(); + } + } + )+ + } +} + +impl_encode_for_tuples! { + Tuple2 { + (0) -> A + (1) -> B + } + Tuple3 { + (0) -> A + (1) -> B + (2) -> C + } + Tuple4 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + } + Tuple5 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + } + Tuple6 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + } + Tuple7 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + } + Tuple8 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + } + Tuple9 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + (8) -> I + } + Tuple10 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + (8) -> I + (9) -> J + } + Tuple11 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + (8) -> I + (9) -> J + (10) -> K + } + Tuple12 { + (0) -> A + (1) -> B + (2) -> C + (3) -> D + (4) -> E + (5) -> F + (6) -> G + (7) -> H + (8) -> I + (9) -> J + (10) -> K + (11) -> L + } +} + +impl Encode for Arc { + fn is_ssz_fixed_len() -> bool { + T::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + T::ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.as_ref().ssz_append(buf) + } + + fn ssz_bytes_len(&self) -> usize { + self.as_ref().ssz_bytes_len() + } +} + +macro_rules! 
impl_for_vec { + ($type: ty) => { + impl Encode for $type { + fn is_ssz_fixed_len() -> bool { + false + } + + fn ssz_bytes_len(&self) -> usize { + if ::is_ssz_fixed_len() { + ::ssz_fixed_len() * self.len() + } else { + let mut len = self.iter().map(|item| item.ssz_bytes_len()).sum(); + len += BYTES_PER_LENGTH_OFFSET * self.len(); + len + } + } + + fn ssz_append(&self, buf: &mut Vec) { + if T::is_ssz_fixed_len() { + buf.reserve(T::ssz_fixed_len() * self.len()); + + for item in self { + item.ssz_append(buf); + } + } else { + let mut encoder = + SszEncoder::container(buf, self.len() * BYTES_PER_LENGTH_OFFSET); + + for item in self { + encoder.append(item); + } + + encoder.finalize(); + } + } + } + }; +} + +impl_for_vec!(Vec); +impl_for_vec!(SmallVec<[T; 1]>); +impl_for_vec!(SmallVec<[T; 2]>); +impl_for_vec!(SmallVec<[T; 3]>); +impl_for_vec!(SmallVec<[T; 4]>); +impl_for_vec!(SmallVec<[T; 5]>); +impl_for_vec!(SmallVec<[T; 6]>); +impl_for_vec!(SmallVec<[T; 7]>); +impl_for_vec!(SmallVec<[T; 8]>); + +impl Encode for bool { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 1 + } + + fn ssz_bytes_len(&self) -> usize { + 1 + } + + fn ssz_append(&self, buf: &mut Vec) { + buf.extend_from_slice(&(*self as u8).to_le_bytes()); + } +} + +impl Encode for NonZeroUsize { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + std::mem::size_of::() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.get().ssz_append(buf) + } +} + +impl Encode for H256 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 32 + } + + fn ssz_bytes_len(&self) -> usize { + 32 + } + + fn ssz_append(&self, buf: &mut Vec) { + buf.extend_from_slice(self.as_bytes()); + } +} + +impl Encode for H160 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 20 + } + + fn ssz_bytes_len(&self) -> usize { + 20 + } + + fn ssz_append(&self, buf: &mut Vec) { + buf.extend_from_slice(self.as_bytes()); + } +} + +impl Encode for U256 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 32 + } + + fn ssz_bytes_len(&self) -> usize { + 32 + } + + fn ssz_append(&self, buf: &mut Vec) { + let n = ::ssz_fixed_len(); + let s = buf.len(); + + buf.resize(s + n, 0); + self.to_little_endian(&mut buf[s..]); + } +} + +impl Encode for U128 { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + 16 + } + + fn ssz_bytes_len(&self) -> usize { + 16 + } + + fn ssz_append(&self, buf: &mut Vec) { + let n = ::ssz_fixed_len(); + let s = buf.len(); + + buf.resize(s + n, 0); + self.to_little_endian(&mut buf[s..]); + } +} + +macro_rules! 
impl_encodable_for_u8_array { + ($len: expr) => { + impl Encode for [u8; $len] { + fn is_ssz_fixed_len() -> bool { + true + } + + fn ssz_fixed_len() -> usize { + $len + } + + fn ssz_bytes_len(&self) -> usize { + $len + } + + fn ssz_append(&self, buf: &mut Vec) { + buf.extend_from_slice(&self[..]); + } + } + }; +} + +impl_encodable_for_u8_array!(4); +impl_encodable_for_u8_array!(32); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn vec_of_u8() { + let vec: Vec = vec![]; + assert_eq!(vec.as_ssz_bytes(), vec![]); + + let vec: Vec = vec![1]; + assert_eq!(vec.as_ssz_bytes(), vec![1]); + + let vec: Vec = vec![0, 1, 2, 3]; + assert_eq!(vec.as_ssz_bytes(), vec![0, 1, 2, 3]); + } + + #[test] + fn vec_of_vec_of_u8() { + let vec: Vec> = vec![]; + assert_eq!(vec.as_ssz_bytes(), vec![]); + + let vec: Vec> = vec![vec![]]; + assert_eq!(vec.as_ssz_bytes(), vec![4, 0, 0, 0]); + + let vec: Vec> = vec![vec![], vec![]]; + assert_eq!(vec.as_ssz_bytes(), vec![8, 0, 0, 0, 8, 0, 0, 0]); + + let vec: Vec> = vec![vec![0, 1, 2], vec![11, 22, 33]]; + assert_eq!( + vec.as_ssz_bytes(), + vec![8, 0, 0, 0, 11, 0, 0, 0, 0, 1, 2, 11, 22, 33] + ); + } + + #[test] + fn ssz_encode_u8() { + assert_eq!(0_u8.as_ssz_bytes(), vec![0]); + assert_eq!(1_u8.as_ssz_bytes(), vec![1]); + assert_eq!(100_u8.as_ssz_bytes(), vec![100]); + assert_eq!(255_u8.as_ssz_bytes(), vec![255]); + } + + #[test] + fn ssz_encode_u16() { + assert_eq!(1_u16.as_ssz_bytes(), vec![1, 0]); + assert_eq!(100_u16.as_ssz_bytes(), vec![100, 0]); + assert_eq!((1_u16 << 8).as_ssz_bytes(), vec![0, 1]); + assert_eq!(65535_u16.as_ssz_bytes(), vec![255, 255]); + } + + #[test] + fn ssz_encode_u32() { + assert_eq!(1_u32.as_ssz_bytes(), vec![1, 0, 0, 0]); + assert_eq!(100_u32.as_ssz_bytes(), vec![100, 0, 0, 0]); + assert_eq!((1_u32 << 16).as_ssz_bytes(), vec![0, 0, 1, 0]); + assert_eq!((1_u32 << 24).as_ssz_bytes(), vec![0, 0, 0, 1]); + assert_eq!((!0_u32).as_ssz_bytes(), vec![255, 255, 255, 255]); + } + + #[test] + fn ssz_encode_u64() { + assert_eq!(1_u64.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); + assert_eq!( + (!0_u64).as_ssz_bytes(), + vec![255, 255, 255, 255, 255, 255, 255, 255] + ); + } + + #[test] + fn ssz_encode_usize() { + assert_eq!(1_usize.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); + assert_eq!( + (!0_usize).as_ssz_bytes(), + vec![255, 255, 255, 255, 255, 255, 255, 255] + ); + } + + #[test] + fn ssz_encode_bool() { + assert_eq!(true.as_ssz_bytes(), vec![1]); + assert_eq!(false.as_ssz_bytes(), vec![0]); + } + + #[test] + fn ssz_encode_h256() { + assert_eq!(H256::from(&[0; 32]).as_ssz_bytes(), vec![0; 32]); + assert_eq!(H256::from(&[1; 32]).as_ssz_bytes(), vec![1; 32]); + + let bytes = vec![ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]; + + assert_eq!(H256::from_slice(&bytes).as_ssz_bytes(), bytes); + } + + #[test] + fn ssz_encode_u8_array_4() { + assert_eq!([0, 0, 0, 0].as_ssz_bytes(), vec![0; 4]); + assert_eq!([1, 0, 0, 0].as_ssz_bytes(), vec![1, 0, 0, 0]); + assert_eq!([1, 2, 3, 4].as_ssz_bytes(), vec![1, 2, 3, 4]); + } + + #[test] + fn tuple() { + assert_eq!((10u8, 11u8).as_ssz_bytes(), vec![10, 11]); + assert_eq!((10u32, 11u8).as_ssz_bytes(), vec![10, 0, 0, 0, 11]); + assert_eq!((10u8, 11u8, 12u8).as_ssz_bytes(), vec![10, 11, 12]); + } +} diff --git a/version-meld/eth2_ssz/src/legacy.rs b/version-meld/eth2_ssz/src/legacy.rs new file mode 100644 index 0000000..4953db0 --- /dev/null +++ b/version-meld/eth2_ssz/src/legacy.rs @@ -0,0 +1,265 @@ +//! 
Provides a "legacy" version of SSZ encoding for `Option where T: Encode + Decode`. +//! +//! The SSZ specification changed in 2021 to use a 1-byte union selector, instead of a 4-byte one +//! which was used in the Lighthouse database. +//! +//! Users can use the `four_byte_option_impl` macro to define a module that can be used with the +//! `#[ssz(with = "module")]`. +//! +//! ## Example +//! +//! ```rust +//! use ssz_derive::{Encode, Decode}; +//! use ssz::four_byte_option_impl; +//! +//! four_byte_option_impl!(impl_for_u64, u64); +//! +//! #[derive(Encode, Decode)] +//! struct Foo { +//! #[ssz(with = "impl_for_u64")] +//! a: Option, +//! } +//! ``` + +use crate::*; + +#[macro_export] +macro_rules! four_byte_option_impl { + ($mod_name: ident, $type: ty) => { + #[allow(dead_code)] + mod $mod_name { + use super::*; + + pub mod encode { + use super::*; + #[allow(unused_imports)] + use ssz::*; + + pub fn is_ssz_fixed_len() -> bool { + false + } + + pub fn ssz_fixed_len() -> usize { + BYTES_PER_LENGTH_OFFSET + } + + pub fn ssz_bytes_len(opt: &Option<$type>) -> usize { + if let Some(some) = opt { + let len = if <$type as Encode>::is_ssz_fixed_len() { + <$type as Encode>::ssz_fixed_len() + } else { + <$type as Encode>::ssz_bytes_len(some) + }; + len + BYTES_PER_LENGTH_OFFSET + } else { + BYTES_PER_LENGTH_OFFSET + } + } + + pub fn ssz_append(opt: &Option<$type>, buf: &mut Vec) { + match opt { + None => buf.extend_from_slice(&legacy::encode_four_byte_union_selector(0)), + Some(t) => { + buf.extend_from_slice(&legacy::encode_four_byte_union_selector(1)); + t.ssz_append(buf); + } + } + } + + pub fn as_ssz_bytes(opt: &Option<$type>) -> Vec { + let mut buf = vec![]; + + ssz_append(opt, &mut buf); + + buf + } + } + + pub mod decode { + use super::*; + #[allow(unused_imports)] + use ssz::*; + + pub fn is_ssz_fixed_len() -> bool { + false + } + + pub fn ssz_fixed_len() -> usize { + BYTES_PER_LENGTH_OFFSET + } + + pub fn from_ssz_bytes(bytes: &[u8]) -> Result, DecodeError> { + if bytes.len() < BYTES_PER_LENGTH_OFFSET { + return Err(DecodeError::InvalidByteLength { + len: bytes.len(), + expected: BYTES_PER_LENGTH_OFFSET, + }); + } + + let (index_bytes, value_bytes) = bytes.split_at(BYTES_PER_LENGTH_OFFSET); + + let index = legacy::read_four_byte_union_selector(index_bytes)?; + if index == 0 { + Ok(None) + } else if index == 1 { + Ok(Some(<$type as ssz::Decode>::from_ssz_bytes(value_bytes)?)) + } else { + Err(DecodeError::BytesInvalid(format!( + "{} is not a valid union index for Option", + index + ))) + } + } + } + } + }; +} + +pub fn encode_four_byte_union_selector(selector: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] { + encode_length(selector) +} + +pub fn read_four_byte_union_selector(bytes: &[u8]) -> Result { + read_offset(bytes) +} + +#[cfg(test)] +mod test { + use super::*; + use crate as ssz; + use ssz_derive::{Decode, Encode}; + + type VecU16 = Vec; + + four_byte_option_impl!(impl_u16, u16); + four_byte_option_impl!(impl_vec_u16, VecU16); + + #[test] + fn ssz_encode_option_u16() { + let item = Some(65535_u16); + let bytes = vec![1, 0, 0, 0, 255, 255]; + assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); + assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); + + let item = None; + let bytes = vec![0, 0, 0, 0]; + assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); + assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), None); + } + + #[test] + fn ssz_encode_option_vec_u16() { + let item = Some(vec![0_u16, 1]); + let bytes = vec![1, 0, 0, 0, 0, 0, 1, 0]; + 
assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); + assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); + + let item = None; + let bytes = vec![0, 0, 0, 0]; + assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); + assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); + } + + fn round_trip(items: Vec) { + for item in items { + let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); + assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); + } + } + + #[derive(Debug, PartialEq, Encode, Decode)] + struct TwoVariableLenOptions { + a: u16, + #[ssz(with = "impl_u16")] + b: Option, + #[ssz(with = "impl_vec_u16")] + c: Option>, + #[ssz(with = "impl_vec_u16")] + d: Option>, + } + + #[test] + #[allow(clippy::zero_prefixed_literal)] + fn two_variable_len_options_encoding() { + let s = TwoVariableLenOptions { + a: 42, + b: None, + c: Some(vec![0]), + d: None, + }; + + let bytes = vec![ + // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 + // | option | offset | offset | option = vec![ + TwoVariableLenOptions { + a: 42, + b: Some(12), + c: Some(vec![0]), + d: Some(vec![1]), + }, + TwoVariableLenOptions { + a: 42, + b: Some(12), + c: Some(vec![0]), + d: None, + }, + TwoVariableLenOptions { + a: 42, + b: None, + c: Some(vec![0]), + d: None, + }, + TwoVariableLenOptions { + a: 42, + b: None, + c: None, + d: None, + }, + ]; + + round_trip(vec); + } + + #[test] + fn tuple_u8_u16() { + let vec: Vec<(u8, u16)> = vec![ + (0, 0), + (0, 1), + (1, 0), + (u8::max_value(), u16::max_value()), + (0, u16::max_value()), + (u8::max_value(), 0), + (42, 12301), + ]; + + round_trip(vec); + } + + #[test] + fn tuple_vec_vec() { + let vec: Vec<(u64, Vec, Vec>)> = vec![ + (0, vec![], vec![vec![]]), + (99, vec![101], vec![vec![], vec![]]), + ( + 42, + vec![12, 13, 14], + vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]], + ), + ]; + + round_trip(vec); + } +} diff --git a/version-meld/eth2_ssz/src/lib.rs b/version-meld/eth2_ssz/src/lib.rs new file mode 100644 index 0000000..df00c51 --- /dev/null +++ b/version-meld/eth2_ssz/src/lib.rs @@ -0,0 +1,71 @@ +//! Provides encoding (serialization) and decoding (deserialization) in the SimpleSerialize (SSZ) +//! format designed for use in Ethereum 2.0. +//! +//! Adheres to the Ethereum 2.0 [SSZ +//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md) +//! at v0.12.1. +//! +//! ## Example +//! +//! ```rust +//! use ssz_derive::{Encode, Decode}; +//! use ssz::{Decode, Encode}; +//! +//! #[derive(PartialEq, Debug, Encode, Decode)] +//! struct Foo { +//! a: u64, +//! b: Vec, +//! } +//! +//! fn ssz_encode_decode_example() { +//! let foo = Foo { +//! a: 42, +//! b: vec![1, 3, 3, 7] +//! }; +//! +//! let ssz_bytes: Vec = foo.as_ssz_bytes(); +//! +//! let decoded_foo = Foo::from_ssz_bytes(&ssz_bytes).unwrap(); +//! +//! assert_eq!(foo, decoded_foo); +//! } +//! +//! ``` +//! +//! See `examples/` for manual implementations of the `Encode` and `Decode` traits. + +mod decode; +mod encode; +pub mod legacy; +mod union_selector; + +pub use decode::{ + impls::decode_list_of_variable_length_items, read_offset, split_union_bytes, Decode, + DecodeError, SszDecoder, SszDecoderBuilder, +}; +pub use encode::{encode_length, Encode, SszEncoder}; +pub use union_selector::UnionSelector; + +/// The number of bytes used to represent an offset. +pub const BYTES_PER_LENGTH_OFFSET: usize = 4; +/// The maximum value that can be represented using `BYTES_PER_LENGTH_OFFSET`. 
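+/// With `BYTES_PER_LENGTH_OFFSET = 4` this is `u32::MAX` (4_294_967_295).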
+#[cfg(target_pointer_width = "32")] +pub const MAX_LENGTH_VALUE: usize = (std::u32::MAX >> (8 * (4 - BYTES_PER_LENGTH_OFFSET))) as usize; +#[cfg(target_pointer_width = "64")] +pub const MAX_LENGTH_VALUE: usize = (std::u64::MAX >> (8 * (8 - BYTES_PER_LENGTH_OFFSET))) as usize; + +/// The number of bytes used to indicate the variant of a union. +pub const BYTES_PER_UNION_SELECTOR: usize = 1; +/// The highest possible union selector value (higher values are reserved for backwards compatible +/// extensions). +pub const MAX_UNION_SELECTOR: u8 = 127; + +/// Convenience function to SSZ encode an object supporting ssz::Encode. +/// +/// Equivalent to `val.as_ssz_bytes()`. +pub fn ssz_encode(val: &T) -> Vec +where + T: Encode, +{ + val.as_ssz_bytes() +} diff --git a/version-meld/eth2_ssz/src/union_selector.rs b/version-meld/eth2_ssz/src/union_selector.rs new file mode 100644 index 0000000..18bab09 --- /dev/null +++ b/version-meld/eth2_ssz/src/union_selector.rs @@ -0,0 +1,29 @@ +use crate::*; + +/// Provides the one-byte "selector" from the SSZ union specification: +/// +/// https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.3/ssz/simple-serialize.md#union +#[derive(Copy, Clone)] +pub struct UnionSelector(u8); + +impl From for u8 { + fn from(union_selector: UnionSelector) -> u8 { + union_selector.0 + } +} + +impl PartialEq for UnionSelector { + fn eq(&self, other: &u8) -> bool { + self.0 == *other + } +} + +impl UnionSelector { + /// Instantiate `self`, returning an error if `selector > MAX_UNION_SELECTOR`. + pub fn new(selector: u8) -> Result { + Some(selector) + .filter(|_| selector <= MAX_UNION_SELECTOR) + .map(Self) + .ok_or(DecodeError::UnionSelectorInvalid(selector)) + } +} diff --git a/version-meld/eth2_ssz/tests/tests.rs b/version-meld/eth2_ssz/tests/tests.rs new file mode 100644 index 0000000..7bd6252 --- /dev/null +++ b/version-meld/eth2_ssz/tests/tests.rs @@ -0,0 +1,466 @@ +use ethereum_types::H256; +use ssz::{Decode, DecodeError, Encode}; +use ssz_derive::{Decode, Encode}; + +mod round_trip { + use super::*; + + fn round_trip(items: Vec) { + for item in items { + let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); + assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); + } + } + + #[test] + fn bool() { + let items: Vec = vec![true, false]; + + round_trip(items); + } + + #[test] + fn u8_array_4() { + let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]]; + + round_trip(items); + } + + #[test] + fn h256() { + let items: Vec = vec![H256::zero(), H256::from([1; 32]), H256::random()]; + + round_trip(items); + } + + #[test] + fn vec_of_h256() { + let items: Vec> = vec![ + vec![], + vec![H256::zero(), H256::from([1; 32]), H256::random()], + ]; + + round_trip(items); + } + + #[test] + fn vec_u16() { + let items: Vec> = vec![ + vec![], + vec![255], + vec![0, 1, 2], + vec![100; 64], + vec![255, 0, 255], + ]; + + round_trip(items); + } + + #[test] + fn vec_of_vec_u16() { + let items: Vec>> = vec![ + vec![], + vec![vec![]], + vec![vec![1, 2, 3]], + vec![vec![], vec![]], + vec![vec![], vec![1, 2, 3]], + vec![vec![1, 2, 3], vec![1, 2, 3]], + vec![vec![1, 2, 3], vec![], vec![1, 2, 3]], + vec![vec![], vec![], vec![1, 2, 3]], + vec![vec![], vec![1], vec![1, 2, 3]], + vec![vec![], vec![1], vec![1, 2, 3]], + ]; + + round_trip(items); + } + + #[derive(Debug, PartialEq, Encode, Decode)] + struct FixedLen { + a: u16, + b: u64, + c: u32, + } + + #[test] + #[allow(clippy::zero_prefixed_literal)] + fn fixed_len_struct_encoding() { + let 
items: Vec = vec![ + FixedLen { a: 0, b: 0, c: 0 }, + FixedLen { a: 1, b: 1, c: 1 }, + FixedLen { a: 1, b: 0, c: 1 }, + ]; + + let expected_encodings = vec![ + // | u16--| u64----------------------------| u32----------| + vec![00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], + vec![01, 00, 01, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], + vec![01, 00, 00, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], + ]; + + for i in 0..items.len() { + assert_eq!( + items[i].as_ssz_bytes(), + expected_encodings[i], + "Failed on {}", + i + ); + } + } + + #[test] + fn fixed_len_excess_bytes() { + let fixed = FixedLen { a: 1, b: 2, c: 3 }; + + let mut bytes = fixed.as_ssz_bytes(); + bytes.append(&mut vec![0]); + + assert_eq!( + FixedLen::from_ssz_bytes(&bytes), + Err(DecodeError::InvalidByteLength { + len: 15, + expected: 14, + }) + ); + } + + #[test] + fn vec_of_fixed_len_struct() { + let items: Vec = vec![ + FixedLen { a: 0, b: 0, c: 0 }, + FixedLen { a: 1, b: 1, c: 1 }, + FixedLen { a: 1, b: 0, c: 1 }, + ]; + + round_trip(items); + } + + #[derive(Debug, PartialEq, Encode, Decode)] + struct VariableLen { + a: u16, + b: Vec, + c: u32, + } + + #[test] + #[allow(clippy::zero_prefixed_literal)] + fn offset_into_fixed_bytes() { + let bytes = vec![ + // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + // | offset | u32 | variable + 01, 00, 09, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, + ]; + + assert_eq!( + VariableLen::from_ssz_bytes(&bytes), + Err(DecodeError::OffsetIntoFixedPortion(9)) + ); + } + + #[test] + fn variable_len_excess_bytes() { + let variable = VariableLen { + a: 1, + b: vec![2], + c: 3, + }; + + let mut bytes = variable.as_ssz_bytes(); + bytes.append(&mut vec![0]); + + // The error message triggered is not so helpful, it's caught by a side-effect. Just + // checking there is _some_ error is fine. 
+        assert!(VariableLen::from_ssz_bytes(&bytes).is_err());
+    }
+
+    #[test]
+    #[allow(clippy::zero_prefixed_literal)]
+    fn first_offset_skips_byte() {
+        let bytes = vec![
+            //  1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
+            //      | offset        | u32           | variable
+            01, 00, 11, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00,
+        ];
+
+        assert_eq!(
+            VariableLen::from_ssz_bytes(&bytes),
+            Err(DecodeError::OffsetSkipsVariableBytes(11))
+        );
+    }
+
+    #[test]
+    #[allow(clippy::zero_prefixed_literal)]
+    fn variable_len_struct_encoding() {
+        let items: Vec<VariableLen> = vec![
+            VariableLen {
+                a: 0,
+                b: vec![],
+                c: 0,
+            },
+            VariableLen {
+                a: 1,
+                b: vec![0],
+                c: 1,
+            },
+            VariableLen {
+                a: 1,
+                b: vec![0, 1, 2],
+                c: 1,
+            },
+        ];
+
+        let expected_encodings = vec![
+            //   00..................................09
+            //   | u16--| vec offset-----| u32------------| vec payload --------|
+            vec![00, 00, 10, 00, 00, 00, 00, 00, 00, 00],
+            vec![01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00],
+            vec![
+                01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00,
+            ],
+        ];
+
+        for i in 0..items.len() {
+            assert_eq!(
+                items[i].as_ssz_bytes(),
+                expected_encodings[i],
+                "Failed on {}",
+                i
+            );
+        }
+    }
+
+    #[test]
+    fn vec_of_variable_len_struct() {
+        let items: Vec<VariableLen> = vec![
+            VariableLen {
+                a: 0,
+                b: vec![],
+                c: 0,
+            },
+            VariableLen {
+                a: 255,
+                b: vec![0, 1, 2, 3],
+                c: 99,
+            },
+            VariableLen {
+                a: 255,
+                b: vec![0],
+                c: 99,
+            },
+            VariableLen {
+                a: 50,
+                b: vec![0],
+                c: 0,
+            },
+        ];
+
+        round_trip(items);
+    }
+
+    #[derive(Debug, PartialEq, Encode, Decode)]
+    struct ThreeVariableLen {
+        a: u16,
+        b: Vec<u16>,
+        c: Vec<u16>,
+        d: Vec<u16>,
+    }
+
+    #[test]
+    fn three_variable_len() {
+        let vec: Vec<ThreeVariableLen> = vec![ThreeVariableLen {
+            a: 42,
+            b: vec![0],
+            c: vec![1],
+            d: vec![2],
+        }];
+
+        round_trip(vec);
+    }
+
+    #[test]
+    #[allow(clippy::zero_prefixed_literal)]
+    fn offsets_decreasing() {
+        let bytes = vec![
+            //  1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
+            //      | offset        | offset        | offset        | variable
+            01, 00, 14, 00, 00, 00, 15, 00, 00, 00, 14, 00, 00, 00, 00, 00,
+        ];
+
+        assert_eq!(
+            ThreeVariableLen::from_ssz_bytes(&bytes),
+            Err(DecodeError::OffsetsAreDecreasing(14))
+        );
+    }
+
+    #[test]
+    fn tuple_u8_u16() {
+        let vec: Vec<(u8, u16)> = vec![
+            (0, 0),
+            (0, 1),
+            (1, 0),
+            (u8::max_value(), u16::max_value()),
+            (0, u16::max_value()),
+            (u8::max_value(), 0),
+            (42, 12301),
+        ];
+
+        round_trip(vec);
+    }
+
+    #[test]
+    fn tuple_vec_vec() {
+        let vec: Vec<(u64, Vec<u8>, Vec<Vec<u16>>)> = vec![
+            (0, vec![], vec![vec![]]),
+            (99, vec![101], vec![vec![], vec![]]),
+            (
+                42,
+                vec![12, 13, 14],
+                vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]],
+            ),
+        ];
+
+        round_trip(vec);
+    }
+}
+
+mod derive_macro {
+    use ssz::{Decode, Encode};
+    use ssz_derive::{Decode, Encode};
+    use std::fmt::Debug;
+
+    fn assert_encode<T: Encode>(item: &T, bytes: &[u8]) {
+        assert_eq!(item.as_ssz_bytes(), bytes);
+    }
+
+    fn assert_encode_decode<T: Encode + Decode + PartialEq + Debug>(item: &T, bytes: &[u8]) {
+        assert_encode(item, bytes);
+        assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item);
+    }
+
+    #[derive(PartialEq, Debug, Encode, Decode)]
+    #[ssz(enum_behaviour = "union")]
+    enum TwoFixedUnion {
+        U8(u8),
+        U16(u16),
+    }
+
+    #[derive(PartialEq, Debug, Encode, Decode)]
+    struct TwoFixedUnionStruct {
+        a: TwoFixedUnion,
+    }
+
+    #[test]
+    fn two_fixed_union() {
+        let eight = TwoFixedUnion::U8(1);
+        let sixteen = TwoFixedUnion::U16(1);
+
+        assert_encode_decode(&eight, &[0, 1]);
+        assert_encode_decode(&sixteen, &[1, 1, 0]);
+
+        assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]);
+        assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]);
+    }
+
+    #[derive(PartialEq, Debug, Encode, Decode)]
+    struct VariableA {
+        a: u8,
+        b: Vec<u16>,
+    }
+
+    #[derive(PartialEq, Debug, Encode, Decode)]
+    struct VariableB {
+        a: Vec<u16>,
+        b: u8,
+    }
+
+    #[derive(PartialEq, Debug, Encode)]
+    #[ssz(enum_behaviour = "transparent")]
+    enum TwoVariableTrans {
+        A(VariableA),
+        B(VariableB),
+    }
+
+    #[derive(PartialEq, Debug, Encode)]
+    struct TwoVariableTransStruct {
+        a: TwoVariableTrans,
+    }
+
+    #[derive(PartialEq, Debug, Encode, Decode)]
+    #[ssz(enum_behaviour = "union")]
+    enum TwoVariableUnion {
+        A(VariableA),
+        B(VariableB),
+    }
+
+    #[derive(PartialEq, Debug, Encode, Decode)]
+    struct TwoVariableUnionStruct {
+        a: TwoVariableUnion,
+    }
+
+    #[test]
+    fn two_variable_trans() {
+        let trans_a = TwoVariableTrans::A(VariableA {
+            a: 1,
+            b: vec![2, 3],
+        });
+        let trans_b = TwoVariableTrans::B(VariableB {
+            a: vec![1, 2],
+            b: 3,
+        });
+
+        assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]);
+        assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]);
+
+        assert_encode(
+            &TwoVariableTransStruct { a: trans_a },
+            &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3],
+        );
+        assert_encode(
+            &TwoVariableTransStruct { a: trans_b },
+            &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2],
+        );
+    }
+
+    #[test]
+    fn two_variable_union() {
+        let union_a = TwoVariableUnion::A(VariableA {
+            a: 1,
+            b: vec![2, 3],
+        });
+        let union_b = TwoVariableUnion::B(VariableB {
+            a: vec![1, 2],
+            b: 3,
+        });
+
+        assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]);
+        assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]);
+
+        assert_encode_decode(
+            &TwoVariableUnionStruct { a: union_a },
+            &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3],
+        );
+        assert_encode_decode(
+            &TwoVariableUnionStruct { a: union_b },
+            &[4, 0, 0, 0, 1, 5, 0, 0, 0, 3, 1, 2],
+        );
+    }
+
+    #[derive(PartialEq, Debug, Encode, Decode)]
+    #[ssz(enum_behaviour = "union")]
+    enum TwoVecUnion {
+        A(Vec<u8>),
+        B(Vec<u8>),
+    }
+
+    #[test]
+    fn two_vec_union() {
+        assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]);
+        assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]);
+
+        assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]);
+        assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]);
+
+        assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 1]);
+        assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]);
+    }
+}
diff --git a/zerog-storage-rust b/zerog-storage-rust
new file mode 160000
index 0000000..9058467
--- /dev/null
+++ b/zerog-storage-rust
@@ -0,0 +1 @@
+Subproject commit 9058467be1246e4693cb0098a62b18d01b498700
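Note (illustrative aside, not part of the patch): a minimal round-trip sketch using the same `ssz`/`ssz_derive` API the tests above exercise. The struct name `Example` and the byte values are assumptions chosen for the illustration; the layout follows the fixed-part/offset/payload pattern shown in `variable_len_struct_encoding`.

// Minimal sketch, not part of the diff: a fixed field plus a variable-length
// field, encoded as [fixed part | 4-byte offset | variable payload].
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
struct Example {
    a: u16,
    b: Vec<u8>,
}

fn main() {
    let original = Example { a: 7, b: vec![1, 2, 3] };

    let bytes = original.as_ssz_bytes();
    // Fixed portion is 6 bytes (u16 + 4-byte offset), so `b`'s payload starts at offset 6.
    assert_eq!(bytes, vec![7, 0, 6, 0, 0, 0, 1, 2, 3]);

    // Decoding restores the original value.
    assert_eq!(Example::from_ssz_bytes(&bytes).unwrap(), original);
}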