From a483d52d79a02541ac0af80a1162c2efb5abad11 Mon Sep 17 00:00:00 2001 From: Amrik Ajimal Date: Wed, 8 Jan 2025 11:48:44 -0800 Subject: [PATCH 01/23] Switch to OSS bitnami postgresql chart (#1899) ## Summary Update the postgresql chart dependency for evm-stack ## Background The existing charts no longer work due to VMWare requiring a paid subscription ## Changes - Update chart URL to point to the OSS oci:// repo ## Testing Github's smoke-tests --- charts/evm-stack/Chart.lock | 6 +++--- charts/evm-stack/Chart.yaml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/charts/evm-stack/Chart.lock b/charts/evm-stack/Chart.lock index 7de99729c..55dd9cec3 100644 --- a/charts/evm-stack/Chart.lock +++ b/charts/evm-stack/Chart.lock @@ -15,10 +15,10 @@ dependencies: repository: file://../evm-bridge-withdrawer version: 1.0.1 - name: postgresql - repository: https://charts.bitnami.com/bitnami + repository: oci://registry-1.docker.io/bitnamicharts version: 15.2.4 - name: blockscout-stack repository: https://blockscout.github.io/helm-charts version: 1.6.8 -digest: sha256:4715e557b6ceb0fa85c9efe86f5b26d665783f0be9162728efe808fa3a35d727 -generated: "2024-12-12T19:52:24.992658+02:00" +digest: sha256:371c35af96fc5d82aa2b4d894dc7d2e11e150380fd6f09eb0ca94b4202b24698 +generated: "2025-01-08T11:22:41.273867-08:00" diff --git a/charts/evm-stack/Chart.yaml b/charts/evm-stack/Chart.yaml index dea0641cf..7e1d7cad5 100644 --- a/charts/evm-stack/Chart.yaml +++ b/charts/evm-stack/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.6 +version: 1.0.7 dependencies: - name: celestia-node @@ -39,7 +39,7 @@ dependencies: condition: evm-bridge-withdrawer.enabled - name: postgresql version: "15.2.4" - repository: "https://charts.bitnami.com/bitnami" + repository: "oci://registry-1.docker.io/bitnamicharts" condition: postgresql.enabled - name: blockscout-stack repository: "https://blockscout.github.io/helm-charts" From a19b82243962a89fbdee149cac00e0c3a3d9fefc Mon Sep 17 00:00:00 2001 From: Richard Janis Goldschmidt Date: Thu, 9 Jan 2025 14:42:46 +0100 Subject: [PATCH 02/23] chore(sequencer): remove misplaced logs (#1892) ## Summary Remove events from `Sequencer::run_until_stopped` that report on whether whether a state DB will be opened or created. ## Background Cnidarium already reports which paths it will read. More egregious however is that `Sequencer::run_until_stopped` is the wrong place to emit these events because it does not have authority over which actions cnidarium will take. Should the constructor ever change, then the events will be immediately out of whack, which will lead to confusion. ## Changes - Remove state storage creation events from `Sequencer::run_until_stopped`. ## Testing No testing necessary. These are just some limited events. ## Changelogs Changelogs updated. --- crates/astria-sequencer/CHANGELOG.md | 1 + crates/astria-sequencer/src/sequencer.rs | 20 -------------------- 2 files changed, 1 insertion(+), 20 deletions(-) diff --git a/crates/astria-sequencer/CHANGELOG.md b/crates/astria-sequencer/CHANGELOG.md index c80e93586..5dd521e94 100644 --- a/crates/astria-sequencer/CHANGELOG.md +++ b/crates/astria-sequencer/CHANGELOG.md @@ -15,6 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Consolidate action handling to single module [#1759](https://github.com/astriaorg/astria/pull/1759). 
- Ensure all deposit assets are trace prefixed [#1807](https://github.com/astriaorg/astria/pull/1807). - Update `idna` dependency to resolve cargo audit warning [#1869](https://github.com/astriaorg/astria/pull/1869). +- Remove events reporting on state storage creation [#1892](https://github.com/astriaorg/astria/pull/1892). ## [1.0.0] - 2024-10-25 diff --git a/crates/astria-sequencer/src/sequencer.rs b/crates/astria-sequencer/src/sequencer.rs index 92df49d47..3a7c5b9c8 100644 --- a/crates/astria-sequencer/src/sequencer.rs +++ b/crates/astria-sequencer/src/sequencer.rs @@ -54,26 +54,6 @@ impl Sequencer { register_histogram_global("cnidarium_nonverifiable_get_raw_duration_seconds"); let span = info_span!("Sequencer::run_until_stopped"); - if config - .db_filepath - .try_exists() - .context("failed checking for existence of db storage file")? - { - span.in_scope(|| { - info!( - path = %config.db_filepath.display(), - "opening storage db" - ); - }); - } else { - span.in_scope(|| { - info!( - path = %config.db_filepath.display(), - "creating storage db" - ); - }); - } - let mut signals = spawn_signal_handler(); let substore_prefixes = vec![penumbra_ibc::IBC_SUBSTORE_PREFIX]; From 5c4feafd155c4550d2355c8fd35702907589adfd Mon Sep 17 00:00:00 2001 From: Ethan Oroshiba Date: Mon, 13 Jan 2025 14:32:55 -0600 Subject: [PATCH 03/23] fix(sequencer)!: use bridge address to determine asset in bridge unlock cost estimation instead of signer (#1905) ## Summary Changed bridge unlock cost calculation to use bridge address instead of signer address. ## Background Previously, the `get_total_transaction_cost` attempted to retrieve the bridge account asset by the tx signer instead of by the bridge address. This works fine if the sender is a bridge account, but if it is the authorized bridge withdrawer, it will fail. ## Changes - Change `BridgeUnlock` cost calculation to retrieve asset based on the action's bridge address instead of the transaction signer. 
## Testing Added unit test to ensure function works correctly when submitting a `BridgeUnlock` from the bridge withdrawer address. Synced to mainnet from genesis to ensure this would not be a network breaking change. ## Changelogs Changelog updates. ## Breaking Changelist - This change is breaking since previous `BridgeUnlock` submitted from the withdrawer address would fail. This has been synced to mainnet successfully from genesis as of January 13, 2025 at 2:32pm CST. ## Related Issues closes #1904 --- crates/astria-sequencer/CHANGELOG.md | 2 + .../src/transaction/checks.rs | 65 ++++++++++++++++++- 2 files changed, 64 insertions(+), 3 deletions(-) diff --git a/crates/astria-sequencer/CHANGELOG.md b/crates/astria-sequencer/CHANGELOG.md index 5dd521e94..bd1b45b06 100644 --- a/crates/astria-sequencer/CHANGELOG.md +++ b/crates/astria-sequencer/CHANGELOG.md @@ -16,6 +16,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Ensure all deposit assets are trace prefixed [#1807](https://github.com/astriaorg/astria/pull/1807). - Update `idna` dependency to resolve cargo audit warning [#1869](https://github.com/astriaorg/astria/pull/1869). - Remove events reporting on state storage creation [#1892](https://github.com/astriaorg/astria/pull/1892). +- Use bridge address to determine asset in bridge unlock cost estimation instead +of signer [#1905](https://github.com/astriaorg/astria/pull/1905). 
## [1.0.0] - 2024-10-25 diff --git a/crates/astria-sequencer/src/transaction/checks.rs b/crates/astria-sequencer/src/transaction/checks.rs index 8d7943ca9..78848bf0c 100644 --- a/crates/astria-sequencer/src/transaction/checks.rs +++ b/crates/astria-sequencer/src/transaction/checks.rs @@ -100,7 +100,7 @@ pub(crate) async fn get_total_transaction_cost( } Action::BridgeUnlock(act) => { let asset = state - .get_bridge_account_ibc_asset(&tx) + .get_bridge_account_ibc_asset(&act.bridge_address) .await .wrap_err("failed to get bridge account asset id")?; cost_by_asset @@ -157,15 +157,23 @@ mod tests { use crate::{ accounts::StateWriteExt as _, address::{ - StateReadExt, + StateReadExt as _, StateWriteExt as _, }, - app::test_utils::*, + app::{ + benchmark_and_test_utils::{ + ALICE_ADDRESS, + BOB_ADDRESS, + }, + test_utils::*, + }, assets::StateWriteExt as _, benchmark_and_test_utils::{ + astria_address_from_hex_string, nria, ASTRIA_PREFIX, }, + bridge::StateWriteExt as _, fees::{ StateReadExt as _, StateWriteExt as _, @@ -349,4 +357,55 @@ mod tests { .contains(&other_asset.to_ibc_prefixed().to_string()) ); } + + #[tokio::test] + async fn get_total_transaction_cost_bridge_unlock_with_withdrawer_address_ok() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state_tx = StateDelta::new(snapshot); + + let withdrawer = get_alice_signing_key(); + let withdrawer_address = astria_address_from_hex_string(ALICE_ADDRESS); + let bridge_address = astria_address_from_hex_string(BOB_ADDRESS); + + state_tx + .put_fees(FeeComponents::::new(0, 0)) + .unwrap(); + state_tx + .put_account_balance(&bridge_address, &nria(), 1000) + .unwrap(); + state_tx + .put_account_balance(&withdrawer_address, &nria(), 1000) + .unwrap(); + state_tx + .put_bridge_account_ibc_asset(&bridge_address, nria()) + .unwrap(); + state_tx + .put_bridge_account_rollup_id(&bridge_address, [0; 32].into()) + .unwrap(); + state_tx + 
.put_bridge_account_withdrawer_address(&bridge_address, withdrawer_address) + .unwrap(); + + let actions = vec![Action::BridgeUnlock(BridgeUnlock { + to: withdrawer_address, + amount: 100, + fee_asset: nria().into(), + bridge_address, + memo: String::new(), + rollup_block_number: 1, + rollup_withdrawal_event_id: String::new(), + })]; + + let tx = TransactionBody::builder() + .actions(actions) + .chain_id("test-chain-id") + .try_build() + .unwrap(); + + let signed_tx = tx.sign(&withdrawer); + check_balance_for_total_fees_and_transfers(&signed_tx, &state_tx) + .await + .unwrap(); + } } From fd94ec04fcefe4ce83cd2436b4fcd66362dbb843 Mon Sep 17 00:00:00 2001 From: Richard Janis Goldschmidt Date: Tue, 14 Jan 2025 18:12:16 +0100 Subject: [PATCH 04/23] chore(clippy): ignore CometBFT as an identifier (#1913) ## Summary Allows writing CometBFT without backticks in Rust doc comments. ## Background Clippy's pedantic settings consider CometBFT a Rust identifier because of its CamelCase. That's annoying, because it's actually the name of the consensus implementation we are using. ## Changes - Create a `clippy.toml` in the root of the repository. - Add `doc-allowed-idents = ["CometBFT"]` to it. ## Testing Writing `CometBFT` in a Rust doc no longer triggers a clippy warning (no lines in this PR). --- clippy.toml | 1 + 1 file changed, 1 insertion(+) create mode 100644 clippy.toml diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 000000000..94f89911e --- /dev/null +++ b/clippy.toml @@ -0,0 +1 @@ +doc-valid-idents = ["CometBFT", ".."] From 2899049bf0dd5bd7ba05927a5daf73ee986a46dc Mon Sep 17 00:00:00 2001 From: Ethan Oroshiba Date: Thu, 16 Jan 2025 12:29:22 -0600 Subject: [PATCH 05/23] chore: bump to rust version 1.83 (#1857) ## Summary Bumped MSRV to 1.83 and made small changes to appease new Clippy lints. ## Background Rust 1.83 solves some bizarre Clippy false positives, improved lints also revealed some nit improvement points. 
Bumping MSRV also doesn't have a huge cost, since we don't have many downstream users. ## Changes - Bumped MSRV. - Changed small bits to appease Clippy, mostly changing to use lifetime elisions. - rustfmt made a bunch of changes with the new version ## Testing Passing all tests. ## Changelogs Changelogs updated for crates which have already been released. ## Related Issues closes #1580 --- .github/workflows/lint.yml | 2 +- .github/workflows/reusable-build.yml | 2 +- .github/workflows/test.yml | 6 +- Cargo.lock | 460 ++++++++++-------- containerfiles/Dockerfile | 2 +- crates/astria-bridge-contracts/Cargo.toml | 2 +- crates/astria-bridge-withdrawer/CHANGELOG.md | 4 + crates/astria-bridge-withdrawer/Cargo.toml | 2 +- crates/astria-build-info/Cargo.toml | 2 +- crates/astria-cli/CHANGELOG.md | 1 + crates/astria-cli/Cargo.toml | 2 +- crates/astria-composer/CHANGELOG.md | 1 + crates/astria-composer/Cargo.toml | 2 +- .../src/executor/bundle_factory/mod.rs | 4 +- crates/astria-conductor/CHANGELOG.md | 1 + crates/astria-conductor/Cargo.toml | 2 +- crates/astria-conductor/src/block_cache.rs | 6 +- .../src/celestia/block_verifier.rs | 10 +- .../src/celestia/reporting.rs | 8 +- .../src/sequencer/reporting.rs | 4 +- crates/astria-config/Cargo.toml | 2 +- crates/astria-core-address/src/lib.rs | 2 +- crates/astria-core-crypto/Cargo.toml | 2 +- crates/astria-core/CHANGELOG.md | 1 + crates/astria-core/Cargo.toml | 2 +- crates/astria-core/src/generated/mod.rs | 3 +- .../src/primitive/v1/asset/denom.rs | 10 +- crates/astria-core/src/primitive/v1/u128.rs | 20 +- crates/astria-core/src/protocol/genesis/v1.rs | 6 - crates/astria-eyre/Cargo.toml | 2 +- crates/astria-grpc-mock-test/Cargo.toml | 2 +- crates/astria-grpc-mock-test/src/lib.rs | 3 +- crates/astria-grpc-mock/Cargo.toml | 2 +- crates/astria-grpc-mock/src/mock_server.rs | 7 +- crates/astria-merkle/Cargo.toml | 2 +- crates/astria-merkle/src/audit.rs | 44 +- crates/astria-merkle/src/lib.rs | 26 +- 
crates/astria-sequencer-client/Cargo.toml | 2 +- .../astria-sequencer-client/src/tests/http.rs | 16 +- crates/astria-sequencer-relayer/CHANGELOG.md | 1 + crates/astria-sequencer-relayer/Cargo.toml | 2 +- .../src/relayer/write/conversion.rs | 3 +- .../blackbox/helpers/mock_sequencer_server.rs | 2 + crates/astria-sequencer-utils/Cargo.toml | 2 +- .../astria-sequencer-utils/src/blob_parser.rs | 14 +- crates/astria-sequencer/CHANGELOG.md | 1 + crates/astria-sequencer/Cargo.toml | 2 +- .../src/accounts/storage/values.rs | 4 +- .../impls/bridge_sudo_change.rs | 14 +- .../src/action_handler/impls/fee_change.rs | 12 +- .../src/action_handler/mod.rs | 2 +- .../src/app/benchmark_and_test_utils.rs | 4 +- .../src/app/storage/values/block_height.rs | 2 +- .../src/app/storage/values/block_timestamp.rs | 2 +- .../src/app/storage/values/chain_id.rs | 4 +- .../src/app/storage/values/revision_number.rs | 2 +- .../src/app/storage/values/storage_version.rs | 2 +- .../src/app/tests_app/mempool.rs | 69 ++- .../astria-sequencer/src/app/tests_app/mod.rs | 110 ++--- .../src/app/tests_breaking_changes.rs | 54 +- .../src/app/tests_execute_transaction.rs | 150 +++--- .../src/authority/storage/values.rs | 4 +- .../src/bridge/storage/keys.rs | 6 +- .../bridge/storage/values/address_bytes.rs | 2 +- .../src/bridge/storage/values/block_height.rs | 2 +- .../storage/values/ibc_prefixed_denom.rs | 2 +- .../src/bridge/storage/values/rollup_id.rs | 2 +- .../bridge/storage/values/transaction_id.rs | 2 +- crates/astria-sequencer/src/fees/tests.rs | 82 ++-- .../src/grpc/storage/values/block_hash.rs | 2 +- .../src/grpc/storage/values/rollup_ids.rs | 2 +- .../storage/values/rollup_transactions.rs | 2 +- .../storage/values/sequencer_block_header.rs | 6 +- .../src/ibc/storage/values.rs | 4 +- crates/astria-sequencer/src/mempool/mod.rs | 12 +- .../src/mempool/transactions_container.rs | 10 +- .../astria-sequencer/src/service/consensus.rs | 62 +-- crates/astria-sequencer/src/storage/keys.rs | 8 +- 
.../src/storage/stored_value.rs | 2 +- .../src/transaction/checks.rs | 9 +- crates/astria-telemetry/Cargo.toml | 2 +- crates/astria-telemetry/src/display.rs | 4 +- .../astria-telemetry/src/metrics/factories.rs | 6 +- crates/astria-test-utils/Cargo.toml | 2 +- crates/astria-test-utils/src/mock/geth.rs | 2 +- justfile | 7 +- lint/tracing_debug_field/Cargo.toml | 6 +- lint/tracing_debug_field/rust-toolchain | 4 +- rust-toolchain.toml | 2 +- rustfmt.toml | 2 +- 90 files changed, 669 insertions(+), 724 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b80c8417d..3e491caed 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -72,7 +72,7 @@ jobs: uses: taiki-e/install-action@just - uses: dtolnay/rust-toolchain@master with: - toolchain: nightly-2024-09-15 + toolchain: nightly-2024-10-03 components: rustfmt - name: run rustfmt run: just lint rust-fmt diff --git a/.github/workflows/reusable-build.yml b/.github/workflows/reusable-build.yml index 3d4c52c6d..fedbd889c 100644 --- a/.github/workflows/reusable-build.yml +++ b/.github/workflows/reusable-build.yml @@ -14,7 +14,7 @@ env: REGISTRY: ghcr.io FULL_REF: ${{ inputs.tag && format('refs/tags/{0}', inputs.tag) || github.ref }} # This must match the entry in rust-toolchain.toml at the repository root - RUSTUP_TOOLCHAIN: "1.81.0" + RUSTUP_TOOLCHAIN: "1.83.0" jobs: upload-binaries: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 833358ab3..10849db97 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,7 +4,7 @@ env: RUSTFLAGS: "-D warnings -D unreachable-pub --cfg tokio_unstable" # This must match the entry in rust-toolchain.toml at the repository root - RUSTUP_TOOLCHAIN: "1.81.0" + RUSTUP_TOOLCHAIN: "1.83.0" on: pull_request: merge_group: @@ -250,13 +250,13 @@ jobs: - uses: dtolnay/rust-toolchain@master with: # This has to match `rust-toolchain` in the rust-toolchain file of the dylint lints - toolchain: 
nightly-2024-09-05 + toolchain: nightly-2024-10-03 components: "clippy, llvm-tools-preview, rustc-dev, rust-src" - uses: Swatinem/rust-cache@v2.7.3 with: cache-provider: "buildjet" - name: install cargo-dylint and dylint-link - run: cargo install cargo-dylint@3.2.0 dylint-link@3.2.0 --locked + run: cargo install cargo-dylint@3.3.0 dylint-link@3.3.0 --locked - uses: arduino/setup-protoc@v3 with: version: "24.4" diff --git a/Cargo.lock b/Cargo.lock index 130b3bff2..cf483afff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "Inflector" @@ -323,7 +323,7 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -415,7 +415,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -510,7 +510,7 @@ dependencies = [ "serde", "serde_json", "tendermint", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -624,7 +624,7 @@ dependencies = [ "tempfile", "tendermint", "tendermint-rpc", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-stream", "tokio-test", @@ -676,7 +676,7 @@ dependencies = [ "sha2 0.10.8", "tendermint", "tendermint-rpc", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-stream", "tokio-util 0.7.11", @@ -727,7 +727,7 @@ dependencies = [ "tempfile", "tendermint", "tendermint-proto", - "thiserror", + "thiserror 1.0.63", "tonic 0.10.2", "tracing", ] @@ -738,7 +738,7 @@ version = "0.1.0" dependencies = [ "astria-core-consts", "bech32 0.11.0", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -754,7 +754,7 @@ dependencies = [ "ed25519-consensus", "rand 0.8.5", "sha2 
0.10.8", - "thiserror", + "thiserror 1.0.63", "zeroize", ] @@ -855,7 +855,7 @@ dependencies = [ "sha2 0.10.8", "tendermint", "tendermint-proto", - "thiserror", + "thiserror 1.0.63", "tokio", "tonic 0.10.2", "tower", @@ -884,7 +884,7 @@ dependencies = [ "tendermint", "tendermint-proto", "tendermint-rpc", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-stream", "tokio-test", @@ -936,7 +936,7 @@ dependencies = [ "tendermint", "tendermint-config", "tendermint-rpc", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-stream", "tokio-test", @@ -989,7 +989,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror", + "thiserror 1.0.63", "tokio", "tracing", "tracing-opentelemetry", @@ -1057,9 +1057,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -1068,9 +1068,9 @@ version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -1130,9 +1130,9 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -1300,12 +1300,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -1431,7 +1431,7 @@ dependencies = [ "cid", "dashmap", "multihash", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -1453,9 +1453,9 @@ checksum = 
"c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ "once_cell", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", "syn_derive", ] @@ -1497,7 +1497,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" dependencies = [ "memchr", - "regex-automata 0.4.7", + "regex-automata 0.4.9", "serde", ] @@ -1574,7 +1574,21 @@ dependencies = [ "semver 1.0.23", "serde", "serde_json", - "thiserror", + "thiserror 1.0.63", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8769706aad5d996120af43197bf46ef6ad0fda35216b4505f926a365a232d924" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.23", + "serde", + "serde_json", + "thiserror 2.0.8", ] [[package]] @@ -1619,7 +1633,7 @@ dependencies = [ "http 0.2.12", "jsonrpsee", "serde", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -1694,7 +1708,7 @@ dependencies = [ "serde", "serde_repr", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -1816,7 +1830,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "471df7896633bfc1e7d3da5b598422891e4cb8931210168ec63ea586e285803f" dependencies = [ - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -1839,9 +1853,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -1853,7 +1867,7 @@ checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "clippy_config" version = "0.1.83" -source = 
"git+https://github.com/rust-lang/rust-clippy?rev=a95afe2d0a2051d97b723b0b197393b7811bc4e4#a95afe2d0a2051d97b723b0b197393b7811bc4e4" +source = "git+https://github.com/rust-lang/rust-clippy?rev=aa0d551351a9c15d8a95fdb3e2946b505893dda8#aa0d551351a9c15d8a95fdb3e2946b505893dda8" dependencies = [ "itertools 0.12.1", "serde", @@ -1863,7 +1877,7 @@ dependencies = [ [[package]] name = "clippy_utils" version = "0.1.83" -source = "git+https://github.com/rust-lang/rust-clippy?rev=a95afe2d0a2051d97b723b0b197393b7811bc4e4#a95afe2d0a2051d97b723b0b197393b7811bc4e4" +source = "git+https://github.com/rust-lang/rust-clippy?rev=aa0d551351a9c15d8a95fdb3e2946b505893dda8#aa0d551351a9c15d8a95fdb3e2946b505893dda8" dependencies = [ "arrayvec 0.7.6", "clippy_config", @@ -1931,7 +1945,7 @@ dependencies = [ "k256", "serde", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -1947,7 +1961,7 @@ dependencies = [ "pbkdf2 0.12.2", "rand 0.8.5", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -1967,7 +1981,7 @@ dependencies = [ "serde_derive", "sha2 0.10.8", "sha3", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -2101,7 +2115,7 @@ version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "unicode-xid 0.2.5", ] @@ -2302,9 +2316,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -2338,10 +2352,10 @@ checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "strsim", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -2352,7 +2366,7 @@ 
checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -2456,7 +2470,7 @@ dependencies = [ "blake2b_simd 1.0.2", "decaf377", "rand_core 0.6.4", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -2468,7 +2482,7 @@ dependencies = [ "decaf377", "hex", "rand_core 0.6.4", - "thiserror", + "thiserror 1.0.63", "zeroize", "zeroize_derive", ] @@ -2487,7 +2501,7 @@ dependencies = [ "hex", "rand_core 0.6.4", "serde", - "thiserror", + "thiserror 1.0.63", "zeroize", ] @@ -2517,7 +2531,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -2528,9 +2542,9 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a6433aac097572ea8ccc60b3f2e756c661c9aeed9225cdd4d0cb119cb7ff6ba" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -2539,9 +2553,9 @@ version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -2625,9 +2639,9 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -2650,9 +2664,9 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27540baf49be0d484d8f0130d7d8da3011c32a44d4fc873368154f1510e574a2" dependencies = [ - "proc-macro2 1.0.86", + 
"proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -2678,13 +2692,13 @@ checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "dylint" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60c66a33f0ecdb430682ffd8fa085ad57d0df028c9edaebb16e1b31b8c0240b8" +checksum = "b8b1c23bc7c6694e47c35bb4685e91b11464eb4373e0ca4b139d26b548c7acc6" dependencies = [ "ansi_term", "anyhow", - "cargo_metadata", + "cargo_metadata 0.19.1", "dirs", "dylint_internal", "is-terminal", @@ -2699,14 +2713,14 @@ dependencies = [ [[package]] name = "dylint_internal" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d30490df8338ad9f2c9b2e5766632083cd4b08f12b93994042c020e1c4604381" +checksum = "26fe025af824ecd9be633f23ad98363b954237f314b05acf2e8dc52258dea349" dependencies = [ "ansi_term", "anyhow", "bitflags 2.6.0", - "cargo_metadata", + "cargo_metadata 0.19.1", "git2", "home", "if_chain", @@ -2716,33 +2730,33 @@ dependencies = [ "regex", "rust-embed", "serde", - "thiserror", + "thiserror 2.0.8", "toml 0.8.19", ] [[package]] name = "dylint_linting" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b1935d10c8bbfa22ee0b54ef6c05bbb432d36dd048f2723a2a5dfefa77e414" +checksum = "bc26e4dd700f99c321fe55282ac658fdebe224d6b0c6f955ecc5b6248f2ce981" dependencies = [ - "cargo_metadata", + "cargo_metadata 0.19.1", "dylint_internal", "paste", "rustversion", "serde", - "thiserror", + "thiserror 2.0.8", "toml 0.8.19", ] [[package]] name = "dylint_testing" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee051ff840d2cdc82574ce73c604bc01c0e204ec07ae8b297530b494dd0df319" +checksum = "3a5ea6be2adc417abf2ad958cc5f2ff0c17a4fcbd78f97de3472fb1218fe93dc" dependencies = [ "anyhow", - 
"cargo_metadata", + "cargo_metadata 0.19.1", "compiletest_rs", "dylint", "dylint_internal", @@ -2793,7 +2807,7 @@ dependencies = [ "hex", "rand_core 0.6.4", "sha2 0.9.9", - "thiserror", + "thiserror 1.0.63", "zeroize", ] @@ -2874,9 +2888,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" dependencies = [ "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -2929,12 +2943,12 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2955,7 +2969,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "sha3", - "thiserror", + "thiserror 1.0.63", "uuid 0.8.2", ] @@ -2972,7 +2986,7 @@ dependencies = [ "serde", "serde_json", "sha3", - "thiserror", + "thiserror 1.0.63", "uint", ] @@ -3049,7 +3063,7 @@ dependencies = [ "pin-project", "serde", "serde_json", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -3064,12 +3078,12 @@ dependencies = [ "ethers-core", "eyre", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "regex", "serde", "serde_json", - "syn 2.0.75", + "syn 2.0.90", "toml 0.8.19", "walkdir", ] @@ -3084,10 +3098,10 @@ dependencies = [ "const-hex", "ethers-contract-abigen", "ethers-core", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "serde_json", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -3098,7 +3112,7 @@ checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f" dependencies = [ "arrayvec 0.7.6", "bytes", - "cargo_metadata", + "cargo_metadata 0.18.1", "chrono", "const-hex", "elliptic-curve", @@ -3113,9 
+3127,9 @@ dependencies = [ "serde", "serde_json", "strum", - "syn 2.0.75", + "syn 2.0.90", "tempfile", - "thiserror", + "thiserror 1.0.63", "tiny-keccak", "unicode-xid 0.2.5", ] @@ -3139,7 +3153,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.63", "tokio", "tracing", "tracing-futures", @@ -3172,7 +3186,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-tungstenite", "tracing", @@ -3199,7 +3213,7 @@ dependencies = [ "ethers-core", "rand 0.8.5", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -3266,9 +3280,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fastrlp" @@ -3403,7 +3417,7 @@ dependencies = [ "rand_core 0.6.4", "serde", "serdect", - "thiserror", + "thiserror 1.0.63", "thiserror-nostd-notrait", "visibility", "zeroize", @@ -3531,9 +3545,9 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -3665,8 +3679,8 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -4513,9 +4527,9 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -4599,7 +4613,7 @@ version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -4797,7 +4811,7 @@ dependencies = [ "num-traits", "serde", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -4847,7 +4861,7 @@ dependencies = [ "pin-project", "rustls-native-certs", "soketto", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-rustls", "tokio-util 0.7.11", @@ -4875,7 +4889,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror", + "thiserror 1.0.63", "tokio", "tracing", ] @@ -4893,7 +4907,7 @@ dependencies = [ "jsonrpsee-types", "serde", "serde_json", - "thiserror", + "thiserror 1.0.63", "tokio", "tower", "tracing", @@ -4908,7 +4922,7 @@ checksum = "29110019693a4fa2dbda04876499d098fa16d70eba06b1e6e2b3f1b251419515" dependencies = [ "heck 0.4.1", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -4928,7 +4942,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-stream", "tokio-util 0.7.11", @@ -4946,7 +4960,7 @@ dependencies = [ "beef", "serde", "serde_json", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -5014,9 +5028,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" [[package]] name = "libgit2-sys" @@ -5069,7 +5083,7 @@ dependencies = [ "multihash", "quick-protobuf", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -5238,7 +5252,7 @@ dependencies = [ "metrics 0.23.0", "metrics-util", "quanta", - "thiserror", + "thiserror 1.0.63", "tokio", 
"tracing", ] @@ -5334,7 +5348,7 @@ dependencies = [ "rustc_version 0.4.0", "smallvec", "tagptr", - "thiserror", + "thiserror 1.0.63", "triomphe", "uuid 1.10.0", ] @@ -5452,7 +5466,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -5502,9 +5516,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -5533,9 +5547,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "opaque-debug" @@ -5563,7 +5577,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" dependencies = [ "bytes", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -5597,7 +5611,7 @@ dependencies = [ "js-sys", "once_cell", "pin-project-lite", - "thiserror", + "thiserror 1.0.63", "urlencoding", ] @@ -5615,7 +5629,7 @@ dependencies = [ "opentelemetry-semantic-conventions", "opentelemetry_sdk", "prost", - "thiserror", + "thiserror 1.0.63", "tokio", "tonic 0.11.0", ] @@ -5670,7 +5684,7 @@ dependencies = [ "ordered-float", "percent-encoding", "rand 0.8.5", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-stream", ] @@ -5723,7 +5737,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -5836,10 +5850,10 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "proc-macro2-diagnostics", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -5865,7 +5879,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdad6a1d9cf116a059582ce415d5f5566aabcd4008646779dab7fdc2a9a9d426" dependencies = [ "peg-runtime", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", ] @@ -5919,7 +5933,7 @@ dependencies = [ "serde", "serde_with", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -6000,7 +6014,7 @@ dependencies = [ "regex", "serde", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -6036,7 +6050,7 @@ dependencies = [ "regex", "serde", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -6130,7 +6144,7 @@ dependencies = [ "poseidon377", "rand 0.8.5", "serde", - "thiserror", + "thiserror 1.0.63", "tracing", ] @@ -6183,7 +6197,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.63", "ucd-trie", ] @@ -6222,9 +6236,9 @@ version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -6394,8 +6408,8 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" 
dependencies = [ - "proc-macro2 1.0.86", - "syn 2.0.75", + "proc-macro2 1.0.92", + "syn 2.0.90", ] [[package]] @@ -6438,7 +6452,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "version_check", ] @@ -6449,7 +6463,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "version_check", ] @@ -6465,9 +6479,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -6478,9 +6492,9 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", "version_check", "yansi", ] @@ -6497,7 +6511,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "unarray", ] @@ -6528,7 +6542,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.75", + "syn 2.0.90", "tempfile", ] @@ -6540,9 +6554,9 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.12.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -6584,7 +6598,7 @@ version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", ] [[package]] @@ -6734,19 +6748,19 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror", + "thiserror 1.0.63", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -6760,13 +6774,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -6783,9 +6797,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -6901,7 +6915,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -6969,10 +6983,10 @@ version = "8.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6125dbc8867951125eec87294137f4e9c2c96566e61bf72c45095a7c77761478" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "rust-embed-utils", - "syn 2.0.75", + "syn 2.0.90", "walkdir", ] @@ -7041,21 +7055,21 @@ checksum = "70f5b7fc8060f4f8373f9381a630304b42e1183535d9beb1d3f596b236c9106a" dependencies = [ "serde", "serde_json", - "thiserror", + "thiserror 1.0.63", "tracing", ] [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7197,7 +7211,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "syn 1.0.109", ] @@ -7339,9 +7353,9 @@ version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -7374,7 +7388,7 @@ checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" dependencies = [ "percent-encoding", "serde", - "thiserror", + "thiserror 1.0.63", ] [[package]] @@ -7383,9 +7397,9 @@ version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -7434,9 +7448,9 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ "darling", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -7568,7 +7582,7 @@ checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 1.0.63", "time", ] @@ -7699,10 +7713,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "rustversion", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -7732,18 +7746,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.75" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", "unicode-ident", ] @@ -7755,9 +7769,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -7772,9 +7786,9 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", 
"quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -7812,12 +7826,12 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.12.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.1.0", + "fastrand 2.3.0", "once_cell", "rustix", "windows-sys 0.59.0", @@ -7924,7 +7938,7 @@ dependencies = [ "tendermint", "tendermint-config", "tendermint-proto", - "thiserror", + "thiserror 1.0.63", "time", "tokio", "tracing", @@ -7991,7 +8005,16 @@ version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.63", +] + +[[package]] +name = "thiserror" +version = "2.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f5383f3e0071702bf93ab5ee99b52d26936be9dedd9413067cbdcddcb6141a" +dependencies = [ + "thiserror-impl 2.0.8", ] [[package]] @@ -8000,9 +8023,20 @@ version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f357fcec90b3caef6623a099691be676d033b40a058ac95d2a6ade6fa0c943" +dependencies = [ + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -8020,9 +8054,9 @@ version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "585e5ef40a784ce60b49c67d762110688d211d395d39e096be204535cf64590e" 
dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -8137,9 +8171,9 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -8363,10 +8397,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4ef6dd70a610078cb4e338a0f79d06bc759ff1b22d2120c2ff02ae264ba9c2" dependencies = [ "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "prost-build", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -8430,7 +8464,7 @@ checksum = "b882e5e82ee7440a08335f4d5a2edd9f7678b2cba73eac4826b53c22fd76fdd3" dependencies = [ "futures", "pin-project", - "thiserror", + "thiserror 1.0.63", "tokio", "tokio-util 0.7.11", "tower", @@ -8485,9 +8519,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -8638,7 +8672,7 @@ dependencies = [ "rand 0.8.5", "rustls", "sha1", - "thiserror", + "thiserror 1.0.63", "url", "utf-8", ] @@ -8832,7 +8866,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e27d6bdd219887a9eadd19e1c34f32e47fa332301184935c6d9bca26f3cca525" dependencies = [ "anyhow", - "cargo_metadata", + "cargo_metadata 0.18.1", "cfg-if", "git2", "regex", @@ -8853,9 +8887,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -8924,9 +8958,9 @@ dependencies = [ 
"bumpalo", "log", "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", "wasm-bindgen-shared", ] @@ -8958,9 +8992,9 @@ version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9329,7 +9363,7 @@ dependencies = [ "pharos", "rustc_version 0.4.0", "send_wrapper 0.6.0", - "thiserror", + "thiserror 1.0.63", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -9368,9 +9402,9 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", "synstructure", ] @@ -9390,9 +9424,9 @@ version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -9410,9 +9444,9 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", "synstructure", ] @@ -9431,9 +9465,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] @@ -9453,9 +9487,9 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.92", "quote", - "syn 2.0.75", + "syn 2.0.90", ] [[package]] diff --git a/containerfiles/Dockerfile b/containerfiles/Dockerfile index f5fefa984..036132f63 100644 --- a/containerfiles/Dockerfile +++ b/containerfiles/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$BUILDPLATFORM rust:1.81-bookworm AS rust +FROM --platform=$BUILDPLATFORM rust:1.83-bookworm AS rust WORKDIR /build/ diff --git a/crates/astria-bridge-contracts/Cargo.toml b/crates/astria-bridge-contracts/Cargo.toml index a398dc9d2..ec539e618 100644 --- a/crates/astria-bridge-contracts/Cargo.toml +++ b/crates/astria-bridge-contracts/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-bridge-contracts" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crates/astria-bridge-withdrawer/CHANGELOG.md b/crates/astria-bridge-withdrawer/CHANGELOG.md index 59090f4cd..792abcab9 100644 --- a/crates/astria-bridge-withdrawer/CHANGELOG.md +++ b/crates/astria-bridge-withdrawer/CHANGELOG.md @@ -15,6 +15,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [1.0.1] - 2024-11-01 +### Changed + +- Bump MSRV to 1.83.0 [#1857](https://github.com/astriaorg/astria/pull/1857). 
+ ### Fixed - Set `batch_total_settled_value` metric to 0 when no withdrawals are settled [#1778](https://github.com/astriaorg/astria/pull/1768) diff --git a/crates/astria-bridge-withdrawer/Cargo.toml b/crates/astria-bridge-withdrawer/Cargo.toml index f5f50a2cc..103f23ae8 100644 --- a/crates/astria-bridge-withdrawer/Cargo.toml +++ b/crates/astria-bridge-withdrawer/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-bridge-withdrawer" version = "1.0.1" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" diff --git a/crates/astria-build-info/Cargo.toml b/crates/astria-build-info/Cargo.toml index ceab67fe2..a6b743470 100644 --- a/crates/astria-build-info/Cargo.toml +++ b/crates/astria-build-info/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-build-info" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crates/astria-cli/CHANGELOG.md b/crates/astria-cli/CHANGELOG.md index 7cadb8c69..4d35fa1f0 100644 --- a/crates/astria-cli/CHANGELOG.md +++ b/crates/astria-cli/CHANGELOG.md @@ -15,6 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- Bump MSRV to 1.83.0 [#1857](https://github.com/astriaorg/astria/pull/1857). - Update `idna` dependency to resolve cargo audit warning [#1869](https://github.com/astriaorg/astria/pull/1869). 
- Remove default values from `--sequencer.chain-id` and `--sequencer-url` arguments [#1792](https://github.com/astriaorg/astria/pull/1792) diff --git a/crates/astria-cli/Cargo.toml b/crates/astria-cli/Cargo.toml index eabf1c69b..0dd1fca9f 100644 --- a/crates/astria-cli/Cargo.toml +++ b/crates/astria-cli/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-cli" version = "0.5.1" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" diff --git a/crates/astria-composer/CHANGELOG.md b/crates/astria-composer/CHANGELOG.md index 52a945378..5891bd7f5 100644 --- a/crates/astria-composer/CHANGELOG.md +++ b/crates/astria-composer/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- Bump MSRV to 1.83.0 [#1857](https://github.com/astriaorg/astria/pull/1857). - Bump penumbra dependencies [#1740](https://github.com/astriaorg/astria/pull/1740). 
## [1.0.0-rc.2] - 2024-10-23 diff --git a/crates/astria-composer/Cargo.toml b/crates/astria-composer/Cargo.toml index 5c62d2de7..ac17b45de 100644 --- a/crates/astria-composer/Cargo.toml +++ b/crates/astria-composer/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-composer" version = "1.0.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" diff --git a/crates/astria-composer/src/executor/bundle_factory/mod.rs b/crates/astria-composer/src/executor/bundle_factory/mod.rs index f35eb89d9..81652d86b 100644 --- a/crates/astria-composer/src/executor/bundle_factory/mod.rs +++ b/crates/astria-composer/src/executor/bundle_factory/mod.rs @@ -36,7 +36,7 @@ enum SizedBundleError { pub(super) struct SizedBundleReport<'a>(pub(super) &'a SizedBundle); -impl<'a> Serialize for SizedBundleReport<'a> { +impl Serialize for SizedBundleReport<'_> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -277,7 +277,7 @@ pub(super) struct NextFinishedBundle<'a> { bundle_factory: &'a mut BundleFactory, } -impl<'a> NextFinishedBundle<'a> { +impl NextFinishedBundle<'_> { pub(super) fn pop(self) -> SizedBundle { self.bundle_factory .finished diff --git a/crates/astria-conductor/CHANGELOG.md b/crates/astria-conductor/CHANGELOG.md index b9de90e6d..687145fa5 100644 --- a/crates/astria-conductor/CHANGELOG.md +++ b/crates/astria-conductor/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- Bump MSRV to 1.83.0 [#1857](https://github.com/astriaorg/astria/pull/1857). - Bump penumbra dependencies [#1740](https://github.com/astriaorg/astria/pull/1740). 
## [1.0.0-rc.2] - 2024-10-23 diff --git a/crates/astria-conductor/Cargo.toml b/crates/astria-conductor/Cargo.toml index 8dc3f608a..7934612df 100644 --- a/crates/astria-conductor/Cargo.toml +++ b/crates/astria-conductor/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-conductor" version = "1.0.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" diff --git a/crates/astria-conductor/src/block_cache.rs b/crates/astria-conductor/src/block_cache.rs index 344d2bbec..799b31eb2 100644 --- a/crates/astria-conductor/src/block_cache.rs +++ b/crates/astria-conductor/src/block_cache.rs @@ -105,7 +105,9 @@ impl BlockCache { pub(crate) enum Error { #[error("block at sequencer height {height} already in cache")] Occupied { height: u64 }, - #[error("block too old: expect sequencer height {current_height} or newer, got {block_height}")] + #[error( + "block too old: expect sequencer height {current_height} or newer, got {block_height}" + )] Old { block_height: u64, current_height: u64, @@ -120,7 +122,7 @@ pin_project! 
{ } } -impl<'a, T> Future for NextBlock<'a, T> { +impl Future for NextBlock<'_, T> { type Output = Option; fn poll( diff --git a/crates/astria-conductor/src/celestia/block_verifier.rs b/crates/astria-conductor/src/celestia/block_verifier.rs index f8fb5ced5..53a6a90f8 100644 --- a/crates/astria-conductor/src/celestia/block_verifier.rs +++ b/crates/astria-conductor/src/celestia/block_verifier.rs @@ -531,11 +531,9 @@ mod tests { &tendermint::chain::Id::try_from("test-chain-g3ejvw").unwrap(), ); assert!(result.is_err()); - assert!( - result - .unwrap_err() - .to_string() - .contains("commit voting power is less than 2/3 of total voting power") - ); + assert!(result + .unwrap_err() + .to_string() + .contains("commit voting power is less than 2/3 of total voting power")); } } diff --git a/crates/astria-conductor/src/celestia/reporting.rs b/crates/astria-conductor/src/celestia/reporting.rs index 8d6cccfae..efe571891 100644 --- a/crates/astria-conductor/src/celestia/reporting.rs +++ b/crates/astria-conductor/src/celestia/reporting.rs @@ -11,7 +11,7 @@ use super::{ }; pub(super) struct ReportReconstructedBlocks<'a>(pub(super) &'a ReconstructedBlocks); -impl<'a> Serialize for ReportReconstructedBlocks<'a> { +impl Serialize for ReportReconstructedBlocks<'_> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -25,7 +25,7 @@ impl<'a> Serialize for ReportReconstructedBlocks<'a> { } struct ReportReconstructedBlocksSeq<'a>(&'a [ReconstructedBlock]); -impl<'a> Serialize for ReportReconstructedBlocksSeq<'a> { +impl Serialize for ReportReconstructedBlocksSeq<'_> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -39,7 +39,7 @@ impl<'a> Serialize for ReportReconstructedBlocksSeq<'a> { } struct ReportReconstructedBlock<'a>(&'a ReconstructedBlock); -impl<'a> Serialize for ReportReconstructedBlock<'a> { +impl Serialize for ReportReconstructedBlock<'_> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -61,7 
+61,7 @@ impl<'a> Serialize for ReportReconstructedBlock<'a> { struct SerializeDisplay<'a, T>(&'a T); -impl<'a, T> Serialize for SerializeDisplay<'a, T> +impl Serialize for SerializeDisplay<'_, T> where T: std::fmt::Display, { diff --git a/crates/astria-conductor/src/sequencer/reporting.rs b/crates/astria-conductor/src/sequencer/reporting.rs index 779eb5762..25dafd3a3 100644 --- a/crates/astria-conductor/src/sequencer/reporting.rs +++ b/crates/astria-conductor/src/sequencer/reporting.rs @@ -13,7 +13,7 @@ use serde::ser::{ }; pub(super) struct ReportFilteredSequencerBlock<'a>(pub(super) &'a FilteredSequencerBlock); -impl<'a> Serialize for ReportFilteredSequencerBlock<'a> { +impl Serialize for ReportFilteredSequencerBlock<'_> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, @@ -27,7 +27,7 @@ impl<'a> Serialize for ReportFilteredSequencerBlock<'a> { struct ReportRollups<'a>(&'a IndexMap); -impl<'a> Serialize for ReportRollups<'a> { +impl Serialize for ReportRollups<'_> { fn serialize(&self, serializer: S) -> Result where S: serde::Serializer, diff --git a/crates/astria-config/Cargo.toml b/crates/astria-config/Cargo.toml index 6672b7528..a8642f478 100644 --- a/crates/astria-config/Cargo.toml +++ b/crates/astria-config/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-config" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" diff --git a/crates/astria-core-address/src/lib.rs b/crates/astria-core-address/src/lib.rs index 3685222d8..43bbe7496 100644 --- a/crates/astria-core-address/src/lib.rs +++ b/crates/astria-core-address/src/lib.rs @@ -214,7 +214,7 @@ impl Builder { } } -impl<'a, 'b, TFormat, TBytesIter> Builder, WithPrefix<'b>> +impl Builder, WithPrefix<'_>> where TBytesIter: IntoIterator, TBytesIter::IntoIter: ExactSizeIterator, diff --git a/crates/astria-core-crypto/Cargo.toml 
b/crates/astria-core-crypto/Cargo.toml index 4e8b94bf7..264cff2df 100644 --- a/crates/astria-core-crypto/Cargo.toml +++ b/crates/astria-core-crypto/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-core-crypto" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" diff --git a/crates/astria-core/CHANGELOG.md b/crates/astria-core/CHANGELOG.md index d80e1e537..c14c392d3 100644 --- a/crates/astria-core/CHANGELOG.md +++ b/crates/astria-core/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- Bump MSRV to 1.83.0 [#1857](https://github.com/astriaorg/astria/pull/1857). - Move `astria_core::crypto` to `astria-core-crypto` and reexport `astria_core_crypto as crypto` (this change is transparent) [#1800](https://github.com/astriaorg/astria/pull/1800/). diff --git a/crates/astria-core/Cargo.toml b/crates/astria-core/Cargo.toml index b816e3fdf..7003f1d9f 100644 --- a/crates/astria-core/Cargo.toml +++ b/crates/astria-core/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-core" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" diff --git a/crates/astria-core/src/generated/mod.rs b/crates/astria-core/src/generated/mod.rs index 41e186e2d..1fef28c5e 100644 --- a/crates/astria-core/src/generated/mod.rs +++ b/crates/astria-core/src/generated/mod.rs @@ -2,7 +2,8 @@ unreachable_pub, clippy::pedantic, clippy::needless_borrows_for_generic_args, - clippy::arithmetic_side_effects + clippy::arithmetic_side_effects, + clippy::needless_lifetimes )] //! Files generated using [`tonic-build`] and [`buf`] via the [`tools/protobuf-compiler`] //! build tool. 
diff --git a/crates/astria-core/src/primitive/v1/asset/denom.rs b/crates/astria-core/src/primitive/v1/asset/denom.rs index f85511bd5..0c1ead355 100644 --- a/crates/astria-core/src/primitive/v1/asset/denom.rs +++ b/crates/astria-core/src/primitive/v1/asset/denom.rs @@ -99,13 +99,13 @@ impl From for Denom { } } -impl<'a> From<&'a IbcPrefixed> for Denom { +impl From<&IbcPrefixed> for Denom { fn from(value: &IbcPrefixed) -> Self { Self::IbcPrefixed(*value) } } -impl<'a> From<&'a TracePrefixed> for Denom { +impl From<&TracePrefixed> for Denom { fn from(value: &TracePrefixed) -> Self { Self::TracePrefixed(value.clone()) } @@ -117,7 +117,7 @@ impl From for IbcPrefixed { } } -impl<'a> From<&'a TracePrefixed> for IbcPrefixed { +impl From<&TracePrefixed> for IbcPrefixed { fn from(value: &TracePrefixed) -> Self { value.to_ibc_prefixed() } @@ -129,13 +129,13 @@ impl From for IbcPrefixed { } } -impl<'a> From<&'a Denom> for IbcPrefixed { +impl From<&Denom> for IbcPrefixed { fn from(value: &Denom) -> Self { value.to_ibc_prefixed() } } -impl<'a> From<&'a IbcPrefixed> for IbcPrefixed { +impl From<&IbcPrefixed> for IbcPrefixed { fn from(value: &IbcPrefixed) -> Self { *value } diff --git a/crates/astria-core/src/primitive/v1/u128.rs b/crates/astria-core/src/primitive/v1/u128.rs index 581f5b5ec..e6a0edd14 100644 --- a/crates/astria-core/src/primitive/v1/u128.rs +++ b/crates/astria-core/src/primitive/v1/u128.rs @@ -3,24 +3,8 @@ use crate::generated::astria::primitive::v1::Uint128; impl From for Uint128 { fn from(primitive: u128) -> Self { - let [ - h0, - h1, - h2, - h3, - h4, - h5, - h6, - h7, - l0, - l1, - l2, - l3, - l4, - l5, - l6, - l7, - ] = primitive.to_be_bytes(); + let [h0, h1, h2, h3, h4, h5, h6, h7, l0, l1, l2, l3, l4, l5, l6, l7] = + primitive.to_be_bytes(); let lo = u64::from_be_bytes([l0, l1, l2, l3, l4, l5, l6, l7]); let hi = u64::from_be_bytes([h0, h1, h2, h3, h4, h5, h6, h7]); Self { diff --git a/crates/astria-core/src/protocol/genesis/v1.rs 
b/crates/astria-core/src/protocol/genesis/v1.rs index 2dac7e1aa..d6102f23d 100644 --- a/crates/astria-core/src/protocol/genesis/v1.rs +++ b/crates/astria-core/src/protocol/genesis/v1.rs @@ -152,12 +152,6 @@ impl Protobuf for GenesisAppState { type Error = GenesisAppStateError; type Raw = raw::GenesisAppState; - // TODO (https://github.com/astriaorg/astria/issues/1580): remove this once Rust is upgraded to/past 1.83 - #[expect( - clippy::allow_attributes, - clippy::allow_attributes_without_reason, - reason = "false positive on `allowed_fee_assets` due to \"allow\" in the name" - )] fn try_from_raw_ref(raw: &Self::Raw) -> Result { let Self::Raw { address_prefixes, diff --git a/crates/astria-eyre/Cargo.toml b/crates/astria-eyre/Cargo.toml index 6ee2bb106..e0a63967e 100644 --- a/crates/astria-eyre/Cargo.toml +++ b/crates/astria-eyre/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-eyre" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crates/astria-grpc-mock-test/Cargo.toml b/crates/astria-grpc-mock-test/Cargo.toml index 1767b42a7..be5638119 100644 --- a/crates/astria-grpc-mock-test/Cargo.toml +++ b/crates/astria-grpc-mock-test/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-grpc-mock-test" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crates/astria-grpc-mock-test/src/lib.rs b/crates/astria-grpc-mock-test/src/lib.rs index 6dad4f20c..c1f9a2287 100644 --- a/crates/astria-grpc-mock-test/src/lib.rs +++ b/crates/astria-grpc-mock-test/src/lib.rs @@ -8,7 +8,8 @@ #[expect( clippy::allow_attributes, clippy::allow_attributes_without_reason, - reason = "cannot prevent generated files from having allow attributes" + clippy::needless_lifetimes, + reason = "cannot prevent generated files from having allow 
attributes or specific lifetimes" )] pub mod health { include!("generated/grpc.health.v1.rs"); diff --git a/crates/astria-grpc-mock/Cargo.toml b/crates/astria-grpc-mock/Cargo.toml index 97f7c6a5b..cb66c612d 100644 --- a/crates/astria-grpc-mock/Cargo.toml +++ b/crates/astria-grpc-mock/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-grpc-mock" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crates/astria-grpc-mock/src/mock_server.rs b/crates/astria-grpc-mock/src/mock_server.rs index dcbad3d80..4f78077e0 100644 --- a/crates/astria-grpc-mock/src/mock_server.rs +++ b/crates/astria-grpc-mock/src/mock_server.rs @@ -178,7 +178,7 @@ impl MockServer { if let VerificationOutcome::Failure(failed_verifications) = self.state.read().await.verify() { let received_requests_message = - received_requests_message(&self.state.read().await.received_requests); + received_requests_message(self.state.read().await.received_requests.as_ref()); let verifications_errors: String = failed_verifications.iter().fold(String::new(), |mut s, m| { @@ -366,7 +366,8 @@ impl Drop for MockGuard { if report.is_satisfied() { state.mock_set.deactivate(*mock_id); } else { - let received_requests_message = received_requests_message(&state.received_requests); + let received_requests_message = + received_requests_message(state.received_requests.as_ref()); let verifications_error = format!("- {}\n", report.error_message()); let error_message = format!( @@ -385,7 +386,7 @@ impl Drop for MockGuard { } fn received_requests_message( - received_requests: &Option>, + received_requests: Option<&Vec<(&'static str, MockRequest)>>, ) -> String { if let Some(received_requests) = received_requests { if received_requests.is_empty() { diff --git a/crates/astria-merkle/Cargo.toml b/crates/astria-merkle/Cargo.toml index 19af2d1aa..be01e6458 100644 --- a/crates/astria-merkle/Cargo.toml +++ 
b/crates/astria-merkle/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-merkle" version = "1.0.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" repository = "https://github.com/astriaorg/astria" homepage = "https://astria.org" diff --git a/crates/astria-merkle/src/audit.rs b/crates/astria-merkle/src/audit.rs index ce524e0a7..0963f7223 100644 --- a/crates/astria-merkle/src/audit.rs +++ b/crates/astria-merkle/src/audit.rs @@ -141,7 +141,7 @@ impl<'a, TLeaf, TRoot> Audit<'a, TLeaf, TRoot> { } } -impl<'a, TRoot> Audit<'a, WithLeafHash, TRoot> { +impl Audit<'_, WithLeafHash, TRoot> { /// Reconstruct the root hash using the leaf hash stored in the [`WithLeafHash`] state. /// /// # Examples @@ -171,7 +171,7 @@ impl<'a, TRoot> Audit<'a, WithLeafHash, TRoot> { } } -impl<'a> Audit<'a, WithLeafHash, WithRoot> { +impl Audit<'_, WithLeafHash, WithRoot> { /// Check if the leaf is included in the tree using the internal proof. /// /// This method reconstructs a Merkle tree root starting from the @@ -189,16 +189,14 @@ impl<'a> Audit<'a, WithLeafHash, WithRoot> { /// tree.build_leaf().write(&[4, 2]).write(b"answer"); /// let root = tree.root(); /// let proof = tree.construct_proof(3).expect("leaf 4 is inside the tree"); - /// assert!( - /// proof - /// .audit() - /// .with_root(root) - /// .with_leaf_builder() - /// .write(&[4, 2]) - /// .write(b"answer") - /// .finish_leaf() - /// .perform() - /// ); + /// assert!(proof + /// .audit() + /// .with_root(root) + /// .with_leaf_builder() + /// .write(&[4, 2]) + /// .write(b"answer") + /// .finish_leaf() + /// .perform()); /// ``` #[must_use = "verify the audit result"] pub fn perform(&self) -> bool { @@ -497,18 +495,16 @@ impl Proof { /// .write(b"42"); /// let root = tree.root(); /// let proof = tree.construct_proof(3).expect("leaf 4 is in the tree"); - /// assert!( - /// proof - /// .audit() - /// .with_root(root) - /// .with_leaf_builder() - /// .write(&[42; 1]) - /// .write(&[1, 1]) - 
/// .write(&vec![42; 3]) - /// .write(b"42") - /// .finish_leaf() - /// .perform() - /// ); + /// assert!(proof + /// .audit() + /// .with_root(root) + /// .with_leaf_builder() + /// .write(&[42; 1]) + /// .write(&[1, 1]) + /// .write(&vec![42; 3]) + /// .write(b"42") + /// .finish_leaf() + /// .perform()); /// ``` #[must_use = "an audit must be performed to be useful"] pub fn audit(&self) -> Audit { diff --git a/crates/astria-merkle/src/lib.rs b/crates/astria-merkle/src/lib.rs index 9ca82b184..4d89f1295 100644 --- a/crates/astria-merkle/src/lib.rs +++ b/crates/astria-merkle/src/lib.rs @@ -39,18 +39,16 @@ //! .construct_proof(4) //! .expect("leaf 5 must be inside the tree"); //! -//! assert!( -//! proof -//! .audit() -//! .with_root(root) -//! .with_leaf_builder() -//! .write(&[42; 1]) -//! .write(&[1, 1]) -//! .write(&vec![42; 3]) -//! .write(b"42") -//! .finish_leaf() -//! .perform() -//! ); +//! assert!(proof +//! .audit() +//! .with_root(root) +//! .with_leaf_builder() +//! .write(&[42; 1]) +//! .write(&[1, 1]) +//! .write(&vec![42; 3]) +//! .write(b"42") +//! .finish_leaf() +//! .perform()); //! ``` //! //! # Indexing scheme @@ -184,7 +182,7 @@ pub struct LeafBuilder<'a> { hasher: Option, } -impl<'a> LeafBuilder<'a> { +impl LeafBuilder<'_> { /// Takes ownership of the builder, dropping it. 
/// /// This method causes the leaf builder to go out of scope, causing it @@ -232,7 +230,7 @@ impl<'a> LeafBuilder<'a> { } } -impl<'a> Drop for LeafBuilder<'a> { +impl Drop for LeafBuilder<'_> { fn drop(&mut self) { let Self { tree, diff --git a/crates/astria-sequencer-client/Cargo.toml b/crates/astria-sequencer-client/Cargo.toml index 1fa879ff1..1807b11c6 100644 --- a/crates/astria-sequencer-client/Cargo.toml +++ b/crates/astria-sequencer-client/Cargo.toml @@ -3,7 +3,7 @@ name = "astria-sequencer-client" version = "0.1.0" edition = "2021" license = "MIT OR Apache-2.0" -rust-version = "1.81.0" +rust-version = "1.83.0" repository = "https://github.com/astriaorg/astria" homepage = "https://astria.org" diff --git a/crates/astria-sequencer-client/src/tests/http.rs b/crates/astria-sequencer-client/src/tests/http.rs index 92f02124c..9f0cca97d 100644 --- a/crates/astria-sequencer-client/src/tests/http.rs +++ b/crates/astria-sequencer-client/src/tests/http.rs @@ -151,15 +151,13 @@ fn create_signed_transaction() -> Transaction { .unwrap(); let alice_key = SigningKey::from(alice_secret_bytes); - let actions = vec![ - Transfer { - to: bob_address(), - amount: 333_333, - asset: "nria".parse().unwrap(), - fee_asset: "nria".parse().unwrap(), - } - .into(), - ]; + let actions = vec![Transfer { + to: bob_address(), + amount: 333_333, + asset: "nria".parse().unwrap(), + fee_asset: "nria".parse().unwrap(), + } + .into()]; TransactionBody::builder() .actions(actions) .chain_id("test") diff --git a/crates/astria-sequencer-relayer/CHANGELOG.md b/crates/astria-sequencer-relayer/CHANGELOG.md index b2afb363c..0bd6e4fa6 100644 --- a/crates/astria-sequencer-relayer/CHANGELOG.md +++ b/crates/astria-sequencer-relayer/CHANGELOG.md @@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- Bump MSRV to 1.83.0 [#1857](https://github.com/astriaorg/astria/pull/1857). 
- Bump penumbra dependencies [#1740](https://github.com/astriaorg/astria/pull/1740). ## [1.0.0-rc.2] - 2024-10-23 diff --git a/crates/astria-sequencer-relayer/Cargo.toml b/crates/astria-sequencer-relayer/Cargo.toml index a0eed0eb6..1d0589e93 100644 --- a/crates/astria-sequencer-relayer/Cargo.toml +++ b/crates/astria-sequencer-relayer/Cargo.toml @@ -3,7 +3,7 @@ name = "astria-sequencer-relayer" version = "1.0.0" edition = "2021" license = "MIT OR Apache-2.0" -rust-version = "1.81.0" +rust-version = "1.83.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" homepage = "https://astria.org" diff --git a/crates/astria-sequencer-relayer/src/relayer/write/conversion.rs b/crates/astria-sequencer-relayer/src/relayer/write/conversion.rs index 82d425ff8..ad00a3cbd 100644 --- a/crates/astria-sequencer-relayer/src/relayer/write/conversion.rs +++ b/crates/astria-sequencer-relayer/src/relayer/write/conversion.rs @@ -387,7 +387,7 @@ pin_project! { } } -impl<'a> Future for TakeSubmission<'a> { +impl Future for TakeSubmission<'_> { type Output = Option; fn poll(self: Pin<&mut Self>, _: &mut std::task::Context<'_>) -> Poll { @@ -441,6 +441,7 @@ fn sequencer_namespace(metadata: &SubmittedMetadata) -> Namespace { ) } +#[expect(clippy::ref_option, reason = "necessary for serde impl")] fn serialize_opt_namespace( namespace: &Option, serializer: S, diff --git a/crates/astria-sequencer-relayer/tests/blackbox/helpers/mock_sequencer_server.rs b/crates/astria-sequencer-relayer/tests/blackbox/helpers/mock_sequencer_server.rs index 33ff34d98..9a1405bc2 100644 --- a/crates/astria-sequencer-relayer/tests/blackbox/helpers/mock_sequencer_server.rs +++ b/crates/astria-sequencer-relayer/tests/blackbox/helpers/mock_sequencer_server.rs @@ -96,6 +96,8 @@ impl MockSequencerServer { } } +// TODO(https://github.com/astriaorg/astria/issues/1859): box enum variants to avoid large sizes +#[expect(clippy::large_enum_variant, reason = "should be fixed")] pub enum SequencerBlockToMount { 
GoodAtHeight(u32), BadAtHeight(u32), diff --git a/crates/astria-sequencer-utils/Cargo.toml b/crates/astria-sequencer-utils/Cargo.toml index 2c2974577..992a47394 100644 --- a/crates/astria-sequencer-utils/Cargo.toml +++ b/crates/astria-sequencer-utils/Cargo.toml @@ -3,7 +3,7 @@ name = "astria-sequencer-utils" version = "0.1.0" edition = "2021" license = "MIT OR Apache-2.0" -rust-version = "1.81.0" +rust-version = "1.83.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" homepage = "https://astria.org" diff --git a/crates/astria-sequencer-utils/src/blob_parser.rs b/crates/astria-sequencer-utils/src/blob_parser.rs index 1e104c469..c537a4240 100644 --- a/crates/astria-sequencer-utils/src/blob_parser.rs +++ b/crates/astria-sequencer-utils/src/blob_parser.rs @@ -489,17 +489,17 @@ impl Display for RollupTransaction { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { colored_ln(f, "hash", &self.hash)?; colored_ln(f, "nonce", &self.nonce)?; - colored_ln(f, "block hash", none_or_value(&self.block_hash))?; - colored_ln(f, "block number", none_or_value(&self.block_number))?; + colored_ln(f, "block hash", none_or_value(self.block_hash.as_ref()))?; + colored_ln(f, "block number", none_or_value(self.block_number.as_ref()))?; colored_ln( f, "transaction index", - none_or_value(&self.transaction_index), + none_or_value(self.transaction_index.as_ref()), )?; colored_ln(f, "from", &self.from)?; - colored_ln(f, "to", none_or_value(&self.to))?; + colored_ln(f, "to", none_or_value(self.to.as_ref()))?; colored_ln(f, "value", &self.value)?; - colored_ln(f, "gas price", none_or_value(&self.gas_price))?; + colored_ln(f, "gas price", none_or_value(self.gas_price.as_ref()))?; colored_ln(f, "gas", &self.gas)?; colored_ln(f, "input", &self.input)?; colored_ln(f, "v", self.v)?; @@ -790,10 +790,10 @@ fn indent<'a, 'b>(f: &'a mut Formatter<'b>) -> indenter::Indented<'a, Formatter< indented(f).with_str(" ") } -fn none_or_value(maybe_value: &Option) -> String { +fn 
none_or_value(maybe_value: Option<&T>) -> String { maybe_value .as_ref() - .map_or("none".to_string(), T::to_string) + .map_or("none".to_string(), |o| o.to_string()) } fn colored_label(f: &mut Formatter<'_>, label: &str) -> fmt::Result { diff --git a/crates/astria-sequencer/CHANGELOG.md b/crates/astria-sequencer/CHANGELOG.md index bd1b45b06..644238818 100644 --- a/crates/astria-sequencer/CHANGELOG.md +++ b/crates/astria-sequencer/CHANGELOG.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- Bump MSRV to 1.83.0 [#1857](https://github.com/astriaorg/astria/pull/1857). - Index all event attributes [#1786](https://github.com/astriaorg/astria/pull/1786). - Consolidate action handling to single module [#1759](https://github.com/astriaorg/astria/pull/1759). - Ensure all deposit assets are trace prefixed [#1807](https://github.com/astriaorg/astria/pull/1807). diff --git a/crates/astria-sequencer/Cargo.toml b/crates/astria-sequencer/Cargo.toml index 56014df01..18458c890 100644 --- a/crates/astria-sequencer/Cargo.toml +++ b/crates/astria-sequencer/Cargo.toml @@ -3,7 +3,7 @@ name = "astria-sequencer" version = "1.0.0" edition = "2021" license = "MIT OR Apache-2.0" -rust-version = "1.81.0" +rust-version = "1.83.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" homepage = "https://astria.org" diff --git a/crates/astria-sequencer/src/accounts/storage/values.rs b/crates/astria-sequencer/src/accounts/storage/values.rs index ca5f7b397..9a771823a 100644 --- a/crates/astria-sequencer/src/accounts/storage/values.rs +++ b/crates/astria-sequencer/src/accounts/storage/values.rs @@ -28,7 +28,7 @@ impl From for u128 { } } -impl<'a> From for crate::storage::StoredValue<'a> { +impl From for crate::storage::StoredValue<'_> { fn from(balance: Balance) -> Self { crate::storage::StoredValue::Accounts(Value(ValueImpl::Balance(balance))) } @@ -61,7 +61,7 @@ impl From for u32 { } } -impl<'a> From for 
crate::storage::StoredValue<'a> { +impl From for crate::storage::StoredValue<'_> { fn from(nonce: Nonce) -> Self { crate::storage::StoredValue::Accounts(Value(ValueImpl::Nonce(nonce))) } diff --git a/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs b/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs index 97f7dbaa5..867a824c2 100644 --- a/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs @@ -145,14 +145,12 @@ mod tests { fee_asset: asset.clone(), }; - assert!( - action - .check_and_execute(state) - .await - .unwrap_err() - .to_string() - .contains("unauthorized for bridge sudo change action") - ); + assert!(action + .check_and_execute(state) + .await + .unwrap_err() + .to_string() + .contains("unauthorized for bridge sudo change action")); } #[tokio::test] diff --git a/crates/astria-sequencer/src/action_handler/impls/fee_change.rs b/crates/astria-sequencer/src/action_handler/impls/fee_change.rs index 3b399ddc4..24448b908 100644 --- a/crates/astria-sequencer/src/action_handler/impls/fee_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/fee_change.rs @@ -202,13 +202,11 @@ mod tests { }); state.put_sudo_address([1; 20]).unwrap(); - assert!( - state - .get_fees::() - .await - .expect("should not error fetching unstored action fees") - .is_none() - ); + assert!(state + .get_fees::() + .await + .expect("should not error fetching unstored action fees") + .is_none()); // Execute an initial fee change tx to store the first version of the fees. 
let initial_fees = FeeComponents::::new(1, 2); diff --git a/crates/astria-sequencer/src/action_handler/mod.rs b/crates/astria-sequencer/src/action_handler/mod.rs index 09bde22b0..0921cb82e 100644 --- a/crates/astria-sequencer/src/action_handler/mod.rs +++ b/crates/astria-sequencer/src/action_handler/mod.rs @@ -45,7 +45,7 @@ pub(crate) trait ActionHandler { async fn check_stateless(&self) -> astria_eyre::eyre::Result<()>; async fn check_and_execute(&self, mut state: S) - -> astria_eyre::eyre::Result<()>; + -> astria_eyre::eyre::Result<()>; } async fn execute_transfer( diff --git a/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs b/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs index 7171bce58..889082345 100644 --- a/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs +++ b/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs @@ -89,8 +89,8 @@ pub(crate) fn default_fees() -> astria_core::protocol::genesis::v1::GenesisFees } } -pub(crate) fn proto_genesis_state() --> astria_core::generated::astria::protocol::genesis::v1::GenesisAppState { +pub(crate) fn proto_genesis_state( +) -> astria_core::generated::astria::protocol::genesis::v1::GenesisAppState { use astria_core::generated::astria::protocol::genesis::v1::{ GenesisAppState, IbcParameters, diff --git a/crates/astria-sequencer/src/app/storage/values/block_height.rs b/crates/astria-sequencer/src/app/storage/values/block_height.rs index 29b5298a4..c07102a4d 100644 --- a/crates/astria-sequencer/src/app/storage/values/block_height.rs +++ b/crates/astria-sequencer/src/app/storage/values/block_height.rs @@ -24,7 +24,7 @@ impl From for u64 { } } -impl<'a> From for crate::storage::StoredValue<'a> { +impl From for crate::storage::StoredValue<'_> { fn from(block_height: BlockHeight) -> Self { crate::storage::StoredValue::App(Value(ValueImpl::BlockHeight(block_height))) } diff --git a/crates/astria-sequencer/src/app/storage/values/block_timestamp.rs 
b/crates/astria-sequencer/src/app/storage/values/block_timestamp.rs index 23aa4cddf..6a28e3d08 100644 --- a/crates/astria-sequencer/src/app/storage/values/block_timestamp.rs +++ b/crates/astria-sequencer/src/app/storage/values/block_timestamp.rs @@ -49,7 +49,7 @@ impl BorshDeserialize for BlockTimestamp { } } -impl<'a> From for crate::storage::StoredValue<'a> { +impl From for crate::storage::StoredValue<'_> { fn from(block_timestamp: BlockTimestamp) -> Self { crate::storage::StoredValue::App(Value(ValueImpl::BlockTimestamp(block_timestamp))) } diff --git a/crates/astria-sequencer/src/app/storage/values/chain_id.rs b/crates/astria-sequencer/src/app/storage/values/chain_id.rs index 0547029db..237f204ec 100644 --- a/crates/astria-sequencer/src/app/storage/values/chain_id.rs +++ b/crates/astria-sequencer/src/app/storage/values/chain_id.rs @@ -30,13 +30,13 @@ impl<'a> From> for tendermint::chain::Id { } } -impl<'a> BorshSerialize for ChainId<'a> { +impl BorshSerialize for ChainId<'_> { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { self.0.as_str().serialize(writer) } } -impl<'a> BorshDeserialize for ChainId<'a> { +impl BorshDeserialize for ChainId<'_> { fn deserialize_reader(reader: &mut R) -> std::io::Result { let chain_id_str = String::deserialize_reader(reader)?; let chain_id = diff --git a/crates/astria-sequencer/src/app/storage/values/revision_number.rs b/crates/astria-sequencer/src/app/storage/values/revision_number.rs index d238b5b5e..93e5c42a9 100644 --- a/crates/astria-sequencer/src/app/storage/values/revision_number.rs +++ b/crates/astria-sequencer/src/app/storage/values/revision_number.rs @@ -24,7 +24,7 @@ impl From for u64 { } } -impl<'a> From for crate::storage::StoredValue<'a> { +impl From for crate::storage::StoredValue<'_> { fn from(revision_number: RevisionNumber) -> Self { crate::storage::StoredValue::App(Value(ValueImpl::RevisionNumber(revision_number))) } diff --git a/crates/astria-sequencer/src/app/storage/values/storage_version.rs 
b/crates/astria-sequencer/src/app/storage/values/storage_version.rs index 933f76b8f..53e7a6f9a 100644 --- a/crates/astria-sequencer/src/app/storage/values/storage_version.rs +++ b/crates/astria-sequencer/src/app/storage/values/storage_version.rs @@ -24,7 +24,7 @@ impl From for u64 { } } -impl<'a> From for crate::storage::StoredValue<'a> { +impl From for crate::storage::StoredValue<'_> { fn from(storage_version: StorageVersion) -> Self { crate::storage::StoredValue::App(Value(ValueImpl::StorageVersion(storage_version))) } diff --git a/crates/astria-sequencer/src/app/tests_app/mempool.rs b/crates/astria-sequencer/src/app/tests_app/mempool.rs index 4a30b4460..a0dafc189 100644 --- a/crates/astria-sequencer/src/app/tests_app/mempool.rs +++ b/crates/astria-sequencer/src/app/tests_app/mempool.rs @@ -54,9 +54,10 @@ async fn trigger_cleaning() { // create tx which will cause mempool cleaning flag to be set let tx_trigger = TransactionBody::builder() - .actions(vec![ - FeeChange::Transfer(FeeComponents::::new(10, 0)).into(), - ]) + .actions(vec![FeeChange::Transfer(FeeComponents::::new( + 10, 0, + )) + .into()]) .chain_id("test") .try_build() .unwrap() @@ -146,9 +147,10 @@ async fn do_not_trigger_cleaning() { // create tx which will fail execution and not trigger flag // (wrong sudo signer) let tx_fail = TransactionBody::builder() - .actions(vec![ - FeeChange::Transfer(FeeComponents::::new(10, 0)).into(), - ]) + .actions(vec![FeeChange::Transfer(FeeComponents::::new( + 10, 0, + )) + .into()]) .chain_id("test") .try_build() .unwrap() @@ -213,15 +215,13 @@ async fn maintenance_recosting_promotes() { // create tx which will not be included in block due to // having insufficient funds (transaction will be recosted to enable) let tx_fail_recost_funds = TransactionBody::builder() - .actions(vec![ - Transfer { - to: astria_address_from_hex_string(CAROL_ADDRESS), - amount: 1u128, - asset: nria().into(), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![Transfer { + to: 
astria_address_from_hex_string(CAROL_ADDRESS), + amount: 1u128, + asset: nria().into(), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap() @@ -243,9 +243,10 @@ async fn maintenance_recosting_promotes() { // create tx which will enable recost tx to pass let tx_recost = TransactionBody::builder() - .actions(vec![ - FeeChange::Transfer(FeeComponents::::new(10, 0)).into(), - ]) + .actions(vec![FeeChange::Transfer(FeeComponents::::new( + 10, 0, + )) + .into()]) .chain_id("test") .try_build() .unwrap() @@ -389,15 +390,13 @@ async fn maintenance_funds_added_promotes() { // create tx that will not be included in block due to // having no funds (will be sent transfer to then enable) let tx_fail_transfer_funds = TransactionBody::builder() - .actions(vec![ - Transfer { - to: astria_address_from_hex_string(BOB_ADDRESS), - amount: 10u128, - asset: nria().into(), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![Transfer { + to: astria_address_from_hex_string(BOB_ADDRESS), + amount: 10u128, + asset: nria().into(), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap() @@ -419,15 +418,13 @@ async fn maintenance_funds_added_promotes() { // create tx which will enable no funds to pass let tx_fund = TransactionBody::builder() - .actions(vec![ - Transfer { - to: astria_address_from_hex_string(CAROL_ADDRESS), - amount: 22u128, - asset: nria().into(), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![Transfer { + to: astria_address_from_hex_string(CAROL_ADDRESS), + amount: 22u128, + asset: nria().into(), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap() diff --git a/crates/astria-sequencer/src/app/tests_app/mod.rs b/crates/astria-sequencer/src/app/tests_app/mod.rs index c0cd55b1c..011b2886f 100644 --- a/crates/astria-sequencer/src/app/tests_app/mod.rs +++ b/crates/astria-sequencer/src/app/tests_app/mod.rs @@ -244,15 +244,13 @@ async fn 
app_transfer_block_fees_to_sudo() { let bob_address = astria_address_from_hex_string(BOB_ADDRESS); let amount = 333_333; let tx = TransactionBody::builder() - .actions(vec![ - Transfer { - to: bob_address, - amount, - asset: nria().into(), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![Transfer { + to: bob_address, + amount, + asset: nria().into(), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap(); @@ -589,28 +587,24 @@ async fn app_prepare_proposal_cometbft_max_bytes_overflow_ok() { // create txs which will cause cometBFT overflow let alice = get_alice_signing_key(); let tx_pass = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from([1u8; 32]), - data: Bytes::copy_from_slice(&[1u8; 100_000]), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from([1u8; 32]), + data: Bytes::copy_from_slice(&[1u8; 100_000]), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap() .sign(&alice); let tx_overflow = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from([1u8; 32]), - data: Bytes::copy_from_slice(&[1u8; 100_000]), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from([1u8; 32]), + data: Bytes::copy_from_slice(&[1u8; 100_000]), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .nonce(1) .try_build() @@ -679,27 +673,23 @@ async fn app_prepare_proposal_sequencer_max_bytes_overflow_ok() { // create txs which will cause sequencer overflow (max is currently 256_000 bytes) let alice = get_alice_signing_key(); let tx_pass = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from([1u8; 32]), - data: Bytes::copy_from_slice(&[1u8; 200_000]), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: 
RollupId::from([1u8; 32]), + data: Bytes::copy_from_slice(&[1u8; 200_000]), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap() .sign(&alice); let tx_overflow = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from([1u8; 32]), - data: Bytes::copy_from_slice(&[1u8; 100_000]), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from([1u8; 32]), + data: Bytes::copy_from_slice(&[1u8; 100_000]), + fee_asset: nria().into(), + } + .into()]) .nonce(1) .chain_id("test") .try_build() @@ -768,27 +758,23 @@ async fn app_process_proposal_sequencer_max_bytes_overflow_fail() { // create txs which will cause sequencer overflow (max is currently 256_000 bytes) let alice = get_alice_signing_key(); let tx_pass = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from([1u8; 32]), - data: Bytes::copy_from_slice(&[1u8; 200_000]), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from([1u8; 32]), + data: Bytes::copy_from_slice(&[1u8; 200_000]), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap() .sign(&alice); let tx_overflow = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from([1u8; 32]), - data: Bytes::copy_from_slice(&[1u8; 100_000]), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from([1u8; 32]), + data: Bytes::copy_from_slice(&[1u8; 100_000]), + fee_asset: nria().into(), + } + .into()]) .nonce(1) .chain_id("test") .try_build() @@ -834,12 +820,10 @@ async fn app_process_proposal_transaction_fails_to_execute_fails() { // create txs which will cause transaction execution failure let alice = get_alice_signing_key(); let tx_fail = TransactionBody::builder() - .actions(vec![ - SudoAddressChange { - new_address: 
astria_address_from_hex_string(BOB_ADDRESS), - } - .into(), - ]) + .actions(vec![SudoAddressChange { + new_address: astria_address_from_hex_string(BOB_ADDRESS), + } + .into()]) .chain_id("test") .try_build() .unwrap() diff --git a/crates/astria-sequencer/src/app/tests_breaking_changes.rs b/crates/astria-sequencer/src/app/tests_breaking_changes.rs index 2118d41d4..9beb72a95 100644 --- a/crates/astria-sequencer/src/app/tests_breaking_changes.rs +++ b/crates/astria-sequencer/src/app/tests_breaking_changes.rs @@ -259,24 +259,20 @@ async fn app_execute_transaction_with_every_action_snapshot() { .unwrap(); let tx_sudo_ibc = TransactionBody::builder() - .actions(vec![ - IbcSudoChange { - new_address: bob_address, - } - .into(), - ]) + .actions(vec![IbcSudoChange { + new_address: bob_address, + } + .into()]) .nonce(2) .chain_id("test") .try_build() .unwrap(); let tx_sudo = TransactionBody::builder() - .actions(vec![ - SudoAddressChange { - new_address: bob_address, - } - .into(), - ]) + .actions(vec![SudoAddressChange { + new_address: bob_address, + } + .into()]) .nonce(3) .chain_id("test") .try_build() @@ -299,16 +295,14 @@ async fn app_execute_transaction_with_every_action_snapshot() { app.execute_transaction(signed_tx_sudo).await.unwrap(); let tx = TransactionBody::builder() - .actions(vec![ - InitBridgeAccount { - rollup_id, - asset: nria().into(), - fee_asset: nria().into(), - sudo_address: None, - withdrawer_address: None, - } - .into(), - ]) + .actions(vec![InitBridgeAccount { + rollup_id, + asset: nria().into(), + fee_asset: nria().into(), + sudo_address: None, + withdrawer_address: None, + } + .into()]) .chain_id("test") .try_build() .unwrap(); @@ -345,15 +339,13 @@ async fn app_execute_transaction_with_every_action_snapshot() { app.execute_transaction(signed_tx).await.unwrap(); let tx_bridge = TransactionBody::builder() - .actions(vec![ - BridgeSudoChange { - bridge_address, - new_sudo_address: Some(bob_address), - new_withdrawer_address: Some(bob_address), - 
fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![BridgeSudoChange { + bridge_address, + new_sudo_address: Some(bob_address), + new_withdrawer_address: Some(bob_address), + fee_asset: nria().into(), + } + .into()]) .nonce(2) .chain_id("test") .try_build() diff --git a/crates/astria-sequencer/src/app/tests_execute_transaction.rs b/crates/astria-sequencer/src/app/tests_execute_transaction.rs index 7f5d778e3..3b30b53ac 100644 --- a/crates/astria-sequencer/src/app/tests_execute_transaction.rs +++ b/crates/astria-sequencer/src/app/tests_execute_transaction.rs @@ -113,15 +113,13 @@ async fn app_execute_transaction_transfer() { let bob_address = astria_address_from_hex_string(BOB_ADDRESS); let value = 333_333; let tx = TransactionBody::builder() - .actions(vec![ - Transfer { - to: bob_address, - amount: value, - asset: nria().into(), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![Transfer { + to: bob_address, + amount: value, + asset: nria().into(), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap(); @@ -177,15 +175,13 @@ async fn app_execute_transaction_transfer_not_native_token() { // transfer funds from Alice to Bob; use native token for fee payment let bob_address = astria_address_from_hex_string(BOB_ADDRESS); let tx = TransactionBody::builder() - .actions(vec![ - Transfer { - to: bob_address, - amount: value, - asset: test_asset(), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![Transfer { + to: bob_address, + amount: value, + asset: test_asset(), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap(); @@ -249,15 +245,13 @@ async fn app_execute_transaction_transfer_balance_too_low_for_fee() { // 0-value transfer; only fee is deducted from sender let tx = TransactionBody::builder() - .actions(vec![ - Transfer { - to: bob, - amount: 0, - asset: nria().into(), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![Transfer { + to: bob, + amount: 0, + 
asset: nria().into(), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap(); @@ -287,14 +281,12 @@ async fn app_execute_transaction_sequence() { let fee = calculate_rollup_data_submission_fee_from_state(&data, &app.state).await; let tx = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), - data, - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), + data, + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap(); @@ -323,14 +315,12 @@ async fn app_execute_transaction_invalid_fee_asset() { let data = Bytes::from_static(b"hello world"); let tx = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), - data, - fee_asset: test_asset(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), + data, + fee_asset: test_asset(), + } + .into()]) .chain_id("test") .try_build() .unwrap(); @@ -818,14 +808,12 @@ async fn app_execute_transaction_invalid_nonce() { let data = Bytes::from_static(b"hello world"); let tx = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), - data, - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), + data, + fee_asset: nria().into(), + } + .into()]) .nonce(1) .chain_id("test") .try_build() @@ -867,14 +855,12 @@ async fn app_execute_transaction_invalid_chain_id() { // create tx with invalid nonce 1 let data = Bytes::from_static(b"hello world"); let tx = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), - data, - fee_asset: nria().into(), - } - 
.into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), + data, + fee_asset: nria().into(), + } + .into()]) .chain_id("wrong-chain") .try_build() .unwrap(); @@ -922,15 +908,13 @@ async fn app_stateful_check_fails_insufficient_total_balance() { // transfer just enough to cover single sequence fee with data let signed_tx = TransactionBody::builder() - .actions(vec![ - Transfer { - to: keypair_address, - amount: fee, - asset: nria().into(), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![Transfer { + to: keypair_address, + amount: fee, + asset: nria().into(), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap() @@ -968,14 +952,12 @@ async fn app_stateful_check_fails_insufficient_total_balance() { // build single transfer to see passes let signed_tx_pass = TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), - data, - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { + rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), + data, + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap() @@ -1252,15 +1234,13 @@ async fn transaction_execution_records_fee_event() { let bob_address = astria_address_from_hex_string(BOB_ADDRESS); let value = 333_333; let tx = TransactionBody::builder() - .actions(vec![ - Transfer { - to: bob_address, - amount: value, - asset: nria().into(), - fee_asset: nria().into(), - } - .into(), - ]) + .actions(vec![Transfer { + to: bob_address, + amount: value, + asset: nria().into(), + fee_asset: nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap(); diff --git a/crates/astria-sequencer/src/authority/storage/values.rs b/crates/astria-sequencer/src/authority/storage/values.rs index 1c2aaaa4a..a2e1a3095 100644 --- a/crates/astria-sequencer/src/authority/storage/values.rs +++ 
b/crates/astria-sequencer/src/authority/storage/values.rs @@ -36,7 +36,7 @@ enum ValueImpl<'a> { #[derive(BorshSerialize, BorshDeserialize)] pub(in crate::authority) struct AddressBytes<'a>(Cow<'a, [u8; ADDRESS_LEN]>); -impl<'a> Debug for AddressBytes<'a> { +impl Debug for AddressBytes<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", base64(self.0.as_slice())) } @@ -75,7 +75,7 @@ impl<'a> TryFrom> for AddressBytes<'a> { #[derive(BorshSerialize, BorshDeserialize)] struct VerificationKey<'a>(Cow<'a, [u8; 32]>); -impl<'a> Debug for VerificationKey<'a> { +impl Debug for VerificationKey<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", base64(self.0.as_slice())) } diff --git a/crates/astria-sequencer/src/bridge/storage/keys.rs b/crates/astria-sequencer/src/bridge/storage/keys.rs index 358127c92..53ea216aa 100644 --- a/crates/astria-sequencer/src/bridge/storage/keys.rs +++ b/crates/astria-sequencer/src/bridge/storage/keys.rs @@ -131,10 +131,8 @@ mod tests { fn bridge_account_prefix_should_be_prefix_of_relevant_keys() { assert!(rollup_id(&address()).starts_with(BRIDGE_ACCOUNT_PREFIX)); assert!(asset_id(&address()).starts_with(BRIDGE_ACCOUNT_PREFIX)); - assert!( - bridge_account_withdrawal_event(&address(), "the-event") - .starts_with(BRIDGE_ACCOUNT_PREFIX) - ); + assert!(bridge_account_withdrawal_event(&address(), "the-event") + .starts_with(BRIDGE_ACCOUNT_PREFIX)); assert!( last_transaction_id_for_bridge_account(&address()).starts_with(BRIDGE_ACCOUNT_PREFIX) ); diff --git a/crates/astria-sequencer/src/bridge/storage/values/address_bytes.rs b/crates/astria-sequencer/src/bridge/storage/values/address_bytes.rs index 73d1d016f..d420cf333 100644 --- a/crates/astria-sequencer/src/bridge/storage/values/address_bytes.rs +++ b/crates/astria-sequencer/src/bridge/storage/values/address_bytes.rs @@ -24,7 +24,7 @@ use crate::accounts::AddressBytes as DomainAddressBytes; #[derive(BorshSerialize, BorshDeserialize)] pub(in 
crate::bridge) struct AddressBytes<'a>(Cow<'a, [u8; ADDRESS_LEN]>); -impl<'a> Debug for AddressBytes<'a> { +impl Debug for AddressBytes<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", base64(self.0.as_slice())) } diff --git a/crates/astria-sequencer/src/bridge/storage/values/block_height.rs b/crates/astria-sequencer/src/bridge/storage/values/block_height.rs index 1fe4edf48..3c88824ac 100644 --- a/crates/astria-sequencer/src/bridge/storage/values/block_height.rs +++ b/crates/astria-sequencer/src/bridge/storage/values/block_height.rs @@ -36,7 +36,7 @@ impl From for u64 { } } -impl<'a> From for crate::storage::StoredValue<'a> { +impl From for crate::storage::StoredValue<'_> { fn from(block_height: BlockHeight) -> Self { crate::storage::StoredValue::Bridge(Value(ValueImpl::BlockHeight(block_height))) } diff --git a/crates/astria-sequencer/src/bridge/storage/values/ibc_prefixed_denom.rs b/crates/astria-sequencer/src/bridge/storage/values/ibc_prefixed_denom.rs index 725af800d..db8b23f91 100644 --- a/crates/astria-sequencer/src/bridge/storage/values/ibc_prefixed_denom.rs +++ b/crates/astria-sequencer/src/bridge/storage/values/ibc_prefixed_denom.rs @@ -23,7 +23,7 @@ use super::{ #[derive(BorshSerialize, BorshDeserialize)] pub(in crate::bridge) struct IbcPrefixedDenom<'a>(Cow<'a, [u8; 32]>); -impl<'a> Debug for IbcPrefixedDenom<'a> { +impl Debug for IbcPrefixedDenom<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", base64(self.0.as_slice())) } diff --git a/crates/astria-sequencer/src/bridge/storage/values/rollup_id.rs b/crates/astria-sequencer/src/bridge/storage/values/rollup_id.rs index 5a67daf7f..dff8246b1 100644 --- a/crates/astria-sequencer/src/bridge/storage/values/rollup_id.rs +++ b/crates/astria-sequencer/src/bridge/storage/values/rollup_id.rs @@ -23,7 +23,7 @@ use super::{ #[derive(BorshSerialize, BorshDeserialize)] pub(in crate::bridge) struct RollupId<'a>(Cow<'a, [u8; 32]>); -impl<'a> Debug for RollupId<'a> { 
+impl Debug for RollupId<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", base64(self.0.as_slice())) } diff --git a/crates/astria-sequencer/src/bridge/storage/values/transaction_id.rs b/crates/astria-sequencer/src/bridge/storage/values/transaction_id.rs index fa12a21f4..d752d3c63 100644 --- a/crates/astria-sequencer/src/bridge/storage/values/transaction_id.rs +++ b/crates/astria-sequencer/src/bridge/storage/values/transaction_id.rs @@ -26,7 +26,7 @@ use super::{ #[derive(BorshSerialize, BorshDeserialize)] pub(in crate::bridge) struct TransactionId<'a>(Cow<'a, [u8; TRANSACTION_ID_LEN]>); -impl<'a> Debug for TransactionId<'a> { +impl Debug for TransactionId<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", hex(self.0.as_slice())) } diff --git a/crates/astria-sequencer/src/fees/tests.rs b/crates/astria-sequencer/src/fees/tests.rs index 8a6f2202f..0e52622a9 100644 --- a/crates/astria-sequencer/src/fees/tests.rs +++ b/crates/astria-sequencer/src/fees/tests.rs @@ -79,15 +79,13 @@ async fn ensure_correct_block_fees_transfer() { let alice = get_alice_signing_key(); let bob_address = astria_address_from_hex_string(BOB_ADDRESS); - let actions = vec![ - Transfer { - to: bob_address, - amount: 1000, - asset: nria().into(), - fee_asset: nria().into(), - } - .into(), - ]; + let actions = vec![Transfer { + to: bob_address, + amount: 1000, + asset: nria().into(), + fee_asset: nria().into(), + } + .into()]; let tx = TransactionBody::builder() .actions(actions) @@ -117,14 +115,12 @@ async fn ensure_correct_block_fees_sequence() { let alice = get_alice_signing_key(); let data = b"hello world".to_vec(); - let actions = vec![ - RollupDataSubmission { - rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), - data: data.clone().into(), - fee_asset: nria().into(), - } - .into(), - ]; + let actions = vec![RollupDataSubmission { + rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), + data: data.clone().into(), + fee_asset: 
nria().into(), + } + .into()]; let tx = TransactionBody::builder() .actions(actions) @@ -157,16 +153,14 @@ async fn ensure_correct_block_fees_init_bridge_acct() { let alice = get_alice_signing_key(); - let actions = vec![ - InitBridgeAccount { - rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), - asset: nria().into(), - fee_asset: nria().into(), - sudo_address: None, - withdrawer_address: None, - } - .into(), - ]; + let actions = vec![InitBridgeAccount { + rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), + asset: nria().into(), + fee_asset: nria().into(), + sudo_address: None, + withdrawer_address: None, + } + .into()]; let tx = TransactionBody::builder() .actions(actions) @@ -215,16 +209,14 @@ async fn ensure_correct_block_fees_bridge_lock() { .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); - let actions = vec![ - BridgeLock { - to: bridge_address, - amount: 1, - asset: nria().into(), - fee_asset: nria().into(), - destination_chain_address: rollup_id.to_string(), - } - .into(), - ]; + let actions = vec![BridgeLock { + to: bridge_address, + amount: 1, + asset: nria().into(), + fee_asset: nria().into(), + destination_chain_address: rollup_id.to_string(), + } + .into()]; let tx = TransactionBody::builder() .actions(actions) @@ -278,15 +270,13 @@ async fn ensure_correct_block_fees_bridge_sudo_change() { .await .unwrap(); - let actions = vec![ - BridgeSudoChange { - bridge_address, - new_sudo_address: None, - new_withdrawer_address: None, - fee_asset: nria().into(), - } - .into(), - ]; + let actions = vec![BridgeSudoChange { + bridge_address, + new_sudo_address: None, + new_withdrawer_address: None, + fee_asset: nria().into(), + } + .into()]; let tx = TransactionBody::builder() .actions(actions) diff --git a/crates/astria-sequencer/src/grpc/storage/values/block_hash.rs b/crates/astria-sequencer/src/grpc/storage/values/block_hash.rs index ae6259656..441855d40 100644 --- a/crates/astria-sequencer/src/grpc/storage/values/block_hash.rs 
+++ b/crates/astria-sequencer/src/grpc/storage/values/block_hash.rs @@ -25,7 +25,7 @@ pub(in crate::grpc) struct BlockHash<'a>(Cow<'a, [u8; 32]>); // NOTE(janis): Is it confusing that the display impl at the service level is hex, // while here it's base64? This probably makes sense because storage is closer to // the wire format, which itself followes the base64 pbjson convention. -impl<'a> Debug for BlockHash<'a> { +impl Debug for BlockHash<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", base64(self.0.as_slice())) } diff --git a/crates/astria-sequencer/src/grpc/storage/values/rollup_ids.rs b/crates/astria-sequencer/src/grpc/storage/values/rollup_ids.rs index 05575132a..d2bb6f607 100644 --- a/crates/astria-sequencer/src/grpc/storage/values/rollup_ids.rs +++ b/crates/astria-sequencer/src/grpc/storage/values/rollup_ids.rs @@ -23,7 +23,7 @@ use super::{ #[derive(BorshSerialize, BorshDeserialize)] pub(super) struct RollupId<'a>(Cow<'a, [u8; 32]>); -impl<'a> Debug for RollupId<'a> { +impl Debug for RollupId<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", base64(self.0.as_slice())) } diff --git a/crates/astria-sequencer/src/grpc/storage/values/rollup_transactions.rs b/crates/astria-sequencer/src/grpc/storage/values/rollup_transactions.rs index f54947e03..733c6f8be 100644 --- a/crates/astria-sequencer/src/grpc/storage/values/rollup_transactions.rs +++ b/crates/astria-sequencer/src/grpc/storage/values/rollup_transactions.rs @@ -32,7 +32,7 @@ pub(in crate::grpc) struct RollupTransactions<'a> { proof: Proof<'a>, } -impl<'a> Debug for RollupTransactions<'a> { +impl Debug for RollupTransactions<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("RollupTransactions") .field("rollup_id", &self.rollup_id) diff --git a/crates/astria-sequencer/src/grpc/storage/values/sequencer_block_header.rs b/crates/astria-sequencer/src/grpc/storage/values/sequencer_block_header.rs index 0c53040cc..3c6f6ab62 100644 
--- a/crates/astria-sequencer/src/grpc/storage/values/sequencer_block_header.rs +++ b/crates/astria-sequencer/src/grpc/storage/values/sequencer_block_header.rs @@ -45,13 +45,13 @@ impl<'a> From> for tendermint::chain::Id { } } -impl<'a> BorshSerialize for ChainId<'a> { +impl BorshSerialize for ChainId<'_> { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { self.0.as_str().serialize(writer) } } -impl<'a> BorshDeserialize for ChainId<'a> { +impl BorshDeserialize for ChainId<'_> { fn deserialize_reader(reader: &mut R) -> std::io::Result { let chain_id_str = String::deserialize_reader(reader)?; let chain_id = @@ -106,7 +106,7 @@ pub(in crate::grpc) struct SequencerBlockHeader<'a> { proposer_address: [u8; ADDRESS_LEN], } -impl<'a> Debug for SequencerBlockHeader<'a> { +impl Debug for SequencerBlockHeader<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("SequencerBlockHeader") .field("chain_id", &self.chain_id) diff --git a/crates/astria-sequencer/src/ibc/storage/values.rs b/crates/astria-sequencer/src/ibc/storage/values.rs index f1a0783f8..ad3512505 100644 --- a/crates/astria-sequencer/src/ibc/storage/values.rs +++ b/crates/astria-sequencer/src/ibc/storage/values.rs @@ -41,7 +41,7 @@ impl From for u128 { } } -impl<'a> From for crate::storage::StoredValue<'a> { +impl From for crate::storage::StoredValue<'_> { fn from(balance: Balance) -> Self { crate::storage::StoredValue::Ibc(Value(ValueImpl::Balance(balance))) } @@ -61,7 +61,7 @@ impl<'a> TryFrom> for Balance { #[derive(BorshSerialize, BorshDeserialize)] pub(in crate::ibc) struct AddressBytes<'a>(Cow<'a, [u8; ADDRESS_LEN]>); -impl<'a> Debug for AddressBytes<'a> { +impl Debug for AddressBytes<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", base64(self.0.as_slice())) } diff --git a/crates/astria-sequencer/src/mempool/mod.rs b/crates/astria-sequencer/src/mempool/mod.rs index e3391df88..069671a2f 100644 --- a/crates/astria-sequencer/src/mempool/mod.rs +++ 
b/crates/astria-sequencer/src/mempool/mod.rs @@ -113,7 +113,7 @@ struct ContainedTxLock<'a> { txs: RwLockWriteGuard<'a, HashSet<[u8; 32]>>, } -impl<'a> ContainedTxLock<'a> { +impl ContainedTxLock<'_> { fn add(&mut self, id: [u8; 32]) { if !self.txs.insert(id) { self.mempool.metrics.increment_internal_logic_error(); @@ -1043,12 +1043,10 @@ mod tests { ); // Check the pending nonce for an address with no txs is `None`. - assert!( - mempool - .pending_nonce(astria_address_from_hex_string(CAROL_ADDRESS).as_bytes()) - .await - .is_none() - ); + assert!(mempool + .pending_nonce(astria_address_from_hex_string(CAROL_ADDRESS).as_bytes()) + .await + .is_none()); } #[tokio::test] diff --git a/crates/astria-sequencer/src/mempool/transactions_container.rs b/crates/astria-sequencer/src/mempool/transactions_container.rs index 9dc2b5198..50b89e52f 100644 --- a/crates/astria-sequencer/src/mempool/transactions_container.rs +++ b/crates/astria-sequencer/src/mempool/transactions_container.rs @@ -964,12 +964,10 @@ mod tests { let ttx = MockTTXBuilder::new().nonce(0).build(); let priority = ttx.priority(1); - assert!( - priority - .unwrap_err() - .to_string() - .contains("less than current account nonce") - ); + assert!(priority + .unwrap_err() + .to_string() + .contains("less than current account nonce")); } // From https://doc.rust-lang.org/std/cmp/trait.PartialOrd.html diff --git a/crates/astria-sequencer/src/service/consensus.rs b/crates/astria-sequencer/src/service/consensus.rs index a5f5e8d4b..c496d089c 100644 --- a/crates/astria-sequencer/src/service/consensus.rs +++ b/crates/astria-sequencer/src/service/consensus.rs @@ -240,14 +240,12 @@ mod tests { fn make_unsigned_tx() -> TransactionBody { TransactionBody::builder() - .actions(vec![ - RollupDataSubmission { - rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), - data: Bytes::from_static(b"hello world"), - fee_asset: crate::benchmark_and_test_utils::nria().into(), - } - .into(), - ]) + .actions(vec![RollupDataSubmission { 
+ rollup_id: RollupId::from_unhashed_bytes(b"testchainid"), + data: Bytes::from_static(b"hello world"), + fee_asset: crate::benchmark_and_test_utils::nria().into(), + } + .into()]) .chain_id("test") .try_build() .unwrap() @@ -342,30 +340,26 @@ mod tests { async fn process_proposal_fail_missing_action_commitment() { let (mut consensus_service, _) = new_consensus_service(None).await; let process_proposal = new_process_proposal_request(vec![]); - assert!( - consensus_service - .handle_process_proposal(process_proposal) - .await - .err() - .unwrap() - .to_string() - .contains("no transaction commitment in proposal") - ); + assert!(consensus_service + .handle_process_proposal(process_proposal) + .await + .err() + .unwrap() + .to_string() + .contains("no transaction commitment in proposal")); } #[tokio::test] async fn process_proposal_fail_wrong_commitment_length() { let (mut consensus_service, _) = new_consensus_service(None).await; let process_proposal = new_process_proposal_request(vec![[0u8; 16].to_vec().into()]); - assert!( - consensus_service - .handle_process_proposal(process_proposal) - .await - .err() - .unwrap() - .to_string() - .contains("transaction commitment must be 32 bytes") - ); + assert!(consensus_service + .handle_process_proposal(process_proposal) + .await + .err() + .unwrap() + .to_string() + .contains("transaction commitment must be 32 bytes")); } #[tokio::test] @@ -375,15 +369,13 @@ mod tests { [99u8; 32].to_vec().into(), [99u8; 32].to_vec().into(), ]); - assert!( - consensus_service - .handle_process_proposal(process_proposal) - .await - .err() - .unwrap() - .to_string() - .contains("transaction commitment does not match expected") - ); + assert!(consensus_service + .handle_process_proposal(process_proposal) + .await + .err() + .unwrap() + .to_string() + .contains("transaction commitment does not match expected")); } #[tokio::test] diff --git a/crates/astria-sequencer/src/storage/keys.rs b/crates/astria-sequencer/src/storage/keys.rs index 
edf0333ba..fba71e6be 100644 --- a/crates/astria-sequencer/src/storage/keys.rs +++ b/crates/astria-sequencer/src/storage/keys.rs @@ -30,7 +30,7 @@ impl<'a, T> AccountPrefixer<'a, T> { } } -impl<'a, T: AddressBytes> Display for AccountPrefixer<'a, T> { +impl Display for AccountPrefixer<'_, T> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { use base64::{ display::Base64Display, @@ -46,13 +46,13 @@ impl<'a, T: AddressBytes> Display for AccountPrefixer<'a, T> { #[cfg_attr(test, derive(Debug, PartialEq))] pub(crate) struct Asset<'a>(Cow<'a, IbcPrefixed>); -impl<'a> Asset<'a> { +impl Asset<'_> { pub(crate) fn get(self) -> IbcPrefixed { self.0.into_owned() } } -impl<'a> Display for Asset<'a> { +impl Display for Asset<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { self.0.fmt(f) } @@ -67,7 +67,7 @@ where } } -impl<'a> FromStr for Asset<'a> { +impl FromStr for Asset<'_> { type Err = ParseIbcPrefixedError; fn from_str(s: &str) -> Result { diff --git a/crates/astria-sequencer/src/storage/stored_value.rs b/crates/astria-sequencer/src/storage/stored_value.rs index 07164615e..3fe203521 100644 --- a/crates/astria-sequencer/src/storage/stored_value.rs +++ b/crates/astria-sequencer/src/storage/stored_value.rs @@ -21,7 +21,7 @@ pub(crate) enum StoredValue<'a> { Grpc(crate::grpc::storage::Value<'a>), } -impl<'a> StoredValue<'a> { +impl StoredValue<'_> { pub(crate) fn serialize(&self) -> Result> { borsh::to_vec(&self).wrap_err("failed to serialize stored value") } diff --git a/crates/astria-sequencer/src/transaction/checks.rs b/crates/astria-sequencer/src/transaction/checks.rs index 78848bf0c..974808a4a 100644 --- a/crates/astria-sequencer/src/transaction/checks.rs +++ b/crates/astria-sequencer/src/transaction/checks.rs @@ -351,11 +351,10 @@ mod tests { .await .err() .unwrap(); - assert!( - err.root_cause() - .to_string() - .contains(&other_asset.to_ibc_prefixed().to_string()) - ); + assert!(err + .root_cause() + .to_string() + 
.contains(&other_asset.to_ibc_prefixed().to_string())); } #[tokio::test] diff --git a/crates/astria-telemetry/Cargo.toml b/crates/astria-telemetry/Cargo.toml index 7cc30072a..cb17bdcc9 100644 --- a/crates/astria-telemetry/Cargo.toml +++ b/crates/astria-telemetry/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-telemetry" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" diff --git a/crates/astria-telemetry/src/display.rs b/crates/astria-telemetry/src/display.rs index 3a1c9b98e..c132f0c51 100644 --- a/crates/astria-telemetry/src/display.rs +++ b/crates/astria-telemetry/src/display.rs @@ -97,7 +97,7 @@ pub struct Json<'a, T>(&'a T); // NOTE: This implementation is lifted straight from serde_json: // https://docs.rs/serde_json/1.0.114/src/serde_json/value/mod.rs.html#197 -impl<'a, T> Display for Json<'a, T> +impl Display for Json<'_, T> where T: serde::Serialize, { @@ -106,7 +106,7 @@ where inner: &'a mut Formatter<'b>, } - impl<'a, 'b> io::Write for WriterFormatter<'a, 'b> { + impl io::Write for WriterFormatter<'_, '_> { fn write(&mut self, buf: &[u8]) -> io::Result { // NOTE: Same argument for safety as in // https://docs.rs/serde_json/1.0.114/src/serde_json/value/mod.rs.html#229 diff --git a/crates/astria-telemetry/src/metrics/factories.rs b/crates/astria-telemetry/src/metrics/factories.rs index a3abc3b85..bdd154926 100644 --- a/crates/astria-telemetry/src/metrics/factories.rs +++ b/crates/astria-telemetry/src/metrics/factories.rs @@ -176,7 +176,7 @@ trait RegisterMetric { fn metric_type() -> &'static str; } -impl<'a> RegisterMetric for Factory<'a, Counter> { +impl RegisterMetric for Factory<'_, Counter> { fn register_metric(&self, key: &Key) -> Counter { let ignored_metadata = Metadata::new("", metrics::Level::ERROR, None); Counter::new(self.recorder.register_counter(key, &ignored_metadata)) @@ -187,7 +187,7 @@ impl<'a> RegisterMetric 
for Factory<'a, Counter> { } } -impl<'a> RegisterMetric for Factory<'a, Gauge> { +impl RegisterMetric for Factory<'_, Gauge> { fn register_metric(&self, key: &Key) -> Gauge { let ignored_metadata = Metadata::new("", metrics::Level::ERROR, None); Gauge::new(self.recorder.register_gauge(key, &ignored_metadata)) @@ -198,7 +198,7 @@ impl<'a> RegisterMetric for Factory<'a, Gauge> { } } -impl<'a> RegisterMetric for Factory<'a, Histogram> { +impl RegisterMetric for Factory<'_, Histogram> { fn register_metric(&self, key: &Key) -> Histogram { let ignored_metadata = Metadata::new("", metrics::Level::ERROR, None); Histogram::new(self.recorder.register_histogram(key, &ignored_metadata)) diff --git a/crates/astria-test-utils/Cargo.toml b/crates/astria-test-utils/Cargo.toml index 854cdd4b0..e7a551c1d 100644 --- a/crates/astria-test-utils/Cargo.toml +++ b/crates/astria-test-utils/Cargo.toml @@ -2,7 +2,7 @@ name = "astria-test-utils" version = "0.1.0" edition = "2021" -rust-version = "1.81.0" +rust-version = "1.83.0" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" diff --git a/crates/astria-test-utils/src/mock/geth.rs b/crates/astria-test-utils/src/mock/geth.rs index 4551f6e2c..f82da647f 100644 --- a/crates/astria-test-utils/src/mock/geth.rs +++ b/crates/astria-test-utils/src/mock/geth.rs @@ -120,7 +120,7 @@ mod __rpc_traits { pub trait Geth { #[subscription(name = "eth_subscribe", item = Transaction, unsubscribe = "eth_unsubscribe")] async fn eth_subscribe(&self, target: String, full_txs: Option) - -> SubscriptionResult; + -> SubscriptionResult; #[method(name = "net_version")] async fn net_version(&self) -> Result; diff --git a/justfile b/justfile index 6c35eafd9..39c3f0284 100644 --- a/justfile +++ b/justfile @@ -91,7 +91,7 @@ _fmt-all: [no-exit-message] _fmt-rust: - cargo +nightly-2024-09-15 fmt --all + cargo +nightly-2024-10-03 fmt --all [no-exit-message] _lint-rust: @@ -103,10 +103,11 @@ _lint-rust: [no-exit-message] 
_lint-rust-fmt: - cargo +nightly-2024-09-15 fmt --all -- --check + cargo +nightly-2024-10-03 fmt --all -- --check [no-exit-message] _lint-rust-clippy: + cargo clippy --version cargo clippy --all-targets --all-features \ -- --warn clippy::pedantic --warn clippy::arithmetic-side-effects \ --warn clippy::allow_attributes --warn clippy::allow_attributes_without_reason \ @@ -114,7 +115,7 @@ _lint-rust-clippy: [no-exit-message] _lint-rust-clippy-custom: - cargo +nightly-2024-09-05 clippy --all-targets --all-features \ + cargo +nightly-2024-10-03 clippy --all-targets --all-features \ -p tracing_debug_field \ -- --warn clippy::pedantic --deny warnings diff --git a/lint/tracing_debug_field/Cargo.toml b/lint/tracing_debug_field/Cargo.toml index cfd3a0f18..2c758c945 100644 --- a/lint/tracing_debug_field/Cargo.toml +++ b/lint/tracing_debug_field/Cargo.toml @@ -13,12 +13,12 @@ name = "ui" path = "ui/main.rs" [dependencies] -clippy_utils = { git = "https://github.com/rust-lang/rust-clippy", rev = "a95afe2d0a2051d97b723b0b197393b7811bc4e4" } -dylint_linting = "3.2.0" +clippy_utils = { git = "https://github.com/rust-lang/rust-clippy", rev = "aa0d551351a9c15d8a95fdb3e2946b505893dda8" } +dylint_linting = "3.3.0" if_chain = "1.0.2" [dev-dependencies] -dylint_testing = "3.2.0" +dylint_testing = "3.3.0" tracing = "0.1" [package.metadata.rust-analyzer] diff --git a/lint/tracing_debug_field/rust-toolchain b/lint/tracing_debug_field/rust-toolchain index 44bc11be7..986c56ee6 100644 --- a/lint/tracing_debug_field/rust-toolchain +++ b/lint/tracing_debug_field/rust-toolchain @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2024-09-05" -components = ["llvm-tools-preview", "rustc-dev"] +channel = "nightly-2024-10-03" +components = ["llvm-tools-preview", "rustc-dev", "rust-src"] diff --git a/rust-toolchain.toml b/rust-toolchain.toml index dd2691e04..3a6eabc89 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.81.0" +channel = "1.83.0" components = 
["cargo", "clippy", "rust-std", "rustc"] diff --git a/rustfmt.toml b/rustfmt.toml index 48a4537a5..bde71e9a1 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -11,7 +11,7 @@ imports_granularity = "Crate" group_imports = "StdExternalCrate" reorder_impl_items = true edition = "2021" -version = "Two" +style_edition = "2018" use_field_init_shorthand = true condense_wildcard_suffixes = true unstable_features = true From 12b3bd1cfa556eed955423ee559242fd77609123 Mon Sep 17 00:00:00 2001 From: Ethan Oroshiba Date: Fri, 17 Jan 2025 14:15:24 -0600 Subject: [PATCH 06/23] refactor(sequencer): split run method to eliminate long-lived spans (#1898) ## Summary Split `run_until_stopped()` into two methods and called them both in `spawn()`. ## Background There are two reasons for making this change, firstly being to eliminate the long-lived span in `run_until_stopped()`. Secondly, this is for parity with the rest of the codebase, which separates the creation and/or setup of the components from the running process. ## Changes - Moved initialization logic into `initialize()`, which is instrumented. - Called both `initialize()` and `run_until_stopped()` in new method `spawn()`. - Moved signal handler initialization into `spawn()` and poll for signal receipt during initialization. - Added doc comment for the public `spawn()` method. ## Testing Passing all current tests, no additional testing needed. 
## Changelogs Changelogs updated ## Breaking Changelist - Breaks public API, as the public-facing run function is now `spawn()` instead of `run_until_stopped()` ## Related Issues closes #1895 closes #1893 --- crates/astria-sequencer/src/main.rs | 2 +- crates/astria-sequencer/src/sequencer.rs | 124 +++++++++++++++++------ 2 files changed, 94 insertions(+), 32 deletions(-) diff --git a/crates/astria-sequencer/src/main.rs b/crates/astria-sequencer/src/main.rs index 25455a49c..04287ca2b 100644 --- a/crates/astria-sequencer/src/main.rs +++ b/crates/astria-sequencer/src/main.rs @@ -55,7 +55,7 @@ async fn main() -> ExitCode { "initializing sequencer" ); - Sequencer::run_until_stopped(cfg, metrics) + Sequencer::spawn(cfg, metrics) .await .expect("failed to run sequencer"); diff --git a/crates/astria-sequencer/src/sequencer.rs b/crates/astria-sequencer/src/sequencer.rs index 3a7c5b9c8..7c427d3e7 100644 --- a/crates/astria-sequencer/src/sequencer.rs +++ b/crates/astria-sequencer/src/sequencer.rs @@ -28,10 +28,12 @@ use tokio::{ }; use tower_abci::v038::Server; use tracing::{ + debug, error, error_span, info, info_span, + instrument, }; use crate::{ @@ -46,15 +48,82 @@ use crate::{ pub struct Sequencer; +type GRPCServerHandle = JoinHandle>; +type ABCIServerHandle = JoinHandle<()>; + +struct RunningGRPCServer { + pub handle: GRPCServerHandle, + pub shutdown_tx: oneshot::Sender<()>, +} + +struct RunningABCIServer { + pub handle: ABCIServerHandle, + pub shutdown_rx: oneshot::Receiver<()>, +} + impl Sequencer { - #[expect(clippy::missing_errors_doc, reason = "not a public function")] - pub async fn run_until_stopped(config: Config, metrics: &'static Metrics) -> Result<()> { + /// Builds and runs the sequencer until it is either stopped by a signal or an error occurs. 
+ /// + /// # Errors + /// Returns an error in the following cases: + /// - Database file does not exist, or cannot be loaded into storage + /// - The app fails to initialize + /// - Info service fails to initialize + /// - The server builder fails to return a server + /// - The gRPC address cannot be parsed + /// - The gRPC server fails to exit properly + pub async fn spawn(config: Config, metrics: &'static Metrics) -> Result<()> { + let mut signals = spawn_signal_handler(); + let initialize_fut = Self::initialize(config, metrics); + select! { + _ = signals.stop_rx.changed() => { + info_span!("initialize").in_scope(|| info!("shutting down sequencer")); + Ok(()) + } + + result = initialize_fut => { + let (grpc_server, abci_server) = result?; + Self::run_until_stopped(abci_server, grpc_server, &mut signals).await + } + } + } + + async fn run_until_stopped( + abci_server: RunningABCIServer, + grpc_server: RunningGRPCServer, + signals: &mut SignalReceiver, + ) -> Result<()> { + select! { + _ = signals.stop_rx.changed() => { + info_span!("run_until_stopped").in_scope(|| info!("shutting down sequencer")); + } + + _ = abci_server.shutdown_rx => { + info_span!("run_until_stopped").in_scope(|| error!("ABCI server task exited, this shouldn't happen")); + } + } + + grpc_server + .shutdown_tx + .send(()) + .map_err(|()| eyre!("failed to send shutdown signal to grpc server"))?; + grpc_server + .handle + .await + .wrap_err("grpc server task failed")? 
+ .wrap_err("grpc server failed")?; + abci_server.handle.abort(); + Ok(()) + } + + #[instrument(skip_all)] + async fn initialize( + config: Config, + metrics: &'static Metrics, + ) -> Result<(RunningGRPCServer, RunningABCIServer)> { cnidarium::register_metrics(); register_histogram_global("cnidarium_get_raw_duration_seconds"); register_histogram_global("cnidarium_nonverifiable_get_raw_duration_seconds"); - let span = info_span!("Sequencer::run_until_stopped"); - - let mut signals = spawn_signal_handler(); let substore_prefixes = vec![penumbra_ibc::IBC_SUBSTORE_PREFIX]; @@ -88,7 +157,7 @@ impl Sequencer { service::Info::new(storage.clone()).wrap_err("failed initializing info service")?; let snapshot_service = service::Snapshot; - let server = Server::builder() + let abci_server = Server::builder() .consensus(consensus_service) .info(info_service) .mempool(mempool_service) @@ -96,18 +165,20 @@ impl Sequencer { .finish() .ok_or_eyre("server builder didn't return server; are all fields set?")?; - let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); - let (server_exit_tx, server_exit_rx) = tokio::sync::oneshot::channel(); + let (grpc_shutdown_tx, grpc_shutdown_rx) = tokio::sync::oneshot::channel(); + let (abci_shutdown_tx, abci_shutdown_rx) = tokio::sync::oneshot::channel(); let grpc_addr = config .grpc_addr .parse() .wrap_err("failed to parse grpc_addr address")?; - let grpc_server_handle = start_grpc_server(&storage, mempool, grpc_addr, shutdown_rx); + let grpc_server_handle = start_grpc_server(&storage, mempool, grpc_addr, grpc_shutdown_rx); - span.in_scope(|| info!(config.listen_addr, "starting sequencer")); - let server_handle = tokio::spawn(async move { - match server.listen_tcp(&config.listen_addr).await { + debug!(config.listen_addr, "starting sequencer"); + + let listen_addr = config.listen_addr.clone(); + let abci_server_handle = tokio::spawn(async move { + match abci_server.listen_tcp(listen_addr).await { Ok(()) => { // this shouldn't happen, as 
there isn't a way for the ABCI server to exit info_span!("abci_server").in_scope(|| info!("ABCI server exited successfully")); @@ -117,28 +188,19 @@ impl Sequencer { .in_scope(|| error!(err = e.as_ref(), "ABCI server exited with error")); } } - let _ = server_exit_tx.send(()); + let _ = abci_shutdown_tx.send(()); }); - select! { - _ = signals.stop_rx.changed() => { - span.in_scope(|| info!("shutting down sequencer")); - } - - _ = server_exit_rx => { - span.in_scope(|| error!("ABCI server task exited, this shouldn't happen")); - } - } + let grpc_server = RunningGRPCServer { + handle: grpc_server_handle, + shutdown_tx: grpc_shutdown_tx, + }; + let abci_server = RunningABCIServer { + handle: abci_server_handle, + shutdown_rx: abci_shutdown_rx, + }; - shutdown_tx - .send(()) - .map_err(|()| eyre!("failed to send shutdown signal to grpc server"))?; - grpc_server_handle - .await - .wrap_err("grpc server task failed")? - .wrap_err("grpc server failed")?; - server_handle.abort(); - Ok(()) + Ok((grpc_server, abci_server)) } } From 8879aafebec56fcdefb563a543778b1a25fd35c8 Mon Sep 17 00:00:00 2001 From: Lily Johnson <35852084+Lilyjjo@users.noreply.github.com> Date: Sat, 18 Jan 2025 04:58:13 -0500 Subject: [PATCH 07/23] fix(sequencer): remove unwrap from app utilized mempool logic (#1772) ## Summary This PR changes the mempool's `builder_queue()` to be infallible. The code previously would return an error if the nonce fetch from the database failed. Now it handles the error case by using the pending's lowest nonce for the account as an educated guess of what nonce to use for transaction priority construction. This is okay since `prepare_proposal()`'s logic will just reject invalid nonces if the mempool's state is out of date due to a bug. ## Background We shouldn't have non-existential issues in the mempool cause failures in the sequencer. ## Changes - Changed mempool `builder_queue()` to be infallible with reasonable fallbacks. 
## Testing Manually changed the code path to only use the new logic and watched all tests pass except for those explicitly testing the jitter between the mempool and database (which shouldn't happen if the rest of the code works). The tests in those scenarios only fail due to unmet assertions and not because of panics. ## Changelogs No updates required. ## Related Issues closes #1769 --------- Co-authored-by: Fraser Hutchison --- crates/astria-sequencer/src/app/mod.rs | 6 +- .../src/mempool/benchmarks.rs | 11 +-- crates/astria-sequencer/src/mempool/mod.rs | 67 ++++--------- .../src/mempool/transactions_container.rs | 98 ++++++++----------- 4 files changed, 61 insertions(+), 121 deletions(-) diff --git a/crates/astria-sequencer/src/app/mod.rs b/crates/astria-sequencer/src/app/mod.rs index 9a5064ff6..3dd9e29c4 100644 --- a/crates/astria-sequencer/src/app/mod.rs +++ b/crates/astria-sequencer/src/app/mod.rs @@ -588,11 +588,7 @@ impl App { }; // get copy of transactions to execute from mempool - let pending_txs = self - .mempool - .builder_queue(&self.state) - .await - .expect("failed to fetch pending transactions"); + let pending_txs = self.mempool.builder_queue().await; let mut unused_count = pending_txs.len(); for (tx_hash, tx) in pending_txs { diff --git a/crates/astria-sequencer/src/mempool/benchmarks.rs b/crates/astria-sequencer/src/mempool/benchmarks.rs index 3caaa920d..eac392014 100644 --- a/crates/astria-sequencer/src/mempool/benchmarks.rs +++ b/crates/astria-sequencer/src/mempool/benchmarks.rs @@ -193,20 +193,11 @@ fn builder_queue(bencher: divan::Bencher) { .build() .unwrap(); - let mut mock_state = runtime.block_on(mock_state_getter()); - - // iterate over all signers and put their balances and nonces into the mock state - for i in 0..SIGNER_COUNT { - let signing_key = SigningKey::from([i; 32]); - let signing_address = signing_key.address_bytes(); - mock_state_put_account_nonce(&mut mock_state, &signing_address, 0); - } - bencher .with_inputs(|| 
init_mempool::()) .bench_values(move |mempool| { runtime.block_on(async { - mempool.builder_queue(&mock_state).await.unwrap(); + mempool.builder_queue().await; }); }); } diff --git a/crates/astria-sequencer/src/mempool/mod.rs b/crates/astria-sequencer/src/mempool/mod.rs index 069671a2f..de71d097e 100644 --- a/crates/astria-sequencer/src/mempool/mod.rs +++ b/crates/astria-sequencer/src/mempool/mod.rs @@ -292,12 +292,8 @@ impl Mempool { /// Returns a copy of all transactions and their hashes ready for execution, sorted first by the /// difference between a transaction and the account's current nonce and then by the time that /// the transaction was first seen by the appside mempool. - #[instrument(skip_all, err(level = Level::DEBUG))] - pub(crate) async fn builder_queue( - &self, - state: &S, - ) -> Result)>> { - self.pending.read().await.builder_queue(state).await + pub(crate) async fn builder_queue(&self) -> Vec<([u8; 32], Arc)> { + self.pending.read().await.builder_queue() } /// Removes the target transaction and all transactions for associated account with higher @@ -589,10 +585,9 @@ mod tests { #[tokio::test] async fn single_account_flow_extensive() { // This test tries to hit the more complex edges of the mempool with a single account. - // The test adds the nonces [1,2,0,4], creates a builder queue with the account - // nonce at 1, and then cleans the pool to nonce 4. This tests some of the - // odder edge cases that can be hit if a node goes offline or fails to see - // some transactions that other nodes include into their proposed blocks. + // The test adds the nonces [1,2,0,4], creates a builder queue, and then cleans the pool to + // nonce 4. This tests some of the odder edge cases that can be hit if a node goes offline + // or fails to see some transactions that other nodes include into their proposed blocks. 
let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); let mempool = Mempool::new(metrics, 100); let account_balances = mock_balances(100, 100); @@ -642,24 +637,13 @@ mod tests { // assert size assert_eq!(mempool.len().await, 4); - // mock state with nonce at 1 - let mut mock_state = mock_state_getter().await; - mock_state_put_account_nonce( - &mut mock_state, - astria_address_from_hex_string(ALICE_ADDRESS).as_bytes(), - 1, - ); - - // grab building queue, should return transactions [1,2] since [0] was below and [4] is - // gapped - let builder_queue = mempool - .builder_queue(&mock_state) - .await - .expect("failed to get builder queue"); + // grab building queue, should return transactions [0,1,2] since [4] is gapped + let builder_queue = mempool.builder_queue().await; // see contains first two transactions that should be pending - assert_eq!(builder_queue[0].1.nonce(), 1, "nonce should be one"); - assert_eq!(builder_queue[1].1.nonce(), 2, "nonce should be two"); + assert_eq!(builder_queue[0].1.nonce(), 0, "nonce should be zero"); + assert_eq!(builder_queue[1].1.nonce(), 1, "nonce should be one"); + assert_eq!(builder_queue[2].1.nonce(), 2, "nonce should be two"); // see mempool's transactions just cloned, not consumed assert_eq!(mempool.len().await, 4); @@ -668,6 +652,7 @@ mod tests { // to pending // setup state + let mut mock_state = mock_state_getter().await; mock_state_put_account_nonce( &mut mock_state, astria_address_from_hex_string(ALICE_ADDRESS).as_bytes(), @@ -685,10 +670,7 @@ mod tests { assert_eq!(mempool.len().await, 1); // see transaction [4] properly promoted - let mut builder_queue = mempool - .builder_queue(&mock_state) - .await - .expect("failed to get builder queue"); + let mut builder_queue = mempool.builder_queue().await; let (_, returned_tx) = builder_queue.pop().expect("should return last transaction"); assert_eq!(returned_tx.nonce(), 4, "nonce should be four"); } @@ -733,10 +715,7 @@ mod tests { 1, ); - let builder_queue = 
mempool - .builder_queue(&mock_state) - .await - .expect("failed to get builder queue"); + let builder_queue = mempool.builder_queue().await; assert_eq!( builder_queue.len(), 1, @@ -755,10 +734,7 @@ mod tests { mempool.run_maintenance(&mock_state, false).await; // see builder queue now contains them - let builder_queue = mempool - .builder_queue(&mock_state) - .await - .expect("failed to get builder queue"); + let builder_queue = mempool.builder_queue().await; assert_eq!( builder_queue.len(), 3, @@ -807,10 +783,7 @@ mod tests { 1, ); - let builder_queue = mempool - .builder_queue(&mock_state) - .await - .expect("failed to get builder queue"); + let builder_queue = mempool.builder_queue().await; assert_eq!( builder_queue.len(), 4, @@ -827,10 +800,7 @@ mod tests { mempool.run_maintenance(&mock_state, false).await; // see builder queue now contains single transactions - let builder_queue = mempool - .builder_queue(&mock_state) - .await - .expect("failed to get builder queue"); + let builder_queue = mempool.builder_queue().await; assert_eq!( builder_queue.len(), 1, @@ -850,10 +820,7 @@ mod tests { mempool.run_maintenance(&mock_state, false).await; - let builder_queue = mempool - .builder_queue(&mock_state) - .await - .expect("failed to get builder queue"); + let builder_queue = mempool.builder_queue().await; assert_eq!( builder_queue.len(), 3, diff --git a/crates/astria-sequencer/src/mempool/transactions_container.rs b/crates/astria-sequencer/src/mempool/transactions_container.rs index 50b89e52f..67ae6a945 100644 --- a/crates/astria-sequencer/src/mempool/transactions_container.rs +++ b/crates/astria-sequencer/src/mempool/transactions_container.rs @@ -20,7 +20,6 @@ use astria_core::{ use astria_eyre::eyre::{ eyre, Result, - WrapErr as _, }; use tokio::time::{ Duration, @@ -29,7 +28,6 @@ use tokio::time::{ use tracing::{ error, instrument, - Level, }; use super::RemovalReason; @@ -214,6 +212,10 @@ impl PendingTransactionsForAccount { 
self.txs.last_key_value().map(|(nonce, _)| *nonce) } + fn current_account_nonce(&self) -> Option { + self.txs.first_key_value().map(|(nonce, _)| *nonce) + } + /// Removes and returns transactions that exceed the balances in `available_balances`. fn find_demotables( &mut self, @@ -775,11 +777,7 @@ impl PendingTransactions { /// Returns a copy of transactions and their hashes sorted by nonce difference and then time /// first seen. - #[instrument(skip_all, err(level = Level::DEBUG))] - pub(super) async fn builder_queue( - &self, - state: &S, - ) -> Result)>> { + pub(super) fn builder_queue(&self) -> Vec<([u8; 32], Arc)> { // Used to hold the values in Vec for sorting. struct QueueEntry { tx: Arc, @@ -790,10 +788,14 @@ impl PendingTransactions { let mut queue = Vec::with_capacity(self.len()); // Add all transactions to the queue. for (address, account_txs) in &self.txs { - let current_account_nonce = state - .get_account_nonce(address) - .await - .wrap_err("failed to fetch account nonce for builder queue")?; + let Some(current_account_nonce) = account_txs.current_account_nonce() else { + error!( + address = %telemetry::display::base64(address), + "pending queue is empty during builder queue step" + ); + continue; + }; + for ttx in account_txs.txs.values() { let priority = match ttx.priority(current_account_nonce) { Ok(priority) => priority, @@ -817,11 +819,11 @@ impl PendingTransactions { // Sort the queue and return the relevant data. Note that the sorted queue will be ordered // from lowest to highest priority, so we need to reverse the order before returning. 
queue.sort_unstable_by_key(|entry| entry.priority); - Ok(queue + queue .into_iter() .rev() .map(|entry| (entry.tx_hash, entry.tx)) - .collect()) + .collect() } } @@ -872,7 +874,6 @@ mod tests { denom_3, mock_balances, mock_state_getter, - mock_state_put_account_nonce, mock_tx_cost, ALICE_ADDRESS, BOB_ADDRESS, @@ -1749,9 +1750,9 @@ mod tests { ); } - #[tokio::test] + #[test] #[expect(clippy::too_many_lines, reason = "it's a test")] - async fn transactions_container_clean_account_stale_expired() { + fn transactions_container_clean_account_stale_expired() { let mut pending_txs = PendingTransactions::new(TX_TTL); // transactions to add to accounts @@ -1967,8 +1968,8 @@ mod tests { ); } - #[tokio::test] - async fn pending_transactions_builder_queue() { + #[test] + fn pending_transactions_builder_queue() { let mut pending_txs = PendingTransactions::new(TX_TTL); // transactions to add to accounts @@ -2001,28 +2002,12 @@ mod tests { .add(ttx_s1_3.clone(), 1, &account_balances) .unwrap(); - // should return all transactions from Alice and last two from Bob - let mut mock_state = mock_state_getter().await; - mock_state_put_account_nonce( - &mut mock_state, - astria_address_from_hex_string(ALICE_ADDRESS).as_bytes(), - 1, - ); - mock_state_put_account_nonce( - &mut mock_state, - astria_address_from_hex_string(BOB_ADDRESS).as_bytes(), - 2, - ); - - // get builder queue - let builder_queue = pending_txs - .builder_queue(&mock_state) - .await - .expect("building builders queue should work"); + // get builder queue - should return all transactions from Alice and Bob + let builder_queue = pending_txs.builder_queue(); assert_eq!( builder_queue.len(), - 3, - "three transactions should've been popped" + 4, + "four transactions should've been popped" ); // check that the transactions are in the expected order @@ -2033,13 +2018,18 @@ mod tests { ); let (second_tx_hash, _) = builder_queue[1]; assert_eq!( - second_tx_hash, ttx_s1_2.tx_hash, + second_tx_hash, ttx_s1_1.tx_hash, "expected 
other low nonce diff (0) to be second" ); let (third_tx_hash, _) = builder_queue[2]; assert_eq!( - third_tx_hash, ttx_s1_3.tx_hash, - "expected highest nonce diff to be last" + third_tx_hash, ttx_s1_2.tx_hash, + "expected middle nonce diff (1) to be third" + ); + let (fourth_tx_hash, _) = builder_queue[3]; + assert_eq!( + fourth_tx_hash, ttx_s1_3.tx_hash, + "expected highest nonce diff (2) to be last" ); // ensure transactions not removed @@ -2050,8 +2040,8 @@ mod tests { ); } - #[tokio::test] - async fn parked_transactions_find_promotables() { + #[test] + fn parked_transactions_find_promotables() { let mut parked_txs = ParkedTransactions::::new(TX_TTL, 100); // transactions to add to accounts @@ -2117,8 +2107,8 @@ mod tests { ); } - #[tokio::test] - async fn pending_transactions_find_demotables() { + #[test] + fn pending_transactions_find_demotables() { let mut pending_txs = PendingTransactions::new(TX_TTL); // transactions to add to account @@ -2196,8 +2186,8 @@ mod tests { ); } - #[tokio::test] - async fn pending_transactions_remaining_account_balances() { + #[test] + fn pending_transactions_remaining_account_balances() { let mut pending_txs = PendingTransactions::new(TX_TTL); // transactions to add to account @@ -2252,10 +2242,9 @@ mod tests { ); } - #[tokio::test] - async fn builder_queue_should_be_sorted_by_action_group_type() { + #[test] + fn builder_queue_should_be_sorted_by_action_group_type() { let mut pending_txs = PendingTransactions::new(TX_TTL); - let mock_state = mock_state_getter().await; // create transactions in reverse order let ttx_unbundleable_sudo = MockTTXBuilder::new() @@ -2295,10 +2284,7 @@ mod tests { // get the builder queue // note: the account nonces are set to zero when not initialized in the mock state - let builder_queue = pending_txs - .builder_queue(&mock_state) - .await - .expect("building builders queue should work"); + let builder_queue = pending_txs.builder_queue(); // check that the transactions are in the expected order let 
(first_tx_hash, _) = builder_queue[0]; @@ -2326,8 +2312,8 @@ mod tests { ); } - #[tokio::test] - async fn parked_transactions_size_limit_works() { + #[test] + fn parked_transactions_size_limit_works() { let mut parked_txs = ParkedTransactions::::new(TX_TTL, 1); // transactions to add to account From d34fb06eed31a7781062a1c77052833481fc479c Mon Sep 17 00:00:00 2001 From: Ethan Oroshiba Date: Tue, 21 Jan 2025 09:59:51 -0600 Subject: [PATCH 08/23] refactor(sequencer): clarify transaction cost estimation (#1908) ## Summary Refactored calculation of total transfers made in a transaction to helper function for clarity. ## Background See here: https://github.com/astriaorg/astria/pull/1905#pullrequestreview-2542728784 > I think we should remove this function entirely in favor of two functions, `calculate_fees_for_transaction` (already exists) and `calculate_funds_moved_by_transaction` (essentially move the loop over the actions to it). The check for enough funds would then take the result of both methods. ## Changes - Moved calculation of all transfers made in a transaction to helper function, and called this helper function in `get_total_transaction_cost()`. ## Testing Passing all tests, no additional tests needed. ## Changelogs No updates required. 
## Related Issues closes #1907 --- crates/astria-sequencer/src/transaction/checks.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/crates/astria-sequencer/src/transaction/checks.rs b/crates/astria-sequencer/src/transaction/checks.rs index 974808a4a..df18bd4c7 100644 --- a/crates/astria-sequencer/src/transaction/checks.rs +++ b/crates/astria-sequencer/src/transaction/checks.rs @@ -77,6 +77,18 @@ pub(crate) async fn get_total_transaction_cost( .await .context("failed to get fees for transaction")?; + add_total_transfers_for_transaction(tx, state, &mut cost_by_asset) + .await + .context("failed to add total transfers for transaction")?; + + Ok(cost_by_asset) +} + +async fn add_total_transfers_for_transaction( + tx: &Transaction, + state: &S, + cost_by_asset: &mut HashMap, +) -> Result<()> { // add values transferred within the tx to the cost for action in tx.actions() { match action { @@ -123,7 +135,7 @@ pub(crate) async fn get_total_transaction_cost( } } - Ok(cost_by_asset) + Ok(()) } #[cfg(test)] From 92d2d463788c9f12d2a9969cd27e63d3d9c582dd Mon Sep 17 00:00:00 2001 From: Jordan Oroshiba Date: Tue, 21 Jan 2025 10:46:40 -0800 Subject: [PATCH 09/23] feat(charts): snapshot startup for sequencer (#1876) ## Summary Adds the ability to start a sequencer node with a snapshot load, speeding up the startup time of a new node. ## Background Starting a new node within k8s requires either manual intervention on the PVC right now, or syncing from genesis. Doing a sync from genesis can be correct but not ideal for scaling infrastructure. ## Changes - Changes sequencer chart `initContainer` for to use rclone - Pull `snapshot` if `snapshotLoad` enabled during `initContainer` script run. ## Testing Ran locally, and confirmed startup and syncs. ## Changelogs No updates required. 
## Issues closes https://github.com/astriaorg/astria/issues/1912 --- charts/sequencer/Chart.yaml | 2 +- .../sequencer/files/scripts/init-cometbft.sh | 26 +++++++++++++++++++ charts/sequencer/templates/statefulsets.yaml | 6 +++++ charts/sequencer/values.yaml | 14 ++++++++-- 4 files changed, 45 insertions(+), 3 deletions(-) diff --git a/charts/sequencer/Chart.yaml b/charts/sequencer/Chart.yaml index 832ce7df7..5107f393d 100644 --- a/charts/sequencer/Chart.yaml +++ b/charts/sequencer/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.1 +version: 1.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/charts/sequencer/files/scripts/init-cometbft.sh b/charts/sequencer/files/scripts/init-cometbft.sh index 9cc643f81..4c9ddb438 100644 --- a/charts/sequencer/files/scripts/init-cometbft.sh +++ b/charts/sequencer/files/scripts/init-cometbft.sh @@ -4,13 +4,39 @@ set -o errexit -o nounset # Only need to configure cometbft data if not already initialized if [ ! -d "/cometbft/data" ]; then + # Load the snapshot on load if enabled + {{- if .Values.snapshotLoad.enabled }} + echo "Downdloading snapshot..." 
+ rclone config create r2 s3 \ + provider={{ .Values.snapshotLoad.config.provider }} \ + access_key_id={{ .Values.snapshotLoad.config.accessKeyId }} \ + secret_access_key={{ .Values.snapshotLoad.config.secretAccessKey }} \ + region={{ .Values.snapshotLoad.config.region }} \ + endpoint={{ .Values.snapshotLoad.config.endpoint }} \ + acl={{ .Values.snapshotLoad.config.acl }} + rclone copy -P r2:astria-mainnet-snapshots/ /snapshot/ + + echo "Extracting snapshot..." + mkdir /cometbft/data + mkdir /sequencer/penumbra.db + tar -C /cometbft/data/ --strip-components=2 -xzf /snapshot/cometbft_*.tar.gz cometbft/data + tar -C /sequencer/penumbra.db/ --strip-components=2 -xzf /snapshot/sequencer_*.tar.gz sequencer/penumbra.db + rm /snapshot/cometbft_*.tar.gz /snapshot/sequencer_*.tar.gz + {{- else }} + # Otherwise initialize with basic values + echo "Intializing cometbft with empty data directory..." cp -LR /data/ /cometbft/data + {{- end }} +else + echo "CometBFT data directory already initialized" fi # Don't replace the config directory if it already exists if [ ! -d "/cometbft/config" ]; then + echo "Creating Config Directory..." cp -LR /config/ /cometbft/config else + echo "Updating config directory..." cp /config/* /cometbft/config/ fi diff --git a/charts/sequencer/templates/statefulsets.yaml b/charts/sequencer/templates/statefulsets.yaml index 362a410f6..aa1fdf79d 100644 --- a/charts/sequencer/templates/statefulsets.yaml +++ b/charts/sequencer/templates/statefulsets.yaml @@ -31,6 +31,12 @@ spec: - mountPath: /cometbft name: sequencer-shared-storage-vol subPath: {{ .Values.moniker }}/cometbft + - mountPath: /sequencer + name: sequencer-shared-storage-vol + subPath: {{ .Values.moniker }}/sequencer + - mountPath: /snapshot + name: sequencer-shared-storage-vol + subPath: {{ .Values.moniker }}/snapshot containers: - name: sequencer image: {{ include "sequencer.image" . 
}} diff --git a/charts/sequencer/values.yaml b/charts/sequencer/values.yaml index 80bd79525..2a23f01e2 100644 --- a/charts/sequencer/values.yaml +++ b/charts/sequencer/values.yaml @@ -9,12 +9,22 @@ global: useTTY: true dev: false +snapshotLoad: + enabled: false + config: + provider: Cloudflare + accessKeyId: 0d8d8005e468dd86498bde6dfa02044f + secretAccessKey: e33ea43e00d9b655cb72d8a8107fa2957bd6b77e5718df0a26f259956532bba8 + region: auto + endpoint: https://fb1caa337c8e4e3101363ca1240e03ca.r2.cloudflarestorage.com + acl: private + # sequencer core images images: init: - repo: ghcr.io/tomwright/dasel + repo: rclone/rclone pullPolicy: IfNotPresent - tag: alpine + tag: 1.56.0 cometBFT: repo: docker.io/cometbft/cometbft pullPolicy: IfNotPresent From c6ca388d4b40b36b8d95b96afcb6b5f2e4917a22 Mon Sep 17 00:00:00 2001 From: Ethan Oroshiba Date: Wed, 22 Jan 2025 08:31:28 -0600 Subject: [PATCH 10/23] chore(composer): add missing blackbox tests (#1834) ## Summary Added blackbox tests to cover whitebox tests which were removed in #1643. ## Background Tests were removed in #1643 because the internal tests were very whitebox-y and fragile. They have not yet been added back as blackbox tests. ## Changes - Added 3 new blackbox tests to cover `bundle_triggered_by_block_timer`, `two_seq_actions_single_bundle`, and `chain_id_mismatch_returns_error`. - Removed old internal tests. ## Testing Passing all tests ## Changelogs No updates required. 
## Related Issues closes #1652 --------- Co-authored-by: Fraser Hutchison <190532+Fraser999@users.noreply.github.com> --- crates/astria-composer/src/executor/tests.rs | 645 ------------------ crates/astria-composer/tests/blackbox/api.rs | 4 +- .../tests/blackbox/executor.rs | 228 +++++++ .../tests/blackbox/geth_collector.rs | 14 +- .../tests/blackbox/grpc_collector.rs | 8 +- .../blackbox/helper/mock_abci_sequencer.rs | 6 +- .../blackbox/helper/mock_grpc_sequencer.rs | 3 +- .../tests/blackbox/helper/mod.rs | 104 ++- crates/astria-composer/tests/blackbox/main.rs | 1 + 9 files changed, 346 insertions(+), 667 deletions(-) delete mode 100644 crates/astria-composer/src/executor/tests.rs create mode 100644 crates/astria-composer/tests/blackbox/executor.rs diff --git a/crates/astria-composer/src/executor/tests.rs b/crates/astria-composer/src/executor/tests.rs deleted file mode 100644 index 6698ff051..000000000 --- a/crates/astria-composer/src/executor/tests.rs +++ /dev/null @@ -1,645 +0,0 @@ -use std::{ - io::Write, - net::{ - IpAddr, - SocketAddr, - }, - sync::LazyLock, - time::Duration, -}; - -use astria_core::{ - generated::protocol::accounts::v1::NonceResponse, - primitive::v1::{ - asset::{ - Denom, - IbcPrefixed, - }, - RollupId, - ROLLUP_ID_LEN, - }, - protocol::transaction::v1::action::Sequence, -}; -use astria_eyre::eyre; -use prost::{ - bytes::Bytes, - Message as _, -}; -use sequencer_client::Transaction; -use serde_json::json; -use telemetry::Metrics as _; -use tempfile::NamedTempFile; -use tendermint::{ - consensus::{ - params::{ - AbciParams, - ValidatorParams, - }, - Params, - }, - Genesis, - Time, -}; -use tendermint_rpc::{ - endpoint::broadcast::tx_sync, - request, - response, - Id, -}; -use tokio::{ - sync::watch, - time, -}; -use tokio_util::sync::CancellationToken; -use tracing::debug; -use wiremock::{ - matchers::{ - body_partial_json, - body_string_contains, - }, - Mock, - MockGuard, - MockServer, - Request, - ResponseTemplate, -}; - -use crate::{ - 
executor, - executor::EnsureChainIdError, - metrics::Metrics, - test_utils::sequence_action_of_max_size, - Config, -}; - -static TELEMETRY: LazyLock<()> = LazyLock::new(|| { - // This config can be meaningless - it's only used inside `try_init` to init the metrics, but we - // haven't configured telemetry to provide metrics here. - let config = Config { - log: String::new(), - api_listen_addr: SocketAddr::new(IpAddr::from([0, 0, 0, 0]), 0), - sequencer_url: String::new(), - sequencer_chain_id: String::new(), - rollups: String::new(), - private_key_file: String::new(), - sequencer_address_prefix: String::new(), - block_time_ms: 0, - max_bytes_per_bundle: 0, - bundle_queue_capacity: 0, - force_stdout: false, - no_otel: false, - no_metrics: false, - metrics_http_listener_addr: String::new(), - pretty_print: false, - grpc_addr: SocketAddr::new(IpAddr::from([0, 0, 0, 0]), 0), - fee_asset: Denom::IbcPrefixed(IbcPrefixed::new([0; 32])), - }; - if std::env::var_os("TEST_LOG").is_some() { - let filter_directives = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".into()); - telemetry::configure() - .set_no_otel(true) - .set_stdout_writer(std::io::stdout) - .set_filter_directives(&filter_directives) - .try_init::(&config) - .unwrap(); - } else { - telemetry::configure() - .set_no_otel(true) - .set_stdout_writer(std::io::sink) - .try_init::(&config) - .unwrap(); - } -}); - -fn sequence_action() -> Sequence { - Sequence { - rollup_id: RollupId::new([0; ROLLUP_ID_LEN]), - data: Bytes::new(), - fee_asset: "nria".parse().unwrap(), - } -} - -/// Start a mock sequencer server and mount a mock for the `accounts/nonce` query. 
-async fn setup() -> (MockServer, Config, NamedTempFile) { - LazyLock::force(&TELEMETRY); - let server = MockServer::start().await; - - let keyfile = NamedTempFile::new().unwrap(); - (&keyfile) - .write_all("2bd806c97f0e00af1a1fc3328fa763a9269723c8db8fac4f93af71db186d6e90".as_bytes()) - .unwrap(); - - let cfg = Config { - log: String::new(), - api_listen_addr: "127.0.0.1:0".parse().unwrap(), - rollups: String::new(), - sequencer_url: server.uri(), - sequencer_chain_id: "test-chain-1".to_string(), - private_key_file: keyfile.path().to_string_lossy().to_string(), - sequencer_address_prefix: "astria".into(), - block_time_ms: 2000, - max_bytes_per_bundle: 1000, - bundle_queue_capacity: 10, - no_otel: false, - force_stdout: false, - no_metrics: false, - metrics_http_listener_addr: String::new(), - pretty_print: true, - grpc_addr: "127.0.0.1:0".parse().unwrap(), - fee_asset: "nria".parse().unwrap(), - }; - (server, cfg, keyfile) -} - -/// Assert that given error is of correct type and contains the expected chain IDs. -#[track_caller] -fn assert_chain_id_err( - err: &EnsureChainIdError, - configured_expected: &str, - configured_actual: &tendermint::chain::Id, -) { - match err { - EnsureChainIdError::WrongChainId { - expected, - actual, - } => { - assert_eq!(*expected, configured_expected); - assert_eq!(*actual, *configured_actual); - } - other @ EnsureChainIdError::GetChainId(_) => { - panic!("expected `EnsureChainIdError::WrongChainId`, but got '{other:?}'") - } - } -} - -/// Mount a mock for the `abci_query` endpoint. 
-async fn mount_default_nonce_query_mock(server: &MockServer) -> MockGuard { - let query_path = "accounts/nonce"; - let response = NonceResponse { - height: 0, - nonce: 0, - }; - let expected_body = json!({ - "method": "abci_query" - }); - let response = tendermint_rpc::endpoint::abci_query::Response { - response: tendermint_rpc::endpoint::abci_query::AbciQuery { - value: response.encode_to_vec(), - ..Default::default() - }, - }; - let wrapper = response::Wrapper::new_with_id(Id::Num(1), Some(response), None); - Mock::given(body_partial_json(&expected_body)) - .and(body_string_contains(query_path)) - .respond_with( - ResponseTemplate::new(200) - .set_body_json(&wrapper) - .append_header("Content-Type", "application/json"), - ) - .up_to_n_times(1) - .expect(1) - .mount_as_scoped(server) - .await -} - -fn tx_from_request(request: &Request) -> Transaction { - use astria_core::generated::protocol::transaction::v1::Transaction as RawTransaction; - - let wrapped_tx_sync_req: request::Wrapper = - serde_json::from_slice(&request.body) - .expect("can't deserialize to JSONRPC wrapped tx_sync::Request"); - let raw_signed_tx = RawTransaction::decode(&*wrapped_tx_sync_req.params().tx) - .expect("can't deserialize signed sequencer tx from broadcast jsonrpc request"); - let signed_tx = Transaction::try_from_raw(raw_signed_tx) - .expect("can't convert raw signed tx to checked signed tx"); - debug!(?signed_tx, "sequencer mock received signed transaction"); - - signed_tx -} - -/// Deserializes the bytes contained in a `tx_sync::Request` to a signed sequencer transaction -/// and verifies that the contained sequence action is in the given `expected_rollup_ids` and -/// `expected_nonces`. 
-async fn mount_broadcast_tx_sync_seq_actions_mock(server: &MockServer) -> MockGuard { - let matcher = move |request: &Request| { - let signed_tx = tx_from_request(request); - let actions = signed_tx.actions(); - - // verify all received actions are sequence actions - actions.iter().all(|action| action.as_sequence().is_some()) - }; - let jsonrpc_rsp = response::Wrapper::new_with_id( - Id::Num(1), - Some(tx_sync::Response { - code: 0.into(), - data: vec![].into(), - log: String::new(), - hash: tendermint::Hash::Sha256([0; 32]), - }), - None, - ); - - Mock::given(matcher) - .respond_with(ResponseTemplate::new(200).set_body_json(&jsonrpc_rsp)) - .up_to_n_times(1) - .expect(1) - .mount_as_scoped(server) - .await -} - -/// Mounts genesis file with specified sequencer chain ID -async fn mount_genesis(server: &MockServer, mock_sequencer_chain_id: &str) { - Mock::given(body_partial_json( - json!({"jsonrpc": "2.0", "method": "genesis", "params": null}), - )) - .respond_with(ResponseTemplate::new(200).set_body_json( - tendermint_rpc::response::Wrapper::new_with_id( - tendermint_rpc::Id::uuid_v4(), - Some( - tendermint_rpc::endpoint::genesis::Response:: { - genesis: Genesis { - genesis_time: Time::from_unix_timestamp(1, 1).unwrap(), - chain_id: mock_sequencer_chain_id.try_into().unwrap(), - initial_height: 1, - consensus_params: Params { - block: tendermint::block::Size { - max_bytes: 1024, - max_gas: 1024, - time_iota_ms: 1000, - }, - evidence: tendermint::evidence::Params { - max_age_num_blocks: 1000, - max_age_duration: tendermint::evidence::Duration( - Duration::from_secs(3600), - ), - max_bytes: 1_048_576, - }, - validator: ValidatorParams { - pub_key_types: vec![tendermint::public_key::Algorithm::Ed25519], - }, - version: None, - abci: AbciParams::default(), - }, - validators: vec![], - app_hash: tendermint::hash::AppHash::default(), - app_state: serde_json::Value::Null, - }, - }, - ), - None, - ), - )) - .expect(1..) 
- .mount(server) - .await; -} - -/// Helper to wait for the executor to connect to the mock sequencer -async fn wait_for_startup( - mut status: watch::Receiver, - nonce_guard: MockGuard, -) -> eyre::Result<()> { - // wait to receive executor status - status - .wait_for(executor::Status::is_connected) - .await - .unwrap(); - - tokio::time::timeout( - Duration::from_millis(100), - nonce_guard.wait_until_satisfied(), - ) - .await - .unwrap(); - - Ok(()) -} - -/// Test to check that the executor sends a signed transaction to the sequencer as soon as it -/// receives a `SequenceAction` that fills it beyond its `max_bundle_size`. -#[tokio::test] -async fn full_bundle() { - // set up the executor, channel for writing seq actions, and the sequencer mock - let (sequencer, cfg, _keyfile) = setup().await; - let shutdown_token = CancellationToken::new(); - let metrics = Box::leak(Box::new(Metrics::noop_metrics(&cfg).unwrap())); - mount_genesis(&sequencer, &cfg.sequencer_chain_id).await; - let (executor, executor_handle) = executor::Builder { - sequencer_url: cfg.sequencer_url.clone(), - sequencer_chain_id: cfg.sequencer_chain_id.clone(), - private_key_file: cfg.private_key_file.clone(), - sequencer_address_prefix: "astria".into(), - block_time_ms: cfg.block_time_ms, - max_bytes_per_bundle: cfg.max_bytes_per_bundle, - bundle_queue_capacity: cfg.bundle_queue_capacity, - shutdown_token: shutdown_token.clone(), - metrics, - } - .build() - .unwrap(); - - let nonce_guard = mount_default_nonce_query_mock(&sequencer).await; - let status = executor.subscribe(); - - let _executor_task = tokio::spawn(executor.run_until_stopped()); - // wait for sequencer to get the initial nonce request from sequencer - wait_for_startup(status, nonce_guard).await.unwrap(); - - let response_guard = mount_broadcast_tx_sync_seq_actions_mock(&sequencer).await; - - // send two sequence actions to the executor, the first of which is large enough to fill the - // bundle sending the second should cause the first 
to immediately be submitted in - // order to make space for the second - let seq0 = sequence_action_of_max_size(cfg.max_bytes_per_bundle); - - let seq1 = Sequence { - rollup_id: RollupId::new([1; ROLLUP_ID_LEN]), - ..sequence_action_of_max_size(cfg.max_bytes_per_bundle) - }; - - // push both sequence actions to the executor in order to force the full bundle to be sent - executor_handle - .send_timeout(seq0.clone(), Duration::from_millis(1000)) - .await - .unwrap(); - executor_handle - .send_timeout(seq1.clone(), Duration::from_millis(1000)) - .await - .unwrap(); - - // wait for the mock sequencer to receive the signed transaction - tokio::time::timeout( - Duration::from_millis(100), - response_guard.wait_until_satisfied(), - ) - .await - .unwrap(); - - // verify only one signed transaction was received by the mock sequencer - // i.e. only the full bundle was sent and not the second one due to the block timer - let expected_seq_actions = [seq0]; - let requests = response_guard.received_requests().await; - assert_eq!(requests.len(), 1); - - // verify the expected sequence actions were received - let signed_tx = tx_from_request(&requests[0]); - let actions = signed_tx.actions(); - - assert_eq!( - actions.len(), - expected_seq_actions.len(), - "received more than one action, one was supposed to fill the bundle" - ); - - for (action, expected_seq_action) in actions.iter().zip(expected_seq_actions.iter()) { - let seq_action = action.as_sequence().unwrap(); - assert_eq!( - seq_action.rollup_id, expected_seq_action.rollup_id, - "chain id does not match. 
actual {:?} expected {:?}", - seq_action.rollup_id, expected_seq_action.rollup_id - ); - assert_eq!( - seq_action.data, expected_seq_action.data, - "data does not match expected data for action with rollup_id {:?}", - seq_action.rollup_id, - ); - } -} - -/// Test to check that the executor sends a signed transaction to the sequencer after its -/// `block_timer` has ticked -#[tokio::test] -async fn bundle_triggered_by_block_timer() { - // set up the executor, channel for writing seq actions, and the sequencer mock - let (sequencer, cfg, _keyfile) = setup().await; - let shutdown_token = CancellationToken::new(); - let metrics = Box::leak(Box::new(Metrics::noop_metrics(&cfg).unwrap())); - mount_genesis(&sequencer, &cfg.sequencer_chain_id).await; - let (executor, executor_handle) = executor::Builder { - sequencer_url: cfg.sequencer_url.clone(), - sequencer_chain_id: cfg.sequencer_chain_id.clone(), - private_key_file: cfg.private_key_file.clone(), - sequencer_address_prefix: "astria".into(), - block_time_ms: cfg.block_time_ms, - max_bytes_per_bundle: cfg.max_bytes_per_bundle, - bundle_queue_capacity: cfg.bundle_queue_capacity, - shutdown_token: shutdown_token.clone(), - metrics, - } - .build() - .unwrap(); - - let nonce_guard = mount_default_nonce_query_mock(&sequencer).await; - let status = executor.subscribe(); - - let _executor_task = tokio::spawn(executor.run_until_stopped()); - - // wait for sequencer to get the initial nonce request from sequencer - wait_for_startup(status, nonce_guard).await.unwrap(); - - let response_guard = mount_broadcast_tx_sync_seq_actions_mock(&sequencer).await; - - // send two sequence actions to the executor, both small enough to fit in a single bundle - // without filling it - let seq0 = Sequence { - data: vec![0u8; cfg.max_bytes_per_bundle / 4].into(), - ..sequence_action() - }; - - // make sure at least one block has passed so that the executor will submit the bundle - // despite it not being full - time::pause(); - executor_handle - 
.send_timeout(seq0.clone(), Duration::from_millis(1000)) - .await - .unwrap(); - time::advance(Duration::from_millis(cfg.block_time_ms)).await; - time::resume(); - - // wait for the mock sequencer to receive the signed transaction - tokio::time::timeout( - Duration::from_millis(100), - response_guard.wait_until_satisfied(), - ) - .await - .unwrap(); - - // verify only one signed transaction was received by the mock sequencer - let expected_seq_actions = [seq0]; - let requests = response_guard.received_requests().await; - assert_eq!(requests.len(), 1); - - // verify the expected sequence actions were received - let signed_tx = tx_from_request(&requests[0]); - let actions = signed_tx.actions(); - - assert_eq!( - actions.len(), - expected_seq_actions.len(), - "received more than one action, one was supposed to fill the bundle" - ); - - for (action, expected_seq_action) in actions.iter().zip(expected_seq_actions.iter()) { - let seq_action = action.as_sequence().unwrap(); - assert_eq!( - seq_action.rollup_id, expected_seq_action.rollup_id, - "chain id does not match. actual {:?} expected {:?}", - seq_action.rollup_id, expected_seq_action.rollup_id - ); - assert_eq!( - seq_action.data, expected_seq_action.data, - "data does not match expected data for action with rollup_id {:?}", - seq_action.rollup_id, - ); - } -} - -/// Test to check that the executor sends a signed transaction with two sequence actions to the -/// sequencer. 
-#[tokio::test] -async fn two_seq_actions_single_bundle() { - // set up the executor, channel for writing seq actions, and the sequencer mock - let (sequencer, cfg, _keyfile) = setup().await; - let shutdown_token = CancellationToken::new(); - let metrics = Box::leak(Box::new(Metrics::noop_metrics(&cfg).unwrap())); - mount_genesis(&sequencer, &cfg.sequencer_chain_id).await; - let (executor, executor_handle) = executor::Builder { - sequencer_url: cfg.sequencer_url.clone(), - sequencer_chain_id: cfg.sequencer_chain_id.clone(), - private_key_file: cfg.private_key_file.clone(), - sequencer_address_prefix: "astria".into(), - block_time_ms: cfg.block_time_ms, - max_bytes_per_bundle: cfg.max_bytes_per_bundle, - bundle_queue_capacity: cfg.bundle_queue_capacity, - shutdown_token: shutdown_token.clone(), - metrics, - } - .build() - .unwrap(); - - let nonce_guard = mount_default_nonce_query_mock(&sequencer).await; - let status = executor.subscribe(); - let _executor_task = tokio::spawn(executor.run_until_stopped()); - - // wait for sequencer to get the initial nonce request from sequencer - wait_for_startup(status, nonce_guard).await.unwrap(); - - let response_guard = mount_broadcast_tx_sync_seq_actions_mock(&sequencer).await; - - // send two sequence actions to the executor, both small enough to fit in a single bundle - // without filling it - let seq0 = Sequence { - data: vec![0u8; cfg.max_bytes_per_bundle / 4].into(), - ..sequence_action() - }; - - let seq1 = Sequence { - rollup_id: RollupId::new([1; ROLLUP_ID_LEN]), - data: vec![1u8; cfg.max_bytes_per_bundle / 4].into(), - ..sequence_action() - }; - - // make sure at least one block has passed so that the executor will submit the bundle - // despite it not being full - time::pause(); - executor_handle - .send_timeout(seq0.clone(), Duration::from_millis(1000)) - .await - .unwrap(); - executor_handle - .send_timeout(seq1.clone(), Duration::from_millis(1000)) - .await - .unwrap(); - 
time::advance(Duration::from_millis(cfg.block_time_ms)).await; - time::resume(); - - // wait for the mock sequencer to receive the signed transaction - tokio::time::timeout( - Duration::from_millis(100), - response_guard.wait_until_satisfied(), - ) - .await - .unwrap(); - - // verify only one signed transaction was received by the mock sequencer - let expected_seq_actions = [seq0, seq1]; - let requests = response_guard.received_requests().await; - assert_eq!(requests.len(), 1); - - // verify the expected sequence actions were received - let signed_tx = tx_from_request(&requests[0]); - let actions = signed_tx.actions(); - - assert_eq!( - actions.len(), - expected_seq_actions.len(), - "received more than one action, one was supposed to fill the bundle" - ); - - for (action, expected_seq_action) in actions.iter().zip(expected_seq_actions.iter()) { - let seq_action = action.as_sequence().unwrap(); - assert_eq!( - seq_action.rollup_id, expected_seq_action.rollup_id, - "chain id does not match. 
actual {:?} expected {:?}", - seq_action.rollup_id, expected_seq_action.rollup_id - ); - assert_eq!( - seq_action.data, expected_seq_action.data, - "data does not match expected data for action with rollup_id {:?}", - seq_action.rollup_id, - ); - } -} - -/// Test to check that executor's chain ID check is properly checked against the sequencer's chain -/// ID -#[tokio::test] -async fn chain_id_mismatch_returns_error() { - use tendermint::chain::Id; - - // set up sequencer mock - let (sequencer, cfg, _keyfile) = setup().await; - let shutdown_token = CancellationToken::new(); - let metrics = Box::leak(Box::new(Metrics::noop_metrics(&cfg).unwrap())); - - // mount a status response with an incorrect chain_id - mount_genesis(&sequencer, "bad-chain-id").await; - - // build the executor with the correct chain_id - let (executor, _executor_handle) = executor::Builder { - sequencer_url: cfg.sequencer_url.clone(), - sequencer_chain_id: cfg.sequencer_chain_id.clone(), - private_key_file: cfg.private_key_file.clone(), - sequencer_address_prefix: cfg.sequencer_address_prefix.clone(), - block_time_ms: cfg.block_time_ms, - max_bytes_per_bundle: cfg.max_bytes_per_bundle, - bundle_queue_capacity: cfg.bundle_queue_capacity, - shutdown_token: shutdown_token.clone(), - metrics, - } - .build() - .unwrap(); - - // ensure that run_until_stopped returns WrongChainId error - let err = executor.run_until_stopped().await.expect_err( - "should exit with an error when reading a bad chain ID, but exited with success", - ); - let mut found = false; - for cause in err.chain() { - if let Some(err) = cause.downcast_ref::() { - assert_chain_id_err( - err, - &cfg.sequencer_chain_id, - &Id::try_from("bad-chain-id".to_string()).unwrap(), - ); - found = true; - break; - } - } - - // ensure that the error chain contains the expected error - assert!( - found, - "expected `EnsureChainIdError::WrongChainId` in error chain, but it was not found" - ); -} diff --git 
a/crates/astria-composer/tests/blackbox/api.rs b/crates/astria-composer/tests/blackbox/api.rs index c65e2ff95..9d1275bbe 100644 --- a/crates/astria-composer/tests/blackbox/api.rs +++ b/crates/astria-composer/tests/blackbox/api.rs @@ -4,7 +4,7 @@ async fn readyz_with_one_rollup() { // spawn_composer hits `/readyz` as part of starting the test // environment. If this future return then `readyz` must have // returned `status: ok`. - let _test_composer = spawn_composer(&["test1"]).await; + let _test_composer = spawn_composer(&["test1"], None, true).await; } #[tokio::test] @@ -12,5 +12,5 @@ async fn readyz_with_two_rollups() { // spawn_composer hits `/readyz` as part of starting the test // environment. If this future return then `readyz` must have // returned `status: ok`. - let _test_composer = spawn_composer(&["test1", "test2"]).await; + let _test_composer = spawn_composer(&["test1", "test2"], None, true).await; } diff --git a/crates/astria-composer/tests/blackbox/executor.rs b/crates/astria-composer/tests/blackbox/executor.rs new file mode 100644 index 000000000..c4490e1b6 --- /dev/null +++ b/crates/astria-composer/tests/blackbox/executor.rs @@ -0,0 +1,228 @@ +use std::time::Duration; + +use astria_core::{ + generated::astria::composer::v1::{ + grpc_collector_service_client::GrpcCollectorServiceClient, + SubmitRollupTransactionRequest, + }, + primitive::v1::{ + RollupId, + ROLLUP_ID_LEN, + }, + protocol::transaction::v1::action::RollupDataSubmission, +}; +use tokio::time; + +use crate::helper::{ + mount_broadcast_tx_sync_rollup_data_submissions_mock, + signed_tx_from_request, + spawn_composer, +}; + +/// Test to check that the executor sends a signed transaction to the sequencer after its +/// `block_timer` has ticked +#[tokio::test] +async fn bundle_triggered_by_block_timer() { + let test_composer = spawn_composer(&["test1"], None, true).await; + let mut composer_client = GrpcCollectorServiceClient::connect(format!( + "http://{}", + 
test_composer.grpc_collector_addr + )) + .await + .unwrap(); + + let response_guard = + mount_broadcast_tx_sync_rollup_data_submissions_mock(&test_composer.sequencer).await; + + // send two sequence actions to the executor, both small enough to fit in a single bundle + // without filling it + let rollup_id = RollupId::new([0; ROLLUP_ID_LEN]); + let data = vec![0u8; 1000]; + + let seq0 = RollupDataSubmission { + data: data.clone().into(), + rollup_id, + fee_asset: "nria".parse().unwrap(), + }; + + // make sure at least one block has passed so that the executor will submit the bundle + // despite it not being full + time::pause(); + let submission_timeout = + Duration::from_millis(test_composer.cfg.block_time_ms.saturating_add(100)); + time::timeout(submission_timeout, async { + composer_client + .submit_rollup_transaction(SubmitRollupTransactionRequest { + rollup_id: Some(rollup_id.into_raw()), + data: data.into(), + }) + .await + .expect("rollup transactions should have been submitted successfully to grpc collector") + }) + .await + .unwrap(); + time::advance(Duration::from_millis(test_composer.cfg.block_time_ms)).await; + time::resume(); + + // wait for the mock sequencer to receive the signed transaction + tokio::time::timeout( + Duration::from_millis(100), + response_guard.wait_until_satisfied(), + ) + .await + .unwrap(); + + // verify only one signed transaction was received by the mock sequencer + let expected_rollup_data_submissions = [seq0]; + let requests = response_guard.received_requests().await; + assert_eq!(requests.len(), 1); + + // verify the expected sequence actions were received + let signed_tx = signed_tx_from_request(&requests[0]); + let actions = signed_tx.actions(); + + assert_eq!( + actions.len(), + expected_rollup_data_submissions.len(), + "received more than one action, one was supposed to fill the bundle" + ); + + for (action, expected_rollup_data_submission) in + actions.iter().zip(expected_rollup_data_submissions.iter()) + { + let 
rollup_data_submission = action.as_rollup_data_submission().unwrap(); + assert_eq!( + rollup_data_submission.rollup_id, expected_rollup_data_submission.rollup_id, + "chain id does not match. actual {:?} expected {:?}", + rollup_data_submission.rollup_id, expected_rollup_data_submission.rollup_id + ); + assert_eq!( + rollup_data_submission.data, expected_rollup_data_submission.data, + "data does not match expected data for action with rollup_id {:?}", + rollup_data_submission.rollup_id, + ); + } +} + +/// Test to check that the executor sends a signed transaction with two sequence actions to the +/// sequencer. +#[tokio::test] +async fn two_rollup_data_submissions_single_bundle() { + let test_composer = spawn_composer(&["test1"], None, true).await; + let mut composer_client = GrpcCollectorServiceClient::connect(format!( + "http://{}", + test_composer.grpc_collector_addr + )) + .await + .unwrap(); + + let response_guard = + mount_broadcast_tx_sync_rollup_data_submissions_mock(&test_composer.sequencer).await; + + // send two sequence actions to the executor, both small enough to fit in a single bundle + // without filling it + let seq0 = RollupDataSubmission { + rollup_id: RollupId::new([0; ROLLUP_ID_LEN]), + data: vec![0u8; 1000].into(), + fee_asset: "nria".parse().unwrap(), + }; + + let seq1 = RollupDataSubmission { + rollup_id: RollupId::new([1; ROLLUP_ID_LEN]), + data: vec![1u8; 1000].into(), + fee_asset: "nria".parse().unwrap(), + }; + + // make sure at least one block has passed so that the executor will submit the bundle + // despite it not being full + time::pause(); + let submission_timeout = + Duration::from_millis(test_composer.cfg.block_time_ms.saturating_add(100)); + time::timeout(submission_timeout, async { + composer_client + .submit_rollup_transaction(SubmitRollupTransactionRequest { + rollup_id: Some(seq0.rollup_id.into_raw()), + data: seq0.data.clone(), + }) + .await + .expect( + "rollup transactions should have been submitted successfully to grpc 
collector", + ); + composer_client + .submit_rollup_transaction(SubmitRollupTransactionRequest { + rollup_id: Some(seq1.rollup_id.into_raw()), + data: seq1.data.clone(), + }) + .await + .expect( + "rollup transactions should have been submitted successfully to grpc collector", + ); + }) + .await + .unwrap(); + time::advance(Duration::from_millis(test_composer.cfg.block_time_ms)).await; + time::resume(); + + // wait for the mock sequencer to receive the signed transaction + tokio::time::timeout( + Duration::from_millis(100), + response_guard.wait_until_satisfied(), + ) + .await + .unwrap(); + + // verify only one signed transaction was received by the mock sequencer + let expected_rollup_data_submissions = [seq0, seq1]; + let requests = response_guard.received_requests().await; + assert_eq!(requests.len(), 1); + + // verify the expected sequence actions were received + let signed_tx = signed_tx_from_request(&requests[0]); + let actions = signed_tx.actions(); + + assert_eq!( + actions.len(), + expected_rollup_data_submissions.len(), + "received more than one action, one was supposed to fill the bundle" + ); + + for (action, expected_rollup_data_submission) in + actions.iter().zip(expected_rollup_data_submissions.iter()) + { + let rollup_data_submission = action.as_rollup_data_submission().unwrap(); + assert_eq!( + rollup_data_submission.rollup_id, expected_rollup_data_submission.rollup_id, + "chain id does not match. 
actual {:?} expected {:?}", + rollup_data_submission.rollup_id, expected_rollup_data_submission.rollup_id + ); + assert_eq!( + rollup_data_submission.data, expected_rollup_data_submission.data, + "data does not match expected data for action with rollup_id {:?}", + rollup_data_submission.rollup_id, + ); + } +} + +/// Test to check that executor's chain ID check is properly checked against the sequencer's chain +/// ID +#[tokio::test] +async fn chain_id_mismatch_returns_error() { + // TODO(https://github.com/astriaorg/astria/issues/1833): this test will currently succeed if + // the executor fails for any reason on startup, not just if the chain ID is incorrect. This is + // a symptom of the current implementation of executor, though, which should be propagating + // errors. As such, I think it is out of the scope for the following test-only changes and + // should be fixed in a followup. + + let bad_chain_id = "bad_id"; + let test_composer = spawn_composer(&["test1"], Some(bad_chain_id), false).await; + let err = test_composer.composer.await.unwrap().unwrap_err(); + for cause in err.chain() { + if cause + .to_string() + .contains("executor failed while waiting for it to become ready") + { + return; + } + } + panic!("did not find expected executor error message") +} diff --git a/crates/astria-composer/tests/blackbox/geth_collector.rs b/crates/astria-composer/tests/blackbox/geth_collector.rs index 8acf6c817..337a08724 100644 --- a/crates/astria-composer/tests/blackbox/geth_collector.rs +++ b/crates/astria-composer/tests/blackbox/geth_collector.rs @@ -15,7 +15,7 @@ use crate::helper::{ async fn tx_from_one_rollup_is_received_by_sequencer() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let test_composer = spawn_composer(&["test1"]).await; + let test_composer = spawn_composer(&["test1"], None, true).await; let expected_rollup_ids = vec![RollupId::from_unhashed_bytes("test1")]; let mock_guard = @@ -37,7 +37,7 @@ async fn 
tx_from_one_rollup_is_received_by_sequencer() { async fn collector_restarts_after_exit() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let test_composer = spawn_composer(&["test1"]).await; + let test_composer = spawn_composer(&["test1"], None, true).await; // get rollup node let rollup_node = test_composer.rollup_nodes.get("test1").unwrap(); @@ -71,7 +71,7 @@ async fn collector_restarts_after_exit() { async fn invalid_nonce_causes_resubmission_under_different_nonce() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let test_composer = spawn_composer(&["test1"]).await; + let test_composer = spawn_composer(&["test1"], None, true).await; // Reject the first transaction for invalid nonce let invalid_nonce_guard = mount_broadcast_tx_sync_invalid_nonce_mock( @@ -88,7 +88,7 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { // Mount a response of 1 to a nonce query test_composer .sequencer_mock - .mount_pending_nonce_response(1, "setup correct nonce") + .mount_pending_nonce_response(1, "setup correct nonce", 1) .await; // Push a tx to the rollup node so that it is picked up by the composer and submitted with the @@ -117,7 +117,7 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { async fn nonce_taken_causes_resubmission_under_different_nonce() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let test_composer = spawn_composer(&["test1"]).await; + let test_composer = spawn_composer(&["test1"], None, true).await; // Reject the first transaction for taken nonce let invalid_nonce_guard = mount_broadcast_tx_sync_invalid_nonce_mock( @@ -134,7 +134,7 @@ async fn nonce_taken_causes_resubmission_under_different_nonce() { // Mount a response of 1 to a nonce query test_composer .sequencer_mock - .mount_pending_nonce_response(1, "setup correct nonce") + .mount_pending_nonce_response(1, "setup correct nonce", 1) 
.await; // Push a tx to the rollup node so that it is picked up by the composer and submitted with the @@ -163,7 +163,7 @@ async fn nonce_taken_causes_resubmission_under_different_nonce() { async fn single_rollup_tx_payload_integrity() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let test_composer = spawn_composer(&["test1"]).await; + let test_composer = spawn_composer(&["test1"], None, true).await; let tx: Transaction = serde_json::from_str(TEST_ETH_TX_JSON).unwrap(); let mock_guard = diff --git a/crates/astria-composer/tests/blackbox/grpc_collector.rs b/crates/astria-composer/tests/blackbox/grpc_collector.rs index 18767ab0a..af1c862a8 100644 --- a/crates/astria-composer/tests/blackbox/grpc_collector.rs +++ b/crates/astria-composer/tests/blackbox/grpc_collector.rs @@ -20,7 +20,7 @@ use crate::helper::{ #[tokio::test] async fn tx_from_one_rollup_is_received_by_sequencer() { - let test_composer = spawn_composer(&[]).await; + let test_composer = spawn_composer(&[], None, true).await; let rollup_id = RollupId::from_unhashed_bytes("test1"); let expected_chain_ids = vec![rollup_id]; let mock_guard = @@ -56,7 +56,7 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 let rollup_id = RollupId::from_unhashed_bytes("test1"); - let test_composer = spawn_composer(&[]).await; + let test_composer = spawn_composer(&[], None, true).await; // Reject the first transaction for invalid nonce let invalid_nonce_guard = @@ -65,7 +65,7 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { // Mount a response of 1 to a nonce query test_composer .sequencer_mock - .mount_pending_nonce_response(1, "setup correct nonce") + .mount_pending_nonce_response(1, "setup correct nonce", 1) .await; let expected_chain_ids = vec![rollup_id]; @@ -112,7 +112,7 @@ async fn single_rollup_tx_payload_integrity() { // Spawn a composer with a 
mock sequencer and a mock rollup node // Initial nonce is 0 let rollup_id = RollupId::from_unhashed_bytes("test1"); - let test_composer = spawn_composer(&[]).await; + let test_composer = spawn_composer(&[], None, true).await; let tx: Transaction = serde_json::from_str(TEST_ETH_TX_JSON).unwrap(); let mock_guard = diff --git a/crates/astria-composer/tests/blackbox/helper/mock_abci_sequencer.rs b/crates/astria-composer/tests/blackbox/helper/mock_abci_sequencer.rs index 28f5ca53a..ead144030 100644 --- a/crates/astria-composer/tests/blackbox/helper/mock_abci_sequencer.rs +++ b/crates/astria-composer/tests/blackbox/helper/mock_abci_sequencer.rs @@ -19,9 +19,11 @@ use wiremock::{ ResponseTemplate, }; -pub async fn start() -> MockServer { +use super::TEST_CHAIN_ID; + +pub async fn start(chain_id: Option<&str>) -> MockServer { let server = MockServer::start().await; - mount_genesis(&server, "test-chain-1").await; + mount_genesis(&server, chain_id.unwrap_or(TEST_CHAIN_ID)).await; server } diff --git a/crates/astria-composer/tests/blackbox/helper/mock_grpc_sequencer.rs b/crates/astria-composer/tests/blackbox/helper/mock_grpc_sequencer.rs index 089761096..06090abff 100644 --- a/crates/astria-composer/tests/blackbox/helper/mock_grpc_sequencer.rs +++ b/crates/astria-composer/tests/blackbox/helper/mock_grpc_sequencer.rs @@ -76,6 +76,7 @@ impl MockGrpcSequencer { &self, nonce_to_mount: u32, debug_name: impl Into, + expected_requests: u64, ) { let resp = GetPendingNonceResponse { inner: nonce_to_mount, @@ -86,7 +87,7 @@ impl MockGrpcSequencer { ) .respond_with(constant_response(resp)) .up_to_n_times(1) - .expect(1) + .expect(expected_requests) .with_name(debug_name) .mount(&self.mock_server) .await; diff --git a/crates/astria-composer/tests/blackbox/helper/mod.rs b/crates/astria-composer/tests/blackbox/helper/mod.rs index fca7da0f4..0efa11ec3 100644 --- a/crates/astria-composer/tests/blackbox/helper/mod.rs +++ b/crates/astria-composer/tests/blackbox/helper/mod.rs @@ -15,6 +15,7 @@ 
use astria_composer::{ Metrics, }; use astria_core::{ + generated::astria::protocol::accounts::v1::NonceResponse, primitive::v1::{ asset::{ Denom, @@ -31,6 +32,8 @@ use astria_core::{ use astria_eyre::eyre; use ethers::prelude::Transaction as EthersTransaction; use mock_grpc_sequencer::MockGrpcSequencer; +use prost::Message as _; +use serde_json::json; use telemetry::metrics; use tempfile::NamedTempFile; use tendermint_rpc::{ @@ -43,6 +46,10 @@ use test_utils::mock::Geth; use tokio::task::JoinHandle; use tracing::debug; use wiremock::{ + matchers::{ + body_partial_json, + body_string_contains, + }, Mock, MockGuard, MockServer, @@ -53,6 +60,8 @@ use wiremock::{ pub mod mock_abci_sequencer; pub mod mock_grpc_sequencer; +pub const TEST_CHAIN_ID: &str = "test-chain-1"; + static TELEMETRY: LazyLock<()> = LazyLock::new(|| { // This config can be meaningless - it's only used inside `try_init` to init the metrics, but we // haven't configured telemetry to provide metrics here. @@ -110,7 +119,11 @@ pub struct TestComposer { /// # Panics /// There is no explicit error handling in favour of panicking loudly /// and early. 
-pub async fn spawn_composer(rollup_ids: &[&str]) -> TestComposer { +pub async fn spawn_composer( + rollup_ids: &[&str], + sequencer_chain_id: Option<&str>, + loop_until_ready: bool, +) -> TestComposer { LazyLock::force(&TELEMETRY); let mut rollup_nodes = HashMap::new(); @@ -121,7 +134,7 @@ pub async fn spawn_composer(rollup_ids: &[&str]) -> TestComposer { rollup_nodes.insert((*id).to_string(), geth); rollups.push_str(&format!("{id}::{execution_url},")); } - let sequencer = mock_abci_sequencer::start().await; + let sequencer = mock_abci_sequencer::start(sequencer_chain_id).await; let grpc_server = MockGrpcSequencer::spawn().await; let sequencer_url = sequencer.uri(); let keyfile = NamedTempFile::new().unwrap(); @@ -131,7 +144,7 @@ pub async fn spawn_composer(rollup_ids: &[&str]) -> TestComposer { let config = Config { log: String::new(), api_listen_addr: "127.0.0.1:0".parse().unwrap(), - sequencer_chain_id: "test-chain-1".to_string(), + sequencer_chain_id: TEST_CHAIN_ID.to_string(), rollups, sequencer_abci_endpoint: sequencer_url.to_string(), sequencer_grpc_endpoint: format!("http://{}", grpc_server.local_addr), @@ -155,9 +168,15 @@ pub async fn spawn_composer(rollup_ids: &[&str]) -> TestComposer { .unwrap(); let metrics = Box::leak(Box::new(metrics)); + let expected_get_nonce_requests = loop_until_ready.into(); + // prepare get nonce response grpc_server - .mount_pending_nonce_response(0, "startup::wait_for_mempool()") + .mount_pending_nonce_response( + 0, + "startup::wait_for_mempool()", + expected_get_nonce_requests, + ) .await; let (composer_addr, grpc_collector_addr, composer_handle) = { @@ -168,7 +187,10 @@ pub async fn spawn_composer(rollup_ids: &[&str]) -> TestComposer { (composer_addr, grpc_collector_addr, task) }; - loop_until_composer_is_ready(composer_addr).await; + if loop_until_ready { + loop_until_composer_is_ready(composer_addr).await; + } + TestComposer { cfg: config, composer: composer_handle, @@ -206,7 +228,13 @@ pub async fn 
loop_until_composer_is_ready(addr: SocketAddr) { } } -fn signed_tx_from_request(request: &Request) -> Transaction { +/// Creates a signed transaction from a wiremock request. +/// +/// # Panics +/// +/// Panics if the request body can't be deserialized to a JSONRPC wrapped `tx_sync::Request`, or if +/// the deserialization from the JSONRPC request to the raw transaction fails. +pub fn signed_tx_from_request(request: &Request) -> Transaction { use astria_core::generated::astria::protocol::transaction::v1::Transaction as RawTransaction; use prost::Message as _; @@ -370,6 +398,70 @@ pub async fn mount_broadcast_tx_sync_nonce_taken_mock( .await } +/// Deserializes the bytes contained in a `tx_sync::Request` to a signed sequencer transaction +/// and verifies that every action contained in the transaction is a rollup data +/// submission action. +pub async fn mount_broadcast_tx_sync_rollup_data_submissions_mock( + server: &MockServer, +) -> MockGuard { + let matcher = move |request: &Request| { + let signed_tx = signed_tx_from_request(request); + let actions = signed_tx.actions(); + + // verify all received actions are rollup data submission actions + actions + .iter() + .all(|action| action.as_rollup_data_submission().is_some()) + }; + let jsonrpc_rsp = response::Wrapper::new_with_id( + Id::Num(1), + Some(tx_sync::Response { + code: 0.into(), + data: vec![].into(), + log: String::new(), + hash: tendermint::Hash::Sha256([0; 32]), + }), + None, + ); + + Mock::given(matcher) + .respond_with(ResponseTemplate::new(200).set_body_json(&jsonrpc_rsp)) + .up_to_n_times(1) + .expect(1) + .mount_as_scoped(server) + .await +} + +/// Mount a mock for the `abci_query` endpoint.
+pub async fn mount_default_nonce_query_mock(server: &MockServer) -> MockGuard { + let query_path = "accounts/nonce"; + let response = NonceResponse { + height: 0, + nonce: 0, + }; + let expected_body = json!({ + "method": "abci_query" + }); + let response = tendermint_rpc::endpoint::abci_query::Response { + response: tendermint_rpc::endpoint::abci_query::AbciQuery { + value: response.encode_to_vec(), + ..Default::default() + }, + }; + let wrapper = response::Wrapper::new_with_id(Id::Num(1), Some(response), None); + Mock::given(body_partial_json(&expected_body)) + .and(body_string_contains(query_path)) + .respond_with( + ResponseTemplate::new(200) + .set_body_json(&wrapper) + .append_header("Content-Type", "application/json"), + ) + .up_to_n_times(1) + .expect(1) + .mount_as_scoped(server) + .await +} + // A Uniswap V2 DAI-ETH swap transaction from mainnet // Etherscan link: https://etherscan.io/tx/0x99850dd1cf325c8ede9ba62b9d8a11aa199794450b581ce3a7bb8c1e5bb7562f pub const TEST_ETH_TX_JSON: &str = 
r#"{"blockHash":"0xe365f2163edb844b617ebe3d2af183b31d6c7ffa794f21d0b2d111d63e979a02","blockNumber":"0x1157959","from":"0xdc975a9bb00f4c030e4eb3268f68e4b8d0fa0362","gas":"0xcdf49","gasPrice":"0x374128344","maxFeePerGas":"0x374128344","maxPriorityFeePerGas":"0x0","hash":"0x99850dd1cf325c8ede9ba62b9d8a11aa199794450b581ce3a7bb8c1e5bb7562f","input":"0x022c0d9f0000000000000000000000000000000000000000000000c88a1ad5e15105525500000000000000000000000000000000000000000000000000000000000000000000000000000000000000001a2d11cb90d1de13bb81ee7b772a08ac234a8058000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001208974000000000000000000000000000000000000000000000000000000004de4000000000000000000000000000000000000000000000000017038152c223cb100000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000005200000000000000000000000000000000000000000000000000000000000000000000000000000000000000087870bca3f3fd6335c3f4ce8392d69350b4fa4e2000000000000000000000000ab12275f2d91f87b301a4f01c9af4e83b3f45baa0000000000000000000000006b175474e89094c44da98b954eedeac495271d0f000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2","nonce":"0x28","to":"0xa478c2975ab1ea89e8196811f51a7b7ade33eb11","transactionIndex":"0x2","value":"0x0","type":"0x2","accessList":[{"address":"0x5f4ec3df9cbd43714fe2740f5e3616155c5b8419","storageKeys":["0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000002"]},{"address":"0x7effd7b47bfd17e52fb7559d3f924201b9dbff3d","storageKeys":[]},{"address":"0x018008bfb33d285247a21d44e50697654f754e63","storageKeys":["0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc"]},{"address":"0x1a2d11cb90d1de13bb81ee7b772a08ac234a8058","storageKeys":[]},{"address":"0xe62b71cf983019bff55bc83b48601ce8419650cc","storageKeys":["0x9a09f352b299559621084d9b8d2625e8d5a97f3
82735872dd3bb1bdbdccc3fee","0x000000000000000000000000000000000000000000000000000000000000002b","0xfee3a99380070b792e111dd9a6a15e929983e2d0b7e170a5520e51b99be0c359"]},{"address":"0x87870bca3f3fd6335c3f4ce8392d69350b4fa4e2","storageKeys":["0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc","0x070a95ec3546cae47592e0bcea195bf8f96287077fbb7a23785cc2887152941c","0x070a95ec3546cae47592e0bcea195bf8f96287077fbb7a23785cc28871529420","0xf81d8d79f42adb4c73cc3aa0c78e25d3343882d0313c0b80ece3d3a103ef1ec6","0x5e14560e314427eb9d0c466a6058089f672317c8e26719a770a709c3f2481e4b","0xf81d8d79f42adb4c73cc3aa0c78e25d3343882d0313c0b80ece3d3a103ef1ebf","0xf81d8d79f42adb4c73cc3aa0c78e25d3343882d0313c0b80ece3d3a103ef1ec0","0x4c0bd942d17410ca1f6d3278a62feef7078602605466e37de958808f1454efbd","0x5e14560e314427eb9d0c466a6058089f672317c8e26719a770a709c3f2481e48","0xf81d8d79f42adb4c73cc3aa0c78e25d3343882d0313c0b80ece3d3a103ef1ec3","0x5e14560e314427eb9d0c466a6058089f672317c8e26719a770a709c3f2481e4f","0x5e14560e314427eb9d0c466a6058089f672317c8e26719a770a709c3f2481e4a","0x5e14560e314427eb9d0c466a6058089f672317c8e26719a770a709c3f2481e50","0x5e14560e314427eb9d0c466a6058089f672317c8e26719a770a709c3f2481e4d","0x4cb2b152c1b54ce671907a93c300fd5aa72383a9d4ec19a81e3333632ae92e00","0xf81d8d79f42adb4c73cc3aa0c78e25d3343882d0313c0b80ece3d3a103ef1ec4","0xf81d8d79f42adb4c73cc3aa0c78e25d3343882d0313c0b80ece3d3a103ef1ec7","0x4bea7244bd9088ac961c659a818b4f060de9712d20dc006c24f0985f19cf62d1","0x5e14560e314427eb9d0c466a6058089f672317c8e26719a770a709c3f2481e49","0xf81d8d79f42adb4c73cc3aa0c78e25d3343882d0313c0b80ece3d3a103ef1ec2","0x070a95ec3546cae47592e0bcea195bf8f96287077fbb7a23785cc2887152941d","0x5e14560e314427eb9d0c466a6058089f672317c8e26719a770a709c3f2481e4c","0x5e14560e314427eb9d0c466a6058089f672317c8e26719a770a709c3f2481e4e","0x4480713a5820391a4815a640728dab70c3847e45854ef9e8117382da26ce9105","0x070a95ec3546cae47592e0bcea195bf8f96287077fbb7a23785cc2887152941f","0x0000000000000000000000000000000000
00000000000000000000000000003b","0x108718ddd11d4cf696a068770009c44aef387eb858097a37824291f99278d5e3","0xf81d8d79f42adb4c73cc3aa0c78e25d3343882d0313c0b80ece3d3a103ef1ec1","0xf81d8d79f42adb4c73cc3aa0c78e25d3343882d0313c0b80ece3d3a103ef1ec5"]},{"address":"0x2f39d218133afab8f2b819b1066c7e434ad94e9e","storageKeys":["0x740f710666bd7a12af42df98311e541e47f7fd33d382d11602457a6d540cbd63","0x0d2c1bcee56447b4f46248272f34207a580a5c40f666a31f4e2fbb470ea53ab8"]},{"address":"0xe7b67f44ea304dd7f6d215b13686637ff64cd2b2","storageKeys":[]},{"address":"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2","storageKeys":["0x7f6377583d24615ddfe989626525aeed0d158f924ee8c91664ab0dffd7863d00","0x3afb575d989d656a39ee0690da12b019915f3bd8709cc522e681b8dd04237970","0xa535fbd0ab3e0ad4ee444570368f3d474545b71fcc49228fe96a6406676fc126","0xb064600732a82908427d092d333e607598a6238a59aeb45e1288cb0bac7161cf"]},{"address":"0x4d5f47fa6a74757f35c14fd3a6ef8e3c9bc514e8","storageKeys":["0x000000000000000000000000000000000000000000000000000000000000003c","0x14a553e31736f19e3e380cf55bfb2f82dfd6d880cd07235affb68d8d3e0cac4d","0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc","0x5e8cc6ee686108b7fd15638e2dbb32555b30d0bd1a191628bb70b5459b86cedc","0x000000000000000000000000000000000000000000000000000000000000003d","0x0000000000000000000000000000000000000000000000000000000000000036","0x0000000000000000000000000000000000000000000000000000000000000039"]},{"address":"0x6b175474e89094c44da98b954eedeac495271d0f","storageKeys":["0xd86cc1e239204d48eb0055f151744c4bb3d2337612287be803ae8247e95a67d2","0xe7ab5c3b3c86286a122f1937d4c70a3170dba7ef4f7603d830e8bcf7c9af583b","0x87c358b8e65d7446f52ffce25e44c9673d2bf461b3d3e4748afcf1238e9224a3","0xad740bfd58072c0bd719418966c52da18e837afec1b47e07bba370568cc87fbb"]},{"address":"0xe175de51f29d822b86e46a9a61246ec90631210d","storageKeys":[]},{"address":"0xcf8d0c70c850859266f5c338b38f9d663181c314","storageKeys":["0x0000000000000000000000000000000000000000000000000000000000000037","0x000
000000000000000000000000000000000000000000000000000000000003d","0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc","0x000000000000000000000000000000000000000000000000000000000000003a","0x4bea7244bd9088ac961c659a818b4f060de9712d20dc006c24f0985f19cf62d1"]},{"address":"0x413adac9e2ef8683adf5ddaece8f19613d60d1bb","storageKeys":["0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc","0x000000000000000000000000000000000000000000000000000000000000003f","0x000000000000000000000000000000000000000000000000000000000000003a","0x4bea7244bd9088ac961c659a818b4f060de9712d20dc006c24f0985f19cf62d1"]},{"address":"0xaed0c38402a5d19df6e4c03f4e2dced6e29c1ee9","storageKeys":["0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000002"]},{"address":"0xea51d7853eefb32b6ee06b1c12e6dcca88be0ffe","storageKeys":["0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc","0x000000000000000000000000000000000000000000000000000000000000003a"]},{"address":"0x54586be62e3c3580375ae3723c145253060ca0c2","storageKeys":["0x7145bb02480b505fc02ccfdba07d3ba3a9d821606f0688263abedd0ac6e5bec5","0x2a11cb67ca5c7e99dba99b50e02c11472d0f19c22ed5af42a1599a7f57e1c7a4","0x5306b8fbe80b30a74098357ee8e26fad8dc069da9011cca5f0870a0a5982e541"]},{"address":"0x478238a1c8b862498c74d0647329aef9ea6819ed","storageKeys":["0x9ef04667c5a1bd8192837ceac2ad5f2c41549d4db3406185e8c6aa95ea557bc5","0x000000000000000000000000000000000000000000000000000000000000002b","0x0020b304a2489d03d215fadd3bb6d3de2dda5a6a1235e76d693c30263e3cd054"]},{"address":"0xa700b4eb416be35b2911fd5dee80678ff64ff6c9","storageKeys":["0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc","0x5e8cc6ee686108b7fd15638e2dbb32555b30d0bd1a191628bb70b5459b86cedc"]},{"address":"0x8164cc65827dcfe994ab23944cbc90e0aa80bfcb","storageKeys":["0x76f8b43dabb591eb6681562420f7f6aa393e6903d4e02e6f59e2957d94ceab20","0x360894a13ba1a3210667c828492db98dca3e2076cc3
735a920a3ca505d382bbc","0x176062dac4e737f036c34baf4b07185f9c9fd3c1337ca36eb7c1f7a74aedb8ea"]},{"address":"0x9a158802cd924747ef336ca3f9de3bdb60cf43d3","storageKeys":[]},{"address":"0xac725cb59d16c81061bdea61041a8a5e73da9ec6","storageKeys":[]},{"address":"0x15c5620dffac7c7366eed66c20ad222ddbb1ed57","storageKeys":[]},{"address":"0x547a514d5e3769680ce22b2361c10ea13619e8a9","storageKeys":["0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000002"]},{"address":"0x8116b273cd75d79c382afacc706659ded5e0a59d","storageKeys":["0x0fb35ae12d348b84dc0910bcce7d3b0a3f6d23a3e1d0b53bbe5f135078b97b13","0x000000000000000000000000000000000000000000000000000000000000002b","0x1d90d8e683e6736ac0564a19732a642e4be100e7ee8c225feba909bbdaf1522b"]},{"address":"0x9f8ccdafcc39f3c7d6ebf637c9151673cbc36b88","storageKeys":[]},{"address":"0xa478c2975ab1ea89e8196811f51a7b7ade33eb11","storageKeys":["0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000c","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000006"]},{"address":"0xf1cd4193bbc1ad4a23e833170f49d60f3d35a621","storageKeys":[]},{"address":"0x102633152313c81cd80419b6ecf66d14ad68949a","storageKeys":["0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc","0x000000000000000000000000000000000000000000000000000000000000003f","0x000000000000000000000000000000000000000000000000000000000000003a"]},{"address":"0xb02381b1d27aa9845e5012083ca288c1818884f0","storageKeys":[]}],"chainId":"0x1","v":"0x0","r":"0xcb4eccf09e298388220c5560a6539322bde17581cee6908d56a92a19575e28e2","s":"0x2b4e34adad48aee14b6600c6366ad683c00c63c9da88fc2a232308421cf69a21"}"#; diff --git 
a/crates/astria-composer/tests/blackbox/main.rs b/crates/astria-composer/tests/blackbox/main.rs index d0ed46b3b..80ecf0e38 100644 --- a/crates/astria-composer/tests/blackbox/main.rs +++ b/crates/astria-composer/tests/blackbox/main.rs @@ -1,4 +1,5 @@ pub mod api; +mod executor; pub mod geth_collector; mod grpc_collector; pub mod helper; From 955357613150f996ac786e2eeaa603d6ea94d268 Mon Sep 17 00:00:00 2001 From: Ethan Oroshiba Date: Thu, 23 Jan 2025 12:55:19 -0600 Subject: [PATCH 11/23] chore(composer): propagate errors (#1838) ## Summary Propagate errors which occur while composer is starting up and/or running so that they will be returned by the Composer's handle. ## Background Previously, composer would only exit with an error if the collectors' or executor's status channels were closed, and then the error message did not provide detailed information about the error that occurred. Additionally, if either of the `wait_for_*` loops failed, the composer would not shut down gracefully. This change is meant to expose the first eyre report which causes the composer to shut down, and gracefully shut down in all circumstances. ## Changes - Started collector and executor `wait_for_ready` loops concurrently, and continue with graceful shutdown even if these fail. - Store the first error composer encounters, and return it after graceful shutdown. If waiting for collectors or executor fails, Composer continues so that it can ascertain the underlying error from the task which caused it.
## Testing Passing all tests ## Changelogs Changelog updated ## Related Issues closes #1833 --- crates/astria-composer/CHANGELOG.md | 1 + crates/astria-composer/src/composer.rs | 54 +++++++++++++------ .../tests/blackbox/executor.rs | 29 +++++----- 3 files changed, 56 insertions(+), 28 deletions(-) diff --git a/crates/astria-composer/CHANGELOG.md b/crates/astria-composer/CHANGELOG.md index 5891bd7f5..7bacba8d8 100644 --- a/crates/astria-composer/CHANGELOG.md +++ b/crates/astria-composer/CHANGELOG.md @@ -19,6 +19,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Bump MSRV to 1.83.0 [#1857](https://github.com/astriaorg/astria/pull/1857). - Bump penumbra dependencies [#1740](https://github.com/astriaorg/astria/pull/1740). +- Propagate errors [#1838](https://github.com/astriaorg/astria/pull/1838). ## [1.0.0-rc.2] - 2024-10-23 diff --git a/crates/astria-composer/src/composer.rs b/crates/astria-composer/src/composer.rs index 4a6e5b774..6a57b2017 100644 --- a/crates/astria-composer/src/composer.rs +++ b/crates/astria-composer/src/composer.rs @@ -12,11 +12,15 @@ use astria_eyre::eyre::{ use itertools::Itertools as _; use tokio::{ io, + join, signal::unix::{ signal, SignalKind, }, - sync::watch, + sync::{ + watch, + OnceCell, + }, task::{ JoinError, JoinHandle, @@ -226,7 +230,7 @@ impl Composer { pub async fn run_until_stopped(self) -> eyre::Result<()> { let Self { api_server, - mut composer_status_sender, + composer_status_sender, executor, executor_handle, mut geth_collector_tasks, @@ -239,6 +243,8 @@ impl Composer { fee_asset, } = self; + let mut exit_err: OnceCell = OnceCell::new(); + // we need the API server to shutdown at the end, since it is used by k8s // to report the liveness of the service let api_server_shutdown_token = CancellationToken::new(); @@ -259,12 +265,18 @@ impl Composer { let mut executor_task = tokio::spawn(executor.run_until_stopped()); // wait for collectors and executor to come online - 
wait_for_collectors(&geth_collector_statuses, &mut composer_status_sender) - .await - .wrap_err("geth collectors failed to become ready")?; - wait_for_executor(executor_status, &mut composer_status_sender) - .await - .wrap_err("executor failed to become ready")?; + let collectors_startup_fut = + wait_for_collectors(&geth_collector_statuses, composer_status_sender.clone()); + let executor_startup_fut = wait_for_executor(executor_status, composer_status_sender); + + match join!(collectors_startup_fut, executor_startup_fut) { + (Ok(()), Ok(())) => {} + (Err(e), Ok(())) => error!(%e, "geth collectors failed to become ready"), + (Ok(()), Err(e)) => error!(%e, "executor failed to become ready"), + (Err(collector_err), Err(executor_err)) => { + error!(%collector_err, %executor_err, "geth collectors and executor failed to become ready"); + } + }; // run the grpc server let mut grpc_server_handle = tokio::spawn(async move { @@ -293,7 +305,7 @@ impl Composer { }; }, o = &mut api_task => { - report_exit("api server unexpectedly ended", o); + report_exit("api server unexpectedly ended", o, &exit_err); break ShutdownInfo { api_server_shutdown_token, composer_shutdown_token: shutdown_token, @@ -304,7 +316,7 @@ impl Composer { }; }, o = &mut executor_task => { - report_exit("executor unexpectedly ended", o); + report_exit("executor unexpectedly ended", o, &exit_err); break ShutdownInfo { api_server_shutdown_token, composer_shutdown_token: shutdown_token, @@ -315,7 +327,7 @@ impl Composer { }; }, o = &mut grpc_server_handle => { - report_exit("grpc server unexpectedly ended", o); + report_exit("grpc server unexpectedly ended", o, &exit_err); break ShutdownInfo { api_server_shutdown_token, composer_shutdown_token: shutdown_token, @@ -326,7 +338,7 @@ impl Composer { }; }, Some((rollup, collector_exit)) = geth_collector_tasks.join_next() => { - report_exit("collector", collector_exit); + report_exit("collector", collector_exit, &exit_err); if let Some(url) = rollups.get(&rollup) { 
let collector = geth::Builder { chain_name: rollup.clone(), @@ -348,7 +360,11 @@ impl Composer { }); }; - shutdown_info.run().await + let shutdown_res = shutdown_info.run().await; + if let Some(exit_err) = exit_err.take() { + return Err(exit_err); + } + shutdown_res } } @@ -467,7 +483,7 @@ fn spawn_geth_collectors( #[instrument(skip_all, err)] async fn wait_for_executor( mut executor_status: watch::Receiver, - composer_status_sender: &mut watch::Sender, + composer_status_sender: watch::Sender, ) -> eyre::Result<()> { executor_status .wait_for(executor::Status::is_connected) @@ -485,7 +501,7 @@ async fn wait_for_executor( #[instrument(skip_all, err)] async fn wait_for_collectors( collector_statuses: &HashMap>, - composer_status_sender: &mut watch::Sender, + composer_status_sender: watch::Sender, ) -> eyre::Result<()> { use futures::{ future::FutureExt as _, @@ -532,14 +548,20 @@ async fn wait_for_collectors( Ok(()) } -fn report_exit(task_name: &str, outcome: Result, JoinError>) { +fn report_exit( + task_name: &str, + outcome: Result, JoinError>, + exit_err: &OnceCell, +) { match outcome { Ok(Ok(())) => info!(task = task_name, "task exited successfully"), Ok(Err(error)) => { error!(%error, task = task_name, "task returned with error"); + let _ = exit_err.set(error); } Err(error) => { error!(%error, task = task_name, "task failed to complete"); + let _ = exit_err.set(error.into()); } } } diff --git a/crates/astria-composer/tests/blackbox/executor.rs b/crates/astria-composer/tests/blackbox/executor.rs index c4490e1b6..954f4c425 100644 --- a/crates/astria-composer/tests/blackbox/executor.rs +++ b/crates/astria-composer/tests/blackbox/executor.rs @@ -1,4 +1,7 @@ -use std::time::Duration; +use std::{ + fmt::Write as _, + time::Duration, +}; use astria_core::{ generated::astria::composer::v1::{ @@ -17,6 +20,7 @@ use crate::helper::{ mount_broadcast_tx_sync_rollup_data_submissions_mock, signed_tx_from_request, spawn_composer, + TEST_CHAIN_ID, }; /// Test to check that the 
executor sends a signed transaction to the sequencer after its @@ -207,22 +211,23 @@ async fn two_rollup_data_submissions_single_bundle() { /// ID #[tokio::test] async fn chain_id_mismatch_returns_error() { - // TODO(https://github.com/astriaorg/astria/issues/1833): this test will currently succeed if - // the executor fails for any reason on startup, not just if the chain ID is incorrect. This is - // a symptom of the current implementation of executor, though, which should be propagating - // errors. As such, I think it is out of the scope for the following test-only changes and - // should be fixed in a followup. - let bad_chain_id = "bad_id"; let test_composer = spawn_composer(&["test1"], Some(bad_chain_id), false).await; + let expected_err_msg = + format!("expected chain ID `{TEST_CHAIN_ID}`, but received `{bad_chain_id}`"); let err = test_composer.composer.await.unwrap().unwrap_err(); for cause in err.chain() { - if cause - .to_string() - .contains("executor failed while waiting for it to become ready") - { + if cause.to_string().contains(&expected_err_msg) { return; } } - panic!("did not find expected executor error message") + let mut panic_msg = String::new(); + writeln!( + &mut panic_msg, + "did not find expected executor error message" + ) + .unwrap(); + writeln!(&mut panic_msg, "expected cause:\n\t{expected_err_msg}").unwrap(); + writeln!(&mut panic_msg, "actual cause chain:\n\t{err:?}").unwrap(); + panic!("{panic_msg}"); } From d966cb86a72b74bd3fef7a0e8576fee41c894251 Mon Sep 17 00:00:00 2001 From: Ethan Oroshiba Date: Fri, 24 Jan 2025 12:35:36 -0600 Subject: [PATCH 12/23] chore(sequencer): provide more thorough unit testing for actions (#1916) ## Summary Added more thorough unit tests for each action and revised existing unit tests as needed. ## Background Many of our actions were lacking thorough unit tests to ensure that they were succeeding and/or failing in the proper scenarios. Some had not unit tests at all. 
Additionally, some unit tests were incorrect or had some vestigial code regarding how fees were previously handled. ## Changes - Added unit tests for all potential successes and failures in actions, disregarding state read/write errors. - Revised existing unit tests as needed. ## Testing Passing all tests ## Changelogs Changelogs updated. ## Related Issues closes #1909 --- crates/astria-sequencer/CHANGELOG.md | 1 + .../src/action_handler/impls/bridge_lock.rs | 147 ++++++++++++++ .../impls/bridge_sudo_change.rs | 154 ++++++++++++-- .../src/action_handler/impls/bridge_unlock.rs | 124 +++++++++++- .../action_handler/impls/fee_asset_change.rs | 135 +++++++++++++ .../src/action_handler/impls/fee_change.rs | 27 +++ .../impls/ibc_relayer_change.rs | 132 ++++++++++++ .../action_handler/impls/ibc_sudo_change.rs | 113 +++++++++++ .../action_handler/impls/ics20_withdrawal.rs | 133 +++++++++++- .../impls/init_bridge_account.rs | 191 ++++++++++++++++++ .../impls/sudo_address_change.rs | 113 +++++++++++ .../action_handler/impls/validator_update.rs | 189 +++++++++++++++++ .../src/action_handler/mod.rs | 68 +++++++ .../src/benchmark_and_test_utils.rs | 4 +- 14 files changed, 1492 insertions(+), 39 deletions(-) diff --git a/crates/astria-sequencer/CHANGELOG.md b/crates/astria-sequencer/CHANGELOG.md index 644238818..9dd713e98 100644 --- a/crates/astria-sequencer/CHANGELOG.md +++ b/crates/astria-sequencer/CHANGELOG.md @@ -19,6 +19,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Remove events reporting on state storage creation [#1892](https://github.com/astriaorg/astria/pull/1892). - Use bridge address to determine asset in bridge unlock cost estimation instead of signer [#1905](https://github.com/astriaorg/astria/pull/1905). +- Add more thorough unit tests for all actions [#1916](https://github.com/astriaorg/astria/pull/1916). 
## [1.0.0] - 2024-10-25 diff --git a/crates/astria-sequencer/src/action_handler/impls/bridge_lock.rs b/crates/astria-sequencer/src/action_handler/impls/bridge_lock.rs index 38c61520c..27ec288c8 100644 --- a/crates/astria-sequencer/src/action_handler/impls/bridge_lock.rs +++ b/crates/astria-sequencer/src/action_handler/impls/bridge_lock.rs @@ -132,6 +132,7 @@ mod tests { address::StateWriteExt as _, assets::StateWriteExt as _, benchmark_and_test_utils::{ + assert_eyre_error, astria_address, nria, ASTRIA_PREFIX, @@ -199,4 +200,150 @@ mod tests { assert_eq!(deposits.len(), 1); assert!(deposits[0].asset.as_trace_prefixed().is_some()); } + + #[tokio::test] + async fn bridge_lock_fails_if_not_sent_to_bridge_account() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + state.put_transaction_context(TransactionContext { + address_bytes: [1; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + + let bridge_lock_action = BridgeLock { + to: astria_address(&[3; 20]), + amount: 1, + asset: nria().into(), + fee_asset: nria().into(), + destination_chain_address: "ethan_was_here".to_string(), + }; + + assert_eyre_error( + &bridge_lock_action + .check_and_execute(&mut state) + .await + .unwrap_err(), + "bridge lock must be sent to a bridge account", + ); + } + + #[tokio::test] + async fn bridge_lock_fails_if_destination_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + state.put_transaction_context(TransactionContext { + address_bytes: [1; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + let different_prefix = "different_prefix"; + state.put_base_prefix(different_prefix.to_string()).unwrap(); + + let 
bridge_lock_action = BridgeLock { + to: astria_address(&[3; 20]), + amount: 1, + asset: nria().into(), + fee_asset: nria().into(), + destination_chain_address: "ethan_was_here".to_string(), + }; + + assert_eyre_error( + &bridge_lock_action + .check_and_execute(&mut state) + .await + .unwrap_err(), + &format!( + "address has prefix `{ASTRIA_PREFIX}` but only `{different_prefix}` is permitted" + ), + ); + } + + #[tokio::test] + async fn bridge_lock_fails_if_asset_is_not_allowed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let bridge_asset = "trace_asset" + .parse::() + .unwrap(); + let action_asset = nria(); + let bridge_address = astria_address(&[3; 20]); + let from_address = astria_address(&[1; 20]); + + state.put_transaction_context(TransactionContext { + address_bytes: *from_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state + .put_bridge_account_rollup_id(&bridge_address, [0; 32].into()) + .unwrap(); + state + .put_bridge_account_ibc_asset(&bridge_address, bridge_asset) + .unwrap(); + + let bridge_lock_action = BridgeLock { + to: astria_address(&[3; 20]), + amount: 1, + asset: action_asset.into(), + fee_asset: nria().into(), + destination_chain_address: "ethan_was_here".to_string(), + }; + + assert_eyre_error( + &bridge_lock_action + .check_and_execute(&mut state) + .await + .unwrap_err(), + "asset ID is not authorized for transfer to bridge account", + ); + } + + #[tokio::test] + async fn bridge_lock_fails_if_ibc_asset_cannot_be_mapped_to_trace() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let ibc_asset = nria().to_ibc_prefixed(); + let bridge_address = astria_address(&[3; 20]); + let from_address = 
astria_address(&[1; 20]); + + state.put_transaction_context(TransactionContext { + address_bytes: *from_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state + .put_bridge_account_rollup_id(&bridge_address, [0; 32].into()) + .unwrap(); + state + .put_bridge_account_ibc_asset(&bridge_address, ibc_asset) + .unwrap(); + + let bridge_lock_action = BridgeLock { + to: astria_address(&[3; 20]), + amount: 1, + asset: ibc_asset.into(), + fee_asset: nria().into(), + destination_chain_address: "ethan_was_here".to_string(), + }; + + assert_eyre_error( + &bridge_lock_action + .check_and_execute(&mut state) + .await + .unwrap_err(), + "mapping from IBC prefixed bridge asset to trace prefixed not found", + ); + } } diff --git a/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs b/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs index 867a824c2..6b963c0c4 100644 --- a/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs @@ -86,22 +86,22 @@ impl ActionHandler for BridgeSudoChange { #[cfg(test)] mod tests { use astria_core::{ - primitive::v1::TransactionId, - protocol::{ - fees::v1::FeeComponents, - transaction::v1::action::BridgeSudoChange, + primitive::v1::{ + Address, + TransactionId, }, + protocol::transaction::v1::action::BridgeSudoChange, }; use cnidarium::StateDelta; use crate::{ - accounts::StateWriteExt as _, action_handler::{ impls::test_utils::test_asset, ActionHandler as _, }, address::StateWriteExt as _, benchmark_and_test_utils::{ + assert_eyre_error, astria_address, ASTRIA_PREFIX, }, @@ -145,12 +145,10 @@ mod tests { fee_asset: asset.clone(), }; - assert!(action - .check_and_execute(state) - .await - .unwrap_err() - .to_string() - .contains("unauthorized for bridge sudo change action")); + assert_eyre_error( + 
&action.check_and_execute(state).await.unwrap_err(), + "unauthorized for bridge sudo change action", + ); } #[tokio::test] @@ -166,12 +164,6 @@ mod tests { position_in_transaction: 0, }); state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state - .put_fees(FeeComponents::::new(10, 0)) - .unwrap(); - - let fee_asset = test_asset(); - state.put_allowed_fee_asset(&fee_asset).unwrap(); let bridge_address = astria_address(&[99; 20]); @@ -181,15 +173,12 @@ mod tests { let new_sudo_address = astria_address(&[98; 20]); let new_withdrawer_address = astria_address(&[97; 20]); - state - .put_account_balance(&bridge_address, &fee_asset, 10) - .unwrap(); let action = BridgeSudoChange { bridge_address, new_sudo_address: Some(new_sudo_address), new_withdrawer_address: Some(new_withdrawer_address), - fee_asset, + fee_asset: test_asset(), }; action.check_and_execute(&mut state).await.unwrap(); @@ -209,4 +198,127 @@ mod tests { Some(new_withdrawer_address.bytes()), ); } + + #[tokio::test] + async fn bridge_sudo_change_fails_if_bridge_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let sudo_address = astria_address(&[98; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: sudo_address.bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let different_prefix = "different_prefix"; + state.put_base_prefix(different_prefix.to_string()).unwrap(); + + let bridge_address = astria_address(&[99; 20]); + + state + .put_bridge_account_sudo_address(&bridge_address, sudo_address) + .unwrap(); + + let action = BridgeSudoChange { + bridge_address, + new_sudo_address: Some(astria_address(&[98; 20])), + new_withdrawer_address: Some(astria_address(&[97; 20])), + fee_asset: test_asset(), + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + &format!( + 
"address has prefix `{ASTRIA_PREFIX}` but only `{different_prefix}` is permitted" + ), + ); + } + + #[tokio::test] + async fn bridge_sudo_change_fails_if_new_sudo_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let sudo_address = astria_address(&[98; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: sudo_address.bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + + let bridge_address = astria_address(&[99; 20]); + + state + .put_bridge_account_sudo_address(&bridge_address, sudo_address) + .unwrap(); + + let different_prefix = "different_prefix"; + let new_sudo_address = Address::builder() + .array([98; 20]) + .prefix(different_prefix) + .try_build() + .unwrap(); + + let action = BridgeSudoChange { + bridge_address, + new_sudo_address: Some(new_sudo_address), + new_withdrawer_address: Some(astria_address(&[97; 20])), + fee_asset: test_asset(), + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + &format!( + "address has prefix `{different_prefix}` but only `{ASTRIA_PREFIX}` is permitted" + ), + ); + } + + #[tokio::test] + async fn bridge_sudo_change_fails_if_new_withdrawer_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let sudo_address = astria_address(&[98; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: sudo_address.bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + + let bridge_address = astria_address(&[99; 20]); + + state + .put_bridge_account_sudo_address(&bridge_address, sudo_address) + 
.unwrap(); + + let different_prefix = "different_prefix"; + let new_withdrawer_address = Address::builder() + .array([97; 20]) + .prefix(different_prefix) + .try_build() + .unwrap(); + + let action = BridgeSudoChange { + bridge_address, + new_sudo_address: Some(astria_address(&[98; 20])), + new_withdrawer_address: Some(new_withdrawer_address), + fee_asset: test_asset(), + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + &format!( + "address has prefix `{different_prefix}` but only `{ASTRIA_PREFIX}` is permitted" + ), + ); + } } diff --git a/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs b/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs index 3789fa5bd..d5721f61f 100644 --- a/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs +++ b/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs @@ -111,13 +111,11 @@ impl ActionHandler for BridgeUnlock { mod tests { use astria_core::{ primitive::v1::{ + Address, RollupId, TransactionId, }, - protocol::{ - fees::v1::FeeComponents, - transaction::v1::action::BridgeUnlock, - }, + protocol::transaction::v1::action::BridgeUnlock, }; use cnidarium::StateDelta; @@ -131,10 +129,10 @@ mod tests { benchmark_and_test_utils::{ assert_eyre_error, astria_address, + nria, ASTRIA_PREFIX, }, bridge::StateWriteExt as _, - fees::StateWriteExt as _, transaction::{ StateWriteExt as _, TransactionContext, @@ -238,11 +236,7 @@ mod tests { state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); let asset = test_asset(); - let transfer_fee = 10; let transfer_amount = 100; - state - .put_fees(FeeComponents::::new(transfer_fee, 0)) - .unwrap(); let to_address = astria_address(&[2; 20]); let rollup_id = RollupId::from_unhashed_bytes(b"test_rollup_id"); @@ -256,7 +250,6 @@ mod tests { state .put_bridge_account_withdrawer_address(&bridge_address, bridge_address) .unwrap(); - state.put_allowed_fee_asset(&asset).unwrap(); // Put plenty of balance state 
.put_account_balance(&bridge_address, &asset, 3 * transfer_amount) @@ -289,4 +282,115 @@ mod tests { "withdrawal event already processed", ); } + + #[tokio::test] + async fn bridge_unlock_fails_if_destination_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + state.put_transaction_context(TransactionContext { + address_bytes: [1; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + // Put different base prefix into state + let different_prefix = "different_prefix"; + state.put_base_prefix(different_prefix.to_string()).unwrap(); + + let bridge_lock_action = BridgeUnlock { + to: astria_address(&[0; 20]), // not base prefixed + amount: 1, + fee_asset: nria().into(), + memo: String::new(), + bridge_address: astria_address(&[1; 20]), + rollup_block_number: 1, + rollup_withdrawal_event_id: "rollup_withdrawal_event_id".to_string(), + }; + + assert_eyre_error( + &bridge_lock_action + .check_and_execute(&mut state) + .await + .unwrap_err(), + &format!( + "address has prefix `{ASTRIA_PREFIX}` but only `{different_prefix}` is permitted" + ), + ); + } + + #[tokio::test] + async fn bridge_unlock_fails_if_bridge_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + state.put_transaction_context(TransactionContext { + address_bytes: [1; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + + // Construct non base-prefixed bridge address + let different_prefix = "different_prefix"; + let bridge_address = Address::builder() + .array([1; 20]) + .prefix(different_prefix) + .try_build() + .unwrap(); + + let bridge_lock_action = BridgeUnlock { + to: astria_address(&[0; 20]), + amount: 
1, + fee_asset: nria().into(), + memo: String::new(), + bridge_address, + rollup_block_number: 1, + rollup_withdrawal_event_id: "rollup_withdrawal_event_id".to_string(), + }; + + assert_eyre_error( + &bridge_lock_action + .check_and_execute(&mut state) + .await + .unwrap_err(), + &format!( + "address has prefix `{different_prefix}` but only `{ASTRIA_PREFIX}` is permitted", + ), + ); + } + + #[tokio::test] + async fn bridge_unlock_fails_if_bridge_address_is_not_a_bridge_account() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let bridge_address = astria_address(&[1; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: bridge_address.bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + + // No rollup ID or asset associated with `bridge_address` in state + + let action = BridgeUnlock { + to: astria_address(&[2; 20]), + amount: 100, + fee_asset: nria().into(), + memo: String::new(), + bridge_address, + rollup_block_number: 1, + rollup_withdrawal_event_id: "a-rollup-defined-hash".to_string(), + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "failed to get bridge's asset id, must be a bridge account", + ); + } } diff --git a/crates/astria-sequencer/src/action_handler/impls/fee_asset_change.rs b/crates/astria-sequencer/src/action_handler/impls/fee_asset_change.rs index fabbe3247..509c54c4d 100644 --- a/crates/astria-sequencer/src/action_handler/impls/fee_asset_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/fee_asset_change.rs @@ -68,3 +68,138 @@ impl ActionHandler for FeeAssetChange { Ok(()) } } + +#[cfg(test)] +mod tests { + use astria_core::primitive::v1::TransactionId; + use cnidarium::StateDelta; + use futures::TryStreamExt as _; + + use super::*; + use crate::{ + 
accounts::AddressBytes, + action_handler::impls::test_utils::test_asset, + authority::StateWriteExt, + benchmark_and_test_utils::{ + assert_eyre_error, + astria_address, + nria, + }, + transaction::{ + StateWriteExt as _, + TransactionContext, + }, + }; + + #[tokio::test] + async fn fee_asset_change_addition_executes_as_expected() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let sudo_address = astria_address(&[1; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: *sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_sudo_address(sudo_address).unwrap(); + state.put_allowed_fee_asset(&nria()).unwrap(); + + let fee_asset_change = FeeAssetChange::Addition(test_asset()); + fee_asset_change + .check_and_execute(&mut state) + .await + .unwrap(); + + let fee_assets = state + .allowed_fee_assets() + .try_collect::>() + .await + .unwrap(); + assert_eq!(fee_assets.len(), 2); + assert!(fee_assets.contains(&nria().to_ibc_prefixed())); + assert!(fee_assets.contains(&test_asset().to_ibc_prefixed())); + } + + #[tokio::test] + async fn fee_asset_change_removal_executes_as_expected() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let sudo_address = astria_address(&[1; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: *sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_sudo_address(sudo_address).unwrap(); + state.put_allowed_fee_asset(&nria()).unwrap(); + state.put_allowed_fee_asset(&test_asset()).unwrap(); + + let fee_asset_change = FeeAssetChange::Removal(test_asset()); + fee_asset_change + .check_and_execute(&mut state) + .await + .unwrap(); + + let fee_assets = state + 
.allowed_fee_assets() + .try_collect::>() + .await + .unwrap(); + assert_eq!(fee_assets.len(), 1); + assert!(fee_assets.contains(&nria().to_ibc_prefixed())); + } + + #[tokio::test] + async fn fee_asset_change_fails_if_signer_is_not_sudo_address() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let sudo_address = astria_address(&[1; 20]); + let signer = astria_address(&[2; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: *signer.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_sudo_address(sudo_address).unwrap(); + + let fee_asset_change = FeeAssetChange::Addition(test_asset()); + assert_eyre_error( + &fee_asset_change + .check_and_execute(&mut state) + .await + .unwrap_err(), + "unauthorized address for fee asset change", + ); + } + + #[tokio::test] + async fn fee_asset_change_fails_if_attempting_to_remove_only_asset() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let sudo_address = astria_address(&[1; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: *sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_sudo_address(sudo_address).unwrap(); + state.put_allowed_fee_asset(&nria()).unwrap(); + + let fee_asset_change = FeeAssetChange::Removal(nria().into()); + assert_eyre_error( + &fee_asset_change + .check_and_execute(&mut state) + .await + .unwrap_err(), + "cannot remove last allowed fee asset", + ); + } +} diff --git a/crates/astria-sequencer/src/action_handler/impls/fee_change.rs b/crates/astria-sequencer/src/action_handler/impls/fee_change.rs index 24448b908..627d12c87 100644 --- a/crates/astria-sequencer/src/action_handler/impls/fee_change.rs +++ 
b/crates/astria-sequencer/src/action_handler/impls/fee_change.rs @@ -101,8 +101,13 @@ mod tests { use penumbra_ibc::IbcRelay; use crate::{ + accounts::AddressBytes as _, action_handler::ActionHandler as _, authority::StateWriteExt as _, + benchmark_and_test_utils::{ + assert_eyre_error, + astria_address, + }, fees::{ FeeHandler, StateReadExt as _, @@ -114,6 +119,28 @@ mod tests { }, }; + #[tokio::test] + async fn fee_change_action_fails_if_signer_is_not_sudo_address() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let sudo_address = astria_address(&[0; 20]); + let signer = astria_address(&[1; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: *signer.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_sudo_address(sudo_address).unwrap(); + + let action = FeeChange::Transfer(FeeComponents::::new(1, 2)); + assert_eyre_error( + &action.check_and_execute(state).await.unwrap_err(), + "signer is not the sudo key", + ); + } + #[tokio::test] async fn transfer_fee_change_action_executes_as_expected() { test_fee_change_action::().await; diff --git a/crates/astria-sequencer/src/action_handler/impls/ibc_relayer_change.rs b/crates/astria-sequencer/src/action_handler/impls/ibc_relayer_change.rs index 9ddce3ea3..a719635d7 100644 --- a/crates/astria-sequencer/src/action_handler/impls/ibc_relayer_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/ibc_relayer_change.rs @@ -63,3 +63,135 @@ impl ActionHandler for IbcRelayerChange { Ok(()) } } + +#[cfg(test)] +mod tests { + use astria_core::primitive::v1::TransactionId; + + use super::*; + use crate::{ + accounts::AddressBytes as _, + address::StateWriteExt as _, + benchmark_and_test_utils::{ + assert_eyre_error, + astria_address, + ASTRIA_PREFIX, + }, + transaction::{ + StateWriteExt as _, + TransactionContext, + }, + 
}; + + #[tokio::test] + async fn ibc_relayer_addition_executes_as_expected() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let ibc_sudo_address = astria_address(&[1; 20]); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *ibc_sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_ibc_sudo_address(ibc_sudo_address).unwrap(); + + let address_to_add = astria_address(&[0; 20]); + let action = IbcRelayerChange::Addition(address_to_add); + action.check_and_execute(&mut state).await.unwrap(); + + assert!(state.is_ibc_relayer(address_to_add).await.unwrap()); + } + + #[tokio::test] + async fn ibc_relayer_removal_executes_as_expected() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let address_to_remove = astria_address(&[0; 20]); + let ibc_sudo_address = astria_address(&[1; 20]); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *ibc_sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_ibc_sudo_address(ibc_sudo_address).unwrap(); + state.put_ibc_relayer_address(&address_to_remove).unwrap(); + + assert!(state.is_ibc_relayer(address_to_remove).await.unwrap()); + + let action = IbcRelayerChange::Removal(address_to_remove); + action.check_and_execute(&mut state).await.unwrap(); + + assert!(!state.is_ibc_relayer(address_to_remove).await.unwrap()); + } + + #[tokio::test] + async fn ibc_relayer_addition_fails_if_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = 
storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let different_prefix = "different_prefix"; + state.put_base_prefix(different_prefix.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: [0; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = IbcRelayerChange::Addition(astria_address(&[0; 20])); + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "failed check for base prefix of provided address to be added/removed", + ); + } + + #[tokio::test] + async fn ibc_relayer_removal_fails_if_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let different_prefix = "different_prefix"; + state.put_base_prefix(different_prefix.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: [0; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = IbcRelayerChange::Removal(astria_address(&[0; 20])); + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "failed check for base prefix of provided address to be added/removed", + ); + } + + #[tokio::test] + async fn ibc_relayer_change_fails_if_signer_is_not_sudo_address() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let ibc_sudo_address = astria_address(&[1; 20]); + let signer = astria_address(&[2; 20]); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *signer.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_ibc_sudo_address(ibc_sudo_address).unwrap(); + + 
let action = IbcRelayerChange::Addition(astria_address(&[0; 20])); + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "unauthorized address for IBC relayer change", + ); + } +} diff --git a/crates/astria-sequencer/src/action_handler/impls/ibc_sudo_change.rs b/crates/astria-sequencer/src/action_handler/impls/ibc_sudo_change.rs index 487f71b4e..a3f384cab 100644 --- a/crates/astria-sequencer/src/action_handler/impls/ibc_sudo_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/ibc_sudo_change.rs @@ -47,3 +47,116 @@ impl ActionHandler for IbcSudoChange { Ok(()) } } + +#[cfg(test)] +mod tests { + use astria_core::primitive::v1::TransactionId; + + use super::*; + use crate::{ + accounts::AddressBytes as _, + address::StateWriteExt as _, + authority::StateWriteExt as _, + benchmark_and_test_utils::{ + assert_eyre_error, + astria_address, + ASTRIA_PREFIX, + }, + ibc::StateReadExt as _, + transaction::{ + StateWriteExt as _, + TransactionContext, + }, + }; + + #[tokio::test] + async fn ibc_sudo_change_executes_as_expected() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let old_ibc_sudo_address = astria_address(&[0; 20]); + let new_ibc_sudo_address = astria_address(&[1; 20]); + let sudo_address = astria_address(&[2; 20]); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_sudo_address(sudo_address).unwrap(); + state.put_ibc_sudo_address(old_ibc_sudo_address).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + assert_eq!( + state.get_ibc_sudo_address().await.unwrap(), + *old_ibc_sudo_address.address_bytes() + ); + + let action = IbcSudoChange { + new_address: new_ibc_sudo_address, + }; + + action.check_and_execute(&mut state).await.unwrap(); + + 
assert_eq!( + state.get_ibc_sudo_address().await.unwrap(), + *new_ibc_sudo_address.address_bytes() + ); + } + + #[tokio::test] + async fn ibc_sudo_change_fails_if_new_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let new_ibc_sudo_address = astria_address(&[1; 20]); + + let different_prefix = "different_prefix"; + state.put_base_prefix(different_prefix.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: [2; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = IbcSudoChange { + new_address: new_ibc_sudo_address, + }; + + assert_eyre_error( + &action.check_and_execute(state).await.unwrap_err(), + &format!( + "address has prefix `{ASTRIA_PREFIX}` but only `{different_prefix}` is permitted" + ), + ); + } + + #[tokio::test] + async fn ibc_sudo_change_fails_if_signer_is_not_sudo_address() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let sudo_address = astria_address(&[0; 20]); + let signer = astria_address(&[1; 20]); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *signer.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_sudo_address(sudo_address).unwrap(); + + let action = IbcSudoChange { + new_address: astria_address(&[2; 20]), + }; + + assert_eyre_error( + &action.check_and_execute(state).await.unwrap_err(), + "signer is not the sudo key", + ); + } +} diff --git a/crates/astria-sequencer/src/action_handler/impls/ics20_withdrawal.rs b/crates/astria-sequencer/src/action_handler/impls/ics20_withdrawal.rs index cf2970531..bb0791693 100644 --- 
a/crates/astria-sequencer/src/action_handler/impls/ics20_withdrawal.rs +++ b/crates/astria-sequencer/src/action_handler/impls/ics20_withdrawal.rs @@ -110,7 +110,7 @@ impl ActionHandler for action::Ics20Withdrawal { state .ensure_base_prefix(&self.return_address) .await - .wrap_err("failed to verify that return address address has permitted base prefix")?; + .wrap_err("failed to verify that return address has permitted base prefix")?; if let Some(bridge_address) = &self.bridge_address { state.ensure_base_prefix(bridge_address).await.wrap_err( @@ -275,24 +275,36 @@ fn is_source(source_port: &PortId, source_channel: &ChannelId, asset: &Denom) -> #[cfg(test)] mod tests { use astria_core::{ - primitive::v1::RollupId, + primitive::v1::{ + Address, + RollupId, + TransactionId, + }, protocol::transaction::v1::action, }; use cnidarium::StateDelta; use ibc_types::core::client::Height; use crate::{ - action_handler::impls::{ - ics20_withdrawal::establish_withdrawal_target, - test_utils::test_asset, + action_handler::{ + impls::{ + ics20_withdrawal::establish_withdrawal_target, + test_utils::test_asset, + }, + ActionHandler as _, }, address::StateWriteExt as _, benchmark_and_test_utils::{ assert_eyre_error, astria_address, + nria, ASTRIA_PREFIX, }, bridge::StateWriteExt as _, + transaction::{ + StateWriteExt as _, + TransactionContext, + }, }; #[tokio::test] @@ -326,7 +338,7 @@ mod tests { } #[tokio::test] - async fn withdrawal_target_is_sender_if_bridge_is_unset_but_sender_is_bridge() { + async fn withdrawal_target_fails_if_bridge_is_unset_but_sender_is_bridge() { let storage = cnidarium::TempStorage::new().await.unwrap(); let snapshot = storage.latest_snapshot(); let mut state = StateDelta::new(snapshot); @@ -503,4 +515,113 @@ mod tests { "bridge address must have a withdrawer address set", ); } + + #[tokio::test] + async fn ics20_withdrawal_fails_if_return_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = 
storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: [0; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = action::Ics20Withdrawal { + amount: 1, + denom: nria().into(), + bridge_address: None, + destination_chain_address: "test".to_string(), + return_address: Address::builder() + .prefix("different_prefix") + .array([0; 20]) + .try_build() + .unwrap(), + timeout_height: Height::new(1, 1).unwrap(), + timeout_time: 1, + source_channel: "channel-0".to_string().parse().unwrap(), + fee_asset: nria().into(), + memo: String::new(), + use_compat_address: false, + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "failed to verify that return address has permitted base prefix", + ); + } + + #[tokio::test] + async fn ics20_withdrawal_fails_if_bridge_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: [0; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = action::Ics20Withdrawal { + amount: 1, + denom: nria().into(), + bridge_address: Some( + Address::builder() + .prefix("different_prefix") + .array([0; 20]) + .try_build() + .unwrap(), + ), + destination_chain_address: "test".to_string(), + return_address: astria_address(&[0; 20]), + timeout_height: Height::new(1, 1).unwrap(), + timeout_time: 1, + source_channel: "channel-0".to_string().parse().unwrap(), + fee_asset: nria().into(), + memo: String::new(), + use_compat_address: false, + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + 
"failed to verify that bridge address address has permitted base prefix", + ); + } + + #[tokio::test] + async fn ics20_withdrawal_fails_if_bridge_address_is_set_and_memo_is_bad() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: [0; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = action::Ics20Withdrawal { + amount: 1, + denom: nria().into(), + bridge_address: Some(astria_address(&[1; 20])), + destination_chain_address: "test".to_string(), + return_address: astria_address(&[0; 20]), + timeout_height: Height::new(1, 1).unwrap(), + timeout_time: 1, + source_channel: "channel-0".to_string().parse().unwrap(), + fee_asset: nria().into(), + memo: String::new(), + use_compat_address: false, + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "failed to parse memo for ICS bound bridge withdrawal", + ); + } } diff --git a/crates/astria-sequencer/src/action_handler/impls/init_bridge_account.rs b/crates/astria-sequencer/src/action_handler/impls/init_bridge_account.rs index fdea8cab4..f07e0a8b4 100644 --- a/crates/astria-sequencer/src/action_handler/impls/init_bridge_account.rs +++ b/crates/astria-sequencer/src/action_handler/impls/init_bridge_account.rs @@ -87,3 +87,194 @@ impl ActionHandler for InitBridgeAccount { Ok(()) } } + +#[cfg(test)] +mod tests { + use astria_core::primitive::v1::{ + asset::Denom, + RollupId, + TransactionId, + }; + + use super::*; + use crate::{ + accounts::AddressBytes as _, + address::StateWriteExt as _, + benchmark_and_test_utils::{ + assert_eyre_error, + astria_address, + nria, + ASTRIA_PREFIX, + }, + transaction::{ + StateWriteExt as _, + TransactionContext, + }, + }; + + #[tokio::test] + async fn 
init_bridge_account_executes_as_expected() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + let bridge_address = astria_address(&[1; 20]); + let sudo_address = astria_address(&[2; 20]); + let withdrawer_address = astria_address(&[3; 20]); + let rollup_id = RollupId::new([1; 32]); + let asset = Denom::from(nria()); + + state.put_transaction_context(TransactionContext { + address_bytes: *bridge_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = InitBridgeAccount { + rollup_id, + asset: asset.clone(), + fee_asset: asset.clone(), + sudo_address: Some(sudo_address), + withdrawer_address: Some(withdrawer_address), + }; + + action.check_and_execute(&mut state).await.unwrap(); + + assert_eq!( + state + .get_bridge_account_rollup_id(&bridge_address) + .await + .unwrap(), + Some(rollup_id) + ); + assert_eq!( + state + .get_bridge_account_ibc_asset(&bridge_address) + .await + .unwrap(), + asset.to_ibc_prefixed() + ); + assert_eq!( + state + .get_bridge_account_sudo_address(&bridge_address) + .await + .unwrap(), + Some(*sudo_address.address_bytes()) + ); + assert_eq!( + state + .get_bridge_account_withdrawer_address(&bridge_address) + .await + .unwrap(), + Some(*withdrawer_address.address_bytes()) + ); + } + + #[tokio::test] + async fn init_bridge_account_fails_if_withdrawer_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + let bridge_address = astria_address(&[1; 20]); + let different_prefix = "different_prefix"; + let withdrawer_address = Address::builder() + .prefix(different_prefix) + .array([0; 20]) + .try_build() + 
.unwrap(); + + state.put_transaction_context(TransactionContext { + address_bytes: *bridge_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = InitBridgeAccount { + rollup_id: RollupId::new([1; 32]), + asset: nria().into(), + fee_asset: nria().into(), + sudo_address: None, + withdrawer_address: Some(withdrawer_address), + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + &format!( + "address has prefix `{different_prefix}` but only `{ASTRIA_PREFIX}` is permitted" + ), + ); + } + + #[tokio::test] + async fn init_bridge_account_fails_if_sudo_address_is_not_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + let bridge_address = astria_address(&[1; 20]); + let different_prefix = "different_prefix"; + let sudo_address = Address::builder() + .prefix(different_prefix) + .array([0; 20]) + .try_build() + .unwrap(); + + state.put_transaction_context(TransactionContext { + address_bytes: *bridge_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = InitBridgeAccount { + rollup_id: RollupId::new([1; 32]), + asset: nria().into(), + fee_asset: nria().into(), + sudo_address: Some(sudo_address), + withdrawer_address: None, + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + &format!( + "address has prefix `{different_prefix}` but only `{ASTRIA_PREFIX}` is permitted" + ), + ); + } + + #[tokio::test] + async fn init_bridge_account_fails_if_bridge_account_already_exists() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + 
state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + let bridge_address = astria_address(&[1; 20]); + let rollup_id = RollupId::new([1; 32]); + + state.put_transaction_context(TransactionContext { + address_bytes: *bridge_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state + .put_bridge_account_rollup_id(&bridge_address, rollup_id) + .unwrap(); + + let action = InitBridgeAccount { + rollup_id, + asset: nria().into(), + fee_asset: nria().into(), + sudo_address: None, + withdrawer_address: None, + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "bridge account already exists", + ); + } +} diff --git a/crates/astria-sequencer/src/action_handler/impls/sudo_address_change.rs b/crates/astria-sequencer/src/action_handler/impls/sudo_address_change.rs index eb3f4f771..f244699e3 100644 --- a/crates/astria-sequencer/src/action_handler/impls/sudo_address_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/sudo_address_change.rs @@ -51,3 +51,116 @@ impl ActionHandler for SudoAddressChange { Ok(()) } } + +#[cfg(test)] +mod tests { + use astria_core::primitive::v1::{ + Address, + TransactionId, + }; + + use super::*; + use crate::{ + accounts::AddressBytes as _, + address::StateWriteExt as _, + benchmark_and_test_utils::{ + assert_eyre_error, + astria_address, + ASTRIA_PREFIX, + }, + transaction::{ + StateWriteExt as _, + TransactionContext, + }, + }; + + #[tokio::test] + async fn sudo_address_change_executes_as_expected() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let old_sudo_address = astria_address(&[0; 20]); + let new_sudo_address = astria_address(&[1; 20]); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_sudo_address(old_sudo_address).unwrap(); + state.put_transaction_context(TransactionContext { + 
address_bytes: *old_sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + assert_eq!( + state.get_sudo_address().await.unwrap(), + *old_sudo_address.address_bytes() + ); + + let action = SudoAddressChange { + new_address: new_sudo_address, + }; + + action.check_and_execute(&mut state).await.unwrap(); + + assert_eq!( + state.get_sudo_address().await.unwrap(), + *new_sudo_address.address_bytes() + ); + } + + #[tokio::test] + async fn sudo_address_change_fails_if_new_address_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let different_prefix = "different_prefix"; + let new_sudo_address = Address::builder() + .prefix(different_prefix) + .array([1; 20]) + .try_build() + .unwrap(); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: [0; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = SudoAddressChange { + new_address: new_sudo_address, + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + &format!( + "address has prefix `{different_prefix}` but only `{ASTRIA_PREFIX}` is permitted" + ), + ); + } + + #[tokio::test] + async fn sudo_address_change_fails_if_signer_is_not_current_sudo() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let old_sudo_address = astria_address(&[0; 20]); + let new_sudo_address = astria_address(&[1; 20]); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state.put_sudo_address(old_sudo_address).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *new_sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 
32]), + position_in_transaction: 0, + }); + + let action = SudoAddressChange { + new_address: new_sudo_address, + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "signer is not the sudo key", + ); + } +} diff --git a/crates/astria-sequencer/src/action_handler/impls/validator_update.rs b/crates/astria-sequencer/src/action_handler/impls/validator_update.rs index 5ad1ceab0..baa5fe7b4 100644 --- a/crates/astria-sequencer/src/action_handler/impls/validator_update.rs +++ b/crates/astria-sequencer/src/action_handler/impls/validator_update.rs @@ -70,3 +70,192 @@ impl ActionHandler for ValidatorUpdate { Ok(()) } } + +#[cfg(test)] +mod tests { + use astria_core::{ + crypto::VerificationKey, + primitive::v1::TransactionId, + }; + + use super::*; + use crate::{ + accounts::AddressBytes as _, + authority::ValidatorSet, + benchmark_and_test_utils::{ + assert_eyre_error, + astria_address, + }, + transaction::{ + StateWriteExt as _, + TransactionContext, + }, + }; + + #[tokio::test] + async fn validator_update_add_executes_as_expected() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let sudo_address = astria_address(&[0; 20]); + state.put_sudo_address(sudo_address).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + assert_eq!(state.get_validator_updates().await.unwrap().len(), 0); + + let action = ValidatorUpdate { + verification_key: VerificationKey::try_from([0; 32]).unwrap(), + power: 100, + }; + + action.check_and_execute(&mut state).await.unwrap(); + + let validator_updates = state.get_validator_updates().await.unwrap(); + assert_eq!(validator_updates.len(), 1); + assert_eq!( + validator_updates.get(action.verification_key.address_bytes()), + Some(&action) + ); + } + + 
#[tokio::test] + async fn validator_update_remove_works_as_expected() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let sudo_address = astria_address(&[0; 20]); + state.put_sudo_address(sudo_address).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let validator_update_1 = ValidatorUpdate { + verification_key: VerificationKey::try_from([0; 32]).unwrap(), + power: 100, + }; + + let validator_update_2 = ValidatorUpdate { + verification_key: VerificationKey::try_from([1; 32]).unwrap(), + power: 100, + }; + + state + .put_validator_set(ValidatorSet::new_from_updates(vec![ + validator_update_1.clone(), + validator_update_2.clone(), + ])) + .unwrap(); + + assert_eq!(state.get_validator_set().await.unwrap().len(), 2); + + let action = ValidatorUpdate { + verification_key: validator_update_1.verification_key, + power: 0, + }; + + action.check_and_execute(&mut state).await.unwrap(); + + let validator_updates = state.get_validator_updates().await.unwrap(); + assert_eq!(validator_updates.len(), 1); + assert_eq!( + validator_updates.get(action.verification_key.address_bytes()), + Some(&action) + ); + } + + #[tokio::test] + async fn validator_update_fails_if_signer_is_not_sudo_address() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + state.put_sudo_address(astria_address(&[1; 20])).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: [0; 20], + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let action = ValidatorUpdate { + verification_key: VerificationKey::try_from([0; 32]).unwrap(), + power: 100, + }; + + assert_eyre_error( + 
&action.check_and_execute(&mut state).await.unwrap_err(), + "signer is not the sudo key", + ); + } + #[tokio::test] + async fn validator_update_remove_fails_if_validator_is_not_in_set() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let sudo_address = astria_address(&[0; 20]); + state.put_sudo_address(sudo_address).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + state + .put_validator_set(ValidatorSet::new_from_updates(vec![])) + .unwrap(); + + let action = ValidatorUpdate { + verification_key: VerificationKey::try_from([0; 32]).unwrap(), + power: 0, + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "cannot remove a non-existing validator", + ); + } + + #[tokio::test] + async fn validator_update_remove_fails_if_attempting_to_remove_only_validator() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + let sudo_address = astria_address(&[0; 20]); + state.put_sudo_address(sudo_address).unwrap(); + state.put_transaction_context(TransactionContext { + address_bytes: *sudo_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + + let validator_update_1 = ValidatorUpdate { + verification_key: VerificationKey::try_from([1; 32]).unwrap(), + power: 100, + }; + + state + .put_validator_set(ValidatorSet::new_from_updates(vec![ + validator_update_1.clone() + ])) + .unwrap(); + + let action = ValidatorUpdate { + verification_key: validator_update_1.verification_key, + power: 0, + }; + + assert_eyre_error( + &action.check_and_execute(&mut state).await.unwrap_err(), + "cannot remove the last validator", + ); + 
} +} diff --git a/crates/astria-sequencer/src/action_handler/mod.rs b/crates/astria-sequencer/src/action_handler/mod.rs index 0921cb82e..e035d17ca 100644 --- a/crates/astria-sequencer/src/action_handler/mod.rs +++ b/crates/astria-sequencer/src/action_handler/mod.rs @@ -92,3 +92,71 @@ where Ok(()) } + +#[cfg(test)] +mod tests { + use astria_core::primitive::v1::Address; + + use super::*; + use crate::{ + address::StateWriteExt as _, + benchmark_and_test_utils::{ + assert_eyre_error, + astria_address, + nria, + ASTRIA_PREFIX, + }, + }; + + #[tokio::test] + async fn check_transfer_fails_if_destination_is_not_base_prefixed() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + let different_prefix = "different_prefix"; + let to_address = Address::builder() + .prefix(different_prefix.to_string()) + .array([0; 20]) + .try_build() + .unwrap(); + let action = Transfer { + to: to_address, + fee_asset: nria().into(), + asset: nria().into(), + amount: 100, + }; + + assert_eyre_error( + &check_transfer(&action, &astria_address(&[1; 20]), &state) + .await + .unwrap_err(), + &format!( + "address has prefix `{different_prefix}` but only `{ASTRIA_PREFIX}` is permitted" + ), + ); + } + #[tokio::test] + + async fn check_transfer_fails_if_insufficient_funds_in_sender_account() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = cnidarium::StateDelta::new(snapshot); + + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + let action = Transfer { + to: astria_address(&[0; 20]), + fee_asset: nria().into(), + asset: nria().into(), + amount: 100, + }; + + assert_eyre_error( + &check_transfer(&action, &astria_address(&[1; 20]), &state) + .await + .unwrap_err(), + "insufficient funds for transfer", + ); + } +} diff --git 
a/crates/astria-sequencer/src/benchmark_and_test_utils.rs b/crates/astria-sequencer/src/benchmark_and_test_utils.rs index 3b94f6dbb..be2c448cf 100644 --- a/crates/astria-sequencer/src/benchmark_and_test_utils.rs +++ b/crates/astria-sequencer/src/benchmark_and_test_utils.rs @@ -29,8 +29,8 @@ pub(crate) fn verification_key(seed: u64) -> astria_core::crypto::VerificationKe #[cfg(test)] #[track_caller] -pub(crate) fn assert_eyre_error(error: &astria_eyre::eyre::Error, expected: &'static str) { - let msg = error.to_string(); +pub(crate) fn assert_eyre_error(error: &astria_eyre::eyre::Error, expected: &'_ str) { + let msg = format!("{error:?}"); assert!( msg.contains(expected), "error contained different message\n\texpected: {expected}\n\tfull_error: {msg}", From acb3dc1f190c67af2942accce624a95dc2d52aa5 Mon Sep 17 00:00:00 2001 From: Ethan Oroshiba Date: Mon, 27 Jan 2025 08:54:18 -0600 Subject: [PATCH 13/23] chore(sequencer): fix names to conform to Rust naming conventions (#1931) ## Summary Changed server names to conform to Rust naming conventions. ## Background Per Rust naming conventions [here](https://github.com/rust-lang/rfcs/blob/1c590ce05d676e72e2217845ee054758d3a6df34/text/0430-finalizing-naming-conventions.md?plain=1#L46-L47) acronyms in upper camel case should be treated as words. ## Changes - Renamed `RunningABCIServer` to `RunningAbciServer`. - Renamed `RunningGRPCServer` to `RunningGrpcServer`. - Removed aliases for the handles to these types. ## Testing Passing all tests, no additional tests needed. ## Changelogs No updates required. 
## Related Issues closes #1930 --- crates/astria-sequencer/src/sequencer.rs | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/crates/astria-sequencer/src/sequencer.rs b/crates/astria-sequencer/src/sequencer.rs index 7c427d3e7..520cc8f78 100644 --- a/crates/astria-sequencer/src/sequencer.rs +++ b/crates/astria-sequencer/src/sequencer.rs @@ -48,16 +48,13 @@ use crate::{ pub struct Sequencer; -type GRPCServerHandle = JoinHandle>; -type ABCIServerHandle = JoinHandle<()>; - -struct RunningGRPCServer { - pub handle: GRPCServerHandle, +struct RunningGrpcServer { + pub handle: JoinHandle>, pub shutdown_tx: oneshot::Sender<()>, } -struct RunningABCIServer { - pub handle: ABCIServerHandle, +struct RunningAbciServer { + pub handle: JoinHandle<()>, pub shutdown_rx: oneshot::Receiver<()>, } @@ -89,8 +86,8 @@ impl Sequencer { } async fn run_until_stopped( - abci_server: RunningABCIServer, - grpc_server: RunningGRPCServer, + abci_server: RunningAbciServer, + grpc_server: RunningGrpcServer, signals: &mut SignalReceiver, ) -> Result<()> { select! 
{ @@ -120,7 +117,7 @@ impl Sequencer { async fn initialize( config: Config, metrics: &'static Metrics, - ) -> Result<(RunningGRPCServer, RunningABCIServer)> { + ) -> Result<(RunningGrpcServer, RunningAbciServer)> { cnidarium::register_metrics(); register_histogram_global("cnidarium_get_raw_duration_seconds"); register_histogram_global("cnidarium_nonverifiable_get_raw_duration_seconds"); @@ -191,11 +188,11 @@ impl Sequencer { let _ = abci_shutdown_tx.send(()); }); - let grpc_server = RunningGRPCServer { + let grpc_server = RunningGrpcServer { handle: grpc_server_handle, shutdown_tx: grpc_shutdown_tx, }; - let abci_server = RunningABCIServer { + let abci_server = RunningAbciServer { handle: abci_server_handle, shutdown_rx: abci_shutdown_rx, }; From 7ded3a9e2d1e24b48848cba061f98d4868547d9d Mon Sep 17 00:00:00 2001 From: Richard Janis Goldschmidt Date: Tue, 28 Jan 2025 19:17:32 +0100 Subject: [PATCH 14/23] chore(ci): update CODEOWNERS (#1935) ## Summary Update CODEOWNERS to allocate ownership by project. ## Background The previous CODEOWNERS file assigned reviews mainly by language (which is rust), which is no longer workable for a growing project leading to a) review bottlenecks, and b) unclear responsibilities over project-local styles and cognitive models on how to evolve the codebase. The present changes have a BDFL model in mind, where the CODEOWNERS get the final vote over modifications to their crates. ## Changes - Remove any `@astriaorg/rust-reviewers` from the CODEOWNERS file - Allocate crate ownership by project to @Fraser999, @noot, @SuperFluffy, and @joroshiba. 
- Remove the `*.rs` catchall - Assign rust-specific config files (`Cargo.toml`, and `Cargo.lock`, `rustfmt.toml`, `nextest.toml`, `rust-toolchain`, `.cargo/`) relative to CODEOWNERS at the repository root to @SuperFluffy and @Fraser999 - Reassign ownership of the workflows `.github/workflows/test.yml` and `.github/workflows/lint.yml` to @Fraser999 and @SuperFluffy ## Testing Github confirms this is a valid CODEOWNERS file. Other than that this cannot be tested. ## Changelogs No updates required. --- CODEOWNERS | 49 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 38 insertions(+), 11 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index 5fb071874..c07487cbc 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,6 +1,33 @@ -* @joroshiba @SuperFluffy @noot +* @joroshiba -*.snap @joroshiba +astria-bridge-contracts/ @SuperFluffy @joroshiba +astria-bridge-withdrawer/ @SuperFluffy @joroshiba +astria-build-info/ @SuperFluffy @joroshiba +astria-cli/ @SuperFluffy @joroshiba +astria-composer/ @SuperFluffy @joroshiba +astria-conductor/ @SuperFluffy @joroshiba +astria-config/ @SuperFluffy @joroshiba +astria-core/ @SuperFluffy @fraser999 +astria-core-address/ @SuperFluffy @fraser999 +astria-core-consts/ @SuperFluffy @fraser999 +astria-core-crypto/ @SuperFluffy @fraser999 +astria-eyre/ @SuperFluffy @fraser999 +astria-grpc-mock/ @SuperFluffy @fraser999 +astria-grpc-mock-test/ @SuperFluffy @fraser999 +astria-grpc-mock-test-codegen/ @SuperFluffy @fraser999 +astria-merkle/ @SuperFluffy @joroshiba +astria-sequencer/ @noot @fraser999 +astria-sequencer-client/ @SuperFluffy @fraser999 +astria-sequencer-relayer/ @fraser999 @SuperFluffy +astria-sequencer-utils/ @fraser999 @noot +astria-telemetry/ @SuperFluffy @fraser999 +astria-test-utils/ @SuperFluffy @joroshiba + +tools/astria-address @SuperFluffy @joroshiba +tools/protobuf-compiler @SuperFluffy @joroshiba +tools/solidity-compiler @SuperFluffy @joroshiba + +*.snap @joroshiba @SuperFluffy @fraser999 specs/ 
@astriaorg/engineering justfile @astriaorg/engineering @@ -10,7 +37,9 @@ taplo.toml @astriaorg/engineering .gitignore @astriaorg/engineering /.github/ @astriaorg/infra -/containerfiles/ @astriaorg/infra +/.github/workflows/lint.yml @superfluffy @fraser999 +/.github/workflows/test.yml @superfluffy @fraser999 +/containerfiles/ @superfluffy @fraser999 /charts/ @astriaorg/infra /dev/ @astriaorg/infra .dockerignore @astriaorg/infra @@ -22,11 +51,9 @@ buf.lock @astriaorg/api-reviewers buf.yaml @astriaorg/api-reviewers buf.work.yaml @astriaorg/api-reviewers -*.rs @astriaorg/rust-reviewers -Cargo.toml @astriaorg/rust-reviewers -Cargo.lock @astriaorg/rust-reviewers -rust-toolchain @astriaorg/rust-reviewers -.cargo/ @astriaorg/rust-reviewers -nextest.toml @astriaorg/rust-reviewers -rusfmt.toml @astriaorg/rust-reviewers -crates/*/CHANGELOG.md @astriaorg/rust-reviewers +/Cargo.toml @SuperFluffy @fraser999 +/Cargo.lock @SuperFluffy @fraser999 +/rust-toolchain @SuperFluffy @fraser999 +/.cargo/ @SuperFluffy @fraser999 +/nextest.toml @SuperFluffy @fraser999 +/rusfmt.toml @SuperFluffy @fraser999 From 7450f84bf57b1028bec6c0594512e60ee0f72ae4 Mon Sep 17 00:00:00 2001 From: Joshua Dechant Date: Fri, 31 Jan 2025 12:46:45 -0500 Subject: [PATCH 15/23] feat(charts): Snapshots for geth nodes (#1906) ## Summary Allows restoring a geth node from a snapshot, and creating snapshots on a schedule with the option to upload them to remote storage via rclone. ## Background Spinning up a geth node from a snapshot is more ideal than syncing from genesis. We won't always have a PVC snapshot to restore from. This allows any geth node to migrate to Astria infra, and is needed to migrate Forma testnet. 
## Changes - Added a restore from snapshot - snapshots can be loaded from any compatible rclone source (including local) - rclone config allow snapshots to be loaded from private sources - optional sha256 checksum to verify the snapshot after download - Added scheduled snapshot creation - Cron schedule - Retention count for # of snapshots to keep on local disk - Optional upload of snapshot to a remote destination - rclone config management via secrets ## Testing Tested on local k8s dev cluster, with and without rclone w/ an AWS S3 config --- charts/evm-rollup/Chart.yaml | 2 +- .../files/scripts/create-snapshot.sh | 31 ++++++++++ charts/evm-rollup/files/scripts/init-geth.sh | 4 ++ .../files/scripts/restore-snapshot.sh | 37 +++++++++++ charts/evm-rollup/templates/configmap.yaml | 32 ++++++++++ charts/evm-rollup/templates/cronjobs.yaml | 60 ++++++++++++++++++ .../templates/secretproviderclass.yaml | 14 +++++ charts/evm-rollup/templates/statefulsets.yaml | 32 ++++++++++ charts/evm-rollup/values.yaml | 61 ++++++++++++++++++- charts/evm-stack/Chart.lock | 6 +- charts/evm-stack/Chart.yaml | 4 +- 11 files changed, 276 insertions(+), 7 deletions(-) create mode 100644 charts/evm-rollup/files/scripts/create-snapshot.sh create mode 100644 charts/evm-rollup/files/scripts/restore-snapshot.sh create mode 100644 charts/evm-rollup/templates/cronjobs.yaml create mode 100644 charts/evm-rollup/templates/secretproviderclass.yaml diff --git a/charts/evm-rollup/Chart.yaml b/charts/evm-rollup/Chart.yaml index 482a16256..fd614d54d 100644 --- a/charts/evm-rollup/Chart.yaml +++ b/charts/evm-rollup/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.1 +version: 1.1.0 # This is the version number of the application being deployed. 
This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/charts/evm-rollup/files/scripts/create-snapshot.sh b/charts/evm-rollup/files/scripts/create-snapshot.sh new file mode 100644 index 000000000..96ad83aaa --- /dev/null +++ b/charts/evm-rollup/files/scripts/create-snapshot.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +set -o errexit -o nounset + +SNAPSHOT_PATH="{{ tpl .Values.config.geth.snapshot.create.storage.path $ }}" +SNAPSHOT_FILE="{{ include "rollup.name" . }}-snapshot-$(date +%Y-%m-%d-%T).tar.gz" +RETENTION_COUNT="{{ .Values.config.geth.snapshot.create.retentionCount }}" + +echo "💿 Creating snapshot at $SNAPSHOT_FILE" + +mkdir -p "$SNAPSHOT_PATH" +tar -zcvf $SNAPSHOT_PATH/$SNAPSHOT_FILE \ + -C $data_dir/geth \ + --exclude='chaindata/LOCK' \ + --exclude='chaindata/ancient/chain/FLOCK' \ + chaindata + +echo "📦 Snapshot created successfully" + +{{- if .Values.config.geth.snapshot.create.storage.upload.enabled }} +echo "⬆️ Uploading snapshot to {{ .Values.config.geth.snapshot.create.storage.upload.destination }}" +rclone copy -vv \ + $SNAPSHOT_PATH/$SNAPSHOT_FILE \ + {{ .Values.config.geth.snapshot.create.storage.upload.destination }} +{{- end }} + +echo "🧹 Cleaning up old snapshots (keeping last $RETENTION_COUNT)" +cd "$SNAPSHOT_PATH" +ls -t snapshot-*.tar.gz 2>/dev/null | tail -n +$((RETENTION_COUNT + 1)) | xargs -r rm -- + +echo "Done 🎉" diff --git a/charts/evm-rollup/files/scripts/init-geth.sh b/charts/evm-rollup/files/scripts/init-geth.sh index 52318aeca..6c01916d9 100755 --- a/charts/evm-rollup/files/scripts/init-geth.sh +++ b/charts/evm-rollup/files/scripts/init-geth.sh @@ -19,5 +19,9 @@ elif ! 
cmp -s "/scripts/geth-genesis.json" "$home_dir/genesis.json"; then cp /scripts/geth-genesis.json $home_dir/genesis.json + exec geth --datadir "$data_dir/" init $home_dir/genesis.json +elif [ "{{ .Values.config.geth.snapshot.restore.enabled }}" = "true" ]; then + echo "Snapshot restore enabled, running geth init..." + exec geth --datadir "$data_dir/" init $home_dir/genesis.json fi diff --git a/charts/evm-rollup/files/scripts/restore-snapshot.sh b/charts/evm-rollup/files/scripts/restore-snapshot.sh new file mode 100644 index 000000000..69d6b1b82 --- /dev/null +++ b/charts/evm-rollup/files/scripts/restore-snapshot.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +set -o errexit -o nounset + +rm -rf "$data_dir/geth" +mkdir -p "$data_dir/geth" + +SNAPSHOT="{{ .Values.config.geth.snapshot.restore.source }}" +SNAPSHOT_FILE=$(basename "$SNAPSHOT") + +echo "💿 Copying snapshot from $SNAPSHOT" + +rclone copy -vv \ + {{ .Values.config.geth.snapshot.restore.source }} \ + "$data_dir/snapshot-load/" + +{{if .Values.config.geth.snapshot.restore.checksum -}} +echo "🕵️ Verifying snapshot checksum..." +EXPECTED_CHECKSUM="{{ .Values.config.geth.snapshot.restore.checksum }}" +ACTUAL_CHECKSUM=$(sha256sum "$data_dir/snapshot-load/$SNAPSHOT_FILE" | cut -d ' ' -f 1) + +if [ "$EXPECTED_CHECKSUM" != "$ACTUAL_CHECKSUM" ]; then + echo "🚨 Checksum verification failed!" + echo "Expected: $EXPECTED_CHECKSUM" + echo "Got: $ACTUAL_CHECKSUM" + exit 1 +fi +echo "✅ Checksum verified successfully" +{{- end}} + +echo "Extracting snapshot..." +tar -xvf $data_dir/snapshot-load/$SNAPSHOT_FILE -C $data_dir/geth + +echo "🧹 Cleaning up..." 
+rm -rf $data_dir/snapshot-load + +echo "Snapshot loaded successfully 🎉" diff --git a/charts/evm-rollup/templates/configmap.yaml b/charts/evm-rollup/templates/configmap.yaml index bf733912b..f912d50bd 100644 --- a/charts/evm-rollup/templates/configmap.yaml +++ b/charts/evm-rollup/templates/configmap.yaml @@ -53,4 +53,36 @@ data: {{- tpl (.Files.Get "files/genesis/geth-genesis.json") $ | nindent 4 }} init-geth.sh: | {{- tpl (.Files.Get "files/scripts/init-geth.sh") $ | nindent 4 }} + {{- if .Values.config.geth.snapshot.create.enabled }} + create-snapshot.sh: | + {{- tpl (.Files.Get "files/scripts/create-snapshot.sh") $ | nindent 4 }} + {{- end }} + {{- if .Values.config.geth.snapshot.restore.enabled }} + restore-snapshot.sh: | + {{- tpl (.Files.Get "files/scripts/restore-snapshot.sh") $ | nindent 4 }} + {{- end }} +--- +{{- if or + (and + .Values.config.geth.snapshot.create.enabled + .Values.config.geth.snapshot.create.storage.upload.enabled + ) + (.Values.config.geth.snapshot.restore.enabled) +}} +{{- if not .Values.secretProvider.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "rollup.name" . }}-rclone-config + namespace: {{ include "rollup.namespace" . }} +data: + rclone.conf: | + {{- range $section, $config := .Values.rclone.config.devContent }} + [{{ $section }}] + {{- range $key, $value := $config }} + {{ $key | snakecase }} = {{ $value }} + {{- end }} + {{- end }} --- +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/evm-rollup/templates/cronjobs.yaml b/charts/evm-rollup/templates/cronjobs.yaml new file mode 100644 index 000000000..976fc90d6 --- /dev/null +++ b/charts/evm-rollup/templates/cronjobs.yaml @@ -0,0 +1,60 @@ +{{- if .Values.config.geth.snapshot.create.enabled }} +--- +kind: CronJob +apiVersion: batch/v1 +metadata: + name: {{ include "rollup.name" . }}-geth-snapshots + labels: + app: {{ include "rollup.appName" . }} + namespace: {{ include "rollup.namespace" . 
}} +spec: + schedule: "{{ .Values.config.geth.snapshot.create.schedule }}" + jobTemplate: + spec: + template: + spec: + containers: + - name: create-snapshot + image: {{ .Values.images.snapshot.repo }}:{{ .Values.images.snapshot.tag }} + imagePullPolicy: {{ .Values.images.snapshot.pullPolicy }} + command: [ "/scripts/create-snapshot.sh" ] + envFrom: + - configMapRef: + name: {{ include "rollup.name" . }}-geth-env + volumeMounts: + {{- if .Values.config.geth.snapshot.create.storage.upload.enabled }} + - mountPath: /root/.config/rclone/ + name: {{ include "rollup.name" . }}-rclone-config-volume + {{- end }} + - mountPath: /scripts/ + name: {{ include "rollup.name" . }}-executor-scripts-volume + - mountPath: /home/geth + name: {{ include "rollup.name" $ }}-rollup-shared-storage-vol + subPath: {{ include "rollup.name" . }}/executor + restartPolicy: OnFailure + volumes: + {{- if .Values.config.geth.snapshot.create.storage.upload.enabled }} + - name: {{ include "rollup.name" . }}-rclone-config-volume + {{- if .Values.secretProvider.enabled }} + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: {{ include "rollup.name" . }}-secrets-rclone + {{- else }} + configMap: + name: {{ include "rollup.name" . }}-rclone-config + {{- end }} + {{- end }} + - name: {{ include "rollup.name" . }}-executor-scripts-volume + configMap: + name: {{ include "rollup.name" . 
}}-executor-scripts + defaultMode: 0500 + - name: {{ include "rollup.name" $ }}-rollup-shared-storage-vol + {{- if .Values.storage.enabled }} + persistentVolumeClaim: + claimName: {{ include "rollup.name" $ }}-rollup-shared-storage-pvc-geth + {{- else }} + emptyDir: {} + {{- end }} +{{- end }} diff --git a/charts/evm-rollup/templates/secretproviderclass.yaml b/charts/evm-rollup/templates/secretproviderclass.yaml new file mode 100644 index 000000000..34ed7cfe7 --- /dev/null +++ b/charts/evm-rollup/templates/secretproviderclass.yaml @@ -0,0 +1,14 @@ +--- +{{- if .Values.secretProvider.enabled }} +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: {{ include "rollup.name" . }}-secrets-rclone + namespace: {{ include "rollup.namespace" . }} +spec: + provider: {{ .Values.secretProvider.provider }} + parameters: + {{- $_ := set $ "key" .Values.rclone.config.secret }} + {{- tpl $.Values.secretProvider.parametersTemplate $ | nindent 4 }} +--- +{{- end }} diff --git a/charts/evm-rollup/templates/statefulsets.yaml b/charts/evm-rollup/templates/statefulsets.yaml index d2ddcd9e5..408638328 100644 --- a/charts/evm-rollup/templates/statefulsets.yaml +++ b/charts/evm-rollup/templates/statefulsets.yaml @@ -17,6 +17,25 @@ spec: app: {{ include "rollup.appName" . }} spec: initContainers: + {{- if .Values.config.geth.snapshot.restore.enabled }} + - name: restore-snapshot + image: {{ .Values.images.snapshot.repo }}:{{ .Values.images.snapshot.tag }} + imagePullPolicy: {{ .Values.images.snapshot.pullPolicy }} + command: [ "/scripts/restore-snapshot.sh" ] + envFrom: + - configMapRef: + name: {{ include "rollup.name" . }}-geth-env + volumeMounts: + {{- if .Values.config.geth.snapshot.restore.enabled }} + - mountPath: /root/.config/rclone/ + name: {{ include "rollup.name" . }}-rclone-config-volume + {{- end }} + - mountPath: /scripts/ + name: {{ include "rollup.name" . 
}}-executor-scripts-volume + - mountPath: /home/geth + name: {{ include "rollup.name" $ }}-rollup-shared-storage-vol + subPath: {{ include "rollup.name" . }}/executor + {{- end }} {{- if .Values.config.geth.purgeMempool }} - name: purge-mempool image: {{ include "rollup.image" . }} @@ -91,6 +110,19 @@ spec: name: conductor-metr {{- end }} volumes: + {{- if .Values.config.geth.snapshot.restore.enabled }} + - name: {{ include "rollup.name" . }}-rclone-config-volume + {{- if .Values.secretProvider.enabled }} + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: {{ include "rollup.name" . }}-secrets-rclone + {{- else }} + configMap: + name: {{ include "rollup.name" . }}-rclone-config + {{- end }} + {{- end }} - name: {{ include "rollup.name" . }}-executor-scripts-volume configMap: name: {{ include "rollup.name" . }}-executor-scripts diff --git a/charts/evm-rollup/values.yaml b/charts/evm-rollup/values.yaml index 5437e0c07..a2b3b96e0 100644 --- a/charts/evm-rollup/values.yaml +++ b/charts/evm-rollup/values.yaml @@ -18,7 +18,10 @@ images: pullPolicy: IfNotPresent tag: 1.0.0 devTag: latest - + snapshot: + repo: rclone/rclone + pullPolicy: IfNotPresent + tag: 1.69.0 genesis: ## These values are used to configure the genesis block of the rollup chain @@ -105,6 +108,27 @@ config: archiveNode: false # Set to true to clear the mempool on startup/restart purgeMempool: false + snapshot: + # Load from snapshot + restore: + enabled: false + # rclone source to download the snapshot from (in the form of remote:path) + source: "" + # Optional sha256 checksum to verify the snapshot + checksum: "" + # Create snapshots on a schedule + create: + enabled: false + # Cron format (default: daily at midnight) + schedule: "@daily" + # Number of snapshots to keep on disk + retentionCount: 7 + storage: + path: '{{ include "rollup.gethDataDir" . 
}}/snapshots' + upload: + enabled: false + # rclone destination to upload the snapshot to (in the form of remote:path) + destination: "" flags: - name: datadir value: '{{ include "rollup.gethDataDir" . }}' @@ -189,6 +213,30 @@ config: rpc: "" token: "" +rclone: + config: + # Content to be written to rclone.conf file + # Can be used to configure remote storage providers like S3, GCS, etc + # Example: + # [snapshots] + # type = s3 + # provider = AWS + # region = us-east-2 + # access_key_id = key + # secret_access_key = secret + # no_check_bucket = true + devContent: + snapshots: + type: s3 + provider: AWS + region: us-east-2 + accessKeyId: "" + secretAccessKey: "" + noCheckBucket: true + # The secret resource name for the rclone config + secret: + resourceName: "projects/$PROJECT_ID/secrets/rclone-config/versions/latest" + metrics: # set to enable prometheus metrics enabled: false @@ -300,6 +348,17 @@ storage: persistentVolumeName: "rollup-shared-storage" path: "/data/rollup-data" +# When deploying in a production environment should use a secret provider +# This is configured for use with GCP, need to set own resource names +# and keys +secretProvider: + enabled: false + provider: gcp + parametersTemplate: |- + secrets: | + - resourceName: {{ .key.resourceName }} + fileName: "{{ .key.filename }}" + # Default service ports ports: jsonRPC: 8545 diff --git a/charts/evm-stack/Chart.lock b/charts/evm-stack/Chart.lock index 55dd9cec3..297cbc473 100644 --- a/charts/evm-stack/Chart.lock +++ b/charts/evm-stack/Chart.lock @@ -4,7 +4,7 @@ dependencies: version: 0.4.0 - name: evm-rollup repository: file://../evm-rollup - version: 1.0.1 + version: 1.1.0 - name: composer repository: file://../composer version: 1.0.0 @@ -20,5 +20,5 @@ dependencies: - name: blockscout-stack repository: https://blockscout.github.io/helm-charts version: 1.6.8 -digest: sha256:371c35af96fc5d82aa2b4d894dc7d2e11e150380fd6f09eb0ca94b4202b24698 -generated: "2025-01-08T11:22:41.273867-08:00" +digest: 
sha256:c437d6967341b9bb6e10a809ce13e81130bfb95fb111c8712088ab443adea3f1 +generated: "2025-01-28T23:46:42.687706-05:00" diff --git a/charts/evm-stack/Chart.yaml b/charts/evm-stack/Chart.yaml index 7e1d7cad5..17bceddce 100644 --- a/charts/evm-stack/Chart.yaml +++ b/charts/evm-stack/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.7 +version: 1.0.8 dependencies: - name: celestia-node @@ -23,7 +23,7 @@ dependencies: repository: "file://../celestia-node" condition: celestia-node.enabled - name: evm-rollup - version: 1.0.1 + version: 1.1.0 repository: "file://../evm-rollup" - name: composer version: 1.0.0 From 141988f0c0c41dc6ed3fe72e3fe92adb353c826d Mon Sep 17 00:00:00 2001 From: Richard Janis Goldschmidt Date: Mon, 3 Feb 2025 13:52:24 +0100 Subject: [PATCH 16/23] fix(conductor): remove panic source on shutdown (#1919) ## Summary Removes a panic source in the conductor frontend by moving around the restart logic. ## Background `Conductor::run_until_stopped` contains a loop to allow restarting its backend business logic. This loop delegates to a `Conductor::restart_or_shutdown` method to execute the restart of the backend, and then immediately reenters the loop regardless of whether a restart happened or not. While refactoring the inner conductor business logic (which led to slightly different return values in turn affecting the blackbox test assertions) it was found that this behavior can lead to the conductor backend panicking because it's being polled again even after running to completion. ## Changes - Rename `Conductor::shutdown_or_restart` to `Conductor::restart_or_shutdown` to match the enum it's branching on. - Execute the restart inside the frontend loop so as not to reenter it, avoiding polling the already completed backend task.
## Testing This can't really be tested. But it does fix the panic in the refactor PR. ## Changelogs Changelogs updated. --- crates/astria-conductor/CHANGELOG.md | 1 + .../astria-conductor/src/conductor/inner.rs | 10 +++++ crates/astria-conductor/src/conductor/mod.rs | 39 ++++++++----------- 3 files changed, 28 insertions(+), 22 deletions(-) diff --git a/crates/astria-conductor/CHANGELOG.md b/crates/astria-conductor/CHANGELOG.md index 687145fa5..2210b7b33 100644 --- a/crates/astria-conductor/CHANGELOG.md +++ b/crates/astria-conductor/CHANGELOG.md @@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Update `idna` dependency to resolve cargo audit warning [#1869](https://github.com/astriaorg/astria/pull/1869). +- Remove panic source on shutdown [#1919](https://github.com/astriaorg/astria/pull/1919). ## [1.0.0] - 2024-10-25 diff --git a/crates/astria-conductor/src/conductor/inner.rs b/crates/astria-conductor/src/conductor/inner.rs index c52bd3253..b6dc9d3eb 100644 --- a/crates/astria-conductor/src/conductor/inner.rs +++ b/crates/astria-conductor/src/conductor/inner.rs @@ -44,6 +44,16 @@ pub(super) enum RestartOrShutdown { Shutdown, } +impl std::fmt::Display for RestartOrShutdown { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let msg = match self { + RestartOrShutdown::Restart => "restarting", + RestartOrShutdown::Shutdown => "shutting down", + }; + f.write_str(msg) + } +} + enum ExitReason { ShutdownSignal, TaskFailed { diff --git a/crates/astria-conductor/src/conductor/mod.rs b/crates/astria-conductor/src/conductor/mod.rs index f7450d066..9c94fab0c 100644 --- a/crates/astria-conductor/src/conductor/mod.rs +++ b/crates/astria-conductor/src/conductor/mod.rs @@ -8,6 +8,7 @@ use std::{ use astria_eyre::eyre::{ self, Result, + WrapErr as _, }; use inner::{ ConductorInner, @@ -20,10 +21,7 @@ use tokio::task::{ JoinHandle, }; use tokio_util::sync::CancellationToken; -use tracing::{ - 
info, - instrument, -}; +use tracing::instrument; use crate::{ metrics::Metrics, @@ -110,42 +108,39 @@ impl Conductor { async fn run_until_stopped(mut self) -> eyre::Result<()> { loop { let exit_reason = (&mut self.inner).await; - self.shutdown_or_restart(exit_reason).await?; - if self.shutdown_token.is_cancelled() { - break; + match self.restart_or_shutdown(exit_reason).await? { + RestartOrShutdown::Restart => self.restart()?, + RestartOrShutdown::Shutdown => break Ok(()), } } - Ok(()) } /// Creates and spawns a new [`ConductorInner`] task with the same configuration, replacing /// the previous one. This function should only be called after a graceful shutdown of the /// inner conductor task. - fn restart(&mut self) { - info!("restarting conductor"); + #[instrument(skip_all, err)] + fn restart(&mut self) -> eyre::Result<()> { let new_handle = ConductorInner::spawn( self.cfg.clone(), self.metrics, self.shutdown_token.child_token(), ) - .expect("failed to create new conductor after restart"); + .wrap_err("failed to instantiate Conductor for restart")?; self.inner = new_handle; + Ok(()) } - /// Initiates either a restart or a shutdown of all conductor tasks. - #[instrument(skip_all, err)] - async fn shutdown_or_restart( + /// Reports if conductor will shutdown or restart. + /// + /// This method only exists to encapsulate tracing and generate + /// events for restart, shutdown, or errors. 
+ #[instrument(skip_all, err, ret(Display))] + async fn restart_or_shutdown( &mut self, exit_reason: Result, JoinError>, - ) -> eyre::Result<&'static str> { + ) -> eyre::Result { match exit_reason { - Ok(Ok(restart_or_shutdown)) => match restart_or_shutdown { - RestartOrShutdown::Restart => { - self.restart(); - return Ok("restarting"); - } - RestartOrShutdown::Shutdown => Ok("shutting down"), - }, + Ok(Ok(restart_or_shutdown)) => Ok(restart_or_shutdown), Ok(Err(err)) => Err(err.wrap_err("conductor exited with an error")), Err(err) => Err(eyre::Report::new(err).wrap_err("conductor panicked")), } From 77d0217a86f4718d57b39381acaa70612da6632d Mon Sep 17 00:00:00 2001 From: Richard Janis Goldschmidt Date: Mon, 3 Feb 2025 18:21:05 +0100 Subject: [PATCH 17/23] refactor(conductor): make firm, soft readers subtasks (#1926) ## Summary Makes the firm and soft readers subtasks of the main execution loop. ## Background Making the celestia (firm) and astria (soft) readers subtasks of the executor loop is a more faithful representation of their dependencies: 1. the execution loop can run with either or both present. 2. but the readers cannot run without the execution loop present (the tasks do not run at the same level of privilege). 3. to fully initialize, the readers depend on data provided via the execution loop (rollup genesis info and commitment state). This patch then spins up the readers during initialization of the execution event loop, which allows removing a lot of complexity from the conductor codebase: 1. the readers need not explicitly wait for the state to be initialized but receive an already initialized state. 2. there is no need for a bespoke channel to dynamically set permits (this was needed for dynamic backpressure handling by setting the channel capacity after reading the celestia variance from the rollup): a standard mpsc channel can be instantiated and then passed to the (soft/sequencer) reader.
executor::Initialized::run delegates to executor::Initialized::run_event_loop to separate the shutdown token from the other arms of the select macro - this way, an else => {} arm can be introduced that shuts down the executor as a fallback ## Changes - make the firm (celestia) and soft (sequencer) readers subtasks of the executor task - remove the `JoinMap` from the `conductor::Inner` business logic as this is now tracked by the executor task - initialize the reader tasks only after the initial rollup node state is received - in the executor task, separate the test for shutdown token cancellation from its main event loop so that the `tokio::select!` macro's `else => {}` arm can be used as a fallback. - remove the bespoke `executor::channel` for dynamically setting channel capacity at runtime (no longer necessary; the channel is initialized later after we know its capacity) - simplify the `State` watch channel notifying the readers of the latest rollup state by removing the `StateIsInit` and `StateNotInit` markers (the readers now get a fully initialized watch channel). ## Testing These changes should be transparent and not noticeable. The blackbox tests still pass, which implies that the (tested) conductor behavior works. ## Changelogs **NOTE**: Update before merge Ensure all relevant changelog files are updated as necessary. See [keepachangelog](https://keepachangelog.com/en/1.1.0/#how) for change categories. Replace this text with e.g. "Changelogs updated." or "No updates required." to acknowledge changelogs have been considered. ## Related Issues **NOTE**: Link the relevant issue? Link any issues that are related, prefer full GitHub links.
closes --- .../astria-conductor/src/celestia/builder.rs | 16 +- crates/astria-conductor/src/celestia/mod.rs | 120 ++--- .../astria-conductor/src/celestia/verify.rs | 8 +- .../astria-conductor/src/conductor/inner.rs | 279 +++------- crates/astria-conductor/src/conductor/mod.rs | 26 +- crates/astria-conductor/src/config.rs | 10 + .../astria-conductor/src/executor/builder.rs | 74 +-- .../astria-conductor/src/executor/channel.rs | 241 --------- crates/astria-conductor/src/executor/mod.rs | 481 ++++++++---------- crates/astria-conductor/src/executor/state.rs | 122 +++-- crates/astria-conductor/src/executor/tests.rs | 7 +- .../astria-conductor/src/sequencer/builder.rs | 13 +- crates/astria-conductor/src/sequencer/mod.rs | 96 ++-- .../tests/blackbox/firm_only.rs | 2 +- 14 files changed, 481 insertions(+), 1014 deletions(-) delete mode 100644 crates/astria-conductor/src/executor/channel.rs diff --git a/crates/astria-conductor/src/celestia/builder.rs b/crates/astria-conductor/src/celestia/builder.rs index ef33a28cb..eb03d9440 100644 --- a/crates/astria-conductor/src/celestia/builder.rs +++ b/crates/astria-conductor/src/celestia/builder.rs @@ -10,9 +10,12 @@ use jsonrpsee::http_client::HttpClient as CelestiaClient; use tendermint_rpc::HttpClient as SequencerClient; use tokio_util::sync::CancellationToken; -use super::Reader; +use super::{ + Reader, + ReconstructedBlock, +}; use crate::{ - executor, + executor::StateReceiver, metrics::Metrics, }; @@ -20,7 +23,8 @@ pub(crate) struct Builder { pub(crate) celestia_block_time: Duration, pub(crate) celestia_http_endpoint: String, pub(crate) celestia_token: Option, - pub(crate) executor: executor::Handle, + pub(crate) firm_blocks: tokio::sync::mpsc::Sender>, + pub(crate) rollup_state: StateReceiver, pub(crate) sequencer_cometbft_client: SequencerClient, pub(crate) sequencer_requests_per_second: u32, pub(crate) expected_celestia_chain_id: String, @@ -36,13 +40,14 @@ impl Builder { celestia_block_time, celestia_http_endpoint, 
celestia_token, - executor, sequencer_cometbft_client, sequencer_requests_per_second, expected_celestia_chain_id, expected_sequencer_chain_id, shutdown, metrics, + firm_blocks, + rollup_state, } = self; let celestia_client = create_celestia_client(celestia_http_endpoint, celestia_token) @@ -51,7 +56,8 @@ impl Builder { Ok(Reader { celestia_block_time, celestia_client, - executor, + firm_blocks, + rollup_state, sequencer_cometbft_client, sequencer_requests_per_second, expected_celestia_chain_id, diff --git a/crates/astria-conductor/src/celestia/mod.rs b/crates/astria-conductor/src/celestia/mod.rs index 9acfc2c50..07b1633b9 100644 --- a/crates/astria-conductor/src/celestia/mod.rs +++ b/crates/astria-conductor/src/celestia/mod.rs @@ -58,15 +58,11 @@ use tracing::{ trace, trace_span, warn, + Instrument as _, }; use crate::{ block_cache::GetSequencerHeight, - executor::{ - FirmSendError, - FirmTrySendError, - StateIsInit, - }, metrics::Metrics, utils::flatten, }; @@ -95,10 +91,7 @@ use self::{ BlobVerifier, }, }; -use crate::{ - block_cache::BlockCache, - executor, -}; +use crate::block_cache::BlockCache; /// Sequencer Block information reconstructed from Celestia blobs. /// @@ -138,8 +131,11 @@ pub(crate) struct Reader { /// Client to fetch heights and blocks from Celestia. celestia_client: CelestiaClient, - /// The channel used to send messages to the executor task. - executor: executor::Handle, + /// The channel to forward firm blocks to the executor. + firm_blocks: mpsc::Sender>, + + /// The channel to read updates of the rollup state from. + rollup_state: crate::executor::StateReceiver, /// The client to get the sequencer namespace and verify blocks. 
sequencer_cometbft_client: SequencerClient, @@ -162,7 +158,7 @@ pub(crate) struct Reader { impl Reader { pub(crate) async fn run_until_stopped(mut self) -> eyre::Result<()> { - let (executor, sequencer_chain_id) = select!( + let sequencer_chain_id = select!( () = self.shutdown.clone().cancelled_owned() => { info_span!("conductor::celestia::Reader::run_until_stopped").in_scope(|| info!("received shutdown signal while waiting for Celestia reader task to initialize") @@ -175,16 +171,14 @@ impl Reader { } ); - RunningReader::from_parts(self, executor, sequencer_chain_id) + RunningReader::from_parts(self, sequencer_chain_id) .wrap_err("failed entering run loop")? .run_until_stopped() .await } #[instrument(skip_all, err)] - async fn initialize( - &mut self, - ) -> eyre::Result<(executor::Handle, tendermint::chain::Id)> { + async fn initialize(&mut self) -> eyre::Result { let validate_celestia_chain_id = async { let actual_celestia_chain_id = get_celestia_chain_id(&self.celestia_client) .await @@ -196,14 +190,8 @@ impl Reader { `{actual_celestia_chain_id}`" ); Ok(()) - }; - - let wait_for_init_executor = async { - self.executor - .wait_for_init() - .await - .wrap_err("handle to executor failed while waiting for it being initialized") - }; + } + .in_current_span(); let get_and_validate_sequencer_chain_id = async { let actual_sequencer_chain_id = @@ -217,18 +205,18 @@ impl Reader { actual: `{actual_sequencer_chain_id}`" ); Ok(actual_sequencer_chain_id) - }; + } + .in_current_span(); try_join!( validate_celestia_chain_id, - wait_for_init_executor, get_and_validate_sequencer_chain_id ) - .map(|((), executor_init, sequencer_chain_id)| (executor_init, sequencer_chain_id)) + .map(|((), sequencer_chain_id)| sequencer_chain_id) } } -#[instrument(skip_all, err)] +#[instrument(skip_all, err, ret(Display))] async fn get_celestia_chain_id( celestia_client: &CelestiaClient, ) -> eyre::Result { @@ -263,8 +251,11 @@ struct RunningReader { // Client to fetch heights and blocks from 
Celestia. celestia_client: CelestiaClient, - /// The channel used to send messages to the executor task. - executor: executor::Handle, + /// The channel to forward firm blocks to the executor. + firm_blocks: mpsc::Sender>, + + /// The channel to read updates of the rollup state from. + rollup_state: crate::executor::StateReceiver, /// Token to listen for Conductor being shut down. shutdown: CancellationToken, @@ -280,7 +271,8 @@ struct RunningReader { /// capacity again. Used as a back pressure mechanism so that this task does not fetch more /// blobs if there is no capacity in the executor to execute them against the rollup in /// time. - enqueued_block: Fuse>>, + enqueued_block: + Fuse>>>>, /// The latest observed head height of the Celestia network. Set by values read from /// the `latest_height` stream. @@ -323,7 +315,6 @@ struct RunningReader { impl RunningReader { fn from_parts( exposed_reader: Reader, - mut executor: executor::Handle, sequencer_chain_id: tendermint::chain::Id, ) -> eyre::Result { let Reader { @@ -333,21 +324,23 @@ impl RunningReader { shutdown, sequencer_requests_per_second, metrics, + firm_blocks, + rollup_state, .. 
} = exposed_reader; let block_cache = - BlockCache::with_next_height(executor.next_expected_firm_sequencer_height()) + BlockCache::with_next_height(rollup_state.next_expected_firm_sequencer_height()) .wrap_err("failed constructing sequential block cache")?; let latest_heights = stream_latest_heights(celestia_client.clone(), celestia_block_time); - let rollup_id = executor.rollup_id(); + let rollup_id = rollup_state.rollup_id(); let rollup_namespace = astria_core::celestia::namespace_v0_from_rollup_id(rollup_id); let sequencer_namespace = astria_core::celestia::namespace_v0_from_sha256_of_bytes(sequencer_chain_id.as_bytes()); - let celestia_next_height = executor.celestia_base_block_height(); - let celestia_reference_height = executor.celestia_base_block_height(); - let celestia_variance = executor.celestia_block_variance(); + let celestia_next_height = rollup_state.celestia_base_block_height(); + let celestia_reference_height = rollup_state.celestia_base_block_height(); + let celestia_variance = rollup_state.celestia_block_variance(); Ok(Self { block_cache, @@ -357,7 +350,8 @@ impl RunningReader { ), celestia_client, enqueued_block: Fuse::terminated(), - executor, + firm_blocks, + rollup_state, latest_heights, shutdown, reconstruction_tasks: JoinMap::new(), @@ -498,7 +492,7 @@ impl RunningReader { rollup_id: self.rollup_id, rollup_namespace: self.rollup_namespace, sequencer_namespace: self.sequencer_namespace, - executor: self.executor.clone(), + rollup_state: self.rollup_state.clone(), metrics: self.metrics, }; self.reconstruction_tasks.spawn(height, task.execute()); @@ -520,28 +514,20 @@ impl RunningReader { #[instrument(skip_all)] fn forward_block_to_executor(&mut self, block: ReconstructedBlock) -> eyre::Result<()> { let celestia_height = block.celestia_height; - match self.executor.try_send_firm_block(block) { + match self.firm_blocks.try_send(block.into()) { Ok(()) => self.advance_reference_celestia_height(celestia_height), - Err(FirmTrySendError::Channel { - 
source, - }) => match source { - mpsc::error::TrySendError::Full(block) => { - trace!( - "executor channel is full; rescheduling block fetch until the channel \ - opens up" - ); - self.enqueued_block = - enqueue_block(self.executor.clone(), block).boxed().fuse(); - } - mpsc::error::TrySendError::Closed(_) => { - bail!("exiting because executor channel is closed"); - } - }, - Err(FirmTrySendError::NotSet) => bail!( - "exiting because executor was configured without firm commitments; this Celestia \ - reader should have never been started" - ), - } + Err(mpsc::error::TrySendError::Full(block)) => { + trace!( + "executor channel is full; rescheduling block fetch until the channel opens up" + ); + self.enqueued_block = enqueue_block(self.firm_blocks.clone(), block) + .boxed() + .fuse(); + } + Err(mpsc::error::TrySendError::Closed(_)) => { + bail!("exiting because executor channel is closed"); + } + }; Ok(()) } @@ -574,7 +560,7 @@ struct FetchConvertVerifyAndReconstruct { rollup_id: RollupId, rollup_namespace: Namespace, sequencer_namespace: Namespace, - executor: executor::Handle, + rollup_state: crate::executor::StateReceiver, metrics: &'static Metrics, } @@ -593,7 +579,7 @@ impl FetchConvertVerifyAndReconstruct { rollup_id, rollup_namespace, sequencer_namespace, - executor, + rollup_state, metrics, } = self; @@ -633,7 +619,7 @@ impl FetchConvertVerifyAndReconstruct { "decoded Sequencer header and rollup info from raw Celestia blobs", ); - let verified_blobs = verify_metadata(blob_verifier, decoded_blobs, executor).await; + let verified_blobs = verify_metadata(blob_verifier, decoded_blobs, rollup_state).await; metrics.record_sequencer_blocks_metadata_verified_per_celestia_fetch( verified_blobs.len_header_blobs(), @@ -671,15 +657,15 @@ impl FetchConvertVerifyAndReconstruct { #[instrument(skip_all, err)] async fn enqueue_block( - executor: executor::Handle, + firm_blocks_tx: mpsc::Sender>, block: Box, -) -> Result { +) -> Result>> { let celestia_height = 
block.celestia_height; - executor.send_firm_block(block).await?; + firm_blocks_tx.send(block).await?; Ok(celestia_height) } -#[instrument(skip_all, err)] +#[instrument(skip_all, err, ret(Display))] async fn get_sequencer_chain_id(client: SequencerClient) -> eyre::Result { use sequencer_client::Client as _; diff --git a/crates/astria-conductor/src/celestia/verify.rs b/crates/astria-conductor/src/celestia/verify.rs index a4415468b..d257a3b5c 100644 --- a/crates/astria-conductor/src/celestia/verify.rs +++ b/crates/astria-conductor/src/celestia/verify.rs @@ -51,10 +51,6 @@ use super::{ block_verifier, convert::ConvertedBlobs, }; -use crate::executor::{ - self, - StateIsInit, -}; pub(super) struct VerifiedBlobs { celestia_height: u64, @@ -99,7 +95,7 @@ struct VerificationTaskKey { pub(super) async fn verify_metadata( blob_verifier: Arc, converted_blobs: ConvertedBlobs, - mut executor: executor::Handle, + rollup_state: crate::executor::StateReceiver, ) -> VerifiedBlobs { let (celestia_height, header_blobs, rollup_blobs) = converted_blobs.into_parts(); @@ -107,7 +103,7 @@ pub(super) async fn verify_metadata( let mut verified_header_blobs = HashMap::with_capacity(header_blobs.len()); let next_expected_firm_sequencer_height = - executor.next_expected_firm_sequencer_height().value(); + rollup_state.next_expected_firm_sequencer_height().value(); for (index, blob) in header_blobs.into_iter().enumerate() { if blob.height().value() < next_expected_firm_sequencer_height { diff --git a/crates/astria-conductor/src/conductor/inner.rs b/crates/astria-conductor/src/conductor/inner.rs index b6dc9d3eb..7f964a7ba 100644 --- a/crates/astria-conductor/src/conductor/inner.rs +++ b/crates/astria-conductor/src/conductor/inner.rs @@ -1,25 +1,17 @@ -use std::{ - future::Future, - time::Duration, -}; +use std::time::Duration; use astria_eyre::eyre::{ self, eyre, - Result, + Report, WrapErr as _, }; -use itertools::Itertools as _; -use pin_project_lite::pin_project; -use 
sequencer_client::HttpClient; use tokio::{ select, + task::JoinHandle, time::timeout, }; -use tokio_util::{ - sync::CancellationToken, - task::JoinMap, -}; +use tokio_util::sync::CancellationToken; use tracing::{ error, info, @@ -29,16 +21,14 @@ use tracing::{ }; use crate::{ - celestia, executor, - sequencer, - utils::flatten, Config, Metrics, }; /// Exit value of the inner conductor impl to signal to the outer task whether to restart or /// shutdown +#[derive(Debug)] pub(super) enum RestartOrShutdown { Restart, Shutdown, @@ -54,177 +44,68 @@ impl std::fmt::Display for RestartOrShutdown { } } -enum ExitReason { - ShutdownSignal, - TaskFailed { - name: &'static str, - error: eyre::Report, - }, -} - -pin_project! { - /// A handle returned by [`ConductorInner::spawn`]. - pub(super) struct InnerHandle { - shutdown_token: CancellationToken, - task: Option>>, - } -} - -impl Future for InnerHandle { - type Output = Result, tokio::task::JoinError>; - - fn poll( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll { - use futures::future::FutureExt as _; - let this = self.project(); - let task = this - .task - .as_mut() - .expect("the Conductor handle must not be polled after shutdown"); - task.poll_unpin(cx) - } -} +struct ShutdownSignalReceived; -pub(super) struct ConductorInner { +/// The business logic of Conductur. +pub(super) struct Inner { /// Token to signal to all tasks to shut down gracefully. shutdown_token: CancellationToken, - /// The different long-running tasks that make up the conductor; - tasks: JoinMap<&'static str, eyre::Result<()>>, + executor: Option>>, } -impl ConductorInner { - const CELESTIA: &'static str = "celestia"; - const EXECUTOR: &'static str = "executor"; - const SEQUENCER: &'static str = "sequencer"; - - /// Create a new [`ConductorInner`] from a [`Config`]. +impl Inner { + /// Create a new [`Inner`] from a [`Config`]. 
/// /// # Errors /// Returns an error in the following cases if one of its constituent /// actors could not be spawned (executor, sequencer reader, or data availability reader). /// This usually happens if the actors failed to connect to their respective endpoints. - fn new( - cfg: Config, + pub(super) fn new( + config: Config, metrics: &'static Metrics, shutdown_token: CancellationToken, ) -> eyre::Result { - let mut tasks = JoinMap::new(); - - let sequencer_cometbft_client = HttpClient::new(&*cfg.sequencer_cometbft_url) - .wrap_err("failed constructing sequencer cometbft RPC client")?; - - // Spawn the executor task. - let executor_handle = { - let (executor, handle) = executor::Builder { - mode: cfg.execution_commit_level, - rollup_address: cfg.execution_rpc_url, - shutdown: shutdown_token.clone(), - metrics, - } - .build() - .wrap_err("failed constructing executor")?; - - tasks.spawn(Self::EXECUTOR, executor.run_until_stopped()); - handle - }; - - if cfg.execution_commit_level.is_with_soft() { - let sequencer_grpc_client = - sequencer::SequencerGrpcClient::new(&cfg.sequencer_grpc_url) - .wrap_err("failed constructing grpc client for Sequencer")?; - - // The `sync_start_block_height` represents the height of the next - // sequencer block that can be executed on top of the rollup state. - // This value is derived by the Executor. 
- let sequencer_reader = sequencer::Builder { - sequencer_grpc_client, - sequencer_cometbft_client: sequencer_cometbft_client.clone(), - sequencer_block_time: Duration::from_millis(cfg.sequencer_block_time_ms), - expected_sequencer_chain_id: cfg.expected_sequencer_chain_id.clone(), - shutdown: shutdown_token.clone(), - executor: executor_handle.clone(), - } - .build(); - tasks.spawn(Self::SEQUENCER, sequencer_reader.run_until_stopped()); + let executor = executor::Builder { + config, + shutdown: shutdown_token.clone(), + metrics, } - - if cfg.execution_commit_level.is_with_firm() { - let celestia_token = if cfg.no_celestia_auth { - None - } else { - Some(cfg.celestia_bearer_token) - }; - - let reader = celestia::Builder { - celestia_http_endpoint: cfg.celestia_node_http_url, - celestia_token, - celestia_block_time: Duration::from_millis(cfg.celestia_block_time_ms), - executor: executor_handle.clone(), - sequencer_cometbft_client: sequencer_cometbft_client.clone(), - sequencer_requests_per_second: cfg.sequencer_requests_per_second, - expected_celestia_chain_id: cfg.expected_celestia_chain_id, - expected_sequencer_chain_id: cfg.expected_sequencer_chain_id, - shutdown: shutdown_token.clone(), - metrics, - } - .build() - .wrap_err("failed to build Celestia Reader")?; - - tasks.spawn(Self::CELESTIA, reader.run_until_stopped()); - }; + .build() + .wrap_err("failed constructing executor")?; Ok(Self { shutdown_token, - tasks, + executor: Some(tokio::spawn(executor.run_until_stopped())), }) } - /// Runs [`ConductorInner`] until it receives an exit signal. + /// Runs [`Inner`] until it receives an exit signal. /// /// # Panics /// Panics if it could not install a signal handler. - async fn run_until_stopped(mut self) -> Result { + pub(super) async fn run_until_stopped(mut self) -> eyre::Result { info_span!("Conductor::run_until_stopped").in_scope(|| info!("conductor is running")); let exit_reason = select! 
{ biased; () = self.shutdown_token.cancelled() => { - ExitReason::ShutdownSignal + Ok(ShutdownSignalReceived) }, - Some((name, res)) = self.tasks.join_next() => { - match flatten(res) { - Ok(()) => ExitReason::TaskFailed{name, error: eyre!("task `{name}` exited unexpectedly")}, - Err(err) => ExitReason::TaskFailed{name, error: err.wrap_err(format!("task `{name}` failed"))}, + res = self.executor.as_mut().expect("task must always be set at this point") => { + // XXX: must Option::take the JoinHandle to avoid polling it in the shutdown logic. + self.executor.take(); + match res { + Ok(Ok(())) => Err(eyre!("executor exited unexpectedly")), + Ok(Err(err)) => Err(err.wrap_err("executor exited with error")), + Err(err) => Err(Report::new(err).wrap_err("executor panicked")), } } }; - let message = "initiating shutdown"; - report_exit(&exit_reason, message); - self.shutdown(exit_reason).await - } - - /// Creates and spawns a Conductor on the tokio runtime. - /// - /// This calls [`tokio::spawn`] and returns a [`InnerHandle`] to the - /// running Conductor task. - pub(super) fn spawn( - cfg: Config, - metrics: &'static Metrics, - shutdown_token: CancellationToken, - ) -> eyre::Result { - let conductor = Self::new(cfg, metrics, shutdown_token)?; - let shutdown_token = conductor.shutdown_token.clone(); - let task = tokio::spawn(conductor.run_until_stopped()); - Ok(InnerHandle { - shutdown_token, - task: Some(task), - }) + self.restart_or_shutdown(exit_reason).await } /// Shuts down all tasks. @@ -232,89 +113,43 @@ impl ConductorInner { /// Waits 25 seconds for all tasks to shut down before aborting them. 25 seconds /// because kubernetes issues SIGKILL 30 seconds after SIGTERM, giving 5 seconds /// to abort the remaining tasks. 
- #[instrument(skip_all)] - async fn shutdown(mut self, exit_reason: ExitReason) -> Result { + #[instrument(skip_all, err, ret(Display))] + async fn restart_or_shutdown( + mut self, + exit_reason: eyre::Result, + ) -> eyre::Result { self.shutdown_token.cancel(); - let mut restart_or_shutdown = RestartOrShutdown::Shutdown; - - match &exit_reason { - ExitReason::ShutdownSignal => { - info!("received shutdown signal, skipping check for restart"); - } - ExitReason::TaskFailed { - name, - error, - } => { - if check_for_restart(name, error) { - restart_or_shutdown = RestartOrShutdown::Restart; + let restart_or_shutdown = match exit_reason { + Ok(ShutdownSignalReceived) => Ok(RestartOrShutdown::Shutdown), + Err(error) => { + error!(%error, "executor failed; checking error chain if conductor should be restarted"); + if check_for_restart(&error) { + Ok(RestartOrShutdown::Restart) + } else { + Err(error) } } - } - - info!("signalled all tasks to shut down; waiting for 25 seconds to exit"); - - let shutdown_loop = async { - while let Some((name, res)) = self.tasks.join_next().await { - let message = "task shut down"; - match flatten(res) { - Ok(()) => { - info!(name, message); - } - Err(error) => { - if check_for_restart(name, &error) - && !matches!(exit_reason, ExitReason::ShutdownSignal) - { - restart_or_shutdown = RestartOrShutdown::Restart; - } - error!(name, %error, message); - } - }; - } }; - if timeout(Duration::from_secs(25), shutdown_loop) - .await - .is_err() - { - let tasks = self.tasks.keys().join(", "); - warn!( - tasks = format_args!("[{tasks}]"), - "aborting all tasks that have not yet shut down", - ); - self.tasks.abort_all(); - } else { - info!("all tasks shut down regularly"); - } - info!("shutting down"); - - if let ExitReason::TaskFailed { - error, .. 
- } = exit_reason - { - if matches!(restart_or_shutdown, RestartOrShutdown::Shutdown) { - return Err(error); + if let Some(mut executor) = self.executor.take() { + let wait_until_timeout = Duration::from_secs(25); + if timeout(wait_until_timeout, &mut executor).await.is_err() { + warn!( + "waited `{}` for executor start to respond to shutdown signal; aborting", + humantime::format_duration(wait_until_timeout) + ); + executor.abort(); + } else { + info!("executor shut down regularly"); } } - Ok(restart_or_shutdown) - } -} -#[instrument(skip_all)] -fn report_exit(exit_reason: &ExitReason, message: &str) { - match exit_reason { - ExitReason::ShutdownSignal => info!(reason = "received shutdown signal", message), - ExitReason::TaskFailed { - name: task, - error: reason, - } => error!(%reason, %task, message), + restart_or_shutdown } } #[instrument(skip_all)] -fn check_for_restart(name: &str, err: &eyre::Report) -> bool { - if name != ConductorInner::EXECUTOR { - return false; - } +fn check_for_restart(err: &eyre::Report) -> bool { let mut current = Some(err.as_ref() as &dyn std::error::Error); while let Some(err) = current { if let Some(status) = err.downcast_ref::() { @@ -338,6 +173,6 @@ mod tests { let err = tonic_error.wrap_err("wrapper_1"); let err = err.wrap_err("wrapper_2"); let err = err.wrap_err("wrapper_3"); - assert!(super::check_for_restart("executor", &err.unwrap_err())); + assert!(super::check_for_restart(&err.unwrap_err())); } } diff --git a/crates/astria-conductor/src/conductor/mod.rs b/crates/astria-conductor/src/conductor/mod.rs index 9c94fab0c..f6a3fd752 100644 --- a/crates/astria-conductor/src/conductor/mod.rs +++ b/crates/astria-conductor/src/conductor/mod.rs @@ -11,8 +11,7 @@ use astria_eyre::eyre::{ WrapErr as _, }; use inner::{ - ConductorInner, - InnerHandle, + Inner, RestartOrShutdown, }; use pin_project_lite::pin_project; @@ -79,7 +78,7 @@ pub struct Conductor { shutdown_token: CancellationToken, /// Handle for the inner conductor task. 
- inner: InnerHandle, + inner: JoinHandle>, /// Configuration for the conductor, necessary upon a restart. cfg: Config, @@ -95,11 +94,10 @@ impl Conductor { /// Returns an error if [`ConductorInner`] could not be created. pub fn new(cfg: Config, metrics: &'static Metrics) -> eyre::Result { let shutdown_token = CancellationToken::new(); - let conductor_inner_handle = - ConductorInner::spawn(cfg.clone(), metrics, shutdown_token.child_token())?; + let inner = Inner::new(cfg.clone(), metrics, shutdown_token.child_token())?; Ok(Self { shutdown_token, - inner: conductor_inner_handle, + inner: tokio::spawn(inner.run_until_stopped()), cfg, metrics, }) @@ -120,13 +118,15 @@ impl Conductor { /// inner conductor task. #[instrument(skip_all, err)] fn restart(&mut self) -> eyre::Result<()> { - let new_handle = ConductorInner::spawn( - self.cfg.clone(), - self.metrics, - self.shutdown_token.child_token(), - ) - .wrap_err("failed to instantiate Conductor for restart")?; - self.inner = new_handle; + self.inner = tokio::spawn( + Inner::new( + self.cfg.clone(), + self.metrics, + self.shutdown_token.child_token(), + ) + .wrap_err("failed to instantiate Conductor for restart")? 
+ .run_until_stopped(), + ); Ok(()) } diff --git a/crates/astria-conductor/src/config.rs b/crates/astria-conductor/src/config.rs index e8211b171..cd48c4d1e 100644 --- a/crates/astria-conductor/src/config.rs +++ b/crates/astria-conductor/src/config.rs @@ -95,6 +95,16 @@ pub struct Config { pub pretty_print: bool, } +impl Config { + pub(crate) fn is_with_firm(&self) -> bool { + self.execution_commit_level.is_with_firm() + } + + pub(crate) fn is_with_soft(&self) -> bool { + self.execution_commit_level.is_with_soft() + } +} + impl config::Config for Config { const PREFIX: &'static str = "ASTRIA_CONDUCTOR_"; } diff --git a/crates/astria-conductor/src/executor/builder.rs b/crates/astria-conductor/src/executor/builder.rs index daf53e583..1cae5f008 100644 --- a/crates/astria-conductor/src/executor/builder.rs +++ b/crates/astria-conductor/src/executor/builder.rs @@ -1,86 +1,40 @@ -use std::collections::HashMap; - use astria_eyre::eyre::{ self, WrapErr as _, }; -use tokio::sync::mpsc; use tokio_util::sync::CancellationToken; -use super::{ - state, - Executor, - Handle, - ReconstructedBlock, - StateNotInit, -}; -use crate::{ - config::CommitLevel, - metrics::Metrics, -}; +use super::Executor; +use crate::metrics::Metrics; pub(crate) struct Builder { - pub(crate) mode: CommitLevel, - pub(crate) rollup_address: String, + pub(crate) config: crate::Config, pub(crate) shutdown: CancellationToken, pub(crate) metrics: &'static Metrics, } impl Builder { - pub(crate) fn build(self) -> eyre::Result<(Executor, Handle)> { + pub(crate) fn build(self) -> eyre::Result { let Self { - mode, - rollup_address, + config, shutdown, metrics, } = self; - let client = super::client::Client::connect_lazy(&rollup_address).wrap_err_with(|| { - format!( - "failed to construct execution client for provided rollup address \ - `{rollup_address}`" - ) - })?; - - let mut firm_block_tx = None; - let mut firm_block_rx = None; - if mode.is_with_firm() { - let (tx, rx) = mpsc::channel::>(16); - firm_block_tx = 
Some(tx); - firm_block_rx = Some(rx); - } - - let mut soft_block_tx = None; - let mut soft_block_rx = None; - if mode.is_with_soft() { - let (tx, rx) = super::soft_block_channel(); - soft_block_tx = Some(tx); - soft_block_rx = Some(rx); - } - - let (state_tx, state_rx) = state::channel(); + let client = + super::client::Client::connect_lazy(&config.execution_rpc_url).wrap_err_with(|| { + format!( + "failed to construct execution client for provided rollup address `{}`", + config.execution_rpc_url, + ) + })?; let executor = Executor { + config, client, - - mode, - - firm_blocks: firm_block_rx, - soft_blocks: soft_block_rx, - shutdown, - state: state_tx, - blocks_pending_finalization: HashMap::new(), - - max_spread: None, metrics, }; - let handle = Handle { - firm_blocks: firm_block_tx, - soft_blocks: soft_block_tx, - state: state_rx, - _state_init: StateNotInit, - }; - Ok((executor, handle)) + Ok(executor) } } diff --git a/crates/astria-conductor/src/executor/channel.rs b/crates/astria-conductor/src/executor/channel.rs deleted file mode 100644 index 955d62d9b..000000000 --- a/crates/astria-conductor/src/executor/channel.rs +++ /dev/null @@ -1,241 +0,0 @@ -//! An mpsc channel bounded by an externally driven semaphore. -//! -//! While the main purpose of this channel is to send [`sequencer_client::SequencerBlock`]s -//! from a sequencer reader to the executor, the channel is generic over the values that are -//! being sent to better test its functionality. - -use std::sync::{ - Arc, - Weak, -}; - -use tokio::sync::{ - mpsc::{ - error::SendError as TokioSendError, - unbounded_channel, - UnboundedReceiver, - UnboundedSender, - }, - AcquireError, - Semaphore, - TryAcquireError, -}; -use tracing::instrument; - -/// Creates an mpsc channel for sending soft blocks between asynchronous task. -/// -/// The initial bound of the channel is 0 and the receiver is expected to add -/// capacity to the channel. 
-pub(super) fn soft_block_channel() -> (Sender, Receiver) { - let cap = 0; - let sem = Arc::new(Semaphore::new(0)); - let (tx, rx) = unbounded_channel(); - let sender = Sender { - chan: tx, - sem: Arc::downgrade(&sem), - }; - let receiver = Receiver { - cap, - chan: rx, - sem, - }; - (sender, receiver) -} - -#[derive(Debug, thiserror::Error, PartialEq)] -#[error("the channel is closed")] -pub(crate) struct SendError; - -impl From for SendError { - fn from(_: AcquireError) -> Self { - Self - } -} - -impl From> for SendError { - fn from(_: TokioSendError) -> Self { - Self - } -} - -#[derive(Debug, thiserror::Error, PartialEq)] -pub(crate) enum TrySendError { - #[error("the channel is closed")] - Closed(T), - #[error("no permits available")] - NoPermits(T), -} - -impl TrySendError { - fn from_semaphore(err: &TryAcquireError, block: T) -> Self { - match err { - tokio::sync::TryAcquireError::Closed => Self::Closed(block), - tokio::sync::TryAcquireError::NoPermits => Self::NoPermits(block), - } - } -} - -impl From> for TrySendError { - fn from(err: TokioSendError) -> Self { - Self::Closed(err.0) - } -} - -#[derive(Debug, Clone)] -pub(super) struct Sender { - sem: Weak, - chan: UnboundedSender, -} - -impl Sender { - /// Sends a block, waiting until the channel has permits. - /// - /// Returns an error if the channel is closed. - #[instrument(skip_all, err)] - pub(super) async fn send(&self, block: T) -> Result<(), SendError> { - let sem = self.sem.upgrade().ok_or(SendError)?; - let permit = sem.acquire().await?; - permit.forget(); - self.chan.send(block)?; - Ok(()) - } - - /// Attempts to send a block without blocking. - /// - /// Returns an error if the channel is out of permits or if it has been closed. 
- pub(super) fn try_send(&self, block: T) -> Result<(), TrySendError> { - let sem = match self.sem.upgrade() { - None => return Err(TrySendError::Closed(block)), - Some(sem) => sem, - }; - let permit = match sem.try_acquire() { - Err(err) => return Err(TrySendError::from_semaphore(&err, block)), - Ok(permit) => permit, - }; - permit.forget(); - self.chan.send(block)?; - Ok(()) - } -} - -pub(super) struct Receiver { - cap: usize, - sem: Arc, - chan: UnboundedReceiver, -} - -impl Drop for Receiver { - fn drop(&mut self) { - self.sem.close(); - } -} - -impl Receiver { - /// Sets the channel's capacity to `cap`. - /// - /// `cap` will be the maximum number of blocks that can be sent - /// over the channel before new permits are added with `[SoftBlockReceiver::add_permits]`. - pub(super) fn set_capacity(&mut self, cap: usize) { - self.cap = cap; - } - - /// Adds up to `capacity` number of permits to the channel. - /// - /// `capacity` is previously set by [`SoftBlockReceiver::set_capacity`] - /// or zero by default. - pub(super) fn fill_permits(&self) { - let additional = self.cap.saturating_sub(self.sem.available_permits()); - self.sem.add_permits(additional); - } - - /// Receives a block over the channel. 
- #[instrument(skip_all)] - pub(super) async fn recv(&mut self) -> Option { - self.chan.recv().await - } -} - -#[cfg(test)] -mod tests { - use super::{ - soft_block_channel, - SendError, - TrySendError, - }; - - #[test] - fn fresh_channel_has_no_capacity() { - let (tx, _rx) = soft_block_channel::<()>(); - assert_eq!( - tx.try_send(()).unwrap_err(), - TrySendError::NoPermits(()), - "a fresh channel starts without permits" - ); - } - - #[test] - fn permits_are_filled_to_capacity() { - let cap = 2; - let (tx, mut rx) = soft_block_channel::<()>(); - rx.set_capacity(cap); - rx.fill_permits(); - for _ in 0..cap { - tx.try_send(()).expect("the channel should have capacity"); - } - assert_eq!( - tx.try_send(()).unwrap_err(), - TrySendError::NoPermits(()), - "a channel that has its permits used up should return with a NoPermits error until \ - refilled or closed", - ); - } - - #[test] - fn refilling_twice_has_no_effect() { - let cap = 2; - let (tx, mut rx) = soft_block_channel::<()>(); - rx.set_capacity(cap); - rx.fill_permits(); - rx.fill_permits(); - for _ in 0..cap { - tx.try_send(()).expect("the channel should have capacity"); - } - assert_eq!( - tx.try_send(()).unwrap_err(), - TrySendError::NoPermits(()), - "refilling twice in a row should result in the same number of permits" - ); - } - - #[test] - fn try_sending_to_dropped_receiver_returns_closed_error() { - let (tx, rx) = soft_block_channel::<()>(); - std::mem::drop(rx); - assert_eq!( - tx.try_send(()).unwrap_err(), - TrySendError::Closed(()), - "a channel with a dropped receiver is considered closed", - ); - } - - #[tokio::test] - async fn async_sending_to_dropped_receiver_returns_closed_error() { - let (tx, rx) = soft_block_channel::<()>(); - std::mem::drop(rx); - assert_eq!( - tx.send(()).await.unwrap_err(), - SendError, - "a channel with a dropped receiver is considered closed", - ); - } - - #[tokio::test] - #[should_panic(expected = "receiving with all senders dropped should return None")] - async fn 
receiving_without_any_remaining_receivers_returns_none() { - let (tx, mut rx) = soft_block_channel::<()>(); - std::mem::drop(tx); - rx.recv() - .await - .expect("receiving with all senders dropped should return None"); - } -} diff --git a/crates/astria-conductor/src/executor/mod.rs b/crates/astria-conductor/src/executor/mod.rs index fb3525ead..ad475db85 100644 --- a/crates/astria-conductor/src/executor/mod.rs +++ b/crates/astria-conductor/src/executor/mod.rs @@ -1,4 +1,7 @@ -use std::collections::HashMap; +use std::{ + collections::HashMap, + time::Duration, +}; use astria_core::{ execution::v1::{ @@ -16,27 +19,33 @@ use astria_eyre::eyre::{ self, bail, ensure, + eyre, WrapErr as _, }; use bytes::Bytes; -use sequencer_client::tendermint::{ - block::Height as SequencerHeight, - Time as TendermintTime, +use sequencer_client::{ + tendermint::{ + block::Height as SequencerHeight, + Time as TendermintTime, + }, + HttpClient, }; use tokio::{ select, - sync::{ - mpsc, - watch::error::RecvError, - }, + sync::mpsc, + task::JoinError, +}; +use tokio_util::{ + sync::CancellationToken, + task::JoinMap, }; -use tokio_util::sync::CancellationToken; use tracing::{ debug, debug_span, error, info, instrument, + warn, }; use crate::{ @@ -46,10 +55,8 @@ use crate::{ }; mod builder; -pub(crate) mod channel; pub(crate) use builder::Builder; -use channel::soft_block_channel; mod client; mod state; @@ -57,180 +64,176 @@ mod state; mod tests; pub(super) use client::Client; -use state::StateReceiver; +use state::State; +pub(crate) use state::StateReceiver; use self::state::StateSender; type CelestiaHeight = u64; -#[derive(Clone, Debug)] -pub(crate) struct StateNotInit; -#[derive(Clone, Debug)] -pub(crate) struct StateIsInit; - -#[derive(Debug, thiserror::Error)] -pub(crate) enum FirmSendError { - #[error("executor was configured without firm commitments")] - NotSet, - #[error("failed sending blocks to executor")] - Channel { - #[from] - source: mpsc::error::SendError>, - }, -} - 
-#[derive(Debug, thiserror::Error)] -pub(crate) enum FirmTrySendError { - #[error("executor was configured without firm commitments")] - NotSet, - #[error("failed sending blocks to executor")] - Channel { - #[from] - source: mpsc::error::TrySendError>, - }, -} +pub(crate) struct Executor { + config: crate::Config, -#[derive(Debug, thiserror::Error)] -pub(crate) enum SoftSendError { - #[error("executor was configured without soft commitments")] - NotSet, - #[error("failed sending blocks to executor")] - Channel { source: Box }, -} + /// The execution client driving the rollup. + client: Client, -#[derive(Debug, thiserror::Error)] -pub(crate) enum SoftTrySendError { - #[error("executor was configured without firm commitments")] - NotSet, - #[error("failed sending blocks to executor")] - Channel { - source: Box>, - }, -} + /// Token to listen for Conductor being shut down. + shutdown: CancellationToken, -/// A handle to the executor. -/// -/// To be useful, [`Handle::wait_for_init`] must be called in -/// order to obtain a [`Handle`]. This is to ensure that the executor -/// state was primed before using its other methods. See [`State`] for more -/// information. -#[derive(Debug, Clone)] -pub(crate) struct Handle { - firm_blocks: Option>>, - soft_blocks: Option>, - state: StateReceiver, - _state_init: TStateInit, + metrics: &'static Metrics, } -impl Handle { - #[instrument(skip_all, err)] - pub(crate) async fn wait_for_init(&mut self) -> eyre::Result> { - self.state.wait_for_init().await.wrap_err( - "executor state channel terminated while waiting for the state to initialize", - )?; - let Self { - firm_blocks, - soft_blocks, - state, - .. 
- } = self.clone(); - Ok(Handle { - firm_blocks, - soft_blocks, - state, - _state_init: StateIsInit, - }) - } -} +impl Executor { + const CELESTIA: &'static str = "celestia"; + const SEQUENCER: &'static str = "sequencer"; -impl Handle { - #[instrument(skip_all, err)] - pub(crate) async fn send_firm_block( - self, - block: impl Into>, - ) -> Result<(), FirmSendError> { - let sender = self.firm_blocks.as_ref().ok_or(FirmSendError::NotSet)?; - Ok(sender.send(block.into()).await?) - } + pub(crate) async fn run_until_stopped(self) -> eyre::Result<()> { + let initialized = select!( + () = self.shutdown.clone().cancelled_owned() => { + return report_exit(Ok( + "received shutdown signal while initializing task; \ + aborting intialization and exiting" + ), ""); + } + res = self.init() => { + res.wrap_err("initialization failed")? + } + ); - pub(crate) fn try_send_firm_block( - &self, - block: impl Into>, - ) -> Result<(), FirmTrySendError> { - let sender = self.firm_blocks.as_ref().ok_or(FirmTrySendError::NotSet)?; - Ok(sender.try_send(block.into())?) + initialized.run().await } + /// Runs the init logic that needs to happen before [`Executor`] can enter its main loop. 
#[instrument(skip_all, err)] - pub(crate) async fn send_soft_block_owned( - self, - block: FilteredSequencerBlock, - ) -> Result<(), SoftSendError> { - let chan = self.soft_blocks.as_ref().ok_or(SoftSendError::NotSet)?; - chan.send(block) + async fn init(self) -> eyre::Result { + let state = self + .create_initial_node_state() .await - .map_err(|source| SoftSendError::Channel { - source: Box::new(source), - })?; - Ok(()) - } - - pub(crate) fn try_send_soft_block( - &self, - block: FilteredSequencerBlock, - ) -> Result<(), SoftTrySendError> { - let chan = self.soft_blocks.as_ref().ok_or(SoftTrySendError::NotSet)?; - chan.try_send(block) - .map_err(|source| SoftTrySendError::Channel { - source: Box::new(source), - })?; - Ok(()) - } + .wrap_err("failed setting initial rollup node state")?; - pub(crate) fn next_expected_firm_sequencer_height(&mut self) -> SequencerHeight { - self.state.next_expected_firm_sequencer_height() - } + let sequencer_cometbft_client = HttpClient::new(&*self.config.sequencer_cometbft_url) + .wrap_err("failed constructing sequencer cometbft RPC client")?; + + let reader_cancellation_token = self.shutdown.child_token(); + + let (firm_blocks_tx, firm_blocks_rx) = tokio::sync::mpsc::channel(16); + let (soft_blocks_tx, soft_blocks_rx) = + tokio::sync::mpsc::channel(state.calculate_max_spread()); + + let mut reader_tasks = JoinMap::new(); + if self.config.is_with_firm() { + let celestia_token = if self.config.no_celestia_auth { + None + } else { + Some(self.config.celestia_bearer_token.clone()) + }; + + let reader = crate::celestia::Builder { + celestia_http_endpoint: self.config.celestia_node_http_url.clone(), + celestia_token, + celestia_block_time: Duration::from_millis(self.config.celestia_block_time_ms), + firm_blocks: firm_blocks_tx, + rollup_state: state.subscribe(), + sequencer_cometbft_client: sequencer_cometbft_client.clone(), + sequencer_requests_per_second: self.config.sequencer_requests_per_second, + expected_celestia_chain_id: 
self.config.expected_celestia_chain_id.clone(), + expected_sequencer_chain_id: self.config.expected_sequencer_chain_id.clone(), + shutdown: reader_cancellation_token.child_token(), + metrics: self.metrics, + } + .build() + .wrap_err("failed to build Celestia Reader")?; + reader_tasks.spawn(Self::CELESTIA, reader.run_until_stopped()); + } - pub(crate) fn next_expected_soft_sequencer_height(&mut self) -> SequencerHeight { - self.state.next_expected_soft_sequencer_height() - } + if self.config.is_with_soft() { + let sequencer_grpc_client = + crate::sequencer::SequencerGrpcClient::new(&self.config.sequencer_grpc_url) + .wrap_err("failed constructing grpc client for Sequencer")?; + + let sequencer_reader = crate::sequencer::Builder { + sequencer_grpc_client, + sequencer_cometbft_client: sequencer_cometbft_client.clone(), + sequencer_block_time: Duration::from_millis(self.config.sequencer_block_time_ms), + expected_sequencer_chain_id: self.config.expected_sequencer_chain_id.clone(), + shutdown: reader_cancellation_token.child_token(), + soft_blocks: soft_blocks_tx, + rollup_state: state.subscribe(), + } + .build(); + reader_tasks.spawn(Self::SEQUENCER, sequencer_reader.run_until_stopped()); + }; - #[instrument(skip_all)] - pub(crate) async fn next_expected_soft_height_if_changed( - &mut self, - ) -> Result { - self.state.next_expected_soft_height_if_changed().await + Ok(Initialized { + config: self.config, + client: self.client, + firm_blocks: firm_blocks_rx, + soft_blocks: soft_blocks_rx, + shutdown: self.shutdown, + state, + blocks_pending_finalization: HashMap::new(), + metrics: self.metrics, + reader_tasks, + reader_cancellation_token, + }) } - pub(crate) fn rollup_id(&mut self) -> RollupId { - self.state.rollup_id() - } + #[instrument(skip_all, err)] + async fn create_initial_node_state(&self) -> eyre::Result { + let genesis_info = { + async { + self.client + .clone() + .get_genesis_info_with_retry() + .await + .wrap_err("failed getting genesis info") + } + }; + let 
commitment_state = { + async { + self.client + .clone() + .get_commitment_state_with_retry() + .await + .wrap_err("failed getting commitment state") + } + }; + let (genesis_info, commitment_state) = tokio::try_join!(genesis_info, commitment_state)?; - pub(crate) fn celestia_base_block_height(&mut self) -> CelestiaHeight { - self.state.celestia_base_block_height() - } + let (state, _) = state::channel( + State::try_from_genesis_info_and_commitment_state(genesis_info, commitment_state) + .wrap_err( + "failed to construct initial state gensis and commitment info received from \ + rollup", + )?, + ); - pub(crate) fn celestia_block_variance(&mut self) -> u64 { - self.state.celestia_block_variance() + self.metrics + .absolute_set_executed_firm_block_number(state.firm_number()); + self.metrics + .absolute_set_executed_soft_block_number(state.soft_number()); + info!( + initial_state = serde_json::to_string(&*state.get()) + .expect("writing json to a string should not fail"), + "received genesis info from rollup", + ); + Ok(state) } } -pub(crate) struct Executor { +struct Initialized { + config: crate::Config, + /// The execution client driving the rollup. client: Client, - /// The mode under which this executor (and hence conductor) runs. - mode: CommitLevel, - /// The channel of which this executor receives blocks for executing /// firm commitments. - /// Only set if `mode` is `FirmOnly` or `SoftAndFirm`. - firm_blocks: Option>>, + firm_blocks: mpsc::Receiver>, /// The channel of which this executor receives blocks for executing /// soft commitments. - /// Only set if `mode` is `SoftOnly` or `SoftAndFirm`. - soft_blocks: Option>, + soft_blocks: mpsc::Receiver, /// Token to listen for Conductor being shut down. shutdown: CancellationToken, @@ -244,43 +247,38 @@ pub(crate) struct Executor { /// without re-executing on top of the rollup node. blocks_pending_finalization: HashMap, - /// The maximum permitted spread between firm and soft blocks. 
- max_spread: Option, - metrics: &'static Metrics, + + /// The tasks reading block data off Celestia or Sequencer. + reader_tasks: JoinMap<&'static str, eyre::Result<()>>, + + /// The cancellation token specifically for signaling the `reader_tasks` to shut down. + reader_cancellation_token: CancellationToken, } -impl Executor { - pub(crate) async fn run_until_stopped(mut self) -> eyre::Result<()> { - select!( +impl Initialized { + async fn run(mut self) -> eyre::Result<()> { + let reason = select!( + biased; + () = self.shutdown.clone().cancelled_owned() => { - return report_exit(Ok( - "received shutdown signal while initializing task; \ - aborting intialization and exiting" - ), ""); + Ok("received shutdown signal") } - res = self.init() => { - res.wrap_err("initialization failed")?; + + res = self.run_event_loop() => { + res } ); - let reason = loop { - let spread_not_too_large = !self.is_spread_too_large(); - if spread_not_too_large { - if let Some(channel) = self.soft_blocks.as_mut() { - channel.fill_permits(); - } - } + self.shutdown(reason).await + } + async fn run_event_loop(&mut self) -> eyre::Result<&'static str> { + loop { select!( biased; - () = self.shutdown.cancelled() => { - break Ok("received shutdown signal"); - } - - Some(block) = async { self.firm_blocks.as_mut().unwrap().recv().await }, - if self.firm_blocks.is_some() => + Some(block) = self.firm_blocks.recv() => { debug_span!("conductor::Executor::run_until_stopped").in_scope(||debug!( block.height = %block.sequencer_height(), @@ -292,8 +290,7 @@ impl Executor { } } - Some(block) = async { self.soft_blocks.as_mut().unwrap().recv().await }, - if self.soft_blocks.is_some() && spread_not_too_large => + Some(block) = self.soft_blocks.recv(), if !self.is_spread_too_large() => { debug_span!("conductor::Executor::run_until_stopped").in_scope(||debug!( block.height = %block.height(), @@ -304,53 +301,14 @@ impl Executor { break Err(error).wrap_err("failed executing soft block"); } } - ); - }; - - // XXX: 
explicitly setting the message (usually implicitly set by tracing) - let message = "shutting down"; - report_exit(reason, message) - } - /// Runs the init logic that needs to happen before [`Executor`] can enter its main loop. - #[instrument(skip_all, err)] - async fn init(&mut self) -> eyre::Result<()> { - self.set_initial_node_state() - .await - .wrap_err("failed setting initial rollup node state")?; + Some((task, res)) = self.reader_tasks.join_next() => { + break handle_task_exit(task, res); + } - let max_spread: usize = self.calculate_max_spread(); - self.max_spread.replace(max_spread); - if let Some(channel) = self.soft_blocks.as_mut() { - channel.set_capacity(max_spread); - info!( - max_spread, - "setting capacity of soft blocks channel to maximum permitted firm<>soft \ - commitment spread (this has no effect if conductor is set to perform soft-sync \ - only)" + else => break Ok("all channels are closed") ); } - - Ok(()) - } - - /// Calculates the maximum allowed spread between firm and soft commitments heights. - /// - /// The maximum allowed spread is taken as `max_spread = variance * 6`, where `variance` - /// is the `celestia_block_variance` as defined in the rollup node's genesis that this - /// executor/conductor talks to. - /// - /// The heuristic 6 is the largest number of Sequencer heights that will be found at - /// one Celestia height. - /// - /// # Panics - /// Panics if the `u32` underlying the celestia block variance tracked in the state could - /// not be converted to a `usize`. This should never happen on any reasonable architecture - /// that Conductor will run on. 
- fn calculate_max_spread(&self) -> usize { - usize::try_from(self.state.celestia_block_variance()) - .expect("converting a u32 to usize should work on any architecture conductor runs on") - .saturating_mul(6) } /// Returns if the spread between firm and soft commitment heights in the tracked state is too @@ -362,7 +320,7 @@ impl Executor { /// /// Panics if called before [`Executor::init`] because `max_spread` must be set. fn is_spread_too_large(&self) -> bool { - if self.firm_blocks.is_none() { + if !self.config.is_with_firm() { return false; } let (next_firm, next_soft) = { @@ -372,12 +330,7 @@ impl Executor { }; let is_too_far_ahead = usize::try_from(next_soft.saturating_sub(next_firm)) - .map(|spread| { - spread - >= self - .max_spread - .expect("executor must be initalized and this field set") - }) + .map(|spread| spread >= self.state.calculate_max_spread()) .unwrap_or(false); if is_too_far_ahead { @@ -568,43 +521,6 @@ impl Executor { Ok(executed_block) } - #[instrument(skip_all, err)] - async fn set_initial_node_state(&mut self) -> eyre::Result<()> { - let genesis_info = { - async { - self.client - .clone() - .get_genesis_info_with_retry() - .await - .wrap_err("failed getting genesis info") - } - }; - let commitment_state = { - async { - self.client - .clone() - .get_commitment_state_with_retry() - .await - .wrap_err("failed getting commitment state") - } - }; - let (genesis_info, commitment_state) = tokio::try_join!(genesis_info, commitment_state)?; - self.state - .try_init(genesis_info, commitment_state) - .wrap_err("failed initializing state tracking")?; - - self.metrics - .absolute_set_executed_firm_block_number(self.state.firm_number()); - self.metrics - .absolute_set_executed_soft_block_number(self.state.soft_number()); - info!( - initial_state = serde_json::to_string(&*self.state.get()) - .expect("writing json to a string should not fail"), - "received genesis info from rollup", - ); - Ok(()) - } - #[instrument(skip_all, err)] async fn 
update_commitment_state(&mut self, update: Update) -> eyre::Result<()> { use Update::{ @@ -662,13 +578,46 @@ impl Executor { should_execute_firm_block( self.state.next_expected_firm_sequencer_height().value(), self.state.next_expected_soft_sequencer_height().value(), - self.mode, + self.config.execution_commit_level, ) } + + #[instrument(skip_all, err)] + async fn shutdown(mut self, reason: eyre::Result<&'static str>) -> eyre::Result<()> { + info!("signaling all reader tasks to exit"); + self.reader_cancellation_token.cancel(); + while let Some((task, exit_status)) = self.reader_tasks.join_next().await { + match crate::utils::flatten(exit_status) { + Ok(()) => info!(task, "task exited"), + Err(error) => warn!(task, %error, "task exited with error"), + } + } + report_exit(reason, "shutting down") + } +} + +/// Wraps a task result to explain why it exited. +/// +/// Right now only the err-branch is populated because tasks should +/// never exit. Still returns an `eyre::Result` to line up with the +/// return type of [`Executor::run_until_stopped`]. +/// +/// Executor should `break handle_task_exit` immediately after calling +/// this method. 
+fn handle_task_exit( + task: &'static str, + res: Result, JoinError>, +) -> eyre::Result<&'static str> { + match res { + Ok(Ok(())) => Err(eyre!("task `{task}` finished unexpectedly")), + Ok(Err(err)) => Err(err).wrap_err_with(|| format!("task `{task}` exited with error")), + Err(err) => Err(err).wrap_err_with(|| format!("task `{task}` panicked")), + } } #[instrument(skip_all)] fn report_exit(reason: eyre::Result<&str>, message: &str) -> eyre::Result<()> { + // XXX: explicitly setting the message (usually implicitly set by tracing) match reason { Ok(reason) => { info!(%reason, message); diff --git a/crates/astria-conductor/src/executor/state.rs b/crates/astria-conductor/src/executor/state.rs index 2ae4e2c4b..1f315b078 100644 --- a/crates/astria-conductor/src/executor/state.rs +++ b/crates/astria-conductor/src/executor/state.rs @@ -10,10 +10,6 @@ use astria_core::{ }, primitive::v1::RollupId, }; -use astria_eyre::{ - eyre, - eyre::WrapErr as _, -}; use bytes::Bytes; use sequencer_client::tendermint::block::Height as SequencerHeight; use tokio::sync::watch::{ @@ -22,8 +18,8 @@ use tokio::sync::watch::{ }; use tracing::instrument; -pub(super) fn channel() -> (StateSender, StateReceiver) { - let (tx, rx) = watch::channel(None); +pub(super) fn channel(state: State) -> (StateSender, StateReceiver) { + let (tx, rx) = watch::channel(state); let sender = StateSender { inner: tx, }; @@ -46,25 +42,14 @@ pub(super) struct InvalidState { } #[derive(Clone, Debug)] -pub(super) struct StateReceiver { - inner: watch::Receiver>, +pub(crate) struct StateReceiver { + inner: watch::Receiver, } impl StateReceiver { - #[instrument(skip_all, err)] - pub(super) async fn wait_for_init(&mut self) -> eyre::Result<()> { - self.inner - .wait_for(Option::is_some) - .await - .wrap_err("channel failed while waiting for state to become initialized")?; - Ok(()) - } - - pub(super) fn next_expected_firm_sequencer_height(&self) -> SequencerHeight { + pub(crate) fn 
next_expected_firm_sequencer_height(&self) -> SequencerHeight { self.inner .borrow() - .as_ref() - .expect("the state is initialized") .next_expected_firm_sequencer_height() .expect( "the tracked state must never be set to a genesis/commitment state that cannot be \ @@ -72,11 +57,9 @@ impl StateReceiver { ) } - pub(super) fn next_expected_soft_sequencer_height(&self) -> SequencerHeight { + pub(crate) fn next_expected_soft_sequencer_height(&self) -> SequencerHeight { self.inner .borrow() - .as_ref() - .expect("the state is initialized") .next_expected_soft_sequencer_height() .expect( "the tracked state must never be set to a genesis/commitment state that cannot be \ @@ -94,11 +77,11 @@ impl StateReceiver { } pub(super) struct StateSender { - inner: watch::Sender>, + inner: watch::Sender, } fn can_map_firm_to_sequencer_height( - genesis_info: GenesisInfo, + genesis_info: &GenesisInfo, commitment_state: &CommitmentState, ) -> Result<(), InvalidState> { let sequencer_genesis_height = genesis_info.sequencer_genesis_block_height(); @@ -115,7 +98,7 @@ fn can_map_firm_to_sequencer_height( } fn can_map_soft_to_sequencer_height( - genesis_info: GenesisInfo, + genesis_info: &GenesisInfo, commitment_state: &CommitmentState, ) -> Result<(), InvalidState> { let sequencer_genesis_height = genesis_info.sequencer_genesis_block_height(); @@ -132,21 +115,29 @@ fn can_map_soft_to_sequencer_height( } impl StateSender { - pub(super) fn try_init( - &mut self, - genesis_info: GenesisInfo, - commitment_state: CommitmentState, - ) -> Result<(), InvalidState> { - can_map_firm_to_sequencer_height(genesis_info, &commitment_state)?; - can_map_soft_to_sequencer_height(genesis_info, &commitment_state)?; - self.inner.send_modify(move |state| { - let old_state = state.replace(State::new(genesis_info, commitment_state)); - assert!( - old_state.is_none(), - "the state must be initialized only once", - ); - }); - Ok(()) + pub(super) fn subscribe(&self) -> StateReceiver { + StateReceiver { + inner: 
self.inner.subscribe(), + } + } + + /// Calculates the maximum allowed spread between firm and soft commitments heights. + /// + /// The maximum allowed spread is taken as `max_spread = variance * 6`, where `variance` + /// is the `celestia_block_variance` as defined in the rollup node's genesis that this + /// executor/conductor talks to. + /// + /// The heuristic 6 is the largest number of Sequencer heights that will be found at + /// one Celestia height. + /// + /// # Panics + /// Panics if the `u32` underlying the celestia block variance tracked in the state could + /// not be converted to a `usize`. This should never happen on any reasonable architecture + /// that Conductor will run on. + pub(super) fn calculate_max_spread(&self) -> usize { + usize::try_from(self.celestia_block_variance()) + .expect("converting a u32 to usize should work on any architecture conductor runs on") + .saturating_mul(6) } pub(super) fn try_update_commitment_state( @@ -154,26 +145,21 @@ impl StateSender { commitment_state: CommitmentState, ) -> Result<(), InvalidState> { let genesis_info = self.genesis_info(); - can_map_firm_to_sequencer_height(genesis_info, &commitment_state)?; - can_map_soft_to_sequencer_height(genesis_info, &commitment_state)?; + can_map_firm_to_sequencer_height(&genesis_info, &commitment_state)?; + can_map_soft_to_sequencer_height(&genesis_info, &commitment_state)?; self.inner.send_modify(move |state| { - state - .as_mut() - .expect("the state must be initialized") - .set_commitment_state(commitment_state); + state.set_commitment_state(commitment_state); }); Ok(()) } - pub(super) fn get(&self) -> tokio::sync::watch::Ref<'_, Option> { + pub(super) fn get(&self) -> tokio::sync::watch::Ref<'_, State> { self.inner.borrow() } pub(super) fn next_expected_firm_sequencer_height(&self) -> SequencerHeight { self.inner .borrow() - .as_ref() - .expect("the state is initialized") .next_expected_firm_sequencer_height() .expect( "the tracked state must never be set to a 
genesis/commitment state that cannot be \ @@ -184,8 +170,6 @@ impl StateSender { pub(super) fn next_expected_soft_sequencer_height(&self) -> SequencerHeight { self.inner .borrow() - .as_ref() - .expect("the state is initialized") .next_expected_soft_sequencer_height() .expect( "the tracked state must never be set to a genesis/commitment state that cannot be \ @@ -198,11 +182,9 @@ macro_rules! forward_impls { ($target:ident: $([$fn:ident -> $ret:ty]),*$(,)?) => { impl $target { $( - pub(super) fn $fn(&self) -> $ret { + pub(crate) fn $fn(&self) -> $ret { self.inner .borrow() - .as_ref() - .expect("the state is initialized") .$fn() .clone() } @@ -241,11 +223,16 @@ pub(super) struct State { } impl State { - fn new(genesis_info: GenesisInfo, commitment_state: CommitmentState) -> Self { - Self { + pub(super) fn try_from_genesis_info_and_commitment_state( + genesis_info: GenesisInfo, + commitment_state: CommitmentState, + ) -> Result { + can_map_firm_to_sequencer_height(&genesis_info, &commitment_state)?; + can_map_soft_to_sequencer_height(&genesis_info, &commitment_state)?; + Ok(State { commitment_state, genesis_info, - } + }) } /// Sets the inner commitment state. 
@@ -390,16 +377,21 @@ mod tests { .unwrap() } - fn make_state() -> (StateSender, StateReceiver) { - let (mut tx, rx) = super::channel(); - tx.try_init(make_genesis_info(), make_commitment_state()) - .unwrap(); - (tx, rx) + fn make_state() -> State { + State::try_from_genesis_info_and_commitment_state( + make_genesis_info(), + make_commitment_state(), + ) + .unwrap() + } + + fn make_channel() -> (StateSender, StateReceiver) { + super::channel(make_state()) } #[test] fn next_firm_sequencer_height_is_correct() { - let (_, rx) = make_state(); + let (_, rx) = make_channel(); assert_eq!( SequencerHeight::from(12u32), rx.next_expected_firm_sequencer_height(), @@ -408,7 +400,7 @@ mod tests { #[test] fn next_soft_sequencer_height_is_correct() { - let (_, rx) = make_state(); + let (_, rx) = make_channel(); assert_eq!( SequencerHeight::from(13u32), rx.next_expected_soft_sequencer_height(), diff --git a/crates/astria-conductor/src/executor/tests.rs b/crates/astria-conductor/src/executor/tests.rs index e8ead9dc7..a5206cb14 100644 --- a/crates/astria-conductor/src/executor/tests.rs +++ b/crates/astria-conductor/src/executor/tests.rs @@ -13,6 +13,7 @@ use bytes::Bytes; use super::{ should_execute_firm_block, state::{ + State, StateReceiver, StateSender, }, @@ -57,9 +58,9 @@ fn make_state( base_celestia_height: 1, }) .unwrap(); - let (mut tx, rx) = super::state::channel(); - tx.try_init(genesis_info, commitment_state).unwrap(); - (tx, rx) + let state = + State::try_from_genesis_info_and_commitment_state(genesis_info, commitment_state).unwrap(); + super::state::channel(state) } #[track_caller] diff --git a/crates/astria-conductor/src/sequencer/builder.rs b/crates/astria-conductor/src/sequencer/builder.rs index c71aa0e7d..a95b98e13 100644 --- a/crates/astria-conductor/src/sequencer/builder.rs +++ b/crates/astria-conductor/src/sequencer/builder.rs @@ -1,31 +1,36 @@ use std::time::Duration; +use astria_core::sequencerblock::v1::block::FilteredSequencerBlock; +use tokio::sync::mpsc; 
use tokio_util::sync::CancellationToken; use super::SequencerGrpcClient; -use crate::executor; +use crate::executor::StateReceiver; pub(crate) struct Builder { - pub(crate) executor: executor::Handle, pub(crate) sequencer_grpc_client: SequencerGrpcClient, pub(crate) sequencer_cometbft_client: sequencer_client::HttpClient, pub(crate) sequencer_block_time: Duration, pub(crate) expected_sequencer_chain_id: String, pub(crate) shutdown: CancellationToken, + pub(crate) rollup_state: StateReceiver, + pub(crate) soft_blocks: mpsc::Sender, } impl Builder { pub(crate) fn build(self) -> super::Reader { let Self { - executor, sequencer_grpc_client, sequencer_cometbft_client, sequencer_block_time, expected_sequencer_chain_id, shutdown, + rollup_state, + soft_blocks, } = self; super::Reader { - executor, + rollup_state, + soft_blocks, sequencer_grpc_client, sequencer_cometbft_client, sequencer_block_time, diff --git a/crates/astria-conductor/src/sequencer/mod.rs b/crates/astria-conductor/src/sequencer/mod.rs index df9d11b5a..c6006ecc5 100644 --- a/crates/astria-conductor/src/sequencer/mod.rs +++ b/crates/astria-conductor/src/sequencer/mod.rs @@ -25,7 +25,10 @@ use sequencer_client::{ LatestHeightStream, StreamLatestHeight as _, }; -use tokio::select; +use tokio::{ + select, + sync::mpsc, +}; use tokio_util::sync::CancellationToken; use tracing::{ debug, @@ -40,12 +43,6 @@ use tracing::{ use crate::{ block_cache::BlockCache, - executor::{ - self, - SoftSendError, - SoftTrySendError, - StateIsInit, - }, sequencer::block_stream::BlocksFromHeightStream, }; @@ -56,16 +53,15 @@ mod reporting; pub(crate) use builder::Builder; pub(crate) use client::SequencerGrpcClient; +use crate::executor::StateReceiver; + /// [`Reader`] reads Sequencer blocks and forwards them to the [`crate::Executor`] task. /// /// The blocks are forwarded in strictly sequential order of their Sequencr heights. /// A [`Reader`] is created with [`Builder::build`] and run with [`Reader::run_until_stopped`]. 
pub(crate) struct Reader { - /// The handle for sending sequencer blocks as soft commits to the executor - /// and checking it for the next expected height, and rollup ID associated with - /// this instance of Conductor. - /// Must be initialized before it can be used. - executor: executor::Handle, + rollup_state: StateReceiver, + soft_blocks: mpsc::Sender, /// The gRPC client to fetch new blocks from the Sequencer network. sequencer_grpc_client: SequencerGrpcClient, @@ -87,22 +83,22 @@ pub(crate) struct Reader { impl Reader { pub(crate) async fn run_until_stopped(mut self) -> eyre::Result<()> { - let executor = select!( + select!( () = self.shutdown.clone().cancelled_owned() => { return report_exit(Ok("received shutdown signal while waiting for Sequencer reader task to initialize"), ""); } res = self.initialize() => { - res? + res?; } ); - RunningReader::try_from_parts(self, executor) + RunningReader::try_from_parts(self) .wrap_err("failed entering run loop")? .run_until_stopped() .await } #[instrument(skip_all, err)] - async fn initialize(&mut self) -> eyre::Result> { + async fn initialize(&mut self) -> eyre::Result<()> { let actual_sequencer_chain_id = get_sequencer_chain_id(self.sequencer_cometbft_client.clone()) .await @@ -113,20 +109,13 @@ impl Reader { "expected chain id `{expected_sequencer_chain_id}` does not match actual: \ `{actual_sequencer_chain_id}`" ); - - self.executor - .wait_for_init() - .await - .wrap_err("handle to executor failed while waiting for it being initialized") + Ok(()) } } struct RunningReader { - /// The initialized handle to the executor task. - /// Used for sending sequencer blocks as soft commits to the executor - /// and checking it for the next expected height, and rollup ID associated with - /// this instance of Conductor. - executor: executor::Handle, + rollup_state: StateReceiver, + soft_blocks: mpsc::Sender, /// Caches the filtered sequencer blocks retrieved from the Sequencer. 
/// This cache will yield a block if it contains a block that matches the @@ -143,26 +132,26 @@ struct RunningReader { /// An enqueued block waiting for executor to free up. Set if the executor exhibits /// backpressure. - enqueued_block: Fuse>>, + enqueued_block: + Fuse>>>, /// Token to listen for Conductor being shut down. shutdown: CancellationToken, } impl RunningReader { - fn try_from_parts( - reader: Reader, - mut executor: executor::Handle, - ) -> eyre::Result { + fn try_from_parts(reader: Reader) -> eyre::Result { let Reader { sequencer_grpc_client, sequencer_cometbft_client, sequencer_block_time, shutdown, + rollup_state, + soft_blocks, .. } = reader; - let next_expected_height = executor.next_expected_soft_sequencer_height(); + let next_expected_height = rollup_state.next_expected_soft_sequencer_height(); let latest_height_stream = sequencer_cometbft_client.stream_latest_height(sequencer_block_time); @@ -171,14 +160,15 @@ impl RunningReader { .wrap_err("failed constructing sequential block cache")?; let blocks_from_heights = BlocksFromHeightStream::new( - executor.rollup_id(), + rollup_state.rollup_id(), next_expected_height, sequencer_grpc_client, ); let enqueued_block: Fuse>> = future::Fuse::terminated(); Ok(RunningReader { - executor, + rollup_state, + soft_blocks, block_cache, latest_height_stream, blocks_from_heights, @@ -215,7 +205,7 @@ impl RunningReader { } // Skip heights that executor has already executed (e.g. firm blocks from Celestia) - Ok(next_height) = self.executor.next_expected_soft_height_if_changed() => { + Ok(next_height) = self.rollup_state.next_expected_soft_height_if_changed() => { self.update_next_expected_height(next_height); } @@ -267,34 +257,18 @@ impl RunningReader { /// Enqueues the block is the channel to the executor is full, sending it once /// it frees up. 
fn send_to_executor(&mut self, block: FilteredSequencerBlock) -> eyre::Result<()> { - if let Err(err) = self.executor.try_send_soft_block(block) { + if let Err(err) = self.soft_blocks.try_send(block) { match err { - SoftTrySendError::Channel { - source, - } => match *source { - executor::channel::TrySendError::Closed(_) => { - bail!("could not send block to executor because its channel was closed"); - } - - executor::channel::TrySendError::NoPermits(block) => { - trace!( - "executor channel is full; scheduling block and stopping block fetch \ - until a slot opens up" - ); - self.enqueued_block = self - .executor - .clone() - .send_soft_block_owned(block) - .boxed() - .fuse(); - } - }, - - SoftTrySendError::NotSet => { - bail!( - "conductor was configured without soft commitments; the sequencer reader \ - task should have never been started", + mpsc::error::TrySendError::Full(block) => { + trace!( + "executor channel is full; scheduling block and stopping block fetch \ + until a slot opens up" ); + let chan = self.soft_blocks.clone(); + self.enqueued_block = async move { chan.send(block).await }.boxed().fuse(); + } + mpsc::error::TrySendError::Closed(_) => { + bail!("could not send block to executor because its channel was closed") } } } diff --git a/crates/astria-conductor/tests/blackbox/firm_only.rs b/crates/astria-conductor/tests/blackbox/firm_only.rs index e634292a1..f08271ce4 100644 --- a/crates/astria-conductor/tests/blackbox/firm_only.rs +++ b/crates/astria-conductor/tests/blackbox/firm_only.rs @@ -125,7 +125,7 @@ async fn simple() { .await .expect( "conductor should have executed the firm block and updated the firm commitment state \ - within 1000ms", + within 2000ms", ); } From da02eddb18e278dfc47e44f7a507e4cdf4a43918 Mon Sep 17 00:00:00 2001 From: Richard Janis Goldschmidt Date: Tue, 4 Feb 2025 17:31:25 +0100 Subject: [PATCH 18/23] chore(grpc-mock): make Times part of public API (#1945) ## Summary Makes the `astria_grpc_mock::mock::Times` part of the 
public API. ## Background Having `Times` (which itself is wrapper around `u64` or one of the Rust `Range*` types) part of the public API is useful when writing abstraction tests. ## Changes - Export `astria_grpc_mock::mock::Times` at the root of the crate. ## Testing Not applicable. The type is now available. ## Changelogs Changelogs updated. --- crates/astria-grpc-mock/CHANGELOG.md | 1 + crates/astria-grpc-mock/src/lib.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/crates/astria-grpc-mock/CHANGELOG.md b/crates/astria-grpc-mock/CHANGELOG.md index 4f22f5290..65a9002ff 100644 --- a/crates/astria-grpc-mock/CHANGELOG.md +++ b/crates/astria-grpc-mock/CHANGELOG.md @@ -11,4 +11,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- Export `Times` at the root of the crate. [#1945](https://github.com/astriaorg/astria/pull/1945) - Initial release. diff --git a/crates/astria-grpc-mock/src/lib.rs b/crates/astria-grpc-mock/src/lib.rs index feaeedc04..66d7811d9 100644 --- a/crates/astria-grpc-mock/src/lib.rs +++ b/crates/astria-grpc-mock/src/lib.rs @@ -20,6 +20,7 @@ mod verification; pub use mock::{ Match, Mock, + Times, }; pub use mock_server::{ MockGuard, From 397b32dcd8514ead4d11c5a0885dc52b12d976f3 Mon Sep 17 00:00:00 2001 From: Richard Janis Goldschmidt Date: Tue, 4 Feb 2025 20:00:49 +0100 Subject: [PATCH 19/23] chore: make @ethanoroshiba conductor codeowner (#1944) ## Summary Adds @ethanoroshiba to the `crates/astria-conductor` line in CODEOWNERS. 
--- CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index c07487cbc..7754ccc0a 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -5,7 +5,7 @@ astria-bridge-withdrawer/ @SuperFluffy @joroshiba astria-build-info/ @SuperFluffy @joroshiba astria-cli/ @SuperFluffy @joroshiba astria-composer/ @SuperFluffy @joroshiba -astria-conductor/ @SuperFluffy @joroshiba +astria-conductor/ @SuperFluffy @ethanoroshiba @joroshiba astria-config/ @SuperFluffy @joroshiba astria-core/ @SuperFluffy @fraser999 astria-core-address/ @SuperFluffy @fraser999 From cc54ddacfa5601b127d812e2b7936313307a5023 Mon Sep 17 00:00:00 2001 From: Amrik Ajimal Date: Thu, 6 Feb 2025 11:34:52 -0800 Subject: [PATCH 20/23] fix(ci): tag images with full commit sha (#1951) ## Summary Adds an additional tag to docker images corresponding to the full commit hash. ## Background Manually running github actions to build a docker image was not setting the full sha hash, which argocd uses to pull images. This should always set at least the full hash to the merge commit of a PR. ## Changes Updated the docker metadata step to include an additional tag ## Testing Manually with PR commit and triggering GH action. ## Changelogs "No updates required." 
--- .github/workflows/reusable-docker-build.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/reusable-docker-build.yml b/.github/workflows/reusable-docker-build.yml index 1e36dee0a..35b51b216 100644 --- a/.github/workflows/reusable-docker-build.yml +++ b/.github/workflows/reusable-docker-build.yml @@ -67,7 +67,8 @@ jobs: tags: | type=ref,event=pr type=match,pattern=refs/tags/${{ inputs.binary-name }}-v(.*),group=1,enable=${{ startsWith(env.FULL_REF, 'refs/tags/') }},value=${{ env.FULL_REF }} - type=sha + type=sha,format=short + type=sha,format=long # set the actual commit SHA from the PR head instead of from the PR merge commit (alternatively, we could checkout the PR head in actions/checkout) type=raw,value=sha-${{ github.event.pull_request.head.sha || github.sha }},enable=${{ startsWith(env.FULL_REF, 'refs/pull/') }} # set latest tag for `main` branch From 60eefa98cb70323186a78ace564e258c1a545979 Mon Sep 17 00:00:00 2001 From: Richard Janis Goldschmidt Date: Fri, 7 Feb 2025 13:44:47 +0100 Subject: [PATCH 21/23] feat(telemetry): emit span events for span construction, deletion (#1947) ## Summary Instructs the tracing formatting subscriber to emit span events for event construction and deletion. ## Background Developers frequently add an event at the beginning of a function to get information on whether a function is invoked. In tracing telemetry this is strictly speaking superfluous because the existence of a span already implies this information. If Astriia's services were primarily observed via an opentelemetry platform, then this issue wouldn't arise in the first place. But since events/logs are still consumed as plain text, we should mimick this behavior by emitting events when a particular unit of processing is started or finished. Note that this will lead to noisier logs, but that is a justifiable tradeoff because this information is frequently needed when debugging services. 
## Changes - Change Astria's human readable trace/event output to constructs events when spans are constructed or deleted (which usually means that an instrumented function or method is invoked or finished). ## Testing These changes can be observed when running a service locally. ## Changelogs Changelog updated. --- crates/astria-telemetry/CHANGELOG.md | 5 +++++ crates/astria-telemetry/src/lib.rs | 7 ++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/astria-telemetry/CHANGELOG.md b/crates/astria-telemetry/CHANGELOG.md index 4f22f5290..06404f76c 100644 --- a/crates/astria-telemetry/CHANGELOG.md +++ b/crates/astria-telemetry/CHANGELOG.md @@ -9,6 +9,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +### Changed + +- Emit events on span construction or deletion in human readable logs + [#1947](https://github.com/astriaorg/astria/pull/1947). + ### Added - Initial release. diff --git a/crates/astria-telemetry/src/lib.rs b/crates/astria-telemetry/src/lib.rs index 4028a513d..fdeabea36 100644 --- a/crates/astria-telemetry/src/lib.rs +++ b/crates/astria-telemetry/src/lib.rs @@ -34,6 +34,7 @@ use tracing_subscriber::{ LevelFilter, ParseError, }, + fmt::format::FmtSpan, layer::SubscriberExt as _, util::{ SubscriberInitExt as _, @@ -229,7 +230,11 @@ impl Config { let mut pretty_printer = None; if force_stdout || std::io::stdout().is_terminal() { if pretty_print { - pretty_printer = Some(tracing_subscriber::fmt::layer().compact()); + pretty_printer = Some( + tracing_subscriber::fmt::layer() + .compact() + .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE), + ); } else { tracer_provider = tracer_provider.with_simple_exporter( SpanExporter::builder() From 456beb00eb282c674207105090427743971fa658 Mon Sep 17 00:00:00 2001 From: noot <36753753+noot@users.noreply.github.com> Date: Tue, 11 Feb 2025 11:53:23 -0500 Subject: [PATCH 22/23] feat(sequencer)!: implement `BridgeTransfer` action (#1934) ## Summary 
implement `BridgeTransfer` action which transfers funds from one bridge account to another. essentially, it atomically performs a `BridgeUnlock` and a `BridgeLock`. ## Background this is functionality we want and `BridgeUnlock`ing to another bridge account wouldn't create a `Deposit` correctly. the issue is that `BridgeUnlock` does not contain all the information needed to create a deposit event, namely `destination_chain_address`, so `BridgeUnlock` directly into another bridge account doesn't work as a lock. `BridgeLock` from a bridge account to another also doesn't contain the desired withdrawal information (e.g. `rollup_withdrawal_event_id`). an atomic `BridgeUnlock`/`BridgeLock` combo would only work if the `BridgeUnlock` unlocks the funds to itself, providing `rollup_withdrawal_event_id`, and then `BridgeLock`s to the receiving account. `BridgeUnlock` cannot go directly into a different account as the bridge would no longer be able to access it. however this seems unwieldy as opposed to just having one action which makes the intention clear. ## Changes - slightly refactor `BridgeUnlock` to move checks to their own function - also slightly refactor `BridgeLock` to move execution and `Deposit` emission to their own function - implement `BridgeTransfer` action using existing `BridgeUnlock` checks and `BridgeLock` execution logic. ## Testing unit tests ## Changelogs Changelogs updated. ## Breaking Changelist - a new action was added, which is a breaking sequencer change.
## Related Issues closes #1921 --- crates/astria-core/CHANGELOG.md | 1 + .../src/generated/astria.protocol.fees.v1.rs | 15 ++ .../astria.protocol.fees.v1.serde.rs | 108 ++++++++ .../generated/astria.protocol.genesis.v1.rs | 4 + .../astria.protocol.genesis.v1.serde.rs | 18 ++ .../astria.protocol.transaction.v1.rs | 49 +++- .../astria.protocol.transaction.v1.serde.rs | 229 ++++++++++++++++ crates/astria-core/src/protocol/fees/v1.rs | 2 + ...ol__genesis__v1__tests__genesis_state.snap | 6 + crates/astria-core/src/protocol/genesis/v1.rs | 14 + .../transaction/v1/action/group/mod.rs | 3 + .../src/protocol/transaction/v1/action/mod.rs | 182 +++++++++++++ .../src/genesis_example.rs | 2 + crates/astria-sequencer/CHANGELOG.md | 1 + .../src/action_handler/impls/bridge_lock.rs | 117 +++++---- .../action_handler/impls/bridge_transfer.rs | 248 ++++++++++++++++++ .../src/action_handler/impls/bridge_unlock.rs | 71 ++--- .../src/action_handler/impls/fee_change.rs | 8 + .../src/action_handler/impls/mod.rs | 1 + .../src/action_handler/impls/transaction.rs | 7 + .../src/app/benchmark_and_test_utils.rs | 2 + ...breaking_changes__app_hash_at_genesis.snap | 62 ++--- ...hanges__app_hash_execute_every_action.snap | 60 ++--- ...king_changes__app_hash_finalize_block.snap | 58 ++-- crates/astria-sequencer/src/fees/mod.rs | 23 ++ crates/astria-sequencer/src/fees/query.rs | 10 + .../src/fees/storage/values.rs | 3 + .../astria-sequencer/src/service/info/mod.rs | 8 + .../src/transaction/checks.rs | 10 + .../astria/protocol/fees/v1/types.proto | 5 + .../astria/protocol/genesis/v1/types.proto | 1 + .../protocol/transaction/v1/action.proto | 26 ++ 32 files changed, 1178 insertions(+), 176 deletions(-) create mode 100644 crates/astria-sequencer/src/action_handler/impls/bridge_transfer.rs diff --git a/crates/astria-core/CHANGELOG.md b/crates/astria-core/CHANGELOG.md index c14c392d3..33db19f03 100644 --- a/crates/astria-core/CHANGELOG.md +++ b/crates/astria-core/CHANGELOG.md @@ -15,6 +15,7 @@ and this 
project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add method `TracePrefixed::leading_channel` to read the left-most channel of a trace prefixed ICS20 asset [#1768](https://github.com/astriaorg/astria/pull/1768). - Add `impl Protobuf for Address` [#1802](https://github.com/astriaorg/astria/pull/1802). +- Add `BridgeTransfer` action and `BridgeTransfer` variant to `FeeChange`. ### Changed diff --git a/crates/astria-core/src/generated/astria.protocol.fees.v1.rs b/crates/astria-core/src/generated/astria.protocol.fees.v1.rs index 8d03de6cd..e30935ec0 100644 --- a/crates/astria-core/src/generated/astria.protocol.fees.v1.rs +++ b/crates/astria-core/src/generated/astria.protocol.fees.v1.rs @@ -105,6 +105,21 @@ impl ::prost::Name for BridgeSudoChangeFeeComponents { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct BridgeTransferFeeComponents { + #[prost(message, optional, tag = "1")] + pub base: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub multiplier: ::core::option::Option, +} +impl ::prost::Name for BridgeTransferFeeComponents { + const NAME: &'static str = "BridgeTransferFeeComponents"; + const PACKAGE: &'static str = "astria.protocol.fees.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.protocol.fees.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct Ics20WithdrawalFeeComponents { #[prost(message, optional, tag = "1")] pub base: ::core::option::Option, diff --git a/crates/astria-core/src/generated/astria.protocol.fees.v1.serde.rs b/crates/astria-core/src/generated/astria.protocol.fees.v1.serde.rs index 90c71b03f..79a263cf1 100644 --- a/crates/astria-core/src/generated/astria.protocol.fees.v1.serde.rs +++ b/crates/astria-core/src/generated/astria.protocol.fees.v1.serde.rs @@ -214,6 +214,114 @@ impl<'de> serde::Deserialize<'de> for 
BridgeSudoChangeFeeComponents { deserializer.deserialize_struct("astria.protocol.fees.v1.BridgeSudoChangeFeeComponents", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for BridgeTransferFeeComponents { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.base.is_some() { + len += 1; + } + if self.multiplier.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.protocol.fees.v1.BridgeTransferFeeComponents", len)?; + if let Some(v) = self.base.as_ref() { + struct_ser.serialize_field("base", v)?; + } + if let Some(v) = self.multiplier.as_ref() { + struct_ser.serialize_field("multiplier", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for BridgeTransferFeeComponents { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "base", + "multiplier", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Base, + Multiplier, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "base" => Ok(GeneratedField::Base), + "multiplier" => Ok(GeneratedField::Multiplier), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for 
GeneratedVisitor { + type Value = BridgeTransferFeeComponents; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.protocol.fees.v1.BridgeTransferFeeComponents") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut base__ = None; + let mut multiplier__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::Base => { + if base__.is_some() { + return Err(serde::de::Error::duplicate_field("base")); + } + base__ = map_.next_value()?; + } + GeneratedField::Multiplier => { + if multiplier__.is_some() { + return Err(serde::de::Error::duplicate_field("multiplier")); + } + multiplier__ = map_.next_value()?; + } + } + } + Ok(BridgeTransferFeeComponents { + base: base__, + multiplier: multiplier__, + }) + } + } + deserializer.deserialize_struct("astria.protocol.fees.v1.BridgeTransferFeeComponents", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for BridgeUnlockFeeComponents { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result diff --git a/crates/astria-core/src/generated/astria.protocol.genesis.v1.rs b/crates/astria-core/src/generated/astria.protocol.genesis.v1.rs index 4978ffdd2..c2fbbd712 100644 --- a/crates/astria-core/src/generated/astria.protocol.genesis.v1.rs +++ b/crates/astria-core/src/generated/astria.protocol.genesis.v1.rs @@ -145,6 +145,10 @@ pub struct GenesisFees { pub validator_update: ::core::option::Option< super::super::fees::v1::ValidatorUpdateFeeComponents, >, + #[prost(message, optional, tag = "15")] + pub bridge_transfer: ::core::option::Option< + super::super::fees::v1::BridgeTransferFeeComponents, + >, } impl ::prost::Name for GenesisFees { const NAME: &'static str = "GenesisFees"; diff --git a/crates/astria-core/src/generated/astria.protocol.genesis.v1.serde.rs b/crates/astria-core/src/generated/astria.protocol.genesis.v1.serde.rs index d3b4b59d6..c6b0e4656 
100644 --- a/crates/astria-core/src/generated/astria.protocol.genesis.v1.serde.rs +++ b/crates/astria-core/src/generated/astria.protocol.genesis.v1.serde.rs @@ -517,6 +517,9 @@ impl serde::Serialize for GenesisFees { if self.validator_update.is_some() { len += 1; } + if self.bridge_transfer.is_some() { + len += 1; + } let mut struct_ser = serializer.serialize_struct("astria.protocol.genesis.v1.GenesisFees", len)?; if let Some(v) = self.bridge_lock.as_ref() { struct_ser.serialize_field("bridgeLock", v)?; @@ -560,6 +563,9 @@ impl serde::Serialize for GenesisFees { if let Some(v) = self.validator_update.as_ref() { struct_ser.serialize_field("validatorUpdate", v)?; } + if let Some(v) = self.bridge_transfer.as_ref() { + struct_ser.serialize_field("bridgeTransfer", v)?; + } struct_ser.end() } } @@ -597,6 +603,8 @@ impl<'de> serde::Deserialize<'de> for GenesisFees { "transfer", "validator_update", "validatorUpdate", + "bridge_transfer", + "bridgeTransfer", ]; #[allow(clippy::enum_variant_names)] @@ -615,6 +623,7 @@ impl<'de> serde::Deserialize<'de> for GenesisFees { SudoAddressChange, Transfer, ValidatorUpdate, + BridgeTransfer, } impl<'de> serde::Deserialize<'de> for GeneratedField { fn deserialize(deserializer: D) -> std::result::Result @@ -650,6 +659,7 @@ impl<'de> serde::Deserialize<'de> for GenesisFees { "sudoAddressChange" | "sudo_address_change" => Ok(GeneratedField::SudoAddressChange), "transfer" => Ok(GeneratedField::Transfer), "validatorUpdate" | "validator_update" => Ok(GeneratedField::ValidatorUpdate), + "bridgeTransfer" | "bridge_transfer" => Ok(GeneratedField::BridgeTransfer), _ => Err(serde::de::Error::unknown_field(value, FIELDS)), } } @@ -683,6 +693,7 @@ impl<'de> serde::Deserialize<'de> for GenesisFees { let mut sudo_address_change__ = None; let mut transfer__ = None; let mut validator_update__ = None; + let mut bridge_transfer__ = None; while let Some(k) = map_.next_key()? 
{ match k { GeneratedField::BridgeLock => { @@ -769,6 +780,12 @@ impl<'de> serde::Deserialize<'de> for GenesisFees { } validator_update__ = map_.next_value()?; } + GeneratedField::BridgeTransfer => { + if bridge_transfer__.is_some() { + return Err(serde::de::Error::duplicate_field("bridgeTransfer")); + } + bridge_transfer__ = map_.next_value()?; + } } } Ok(GenesisFees { @@ -786,6 +803,7 @@ impl<'de> serde::Deserialize<'de> for GenesisFees { sudo_address_change: sudo_address_change__, transfer: transfer__, validator_update: validator_update__, + bridge_transfer: bridge_transfer__, }) } } diff --git a/crates/astria-core/src/generated/astria.protocol.transaction.v1.rs b/crates/astria-core/src/generated/astria.protocol.transaction.v1.rs index 0b3862839..2d2fd3592 100644 --- a/crates/astria-core/src/generated/astria.protocol.transaction.v1.rs +++ b/crates/astria-core/src/generated/astria.protocol.transaction.v1.rs @@ -3,7 +3,7 @@ pub struct Action { #[prost( oneof = "action::Value", - tags = "1, 2, 11, 12, 13, 14, 21, 22, 50, 51, 52, 53, 55, 56" + tags = "1, 2, 11, 12, 13, 14, 15, 21, 22, 50, 51, 52, 53, 55, 56" )] pub value: ::core::option::Option, } @@ -26,6 +26,8 @@ pub mod action { BridgeUnlock(super::BridgeUnlock), #[prost(message, tag = "14")] BridgeSudoChange(super::BridgeSudoChange), + #[prost(message, tag = "15")] + BridgeTransfer(super::BridgeTransfer), /// IBC user actions are defined on 21-30 #[prost(message, tag = "21")] Ibc(::penumbra_proto::core::component::ibc::v1::IbcRelay), @@ -398,11 +400,52 @@ impl ::prost::Name for BridgeSudoChange { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct BridgeTransfer { + /// the address of the bridge account to transfer to + #[prost(message, optional, tag = "1")] + pub to: ::core::option::Option, + /// the amount to transfer + #[prost(message, optional, tag = "2")] + pub amount: ::core::option::Option, + /// the asset used to pay the transaction fee + 
#[prost(string, tag = "3")] + pub fee_asset: ::prost::alloc::string::String, + /// the address on the destination chain which + /// will receive the bridged funds + #[prost(string, tag = "4")] + pub destination_chain_address: ::prost::alloc::string::String, + /// the address of the bridge account to transfer from + #[prost(message, optional, tag = "5")] + pub bridge_address: ::core::option::Option< + super::super::super::primitive::v1::Address, + >, + /// The block number on the rollup that triggered this transfer. + #[prost(uint64, tag = "6")] + pub rollup_block_number: u64, + /// An identifier of the original rollup event, such as a transaction hash which + /// triggered a bridge unlock and is underlying event that led to this bridge + /// unlock. This can be utilized for tracing from the bridge back to + /// distinct rollup events. + /// + /// This field is of type `string` so that it can be formatted in the preferred + /// format of the rollup when targeting plain text encoding. + #[prost(string, tag = "7")] + pub rollup_withdrawal_event_id: ::prost::alloc::string::String, +} +impl ::prost::Name for BridgeTransfer { + const NAME: &'static str = "BridgeTransfer"; + const PACKAGE: &'static str = "astria.protocol.transaction.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.protocol.transaction.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct FeeChange { /// the new fee components values #[prost( oneof = "fee_change::FeeComponents", - tags = "1, 2, 3, 4, 5, 7, 6, 8, 9, 10, 11, 12, 13, 14" + tags = "1, 2, 3, 4, 5, 7, 6, 8, 9, 10, 11, 12, 13, 14, 15" )] pub fee_components: ::core::option::Option, } @@ -442,6 +485,8 @@ pub mod fee_change { Transfer(super::super::super::fees::v1::TransferFeeComponents), #[prost(message, tag = "14")] ValidatorUpdate(super::super::super::fees::v1::ValidatorUpdateFeeComponents), + #[prost(message, tag = "15")] + 
BridgeTransfer(super::super::super::fees::v1::BridgeTransferFeeComponents), } } impl ::prost::Name for FeeChange { diff --git a/crates/astria-core/src/generated/astria.protocol.transaction.v1.serde.rs b/crates/astria-core/src/generated/astria.protocol.transaction.v1.serde.rs index 4c77666d1..5f94d1867 100644 --- a/crates/astria-core/src/generated/astria.protocol.transaction.v1.serde.rs +++ b/crates/astria-core/src/generated/astria.protocol.transaction.v1.serde.rs @@ -30,6 +30,9 @@ impl serde::Serialize for Action { action::Value::BridgeSudoChange(v) => { struct_ser.serialize_field("bridgeSudoChange", v)?; } + action::Value::BridgeTransfer(v) => { + struct_ser.serialize_field("bridgeTransfer", v)?; + } action::Value::Ibc(v) => { struct_ser.serialize_field("ibc", v)?; } @@ -77,6 +80,8 @@ impl<'de> serde::Deserialize<'de> for Action { "bridgeUnlock", "bridge_sudo_change", "bridgeSudoChange", + "bridge_transfer", + "bridgeTransfer", "ibc", "ics20_withdrawal", "ics20Withdrawal", @@ -102,6 +107,7 @@ impl<'de> serde::Deserialize<'de> for Action { BridgeLock, BridgeUnlock, BridgeSudoChange, + BridgeTransfer, Ibc, Ics20Withdrawal, SudoAddressChange, @@ -137,6 +143,7 @@ impl<'de> serde::Deserialize<'de> for Action { "bridgeLock" | "bridge_lock" => Ok(GeneratedField::BridgeLock), "bridgeUnlock" | "bridge_unlock" => Ok(GeneratedField::BridgeUnlock), "bridgeSudoChange" | "bridge_sudo_change" => Ok(GeneratedField::BridgeSudoChange), + "bridgeTransfer" | "bridge_transfer" => Ok(GeneratedField::BridgeTransfer), "ibc" => Ok(GeneratedField::Ibc), "ics20Withdrawal" | "ics20_withdrawal" => Ok(GeneratedField::Ics20Withdrawal), "sudoAddressChange" | "sudo_address_change" => Ok(GeneratedField::SudoAddressChange), @@ -207,6 +214,13 @@ impl<'de> serde::Deserialize<'de> for Action { return Err(serde::de::Error::duplicate_field("bridgeSudoChange")); } value__ = map_.next_value::<::std::option::Option<_>>()?.map(action::Value::BridgeSudoChange) +; + } + GeneratedField::BridgeTransfer => { + 
if value__.is_some() { + return Err(serde::de::Error::duplicate_field("bridgeTransfer")); + } + value__ = map_.next_value::<::std::option::Option<_>>()?.map(action::Value::BridgeTransfer) ; } GeneratedField::Ibc => { @@ -582,6 +596,207 @@ impl<'de> serde::Deserialize<'de> for BridgeSudoChange { deserializer.deserialize_struct("astria.protocol.transaction.v1.BridgeSudoChange", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for BridgeTransfer { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.to.is_some() { + len += 1; + } + if self.amount.is_some() { + len += 1; + } + if !self.fee_asset.is_empty() { + len += 1; + } + if !self.destination_chain_address.is_empty() { + len += 1; + } + if self.bridge_address.is_some() { + len += 1; + } + if self.rollup_block_number != 0 { + len += 1; + } + if !self.rollup_withdrawal_event_id.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.protocol.transaction.v1.BridgeTransfer", len)?; + if let Some(v) = self.to.as_ref() { + struct_ser.serialize_field("to", v)?; + } + if let Some(v) = self.amount.as_ref() { + struct_ser.serialize_field("amount", v)?; + } + if !self.fee_asset.is_empty() { + struct_ser.serialize_field("feeAsset", &self.fee_asset)?; + } + if !self.destination_chain_address.is_empty() { + struct_ser.serialize_field("destinationChainAddress", &self.destination_chain_address)?; + } + if let Some(v) = self.bridge_address.as_ref() { + struct_ser.serialize_field("bridgeAddress", v)?; + } + if self.rollup_block_number != 0 { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("rollupBlockNumber", ToString::to_string(&self.rollup_block_number).as_str())?; + } + if !self.rollup_withdrawal_event_id.is_empty() { + struct_ser.serialize_field("rollupWithdrawalEventId", &self.rollup_withdrawal_event_id)?; + } + struct_ser.end() + } +} +impl<'de> 
serde::Deserialize<'de> for BridgeTransfer { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "to", + "amount", + "fee_asset", + "feeAsset", + "destination_chain_address", + "destinationChainAddress", + "bridge_address", + "bridgeAddress", + "rollup_block_number", + "rollupBlockNumber", + "rollup_withdrawal_event_id", + "rollupWithdrawalEventId", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + To, + Amount, + FeeAsset, + DestinationChainAddress, + BridgeAddress, + RollupBlockNumber, + RollupWithdrawalEventId, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "to" => Ok(GeneratedField::To), + "amount" => Ok(GeneratedField::Amount), + "feeAsset" | "fee_asset" => Ok(GeneratedField::FeeAsset), + "destinationChainAddress" | "destination_chain_address" => Ok(GeneratedField::DestinationChainAddress), + "bridgeAddress" | "bridge_address" => Ok(GeneratedField::BridgeAddress), + "rollupBlockNumber" | "rollup_block_number" => Ok(GeneratedField::RollupBlockNumber), + "rollupWithdrawalEventId" | "rollup_withdrawal_event_id" => Ok(GeneratedField::RollupWithdrawalEventId), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = BridgeTransfer; + + fn expecting(&self, 
formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.protocol.transaction.v1.BridgeTransfer") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut to__ = None; + let mut amount__ = None; + let mut fee_asset__ = None; + let mut destination_chain_address__ = None; + let mut bridge_address__ = None; + let mut rollup_block_number__ = None; + let mut rollup_withdrawal_event_id__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::To => { + if to__.is_some() { + return Err(serde::de::Error::duplicate_field("to")); + } + to__ = map_.next_value()?; + } + GeneratedField::Amount => { + if amount__.is_some() { + return Err(serde::de::Error::duplicate_field("amount")); + } + amount__ = map_.next_value()?; + } + GeneratedField::FeeAsset => { + if fee_asset__.is_some() { + return Err(serde::de::Error::duplicate_field("feeAsset")); + } + fee_asset__ = Some(map_.next_value()?); + } + GeneratedField::DestinationChainAddress => { + if destination_chain_address__.is_some() { + return Err(serde::de::Error::duplicate_field("destinationChainAddress")); + } + destination_chain_address__ = Some(map_.next_value()?); + } + GeneratedField::BridgeAddress => { + if bridge_address__.is_some() { + return Err(serde::de::Error::duplicate_field("bridgeAddress")); + } + bridge_address__ = map_.next_value()?; + } + GeneratedField::RollupBlockNumber => { + if rollup_block_number__.is_some() { + return Err(serde::de::Error::duplicate_field("rollupBlockNumber")); + } + rollup_block_number__ = + Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) + ; + } + GeneratedField::RollupWithdrawalEventId => { + if rollup_withdrawal_event_id__.is_some() { + return Err(serde::de::Error::duplicate_field("rollupWithdrawalEventId")); + } + rollup_withdrawal_event_id__ = Some(map_.next_value()?); + } + } + } + Ok(BridgeTransfer { + to: to__, + amount: amount__, + 
fee_asset: fee_asset__.unwrap_or_default(), + destination_chain_address: destination_chain_address__.unwrap_or_default(), + bridge_address: bridge_address__, + rollup_block_number: rollup_block_number__.unwrap_or_default(), + rollup_withdrawal_event_id: rollup_withdrawal_event_id__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("astria.protocol.transaction.v1.BridgeTransfer", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for BridgeUnlock { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result @@ -945,6 +1160,9 @@ impl serde::Serialize for FeeChange { fee_change::FeeComponents::ValidatorUpdate(v) => { struct_ser.serialize_field("validatorUpdate", v)?; } + fee_change::FeeComponents::BridgeTransfer(v) => { + struct_ser.serialize_field("bridgeTransfer", v)?; + } } } struct_ser.end() @@ -984,6 +1202,8 @@ impl<'de> serde::Deserialize<'de> for FeeChange { "transfer", "validator_update", "validatorUpdate", + "bridge_transfer", + "bridgeTransfer", ]; #[allow(clippy::enum_variant_names)] @@ -1002,6 +1222,7 @@ impl<'de> serde::Deserialize<'de> for FeeChange { SudoAddressChange, Transfer, ValidatorUpdate, + BridgeTransfer, } impl<'de> serde::Deserialize<'de> for GeneratedField { fn deserialize(deserializer: D) -> std::result::Result @@ -1037,6 +1258,7 @@ impl<'de> serde::Deserialize<'de> for FeeChange { "sudoAddressChange" | "sudo_address_change" => Ok(GeneratedField::SudoAddressChange), "transfer" => Ok(GeneratedField::Transfer), "validatorUpdate" | "validator_update" => Ok(GeneratedField::ValidatorUpdate), + "bridgeTransfer" | "bridge_transfer" => Ok(GeneratedField::BridgeTransfer), _ => Err(serde::de::Error::unknown_field(value, FIELDS)), } } @@ -1155,6 +1377,13 @@ impl<'de> serde::Deserialize<'de> for FeeChange { return Err(serde::de::Error::duplicate_field("validatorUpdate")); } fee_components__ = map_.next_value::<::std::option::Option<_>>()?.map(fee_change::FeeComponents::ValidatorUpdate) +; + } + 
GeneratedField::BridgeTransfer => { + if fee_components__.is_some() { + return Err(serde::de::Error::duplicate_field("bridgeTransfer")); + } + fee_components__ = map_.next_value::<::std::option::Option<_>>()?.map(fee_change::FeeComponents::BridgeTransfer) ; } } diff --git a/crates/astria-core/src/protocol/fees/v1.rs b/crates/astria-core/src/protocol/fees/v1.rs index fb5571e9b..3fdf425ec 100644 --- a/crates/astria-core/src/protocol/fees/v1.rs +++ b/crates/astria-core/src/protocol/fees/v1.rs @@ -16,6 +16,7 @@ use crate::{ protocol::transaction::v1::action::{ BridgeLock, BridgeSudoChange, + BridgeTransfer, BridgeUnlock, FeeAssetChange, FeeChange, @@ -100,6 +101,7 @@ impl_protobuf_for_fee_components!( FeeComponents => raw::InitBridgeAccountFeeComponents, FeeComponents => raw::BridgeLockFeeComponents, FeeComponents => raw::BridgeUnlockFeeComponents, + FeeComponents => raw::BridgeTransferFeeComponents, FeeComponents => raw::BridgeSudoChangeFeeComponents, FeeComponents => raw::ValidatorUpdateFeeComponents, FeeComponents => raw::IbcRelayerChangeFeeComponents, diff --git a/crates/astria-core/src/protocol/genesis/snapshots/astria_core__protocol__genesis__v1__tests__genesis_state.snap b/crates/astria-core/src/protocol/genesis/snapshots/astria_core__protocol__genesis__v1__tests__genesis_state.snap index 05a657196..14e975768 100644 --- a/crates/astria-core/src/protocol/genesis/snapshots/astria_core__protocol__genesis__v1__tests__genesis_state.snap +++ b/crates/astria-core/src/protocol/genesis/snapshots/astria_core__protocol__genesis__v1__tests__genesis_state.snap @@ -131,6 +131,12 @@ expression: genesis_state() "validatorUpdate": { "base": {}, "multiplier": {} + }, + "bridgeTransfer": { + "base": { + "lo": "24" + }, + "multiplier": {} } } } diff --git a/crates/astria-core/src/protocol/genesis/v1.rs b/crates/astria-core/src/protocol/genesis/v1.rs index d6102f23d..da793345f 100644 --- a/crates/astria-core/src/protocol/genesis/v1.rs +++ 
b/crates/astria-core/src/protocol/genesis/v1.rs @@ -24,6 +24,7 @@ use crate::{ transaction::v1::action::{ BridgeLock, BridgeSudoChange, + BridgeTransfer, BridgeUnlock, FeeAssetChange, FeeChange, @@ -585,6 +586,7 @@ pub struct GenesisFees { pub init_bridge_account: Option>, pub bridge_lock: Option>, pub bridge_unlock: Option>, + pub bridge_transfer: Option>, pub bridge_sudo_change: Option>, pub ibc_relay: Option>, pub validator_update: Option>, @@ -611,6 +613,7 @@ impl Protobuf for GenesisFees { init_bridge_account, bridge_lock, bridge_unlock, + bridge_transfer, bridge_sudo_change, ibc_relay, validator_update, @@ -656,6 +659,12 @@ impl Protobuf for GenesisFees { .transpose() .map_err(|e| FeesError::fee_components("bridge_unlock", e))?; + let bridge_transfer = bridge_transfer + .clone() + .map(FeeComponents::::try_from_raw) + .transpose() + .map_err(|e| FeesError::fee_components("bridge_transfer", e))?; + let bridge_sudo_change = bridge_sudo_change .clone() .map(FeeComponents::::try_from_raw) @@ -712,6 +721,7 @@ impl Protobuf for GenesisFees { init_bridge_account, bridge_lock, bridge_unlock, + bridge_transfer, bridge_sudo_change, ibc_relay, validator_update, @@ -731,6 +741,7 @@ impl Protobuf for GenesisFees { init_bridge_account, bridge_lock, bridge_unlock, + bridge_transfer, bridge_sudo_change, ibc_relay, validator_update, @@ -750,6 +761,8 @@ impl Protobuf for GenesisFees { .map(|act| FeeComponents::::to_raw(&act)), bridge_lock: bridge_lock.map(|act| FeeComponents::::to_raw(&act)), bridge_unlock: bridge_unlock.map(|act| FeeComponents::::to_raw(&act)), + bridge_transfer: bridge_transfer + .map(|act| FeeComponents::::to_raw(&act)), bridge_sudo_change: bridge_sudo_change .map(|act| FeeComponents::::to_raw(&act)), ibc_relay: ibc_relay.map(|act| FeeComponents::::to_raw(&act)), @@ -877,6 +890,7 @@ mod tests { init_bridge_account: Some(FeeComponents::::new(48, 0).to_raw()), bridge_lock: Some(FeeComponents::::new(12, 1).to_raw()), bridge_unlock: 
Some(FeeComponents::::new(12, 0).to_raw()), + bridge_transfer: Some(FeeComponents::::new(24, 0).to_raw()), bridge_sudo_change: Some(FeeComponents::::new(24, 0).to_raw()), ics20_withdrawal: Some(FeeComponents::::new(24, 0).to_raw()), ibc_relay: Some(FeeComponents::::new(0, 0).to_raw()), diff --git a/crates/astria-core/src/protocol/transaction/v1/action/group/mod.rs b/crates/astria-core/src/protocol/transaction/v1/action/group/mod.rs index c14517680..1a94f1dca 100644 --- a/crates/astria-core/src/protocol/transaction/v1/action/group/mod.rs +++ b/crates/astria-core/src/protocol/transaction/v1/action/group/mod.rs @@ -13,6 +13,7 @@ use super::{ ActionName, BridgeLock, BridgeSudoChange, + BridgeTransfer, BridgeUnlock, FeeAssetChange, FeeChange, @@ -51,6 +52,7 @@ impl_belong_to_group!( (BridgeLock, Group::BundleableGeneral), (BridgeUnlock, Group::BundleableGeneral), (BridgeSudoChange, Group::UnbundleableGeneral), + (BridgeTransfer, Group::BundleableGeneral), (FeeChange, Group::BundleableSudo), (FeeAssetChange, Group::BundleableSudo), (IbcRelay, Group::BundleableGeneral), @@ -70,6 +72,7 @@ impl Action { Action::BridgeLock(_) => BridgeLock::GROUP, Action::BridgeUnlock(_) => BridgeUnlock::GROUP, Action::BridgeSudoChange(_) => BridgeSudoChange::GROUP, + Action::BridgeTransfer(_) => BridgeTransfer::GROUP, Action::FeeChange(_) => FeeChange::GROUP, Action::FeeAssetChange(_) => FeeAssetChange::GROUP, Action::Ibc(_) => IbcRelay::GROUP, diff --git a/crates/astria-core/src/protocol/transaction/v1/action/mod.rs b/crates/astria-core/src/protocol/transaction/v1/action/mod.rs index 5c2b93745..35330c48f 100644 --- a/crates/astria-core/src/protocol/transaction/v1/action/mod.rs +++ b/crates/astria-core/src/protocol/transaction/v1/action/mod.rs @@ -50,6 +50,7 @@ pub enum Action { BridgeLock(BridgeLock), BridgeUnlock(BridgeUnlock), BridgeSudoChange(BridgeSudoChange), + BridgeTransfer(BridgeTransfer), FeeChange(FeeChange), } @@ -74,6 +75,7 @@ impl Protobuf for Action { Action::BridgeLock(act) 
=> Value::BridgeLock(act.to_raw()), Action::BridgeUnlock(act) => Value::BridgeUnlock(act.to_raw()), Action::BridgeSudoChange(act) => Value::BridgeSudoChange(act.to_raw()), + Action::BridgeTransfer(act) => Value::BridgeTransfer(act.to_raw()), Action::FeeChange(act) => Value::FeeChange(act.to_raw()), }; raw::Action { @@ -145,6 +147,9 @@ impl Protobuf for Action { Value::BridgeSudoChange(act) => Self::BridgeSudoChange( BridgeSudoChange::try_from_raw(act).map_err(Error::bridge_sudo_change)?, ), + Value::BridgeTransfer(act) => Self::BridgeTransfer( + BridgeTransfer::try_from_raw(act).map_err(Error::bridge_transfer)?, + ), Value::FeeChange(act) => { Self::FeeChange(FeeChange::try_from_raw_ref(&act).map_err(Error::fee_change)?) } @@ -252,6 +257,12 @@ impl From for Action { } } +impl From for Action { + fn from(value: BridgeTransfer) -> Self { + Self::BridgeTransfer(value) + } +} + impl From for Action { fn from(value: FeeChange) -> Self { Self::FeeChange(value) @@ -294,6 +305,7 @@ impl ActionName for Action { Action::BridgeLock(_) => "BridgeLock", Action::BridgeUnlock(_) => "BridgeUnlock", Action::BridgeSudoChange(_) => "BridgeSudoChange", + Action::BridgeTransfer(_) => "BridgeTransfer", Action::FeeChange(_) => "FeeChange", } } @@ -360,6 +372,10 @@ impl Error { Self(ActionErrorKind::BridgeSudoChange(inner)) } + fn bridge_transfer(inner: BridgeTransferError) -> Self { + Self(ActionErrorKind::BridgeTransfer(inner)) + } + fn fee_change(inner: FeeChangeError) -> Self { Self(ActionErrorKind::FeeChange(inner)) } @@ -395,6 +411,8 @@ enum ActionErrorKind { BridgeUnlock(#[source] BridgeUnlockError), #[error("bridge sudo change action was not valid")] BridgeSudoChange(#[source] BridgeSudoChangeError), + #[error("bridge transfer action was not valid")] + BridgeTransfer(#[source] BridgeTransferError), #[error("fee change action was not valid")] FeeChange(#[source] FeeChangeError), } @@ -1894,6 +1912,155 @@ enum BridgeSudoChangeErrorKind { InvalidFeeAsset(#[source] 
asset::ParseDenomError), } +#[derive(Debug, Clone)] +pub struct BridgeTransfer { + pub to: Address, + pub amount: u128, + // asset to use for fee payment. + pub fee_asset: asset::Denom, + // the address on the destination chain to send the transfer to. + pub destination_chain_address: String, + // the address of the bridge account to transfer from. + pub bridge_address: Address, + // The block number of the rollup block containing the withdrawal event. + pub rollup_block_number: u64, + // The identifier of the withdrawal event in the rollup block. + pub rollup_withdrawal_event_id: String, +} + +impl Protobuf for BridgeTransfer { + type Error = BridgeTransferError; + type Raw = raw::BridgeTransfer; + + #[must_use] + fn into_raw(self) -> raw::BridgeTransfer { + raw::BridgeTransfer { + to: Some(self.to.into_raw()), + amount: Some(self.amount.into()), + fee_asset: self.fee_asset.to_string(), + bridge_address: Some(self.bridge_address.into_raw()), + destination_chain_address: self.destination_chain_address, + rollup_block_number: self.rollup_block_number, + rollup_withdrawal_event_id: self.rollup_withdrawal_event_id, + } + } + + #[must_use] + fn to_raw(&self) -> raw::BridgeTransfer { + raw::BridgeTransfer { + to: Some(self.to.to_raw()), + amount: Some(self.amount.into()), + fee_asset: self.fee_asset.to_string(), + bridge_address: Some(self.bridge_address.to_raw()), + destination_chain_address: self.destination_chain_address.clone(), + rollup_block_number: self.rollup_block_number, + rollup_withdrawal_event_id: self.rollup_withdrawal_event_id.clone(), + } + } + + /// Convert from a raw, unchecked protobuf [`raw::BridgeTransferAction`]. 
+ /// + /// # Errors + /// + /// - if the `to` field is not set + /// - if the `to` field is invalid + /// - if the `amount` field is invalid + /// - if the `fee_asset` field is invalid + /// - if the `from` field is invalid + /// - if `destination_chain_address` is not set + fn try_from_raw(proto: raw::BridgeTransfer) -> Result { + let raw::BridgeTransfer { + to, + amount, + fee_asset, + bridge_address, + destination_chain_address, + rollup_block_number, + rollup_withdrawal_event_id, + } = proto; + let to = to + .ok_or_else(|| BridgeTransferError::field_not_set("to")) + .and_then(|to| Address::try_from_raw(to).map_err(BridgeTransferError::address))?; + let amount = amount.ok_or_else(|| BridgeTransferError::field_not_set("amount"))?; + let fee_asset = fee_asset.parse().map_err(BridgeTransferError::fee_asset)?; + if destination_chain_address.is_empty() { + return Err(BridgeTransferError::field_not_set( + "destination_chain_address", + )); + } + + let bridge_address = bridge_address + .ok_or_else(|| BridgeTransferError::field_not_set("bridge_address")) + .and_then(|to| { + Address::try_from_raw(to).map_err(BridgeTransferError::bridge_address) + })?; + Ok(Self { + to, + amount: amount.into(), + fee_asset, + bridge_address, + destination_chain_address, + rollup_block_number, + rollup_withdrawal_event_id, + }) + } + + /// Convert from a reference to a raw, unchecked protobuf [`raw::BridgeTransferAction`]. 
+ /// # Errors + /// - if the `to` field is not set + /// - if the `to` field is invalid + /// - if the `amount` field is invalid + /// - if the `fee_asset` field is invalid + /// - if the `from` field is invalid + fn try_from_raw_ref(proto: &raw::BridgeTransfer) -> Result { + Self::try_from_raw(proto.clone()) + } +} + +#[derive(Debug, thiserror::Error)] +#[error(transparent)] +pub struct BridgeTransferError(BridgeTransferErrorKind); + +impl BridgeTransferError { + #[must_use] + fn field_not_set(field: &'static str) -> Self { + Self(BridgeTransferErrorKind::FieldNotSet(field)) + } + + #[must_use] + fn address(source: AddressError) -> Self { + Self(BridgeTransferErrorKind::Address { + source, + }) + } + + #[must_use] + fn fee_asset(source: asset::ParseDenomError) -> Self { + Self(BridgeTransferErrorKind::FeeAsset { + source, + }) + } + + #[must_use] + fn bridge_address(source: AddressError) -> Self { + Self(BridgeTransferErrorKind::BridgeAddress { + source, + }) + } +} + +#[derive(Debug, thiserror::Error)] +enum BridgeTransferErrorKind { + #[error("the expected field in the raw source type was not set: `{0}`")] + FieldNotSet(&'static str), + #[error("the `to` field was invalid")] + Address { source: AddressError }, + #[error("the `fee_asset` field was invalid")] + FeeAsset { source: asset::ParseDenomError }, + #[error("the `bridge_address` field was invalid")] + BridgeAddress { source: AddressError }, +} + #[derive(Debug, thiserror::Error)] #[error(transparent)] pub struct FeeChangeError(FeeChangeErrorKind); @@ -1943,6 +2110,7 @@ pub enum FeeChange { IbcRelayerChange(FeeComponents), SudoAddressChange(FeeComponents), IbcSudoChange(FeeComponents), + BridgeTransfer(FeeComponents), } impl Protobuf for FeeChange { @@ -1995,6 +2163,9 @@ impl Protobuf for FeeChange { Self::IbcSudoChange(fee_change) => { raw::fee_change::FeeComponents::IbcSudoChange(fee_change.to_raw()) } + Self::BridgeTransfer(fee_change) => { + 
raw::fee_change::FeeComponents::BridgeTransfer(fee_change.to_raw()) + } }), } } @@ -2065,6 +2236,11 @@ impl Protobuf for FeeChange { Some(raw::fee_change::FeeComponents::IbcSudoChange(fee_change)) => Self::IbcSudoChange( FeeComponents::::try_from_raw_ref(fee_change)?, ), + Some(raw::fee_change::FeeComponents::BridgeTransfer(fee_change)) => { + Self::BridgeTransfer(FeeComponents::::try_from_raw_ref( + fee_change, + )?) + } None => return Err(FeeChangeError::field_unset("fee_components")), }) } @@ -2153,3 +2329,9 @@ impl From> for FeeChange { FeeChange::IbcSudoChange(fee) } } + +impl From> for FeeChange { + fn from(fee: FeeComponents) -> Self { + FeeChange::BridgeTransfer(fee) + } +} diff --git a/crates/astria-sequencer-utils/src/genesis_example.rs b/crates/astria-sequencer-utils/src/genesis_example.rs index 12a09ca70..f672cd12b 100644 --- a/crates/astria-sequencer-utils/src/genesis_example.rs +++ b/crates/astria-sequencer-utils/src/genesis_example.rs @@ -20,6 +20,7 @@ use astria_core::{ transaction::v1::action::{ BridgeLock, BridgeSudoChange, + BridgeTransfer, BridgeUnlock, FeeAssetChange, FeeChange, @@ -114,6 +115,7 @@ fn proto_genesis_state() -> astria_core::generated::astria::protocol::genesis::v init_bridge_account: Some(FeeComponents::::new(48, 0).to_raw()), bridge_lock: Some(FeeComponents::::new(12, 1).to_raw()), bridge_unlock: Some(FeeComponents::::new(12, 0).to_raw()), + bridge_transfer: Some(FeeComponents::::new(24, 0).to_raw()), bridge_sudo_change: Some(FeeComponents::::new(24, 0).to_raw()), ics20_withdrawal: Some(FeeComponents::::new(24, 0).to_raw()), ibc_relay: Some(FeeComponents::::new(0, 0).to_raw()), diff --git a/crates/astria-sequencer/CHANGELOG.md b/crates/astria-sequencer/CHANGELOG.md index 9dd713e98..80192e36f 100644 --- a/crates/astria-sequencer/CHANGELOG.md +++ b/crates/astria-sequencer/CHANGELOG.md @@ -20,6 +20,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Use bridge address to determine asset in bridge 
unlock cost estimation instead of signer [#1905](https://github.com/astriaorg/astria/pull/1905). - Add more thorough unit tests for all actions [#1916](https://github.com/astriaorg/astria/pull/1916). +- Implement `BridgeTransfer` action [#1934](https://github.com/astriaorg/astria/pull/1934). ## [1.0.0] - 2024-10-25 diff --git a/crates/astria-sequencer/src/action_handler/impls/bridge_lock.rs b/crates/astria-sequencer/src/action_handler/impls/bridge_lock.rs index 27ec288c8..f33ac8287 100644 --- a/crates/astria-sequencer/src/action_handler/impls/bridge_lock.rs +++ b/crates/astria-sequencer/src/action_handler/impls/bridge_lock.rs @@ -1,4 +1,5 @@ use astria_core::{ + primitive::v1::asset::Denom, protocol::transaction::v1::action::{ BridgeLock, Transfer, @@ -41,75 +42,85 @@ impl ActionHandler for BridgeLock { } #[instrument(skip_all, err(level = Level::DEBUG))] - async fn check_and_execute(&self, mut state: S) -> Result<()> { - let from = state - .get_transaction_context() - .expect("transaction source must be present in state when executing an action") - .address_bytes(); + async fn check_and_execute(&self, state: S) -> Result<()> { state .ensure_base_prefix(&self.to) .await .wrap_err("failed check for base prefix of destination address")?; - // ensure the recipient is a bridge account. - let rollup_id = state - .get_bridge_account_rollup_id(&self.to) - .await - .wrap_err("failed to get bridge account rollup id")? - .ok_or_eyre("bridge lock must be sent to a bridge account")?; + // check that the asset to be transferred matches the bridge account asset. + // this also implicitly ensures the recipient is a bridge account. 
let allowed_asset = state .get_bridge_account_ibc_asset(&self.to) .await - .wrap_err("failed to get bridge account asset ID")?; + .wrap_err("failed to get bridge account asset ID; account is not a bridge account")?; ensure!( allowed_asset == self.asset.to_ibc_prefixed(), "asset ID is not authorized for transfer to bridge account", ); - let source_transaction_id = state - .get_transaction_context() - .expect("current source should be set before executing action") - .transaction_id; - let source_action_index = state - .get_transaction_context() - .expect("current source should be set before executing action") - .position_in_transaction; - - // map asset to trace prefixed asset for deposit, if it is not already - let deposit_asset = match self.asset.as_trace_prefixed() { - Some(asset) => asset.clone(), - None => state - .map_ibc_to_trace_prefixed_asset(&allowed_asset) - .await - .wrap_err("failed to map IBC asset to trace prefixed asset")? - .ok_or_eyre("mapping from IBC prefixed bridge asset to trace prefixed not found")?, - }; + execute_bridge_lock(self, state).await?; + Ok(()) + } +} - let deposit = Deposit { - bridge_address: self.to, - rollup_id, - amount: self.amount, - asset: deposit_asset.into(), - destination_chain_address: self.destination_chain_address.clone(), - source_transaction_id, - source_action_index, - }; - let deposit_abci_event = create_deposit_event(&deposit); +pub(super) async fn execute_bridge_lock( + bridge_lock: &BridgeLock, + mut state: S, +) -> Result<()> { + let from = state + .get_transaction_context() + .expect("transaction source must be present in state when executing an action") + .address_bytes(); + let rollup_id = state + .get_bridge_account_rollup_id(&bridge_lock.to) + .await + .wrap_err("failed to get bridge account rollup id")? 
+ .ok_or_eyre("bridge lock must be sent to a bridge account")?; + + let source_transaction_id = state + .get_transaction_context() + .expect("current source should be set before executing action") + .transaction_id; + let source_action_index = state + .get_transaction_context() + .expect("current source should be set before executing action") + .position_in_transaction; + + // map asset to trace prefixed asset for deposit, if it is not already + let deposit_asset = match &bridge_lock.asset { + Denom::TracePrefixed(asset) => asset.clone(), + Denom::IbcPrefixed(asset) => state + .map_ibc_to_trace_prefixed_asset(asset) + .await + .wrap_err("failed to map IBC asset to trace prefixed asset")? + .ok_or_eyre("mapping from IBC prefixed bridge asset to trace prefixed not found")?, + }; - let transfer_action = Transfer { - to: self.to, - asset: self.asset.clone(), - amount: self.amount, - fee_asset: self.fee_asset.clone(), - }; + let deposit = Deposit { + bridge_address: bridge_lock.to, + rollup_id, + amount: bridge_lock.amount, + asset: deposit_asset.into(), + destination_chain_address: bridge_lock.destination_chain_address.clone(), + source_transaction_id, + source_action_index, + }; + let deposit_abci_event = create_deposit_event(&deposit); - check_transfer(&transfer_action, &from, &state).await?; - execute_transfer(&transfer_action, &from, &mut state).await?; + let transfer_action = Transfer { + to: bridge_lock.to, + asset: bridge_lock.asset.clone(), + amount: bridge_lock.amount, + fee_asset: bridge_lock.fee_asset.clone(), + }; - state.cache_deposit_event(deposit); - state.record(deposit_abci_event); - Ok(()) - } + check_transfer(&transfer_action, &from, &state).await?; + execute_transfer(&transfer_action, &from, &mut state).await?; + + state.cache_deposit_event(deposit); + state.record(deposit_abci_event); + Ok(()) } #[cfg(test)] @@ -227,7 +238,7 @@ mod tests { .check_and_execute(&mut state) .await .unwrap_err(), - "bridge lock must be sent to a bridge account", + 
"failed to get bridge account asset ID; account is not a bridge account", ); } diff --git a/crates/astria-sequencer/src/action_handler/impls/bridge_transfer.rs b/crates/astria-sequencer/src/action_handler/impls/bridge_transfer.rs new file mode 100644 index 000000000..946743b9b --- /dev/null +++ b/crates/astria-sequencer/src/action_handler/impls/bridge_transfer.rs @@ -0,0 +1,248 @@ +use astria_core::protocol::transaction::v1::action::{ + BridgeLock, + BridgeTransfer, + BridgeUnlock, +}; +use astria_eyre::eyre::{ + ensure, + Result, + WrapErr as _, +}; +use async_trait::async_trait; +use cnidarium::StateWrite; +use tracing::{ + instrument, + Level, +}; + +use crate::{ + action_handler::{ + impls::{ + bridge_lock::execute_bridge_lock, + bridge_unlock::check_bridge_unlock, + }, + ActionHandler, + }, + bridge::{ + StateReadExt as _, + StateWriteExt as _, + }, +}; + +#[async_trait] +impl ActionHandler for BridgeTransfer { + async fn check_stateless(&self) -> Result<()> { + let bridge_unlock = BridgeUnlock { + to: self.to, + amount: self.amount, + memo: String::new(), + rollup_withdrawal_event_id: self.rollup_withdrawal_event_id.clone(), + rollup_block_number: self.rollup_block_number, + fee_asset: self.fee_asset.clone(), + bridge_address: self.bridge_address, + }; + bridge_unlock.check_stateless().await?; + Ok(()) + } + + #[instrument(skip_all, err(level = Level::DEBUG))] + async fn check_and_execute(&self, mut state: S) -> Result<()> { + // first, check that the bridge unlock is valid + let bridge_unlock = BridgeUnlock { + to: self.to, + amount: self.amount, + memo: String::new(), + rollup_withdrawal_event_id: self.rollup_withdrawal_event_id.clone(), + rollup_block_number: self.rollup_block_number, + fee_asset: self.fee_asset.clone(), + bridge_address: self.bridge_address, + }; + check_bridge_unlock(&bridge_unlock, &state).await?; + + // check that the assets for both bridge accounts match + // also implicitly checks that both accounts are bridge accounts, as + // only 
bridge accounts have an associated asset set + let from_asset = state + .get_bridge_account_ibc_asset(&self.bridge_address) + .await + .wrap_err("failed to get bridge's asset id, must be a bridge account")?; + let to_asset = state + .get_bridge_account_ibc_asset(&self.to) + .await + .wrap_err("failed to get bridge's asset id, must be a bridge account")?; + ensure!( + from_asset == to_asset, + "bridge accounts must have the same asset", + ); + + state + .check_and_set_withdrawal_event_block_for_bridge_account( + &self.bridge_address, + &self.rollup_withdrawal_event_id, + self.rollup_block_number, + ) + .await + .context("withdrawal event already processed")?; + + // execute the actual transfer as a BridgeLock + let bridge_lock = BridgeLock { + to: self.to, + asset: from_asset.into(), + amount: self.amount, + fee_asset: self.fee_asset.clone(), + destination_chain_address: self.destination_chain_address.clone(), + }; + execute_bridge_lock(&bridge_lock, state).await?; + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use astria_core::{ + primitive::v1::{ + asset::Denom, + RollupId, + TransactionId, + }, + protocol::transaction::v1::action::BridgeTransfer, + }; + use cnidarium::StateDelta; + + use super::*; + use crate::{ + accounts::{ + AddressBytes, + StateWriteExt, + }, + action_handler::impls::test_utils::test_asset, + address::StateWriteExt as _, + assets::StateWriteExt as _, + benchmark_and_test_utils::{ + assert_eyre_error, + astria_address, + ASTRIA_PREFIX, + }, + transaction::{ + StateWriteExt as _, + TransactionContext, + }, + }; + + #[tokio::test] + async fn bridge_transfer_ok() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let from_address = astria_address(&[1; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: *from_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + 
state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + + let asset = test_asset(); + let transfer_amount = 100; + + let to_address = astria_address(&[2; 20]); + state + .put_bridge_account_ibc_asset(&from_address, &asset) + .unwrap(); + state + .put_bridge_account_withdrawer_address(&from_address, from_address) + .unwrap(); + state + .put_bridge_account_ibc_asset(&to_address, &asset) + .unwrap(); + let to_rollup_id = RollupId::new([3; 32]); + state + .put_bridge_account_rollup_id(&to_address, to_rollup_id) + .unwrap(); + state + .put_ibc_asset(test_asset().unwrap_trace_prefixed().clone()) + .unwrap(); + state + .put_account_balance(&from_address, &asset, transfer_amount) + .unwrap(); + + let bridge_unlock = BridgeTransfer { + to: to_address, + amount: transfer_amount, + fee_asset: asset.clone(), + bridge_address: from_address, + rollup_block_number: 1, + rollup_withdrawal_event_id: "a-rollup-defined-hash".to_string(), + destination_chain_address: "noot".to_string(), + }; + + bridge_unlock.check_stateless().await.unwrap(); + bridge_unlock.check_and_execute(&mut state).await.unwrap(); + + let deposits = state + .get_cached_block_deposits() + .values() + .next() + .unwrap() + .clone(); + assert_eq!(deposits.len(), 1); + } + + #[tokio::test] + async fn bridge_transfer_accounts_have_different_asset_fails() { + let storage = cnidarium::TempStorage::new().await.unwrap(); + let snapshot = storage.latest_snapshot(); + let mut state = StateDelta::new(snapshot); + + let from_address = astria_address(&[1; 20]); + state.put_transaction_context(TransactionContext { + address_bytes: *from_address.address_bytes(), + transaction_id: TransactionId::new([0; 32]), + position_in_transaction: 0, + }); + state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + + let asset = test_asset(); + let transfer_amount = 100; + + let to_address = astria_address(&[2; 20]); + state + .put_bridge_account_ibc_asset(&from_address, &asset) + .unwrap(); + state + 
.put_bridge_account_withdrawer_address(&from_address, from_address) + .unwrap(); + state + .put_bridge_account_ibc_asset(&to_address, "other-asset".parse::().unwrap()) + .unwrap(); + let to_rollup_id = RollupId::new([3; 32]); + state + .put_bridge_account_rollup_id(&to_address, to_rollup_id) + .unwrap(); + state + .put_ibc_asset(test_asset().unwrap_trace_prefixed().clone()) + .unwrap(); + state + .put_account_balance(&from_address, &asset, transfer_amount) + .unwrap(); + + let bridge_unlock = BridgeTransfer { + to: to_address, + amount: transfer_amount, + fee_asset: asset.clone(), + bridge_address: from_address, + rollup_block_number: 1, + rollup_withdrawal_event_id: "a-rollup-defined-hash".to_string(), + destination_chain_address: "noot".to_string(), + }; + + bridge_unlock.check_stateless().await.unwrap(); + let result = bridge_unlock.check_and_execute(state).await; + assert_eyre_error( + &result.unwrap_err(), + "bridge accounts must have the same asset", + ); + } +} diff --git a/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs b/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs index d5721f61f..94afabbef 100644 --- a/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs +++ b/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs @@ -9,7 +9,10 @@ use astria_eyre::eyre::{ WrapErr as _, }; use async_trait::async_trait; -use cnidarium::StateWrite; +use cnidarium::{ + StateRead, + StateWrite, +}; use tracing::{ instrument, Level, @@ -53,38 +56,13 @@ impl ActionHandler for BridgeUnlock { #[instrument(skip_all, err(level = Level::DEBUG))] async fn check_and_execute(&self, mut state: S) -> Result<()> { - let from = state - .get_transaction_context() - .expect("transaction source must be present in state when executing an action") - .address_bytes(); - state - .ensure_base_prefix(&self.to) - .await - .wrap_err("failed check for base prefix of destination address")?; - state - 
.ensure_base_prefix(&self.bridge_address) - .await - .wrap_err("failed check for base prefix of bridge address")?; + check_bridge_unlock(self, &state).await?; let asset = state .get_bridge_account_ibc_asset(&self.bridge_address) .await .wrap_err("failed to get bridge's asset id, must be a bridge account")?; - // check that the sender of this tx is the authorized withdrawer for the bridge account - let Some(withdrawer_address) = state - .get_bridge_account_withdrawer_address(&self.bridge_address) - .await - .wrap_err("failed to get bridge account withdrawer address")? - else { - bail!("bridge account does not have an associated withdrawer address"); - }; - - ensure!( - withdrawer_address == from, - "unauthorized to unlock bridge account", - ); - let transfer_action = Transfer { to: self.to, asset: asset.into(), @@ -107,6 +85,39 @@ impl ActionHandler for BridgeUnlock { } } +pub(super) async fn check_bridge_unlock( + bridge_unlock: &BridgeUnlock, + state: &S, +) -> Result<()> { + let from = state + .get_transaction_context() + .expect("transaction source must be present in state when executing an action") + .address_bytes(); + state + .ensure_base_prefix(&bridge_unlock.to) + .await + .wrap_err("failed check for base prefix of destination address")?; + state + .ensure_base_prefix(&bridge_unlock.bridge_address) + .await + .wrap_err("failed check for base prefix of bridge address")?; + + // check that the sender of this tx is the authorized withdrawer for the bridge account + let Some(withdrawer_address) = state + .get_bridge_account_withdrawer_address(&bridge_unlock.bridge_address) + .await + .wrap_err("failed to get bridge account withdrawer address")? 
+ else { + bail!("bridge account does not have an associated withdrawer address"); + }; + + ensure!( + withdrawer_address == from, + "unauthorized to unlock bridge account", + ); + Ok(()) +} + #[cfg(test)] mod tests { use astria_core::{ @@ -237,7 +248,6 @@ mod tests { let asset = test_asset(); let transfer_amount = 100; - let to_address = astria_address(&[2; 20]); let rollup_id = RollupId::from_unhashed_bytes(b"test_rollup_id"); @@ -250,9 +260,8 @@ mod tests { state .put_bridge_account_withdrawer_address(&bridge_address, bridge_address) .unwrap(); - // Put plenty of balance state - .put_account_balance(&bridge_address, &asset, 3 * transfer_amount) + .put_account_balance(&bridge_address, &asset, 2 * transfer_amount) .unwrap(); let bridge_unlock_first = BridgeUnlock { @@ -390,7 +399,7 @@ mod tests { assert_eyre_error( &action.check_and_execute(&mut state).await.unwrap_err(), - "failed to get bridge's asset id, must be a bridge account", + "bridge account does not have an associated withdrawer address", ); } } diff --git a/crates/astria-sequencer/src/action_handler/impls/fee_change.rs b/crates/astria-sequencer/src/action_handler/impls/fee_change.rs index 627d12c87..fa651f44b 100644 --- a/crates/astria-sequencer/src/action_handler/impls/fee_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/fee_change.rs @@ -82,6 +82,9 @@ impl ActionHandler for FeeChange { Self::IbcSudoChange(fees) => state .put_fees(*fees) .wrap_err("failed to put ibc sudo change fees"), + Self::BridgeTransfer(fees) => state + .put_fees(*fees) + .wrap_err("failed to put bridge transfer fees"), } } } @@ -211,6 +214,11 @@ mod tests { test_fee_change_action::().await; } + #[tokio::test] + async fn bridge_transfer_fee_change_action_executes_as_expected() { + test_fee_change_action::().await; + } + async fn test_fee_change_action<'a, F>() where F: FeeHandler, diff --git a/crates/astria-sequencer/src/action_handler/impls/mod.rs b/crates/astria-sequencer/src/action_handler/impls/mod.rs index 
c273cc26f..eb2b38da1 100644 --- a/crates/astria-sequencer/src/action_handler/impls/mod.rs +++ b/crates/astria-sequencer/src/action_handler/impls/mod.rs @@ -1,5 +1,6 @@ pub(crate) mod bridge_lock; pub(crate) mod bridge_sudo_change; +pub(crate) mod bridge_transfer; pub(crate) mod bridge_unlock; pub(crate) mod fee_asset_change; pub(crate) mod fee_change; diff --git a/crates/astria-sequencer/src/action_handler/impls/transaction.rs b/crates/astria-sequencer/src/action_handler/impls/transaction.rs index 8605906ad..bcea21b47 100644 --- a/crates/astria-sequencer/src/action_handler/impls/transaction.rs +++ b/crates/astria-sequencer/src/action_handler/impls/transaction.rs @@ -142,6 +142,10 @@ impl ActionHandler for Transaction { .check_stateless() .await .wrap_err("stateless check failed for BridgeUnlock action")?, + Action::BridgeTransfer(act) => act + .check_stateless() + .await + .wrap_err("stateless check failed for BridgeTransfer action")?, Action::BridgeSudoChange(act) => act .check_stateless() .await @@ -269,6 +273,9 @@ impl ActionHandler for Transaction { Action::BridgeUnlock(act) => check_execute_and_pay_fees(act, &mut state) .await .wrap_err("failed executing bridge unlock")?, + Action::BridgeTransfer(act) => check_execute_and_pay_fees(act, &mut state) + .await + .wrap_err("failed executing bridge transfer")?, Action::BridgeSudoChange(act) => check_execute_and_pay_fees(act, &mut state) .await .wrap_err("failed executing bridge sudo change")?, diff --git a/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs b/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs index 889082345..97ba6f6de 100644 --- a/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs +++ b/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs @@ -15,6 +15,7 @@ use astria_core::{ transaction::v1::action::{ BridgeLock, BridgeSudoChange, + BridgeTransfer, BridgeUnlock, FeeAssetChange, FeeChange, @@ -77,6 +78,7 @@ pub(crate) fn default_fees() -> 
astria_core::protocol::genesis::v1::GenesisFees bridge_lock: Some(FeeComponents::::new(12, 1)), bridge_sudo_change: Some(FeeComponents::::new(24, 0)), ics20_withdrawal: Some(FeeComponents::::new(24, 0)), + bridge_transfer: Some(FeeComponents::::new(24, 0)), // should reflect transfer fee bridge_unlock: Some(FeeComponents::::new(12, 0)), ibc_relay: Some(FeeComponents::::new(0, 0)), diff --git a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_at_genesis.snap b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_at_genesis.snap index 15352db3e..9bdacbc6f 100644 --- a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_at_genesis.snap +++ b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_at_genesis.snap @@ -3,36 +3,36 @@ source: crates/astria-sequencer/src/app/tests_breaking_changes.rs expression: app.app_hash.as_bytes() --- [ - 163, - 247, - 139, - 47, - 78, - 129, - 169, - 19, - 217, - 165, - 120, - 82, - 190, - 249, - 77, - 186, - 153, - 51, - 213, - 253, - 37, - 38, - 99, - 100, - 91, - 245, - 28, - 150, - 61, - 214, + 236, 212, - 12 + 100, + 47, + 191, + 2, + 11, + 43, + 159, + 43, + 239, + 162, + 79, + 57, + 36, + 115, + 251, + 145, + 205, + 230, + 115, + 163, + 142, + 124, + 154, + 22, + 225, + 211, + 113, + 50, + 182, + 221 ] diff --git a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_execute_every_action.snap b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_execute_every_action.snap index 200653097..cfb9ba510 100644 --- a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_execute_every_action.snap +++ b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_execute_every_action.snap 
@@ -3,36 +3,36 @@ source: crates/astria-sequencer/src/app/tests_breaking_changes.rs expression: app.app_hash.as_bytes() --- [ - 195, - 205, + 44, + 87, + 189, + 6, + 158, + 176, + 254, + 184, + 219, + 17, + 65, + 250, 225, - 173, - 118, - 201, - 149, - 122, - 173, - 117, - 237, - 146, - 148, - 114, - 152, + 216, 59, - 68, - 60, - 33, - 65, - 41, - 154, - 249, - 85, - 76, - 183, - 32, - 108, - 175, - 88, - 197, - 63 + 145, + 9, + 240, + 203, + 167, + 55, + 202, + 64, + 54, + 118, + 241, + 219, + 128, + 230, + 125, + 104, + 164 ] diff --git a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_finalize_block.snap b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_finalize_block.snap index 218d82f1f..5f4f79499 100644 --- a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_finalize_block.snap +++ b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_finalize_block.snap @@ -3,36 +3,36 @@ source: crates/astria-sequencer/src/app/tests_breaking_changes.rs expression: app.app_hash.as_bytes() --- [ - 48, - 214, - 34, - 61, - 4, - 228, - 103, - 148, - 143, - 144, - 228, - 158, - 243, - 185, - 202, - 88, - 179, - 89, - 99, - 98, + 121, + 120, + 171, + 97, + 36, + 10, + 69, + 50, + 212, + 147, + 154, + 63, + 94, + 165, + 137, + 32, 113, - 240, - 167, - 127, - 88, - 153, - 200, + 38, + 52, + 70, + 119, + 110, 213, - 136, + 112, + 77, + 113, + 3, 197, - 103, - 12 + 212, + 95, + 149, + 52 ] diff --git a/crates/astria-sequencer/src/fees/mod.rs b/crates/astria-sequencer/src/fees/mod.rs index 85519f7cd..cf245c8a8 100644 --- a/crates/astria-sequencer/src/fees/mod.rs +++ b/crates/astria-sequencer/src/fees/mod.rs @@ -5,6 +5,7 @@ use astria_core::{ transaction::v1::action::{ BridgeLock, BridgeSudoChange, + BridgeTransfer, BridgeUnlock, FeeAssetChange, FeeChange, @@ -237,6 +238,28 @@ impl FeeHandler for 
BridgeUnlock { } } +impl FeeHandler for BridgeTransfer { + fn name() -> &'static str { + ::Raw::NAME + } + + fn full_name() -> String { + ::full_name() + } + + fn snake_case_name() -> &'static str { + "bridge_transfer" + } + + fn variable_component(&self) -> u128 { + 0 + } + + fn fee_asset(&self) -> Option<&asset::Denom> { + Some(&self.fee_asset) + } +} + impl FeeHandler for InitBridgeAccount { fn name() -> &'static str { ::Raw::NAME diff --git a/crates/astria-sequencer/src/fees/query.rs b/crates/astria-sequencer/src/fees/query.rs index b551e5095..dafa13d8d 100644 --- a/crates/astria-sequencer/src/fees/query.rs +++ b/crates/astria-sequencer/src/fees/query.rs @@ -17,6 +17,7 @@ use astria_core::{ action::{ BridgeLock, BridgeSudoChange, + BridgeTransfer, BridgeUnlock, FeeAssetChange, FeeChange, @@ -291,6 +292,7 @@ pub(crate) async fn get_fees_for_transaction( OnceCell::new(); let bridge_lock_fees: OnceCell>> = OnceCell::new(); let bridge_unlock_fees: OnceCell>> = OnceCell::new(); + let bridge_transfer_fees: OnceCell>> = OnceCell::new(); let bridge_sudo_change_fees: OnceCell>> = OnceCell::new(); let validator_update_fees: OnceCell>> = OnceCell::new(); @@ -330,6 +332,10 @@ pub(crate) async fn get_fees_for_transaction( let fees = get_or_init_fees(state, &bridge_unlock_fees).await?; calculate_and_add_fees(act, &mut fees_by_asset, fees); } + Action::BridgeTransfer(act) => { + let fees = get_or_init_fees(state, &bridge_transfer_fees).await?; + calculate_and_add_fees(act, &mut fees_by_asset, fees); + } Action::BridgeSudoChange(act) => { let fees = get_or_init_fees(state, &bridge_sudo_change_fees).await?; calculate_and_add_fees(act, &mut fees_by_asset, fees); @@ -450,6 +456,7 @@ struct AllFeeComponents { init_bridge_account: FetchResult, bridge_lock: FetchResult, bridge_unlock: FetchResult, + bridge_transfer: FetchResult, bridge_sudo_change: FetchResult, ibc_relay: FetchResult, validator_update: FetchResult, @@ -489,6 +496,7 @@ async fn get_all_fee_components(state: &S) -> 
AllFeeComponents { init_bridge_account, bridge_lock, bridge_unlock, + bridge_transfer, bridge_sudo_change, validator_update, sudo_address_change, @@ -506,6 +514,7 @@ async fn get_all_fee_components(state: &S) -> AllFeeComponents { state.get_fees::().map(FetchResult::from), state.get_fees::().map(FetchResult::from), state.get_fees::().map(FetchResult::from), + state.get_fees::().map(FetchResult::from), state.get_fees::().map(FetchResult::from), state.get_fees::().map(FetchResult::from), state.get_fees::().map(FetchResult::from), @@ -522,6 +531,7 @@ async fn get_all_fee_components(state: &S) -> AllFeeComponents { init_bridge_account, bridge_lock, bridge_unlock, + bridge_transfer, bridge_sudo_change, ibc_relay, validator_update, diff --git a/crates/astria-sequencer/src/fees/storage/values.rs b/crates/astria-sequencer/src/fees/storage/values.rs index 138b2f8cb..967ec0a24 100644 --- a/crates/astria-sequencer/src/fees/storage/values.rs +++ b/crates/astria-sequencer/src/fees/storage/values.rs @@ -3,6 +3,7 @@ use astria_core::protocol::{ transaction::v1::action::{ BridgeLock, BridgeSudoChange, + BridgeTransfer, BridgeUnlock, FeeAssetChange, FeeChange, @@ -38,6 +39,7 @@ enum ValueImpl { InitBridgeAccountFees(FeeComponents), BridgeLockFees(FeeComponents), BridgeUnlockFees(FeeComponents), + BridgeTransferFees(FeeComponents), BridgeSudoChangeFees(FeeComponents), IbcRelayFees(FeeComponents), ValidatorUpdateFees(FeeComponents), @@ -101,6 +103,7 @@ impl_from_for_fee_storage!( DomainFeeComponents => InitBridgeAccountFees, DomainFeeComponents => BridgeLockFees, DomainFeeComponents => BridgeUnlockFees, + DomainFeeComponents => BridgeTransferFees, DomainFeeComponents => BridgeSudoChangeFees, DomainFeeComponents => IbcRelayFees, DomainFeeComponents => ValidatorUpdateFees, diff --git a/crates/astria-sequencer/src/service/info/mod.rs b/crates/astria-sequencer/src/service/info/mod.rs index e093514ca..14155638e 100644 --- a/crates/astria-sequencer/src/service/info/mod.rs +++ 
b/crates/astria-sequencer/src/service/info/mod.rs @@ -179,6 +179,7 @@ mod tests { transaction::v1::action::{ BridgeLock, BridgeSudoChange, + BridgeTransfer, BridgeUnlock, FeeAssetChange, FeeChange, @@ -444,6 +445,10 @@ mod tests { "base": 2, "multiplier": 2 }, + "bridge_transfer": { + "base": 13, + "multiplier": 13 + }, "fee_asset_change": { "base": 4, "multiplier": 4 @@ -498,6 +503,9 @@ mod tests { state .put_fees(FeeComponents::::new(2, 2)) .unwrap(); + state + .put_fees(FeeComponents::::new(13, 13)) + .unwrap(); state .put_fees(FeeComponents::::new(3, 3)) .unwrap(); diff --git a/crates/astria-sequencer/src/transaction/checks.rs b/crates/astria-sequencer/src/transaction/checks.rs index df18bd4c7..4f2688461 100644 --- a/crates/astria-sequencer/src/transaction/checks.rs +++ b/crates/astria-sequencer/src/transaction/checks.rs @@ -120,6 +120,16 @@ async fn add_total_transfers_for_transaction( .and_modify(|amt| *amt = amt.saturating_add(act.amount)) .or_insert(act.amount); } + Action::BridgeTransfer(act) => { + let asset = state + .get_bridge_account_ibc_asset(&act.bridge_address) + .await + .wrap_err("failed to get bridge account asset id")?; + cost_by_asset + .entry(asset) + .and_modify(|amt| *amt = amt.saturating_add(act.amount)) + .or_insert(act.amount); + } Action::ValidatorUpdate(_) | Action::SudoAddressChange(_) | Action::IbcSudoChange(_) diff --git a/proto/protocolapis/astria/protocol/fees/v1/types.proto b/proto/protocolapis/astria/protocol/fees/v1/types.proto index 1d3edc270..ee344f427 100644 --- a/proto/protocolapis/astria/protocol/fees/v1/types.proto +++ b/proto/protocolapis/astria/protocol/fees/v1/types.proto @@ -39,6 +39,11 @@ message BridgeSudoChangeFeeComponents { astria.primitive.v1.Uint128 multiplier = 2; } +message BridgeTransferFeeComponents { + astria.primitive.v1.Uint128 base = 1; + astria.primitive.v1.Uint128 multiplier = 2; +} + message Ics20WithdrawalFeeComponents { astria.primitive.v1.Uint128 base = 1; astria.primitive.v1.Uint128 multiplier = 
2; diff --git a/proto/protocolapis/astria/protocol/genesis/v1/types.proto b/proto/protocolapis/astria/protocol/genesis/v1/types.proto index b32adf48a..a14d4f198 100644 --- a/proto/protocolapis/astria/protocol/genesis/v1/types.proto +++ b/proto/protocolapis/astria/protocol/genesis/v1/types.proto @@ -56,4 +56,5 @@ message GenesisFees { astria.protocol.fees.v1.SudoAddressChangeFeeComponents sudo_address_change = 12; astria.protocol.fees.v1.TransferFeeComponents transfer = 13; astria.protocol.fees.v1.ValidatorUpdateFeeComponents validator_update = 14; + astria.protocol.fees.v1.BridgeTransferFeeComponents bridge_transfer = 15; } diff --git a/proto/protocolapis/astria/protocol/transaction/v1/action.proto b/proto/protocolapis/astria/protocol/transaction/v1/action.proto index 26cab7579..061116d1f 100644 --- a/proto/protocolapis/astria/protocol/transaction/v1/action.proto +++ b/proto/protocolapis/astria/protocol/transaction/v1/action.proto @@ -18,6 +18,7 @@ message Action { BridgeLock bridge_lock = 12; BridgeUnlock bridge_unlock = 13; BridgeSudoChange bridge_sudo_change = 14; + BridgeTransfer bridge_transfer = 15; // IBC user actions are defined on 21-30 astria_vendored.penumbra.core.component.ibc.v1.IbcRelay ibc = 21; @@ -211,6 +212,30 @@ message BridgeSudoChange { string fee_asset = 4; } +message BridgeTransfer { + // the address of the bridge account to transfer to + astria.primitive.v1.Address to = 1; + // the amount to transfer + astria.primitive.v1.Uint128 amount = 2; + // the asset used to pay the transaction fee + string fee_asset = 3; + // the address on the destination chain which + // will receive the bridged funds + string destination_chain_address = 4; + // the address of the bridge account to transfer from + astria.primitive.v1.Address bridge_address = 5; + // The block number on the rollup that triggered this transfer. 
+ uint64 rollup_block_number = 6; + // An identifier of the original rollup event, such as a transaction hash which + // triggered a bridge unlock and is underlying event that led to this bridge + // unlock. This can be utilized for tracing from the bridge back to + // distinct rollup events. + // + // This field is of type `string` so that it can be formatted in the preferred + // format of the rollup when targeting plain text encoding. + string rollup_withdrawal_event_id = 7; +} + message FeeChange { // the new fee components values oneof fee_components { @@ -228,6 +253,7 @@ message FeeChange { astria.protocol.fees.v1.SudoAddressChangeFeeComponents sudo_address_change = 12; astria.protocol.fees.v1.TransferFeeComponents transfer = 13; astria.protocol.fees.v1.ValidatorUpdateFeeComponents validator_update = 14; + astria.protocol.fees.v1.BridgeTransferFeeComponents bridge_transfer = 15; } } From f043f5b275f932c45af249d4a92096fac0ff2ec0 Mon Sep 17 00:00:00 2001 From: Richard Janis Goldschmidt Date: Thu, 13 Feb 2025 11:34:00 +0100 Subject: [PATCH 23/23] chore(core): add missing link to changelog for bridge transfer impl (#1963) ## Summary Added the link for the bridge transfer action PR to the PR. https://github.com/astriaorg/astria/pull/1934 ## Background We want links to all changes to keep track of what was changed. ## Changes - List changes which were made. ## Changelogs Changelogs updated. 
## Related Issues The PR in question: https://github.com/astriaorg/astria/pull/1934 --- crates/astria-core/CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/astria-core/CHANGELOG.md b/crates/astria-core/CHANGELOG.md index 33db19f03..c3c45c8f6 100644 --- a/crates/astria-core/CHANGELOG.md +++ b/crates/astria-core/CHANGELOG.md @@ -15,7 +15,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add method `TracePrefixed::leading_channel` to read the left-most channel of a trace prefixed ICS20 asset [#1768](https://github.com/astriaorg/astria/pull/1768). - Add `impl Protobuf for Address` [#1802](https://github.com/astriaorg/astria/pull/1802). -- Add `BridgeTransfer` action and `BridgeTransfer` variant to `FeeChange`. +- Add `BridgeTransfer` action and `BridgeTransfer` variant to `FeeChange` + [#1934](https://github.com/astriaorg/astria/pull/1934). ### Changed