diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index 232ae7c07c..fc9e3c4042 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -12,6 +12,7 @@ on: required: false type: choice options: + - auctioneer - composer - conductor - sequencer @@ -39,6 +40,22 @@ jobs: run_checker: uses: ./.github/workflows/reusable-run-checker.yml + auctioneer: + needs: run_checker + if: needs.run_checker.outputs.run_docker == 'true' || (github.event_name == 'workflow_dispatch' && github.event.inputs.target == 'auctioneer') + uses: "./.github/workflows/reusable-docker-build.yml" + permissions: + contents: read + id-token: write + packages: write + with: + depot-project-id: 1kp2p2bvbr + package-name: auctioneer + binary-name: auctioneer + tag: ${{ inputs.tag }} + force: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.target == 'auctioneer' }} + secrets: inherit + composer: needs: run_checker if: needs.run_checker.outputs.run_docker == 'true' || (github.event_name == 'workflow_dispatch' && github.event.inputs.target == 'composer') @@ -137,7 +154,7 @@ jobs: smoke-test: needs: [run_checker, composer, conductor, sequencer, sequencer-relayer, evm-bridge-withdrawer, cli] if: (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'astriaorg/astria') && (github.event_name == 'merge_group' || needs.run_checker.outputs.run_docker == 'true') - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 steps: - uses: actions/checkout@v4 - name: Install just @@ -169,7 +186,7 @@ jobs: smoke-cli: needs: [run_checker, composer, conductor, sequencer, sequencer-relayer, evm-bridge-withdrawer, cli] if: (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'astriaorg/astria') && (github.event_name == 'merge_group' || needs.run_checker.outputs.run_docker == 'true') - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 steps: - uses: actions/checkout@v4 - name: Install just @@ -201,7 +218,7 @@ jobs: ibc-bridge-test: needs: [ run_checker, composer, conductor, sequencer, sequencer-relayer, evm-bridge-withdrawer, cli ] if: (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'astriaorg/astria') && (github.event_name == 'merge_group' || needs.run_checker.outputs.run_docker == 'true') - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 steps: - uses: actions/checkout@v4 - name: Install just @@ -233,7 +250,7 @@ jobs: ibc-no-native-asset-test: needs: [ run_checker, composer, conductor, sequencer, sequencer-relayer, evm-bridge-withdrawer, cli ] if: (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'astriaorg/astria') && (github.event_name == 'merge_group' || needs.run_checker.outputs.run_docker == 'true') - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 steps: - uses: actions/checkout@v4 - name: Install just @@ -265,7 +282,7 @@ jobs: ibc-timeout-refund: needs: [ run_checker, composer, conductor, sequencer, sequencer-relayer, evm-bridge-withdrawer, cli ] if: (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == 'astriaorg/astria') && (github.event_name == 'merge_group' || needs.run_checker.outputs.run_docker == 'true') - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 steps: - uses: actions/checkout@v4 - name: Install just @@ -296,7 +313,7 @@ jobs: docker: if: ${{ always() && !cancelled() }} 
- needs: [composer, conductor, sequencer, sequencer-relayer, evm-bridge-withdrawer, cli, smoke-test, smoke-cli, ibc-bridge-test, ibc-no-native-asset-test, ibc-timeout-refund] + needs: [auctioneer, composer, conductor, sequencer, sequencer-relayer, evm-bridge-withdrawer, cli, smoke-test, smoke-cli, ibc-bridge-test, ibc-no-native-asset-test, ibc-timeout-refund] uses: ./.github/workflows/reusable-success.yml with: success: ${{ !contains(needs.*.result, 'failure') }} diff --git a/.github/workflows/reusable-build.yml b/.github/workflows/reusable-build.yml index fedbd889ca..8f1722d971 100644 --- a/.github/workflows/reusable-build.yml +++ b/.github/workflows/reusable-build.yml @@ -23,7 +23,7 @@ jobs: matrix: include: - target: x86_64-unknown-linux-gnu - os: buildjet-4vcpu-ubuntu-2004 + os: depot-ubuntu-24.04-4 build-tool: cargo - target: aarch64-apple-darwin os: macos-latest diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 10849db975..674a2de768 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -77,7 +77,7 @@ jobs: fi compiles: - runs-on: buildjet-4vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-4 needs: run_checker if: needs.run_checker.outputs.run_tests == 'true' steps: @@ -132,7 +132,7 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} rust: - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 needs: run_checker if: needs.run_checker.outputs.run_tests == 'true' steps: @@ -162,7 +162,7 @@ jobs: cargo nextest run --archive-file=archive.tar.zst rust-ethereum: - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 needs: run_checker if: needs.run_checker.outputs.run_tests == 'true' steps: @@ -189,7 +189,7 @@ jobs: cargo nextest run --package astria-bridge-withdrawer -- --include-ignored doctest: - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 needs: run_checker if: needs.run_checker.outputs.run_tests == 'true' steps: @@ -210,7 +210,7 @@ jobs: run: cargo test --doc --all-features clippy: - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 needs: run_checker if: needs.run_checker.outputs.run_tests == 'true' && needs.run_checker.outputs.run_lint_rust == 'true' steps: @@ -238,7 +238,7 @@ jobs: just lint rust-clippy-tools custom-lints: - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: depot-ubuntu-24.04-8 needs: run_checker if: needs.run_checker.outputs.run_tests == 'true' && needs.run_checker.outputs.run_lint_rust == 'true' steps: diff --git a/CODEOWNERS b/CODEOWNERS index 7754ccc0a4..a65164ab5d 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,5 +1,6 @@ * @joroshiba +astria-auctioneer/ @SuperFluffy @bharath-123 astria-bridge-contracts/ @SuperFluffy @joroshiba astria-bridge-withdrawer/ @SuperFluffy @joroshiba astria-build-info/ @SuperFluffy @joroshiba diff --git a/Cargo.lock b/Cargo.lock index cf483afff4..e572c4fd71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -497,6 +497,41 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "astria-auctioneer" +version = "0.0.1" +dependencies = [ + "astria-build-info", + "astria-config", + "astria-core", + "astria-eyre", + "astria-sequencer-client", + "astria-telemetry", + "base64 0.21.7", + "bytes", + "futures", + "hex", + "http 0.2.12", + "http-body 0.4.6", + "humantime", + "hyper 0.14.30", + "itertools 0.12.1", + "pbjson-types", + "pin-project-lite", + "prost", + "serde", + "serde_json", + "thiserror 1.0.63", + "tokio", + "tokio-stream", + "tokio-util 0.7.13", + "tonic 0.10.2", + "tower 0.5.2", + "tower-http", + "tracing", + "tryhard", +] + 
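Not part of the diff, for orientation: a minimal sketch of how the new workspace member and image target might be exercised, assuming an authenticated `gh` CLI and an installed `cargo-nextest` (the runner the CI jobs below invoke); the `tag` value passed here is an arbitrary example.

```bash
# Build and test the new crate in the workspace (assumes cargo-nextest
# is installed, mirroring the `cargo nextest run` calls in test.yml).
cargo build -p astria-auctioneer
cargo nextest run -p astria-auctioneer

# Manually dispatch an auctioneer image build; `target` is the new choice
# input added above, and `tag` corresponds to the workflow's `inputs.tag`.
gh workflow run docker-build.yml -f target=auctioneer -f tag=test-tag
```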
[[package]] name = "astria-bridge-contracts" version = "0.1.0" @@ -545,7 +580,7 @@ dependencies = [ "tendermint-rpc", "tokio", "tokio-stream", - "tokio-util 0.7.11", + "tokio-util 0.7.13", "tonic 0.10.2", "tracing", "tryhard", @@ -628,7 +663,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-test", - "tokio-util 0.7.11", + "tokio-util 0.7.13", "tonic 0.10.2", "tonic-health", "tracing", @@ -679,9 +714,9 @@ dependencies = [ "thiserror 1.0.63", "tokio", "tokio-stream", - "tokio-util 0.7.11", + "tokio-util 0.7.13", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tracing", "tryhard", "wiremock", @@ -840,6 +875,7 @@ dependencies = [ "ibc-proto", "ibc-types", "insta", + "itertools 0.12.1", "maplit", "matchit", "penumbra-ibc", @@ -857,12 +893,14 @@ dependencies = [ "tendermint-proto", "thiserror 1.0.63", "tokio", + "tokio-util 0.7.13", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tower-abci", "tower-actor", "tower-http", "tracing", + "url", ] [[package]] @@ -940,9 +978,9 @@ dependencies = [ "tokio", "tokio-stream", "tokio-test", - "tokio-util 0.7.11", + "tokio-util 0.7.13", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tracing", "tryhard", "wiremock", @@ -1166,9 +1204,9 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "tokio", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", ] @@ -3721,7 +3759,7 @@ dependencies = [ "indexmap 2.4.0", "slab", "tokio", - "tokio-util 0.7.11", + "tokio-util 0.7.13", "tracing", ] @@ -3740,7 +3778,7 @@ dependencies = [ "indexmap 2.4.0", "slab", "tokio", - "tokio-util 0.7.11", + "tokio-util 0.7.13", "tracing", ] @@ -4082,7 +4120,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -4864,7 +4902,7 @@ dependencies = [ "thiserror 1.0.63", "tokio", "tokio-rustls", - "tokio-util 0.7.11", + "tokio-util 0.7.13", "tracing", "url", ] @@ -4909,7 +4947,7 @@ dependencies = [ "serde_json", "thiserror 1.0.63", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] @@ -4945,8 +4983,8 @@ dependencies = [ "thiserror 1.0.63", "tokio", "tokio-stream", - "tokio-util 0.7.11", - "tower", + "tokio-util 0.7.13", + "tower 0.4.13", "tracing", ] @@ -5970,7 +6008,7 @@ dependencies = [ "tendermint-light-client-verifier", "time", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tracing", ] @@ -6163,9 +6201,9 @@ dependencies = [ "tendermint-proto", "tokio", "tokio-stream", - "tokio-util 0.7.11", + "tokio-util 0.7.13", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -6243,9 +6281,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -6830,7 +6868,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration", "tokio", "tokio-rustls", @@ -7780,6 +7818,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + [[package]] name = "synstructure" version = 
"0.13.1" @@ -8195,6 +8239,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util 0.7.13", ] [[package]] @@ -8238,9 +8283,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -8356,7 +8401,7 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -8384,7 +8429,7 @@ dependencies = [ "prost", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -8431,12 +8476,26 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.11", + "tokio-util 0.7.13", "tower-layer", "tower-service", "tracing", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-abci" version = "0.12.0" @@ -8452,7 +8511,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util 0.6.10", - "tower", + "tower 0.4.13", "tracing", ] @@ -8466,8 +8525,8 @@ dependencies = [ "pin-project", "thiserror 1.0.63", "tokio", - "tokio-util 0.7.11", - "tower", + "tokio-util 0.7.13", + "tower 0.4.13", "tracing", ] @@ -8487,6 +8546,7 @@ dependencies = [ "pin-project-lite", "tower-layer", "tower-service", + "tracing", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 33ab556fb5..007fe2c105 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ exclude = ["tools/protobuf-compiler", "tools/solidity-compiler"] members = [ + "crates/astria-auctioneer", "crates/astria-bridge-contracts", "crates/astria-bridge-withdrawer", "crates/astria-build-info", @@ -29,6 +30,7 @@ members = [ # Specify default members so that cargo invocations in github actions will # not act on lints default-members = [ + "crates/astria-auctioneer", "crates/astria-bridge-contracts", "crates/astria-bridge-withdrawer", "crates/astria-build-info", @@ -106,7 +108,7 @@ thiserror = "1" tokio = "1.28" tokio-stream = { version = "0.1.14" } tokio-test = "0.4.2" -tokio-util = "0.7.9" +tokio-util = "0.7.13" tonic = "0.10" tracing = "0.1" tryhard = "0.5.1" diff --git a/charts/auctioneer/Chart.yaml b/charts/auctioneer/Chart.yaml new file mode 100644 index 0000000000..69ce9ca7a5 --- /dev/null +++ b/charts/auctioneer/Chart.yaml @@ -0,0 +1,34 @@ +apiVersion: v2 +name: auctioneer +description: Astria auctioneer helm chart + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.2 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.0.1" + +maintainers: + - name: wafflesvonmaple + url: astria.org + - name: quasystaty1 + url: astria.org + - name: steezeburger + url: astria.org + - name: joroshiba + url: astria.org diff --git a/charts/auctioneer/templates/_helpers.tpl b/charts/auctioneer/templates/_helpers.tpl new file mode 100644 index 0000000000..a0e1bbffcc --- /dev/null +++ b/charts/auctioneer/templates/_helpers.tpl @@ -0,0 +1,31 @@ +{{/* +Namespace to deploy elements into. +*/}} +{{- define "auctioneer.namespace" -}} +{{- default .Release.Namespace .Values.global.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end }} + +{{/* +Application name used to label deployed elements. +*/}} +{{- define "auctioneer.appName" -}} +auctioneer +{{- end }} + +{{/* +Common labels +*/}} +{{- define "auctioneer.labels" -}} +{{ include "auctioneer.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "auctioneer.selectorLabels" -}} +app: {{ include "auctioneer.appName" . }} +{{- end }} + +{{- define "auctioneer.image" -}} +{{ .Values.images.auctioneer.repo }}:{{ if .Values.global.dev }}{{ .Values.images.auctioneer.devTag }}{{ else }}{{ .Values.images.auctioneer.tag }}{{ end }} +{{- end }} diff --git a/charts/auctioneer/templates/configmap.yaml b/charts/auctioneer/templates/configmap.yaml new file mode 100644 index 0000000000..ee209af9d5 --- /dev/null +++ b/charts/auctioneer/templates/configmap.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: auctioneer-env + namespace: {{ include "auctioneer.namespace" . }} +data: + ASTRIA_AUCTIONEER_SEQUENCER_GRPC_ENDPOINT: "{{ tpl .Values.config.sequencerGrpcEndpoint . }}" + ASTRIA_AUCTIONEER_SEQUENCER_ABCI_ENDPOINT: "{{ tpl .Values.config.sequencerAbciEndpoint . }}" + ASTRIA_AUCTIONEER_SEQUENCER_CHAIN_ID: "{{ tpl .Values.config.sequencerChainId . }}" + ASTRIA_AUCTIONEER_SEQUENCER_PRIVATE_KEY_PATH: "/var/secrets/{{ .Values.config.sequencerPrivateKey.secret.filename }}" + ASTRIA_AUCTIONEER_FEE_ASSET_DENOMINATION: "{{ .Values.config.feeAssetDenomination }}" + ASTRIA_AUCTIONEER_SEQUENCER_ADDRESS_PREFIX: "{{ .Values.config.sequencerAddressPrefix }}" + ASTRIA_AUCTIONEER_ROLLUP_GRPC_ENDPOINT: "{{ .Values.config.rollupGrpcEndpoint }}" + ASTRIA_AUCTIONEER_ROLLUP_ID: "{{ .Values.config.rollupId }}" + ASTRIA_AUCTIONEER_LATENCY_MARGIN_MS: "{{ .Values.config.latencyMarginMs }}" + ASTRIA_AUCTIONEER_LOG: "astria_auctioneer={{ .Values.config.logLevel }}" + ASTRIA_AUCTIONEER_FORCE_STDOUT: "{{ .Values.global.useTTY }}" + ASTRIA_AUCTIONEER_PRETTY_PRINT: "{{ .Values.global.useTTY }}" + NO_COLOR: "{{ .Values.global.useTTY }}" + ASTRIA_AUCTIONEER_NO_METRICS: "{{ not .Values.metrics.enabled }}" + ASTRIA_AUCTIONEER_METRICS_HTTP_LISTENER_ADDR: "0.0.0.0:{{ .Values.ports.metrics }}" + ASTRIA_AUCTIONEER_NO_OTEL: "{{ not .Values.otel.enabled }}" + OTEL_EXPORTER_OTLP_ENDPOINT: "{{ tpl .Values.otel.endpoint . }}" + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "{{ tpl .Values.otel.tracesEndpoint .
}}" + OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: "{{ tpl .Values.otel.tracesTimeout . }}" + OTEL_EXPORTER_OTLP_TRACES_COMPRESSION: "{{ tpl .Values.otel.tracesCompression . }}" + OTEL_EXPORTER_OTLP_HEADERS: "{{ tpl .Values.otel.otlpHeaders . }}" + OTEL_EXPORTER_OTLP_TRACE_HEADERS: "{{ tpl .Values.otel.traceHeaders . }}" + OTEL_SERVICE_NAME: "{{ tpl .Values.otel.serviceName . }}" + {{- if not .Values.global.dev }} + {{- else }} + {{- end }} +--- +{{- if not .Values.secretProvider.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ include "auctioneer.namespace" . }} + name: auctioneer-private-key +data: + {{ .Values.config.sequencerPrivateKey.secret.filename }}: | + {{ .Values.config.sequencerPrivateKey.devContent }} +--- +{{- end }} diff --git a/charts/auctioneer/templates/deployment.yaml b/charts/auctioneer/templates/deployment.yaml new file mode 100644 index 0000000000..79a7de7e8a --- /dev/null +++ b/charts/auctioneer/templates/deployment.yaml @@ -0,0 +1,49 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "auctioneer.appName" . }} + namespace: {{ include "auctioneer.namespace" . }} + labels: + {{ include "auctioneer.labels" . }} +spec: + replicas: {{ .Values.global.replicaCount }} + selector: + matchLabels: + {{ include "auctioneer.labels" . }} + template: + metadata: + labels: + {{ include "auctioneer.labels" . }} + spec: + containers: + - name: auctioneer + image: {{ include "auctioneer.image" . }} + imagePullPolicy: {{ .Values.images.auctioneer.pullPolicy }} + command: ["/usr/local/bin/astria-auctioneer"] + stdin: {{ .Values.global.useTTY }} + tty: {{ .Values.global.useTTY }} + envFrom: + - configMapRef: + name: auctioneer-env + volumeMounts: + - mountPath: "/var/secrets" + name: auctioneer-private-key + {{- if .Values.metrics.enabled }} + ports: + - containerPort: {{ .Values.ports.metrics }} + name: auct-metrics + {{- end }} + resources: + {{- toYaml .Values.resources | trim | nindent 12 }} + volumes: + - name: auctioneer-private-key + {{- if .Values.secretProvider.enabled }} + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: auctioneer-private-key + {{- else }} + configMap: + name: auctioneer-private-key + {{- end }} diff --git a/charts/auctioneer/templates/prometheusrule.yaml b/charts/auctioneer/templates/prometheusrule.yaml new file mode 100644 index 0000000000..4e98bd024c --- /dev/null +++ b/charts/auctioneer/templates/prometheusrule.yaml @@ -0,0 +1,20 @@ +{{- if .Values.alerting.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "auctioneer.appName" . }}-alerting +{{- if .Values.alerting.prometheusRule.namespace }} + namespace: {{ .Values.alerting.prometheusRule.namespace | quote }} +{{- end }} + labels: + {{- include "auctioneer.labels" . | nindent 4 }} + {{- if .Values.alerting.prometheusRule.additionalLabels }} + {{- toYaml .Values.alerting.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.alerting.prometheusRule.rules }} + groups: + - name: {{ template "auctioneer.appName" . 
}} + rules: {{- toYaml .Values.alerting.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/charts/auctioneer/templates/secretproviderclass.yaml b/charts/auctioneer/templates/secretproviderclass.yaml new file mode 100644 index 0000000000..b80e3fd08a --- /dev/null +++ b/charts/auctioneer/templates/secretproviderclass.yaml @@ -0,0 +1,13 @@ +--- +{{- if .Values.secretProvider.enabled }} +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: auctioneer-private-key +spec: + provider: {{ .Values.secretProvider.provider }} + parameters: + {{- $_ := set $ "key" .Values.config.sequencerPrivateKey.secret }} + {{- tpl $.Values.secretProvider.parametersTemplate $ | nindent 4 }} +--- +{{- end }} diff --git a/charts/auctioneer/templates/service.yaml b/charts/auctioneer/templates/service.yaml new file mode 100644 index 0000000000..0cdd42cd1f --- /dev/null +++ b/charts/auctioneer/templates/service.yaml @@ -0,0 +1,16 @@ +{{- if .Values.metrics.enabled }} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "auctioneer.appName" . }}-metrics + namespace: {{ include "auctioneer.namespace" . }} + labels: + {{ include "auctioneer.labels" . }} +spec: + selector: + {{ include "auctioneer.selectorLabels" . }} + ports: + - name: auct-metrics + port: {{ .Values.ports.metrics }} + targetPort: auct-metrics +{{- end }} diff --git a/charts/auctioneer/templates/servicemonitor.yaml b/charts/auctioneer/templates/servicemonitor.yaml new file mode 100644 index 0000000000..e785a10fa2 --- /dev/null +++ b/charts/auctioneer/templates/servicemonitor.yaml @@ -0,0 +1,27 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: auctioneer-metrics + labels: + {{- with .Values.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: auctioneer-metrics + namespaceSelector: + matchNames: + - {{ include "auctioneer.namespace" . }} + selector: + matchLabels: + app: auctioneer + endpoints: + - port: auct-metrics + path: / + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} +{{- end }} diff --git a/charts/auctioneer/values.yaml b/charts/auctioneer/values.yaml new file mode 100644 index 0000000000..e765555781 --- /dev/null +++ b/charts/auctioneer/values.yaml @@ -0,0 +1,96 @@ +global: + namespaceOverride: "" + replicaCount: 1 + # Whether to use tty readable logging for astria services, when false use json. 
useTTY: false + dev: false + +images: + auctioneer: + repo: ghcr.io/astriaorg/auctioneer + pullPolicy: IfNotPresent + # TODO - update to latest tag + tag: "pr-1839" + devTag: "pr-1839" + +config: + sequencerGrpcEndpoint: "" + sequencerAbciEndpoint: "" + sequencerChainId: "" + sequencerPrivateKey: + devContent: "" + secret: + filename: "key.hex" + resourceName: "projects/$PROJECT_ID/secrets/sequencerPrivateKey/versions/latest" + feeAssetDenomination: "" + sequencerAddressPrefix: astria + rollupGrpcEndpoint: "" + rollupId: "" + latencyMarginMs: "" + logLevel: "info" + +otel: + enabled: false + serviceName: "auctioneer" + endpoint: "" + tracesEndpoint: "" + tracesCompression: "gzip" + tracesTimeout: "10" + otlpHeaders: "" + traceHeaders: "" + +metrics: + enabled: false + +serviceMonitor: + # Set to enable the metrics Service and ServiceMonitor + enabled: false + port: 6060 + additionalLabels: + release: kube-prometheus-stack + +alerting: + enabled: false + interval: "" + additionalLabels: + release: kube-prometheus-stack + annotations: {} + # scrapeTimeout: 10s + # path: /metrics + prometheusRule: + enabled: true + additionalLabels: + release: kube-prometheus-stack + namespace: monitoring + rules: + - alert: Auctioneer_Node_Down + expr: up{container="auctioneer"} == 0 # Fires when no auctioneer container reports as up + for: 1m # Rough value, but long enough to avoid flapping before warning + labels: + severity: warning + annotations: + summary: Auctioneer is Down (instance {{ $labels.instance }}) + description: "auctioneer node '{{ $labels.namespace }}' has disappeared from Prometheus target discovery.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + +# When deploying in a production environment, you should use a secret provider. +# This is configured for use with GCP; set your own resource names +# and keys +secretProvider: + enabled: false + provider: gcp + parametersTemplate: |- + secrets: | + - resourceName: {{ .key.resourceName }} + fileName: "{{ .key.filename }}" + +resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1000m + memory: 1Gi + +# Default service ports +ports: + metrics: 6060 diff --git a/charts/deploy.just b/charts/deploy.just index 301620431e..797554581b 100644 --- a/charts/deploy.just +++ b/charts/deploy.just @@ -130,12 +130,32 @@ deploy-dev-rollup rollupName=defaultRollupName networkId=defaultNetworkId: -f dev/values/rollup/dev.yaml \ {{rollupName}}-chain-chart ./charts/evm-stack --namespace astria-dev-cluster +deploy-flame-dev-rollup rollupName=defaultRollupName networkId=defaultNetworkId: + helm dependency update charts/evm-stack > /dev/null + helm install \ + {{ if rollupName != '' { replace('--set config.rollup.name=# --set celestia-node.config.labelPrefix=#', '#', rollupName) } else { '' } }} \ + {{ if networkId != '' { replace('--set config.rollup.networkId=#', '#', networkId) } else { '' } }} \ + -f dev/values/rollup/flame-dev.yaml \ + {{rollupName}}-chain-chart ./charts/evm-stack --namespace astria-dev-cluster + delete-dev-rollup rollupName=defaultRollupName: @just delete chart {{rollupName}}-chain +delete-flame-dev-rollup rollupName=defaultRollupName: + @just delete-dev-rollup {{rollupName}} + wait-for-dev-rollup rollupName=defaultRollupName: kubectl rollout status --watch statefulset/{{rollupName}}-geth -n astria-dev-cluster --timeout=600s +deploy-auctioneer: + helm dependency update charts/auctioneer > /dev/null + helm install auctioneer-chart ./charts/auctioneer \ + --namespace astria-dev-cluster \ + -f dev/values/auctioneer/values.yaml + +delete-auctioneer: + @just delete chart auctioneer
astria-dev-cluster + deploy-bridge-withdrawer: helm install evm-bridge-withdrawer-chart ./charts/evm-bridge-withdrawer \ --namespace astria-dev-cluster \ diff --git a/charts/evm-rollup/Chart.yaml b/charts/evm-rollup/Chart.yaml index fd614d54d6..404198bebf 100644 --- a/charts/evm-rollup/Chart.yaml +++ b/charts/evm-rollup/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.1.0 +version: 1.1.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to diff --git a/charts/evm-rollup/files/genesis/geth-genesis.json b/charts/evm-rollup/files/genesis/geth-genesis.json index 68027198d0..116aa9b88c 100644 --- a/charts/evm-rollup/files/genesis/geth-genesis.json +++ b/charts/evm-rollup/files/genesis/geth-genesis.json @@ -15,10 +15,10 @@ {{- if .Values.genesis.cancunTime }} "cancunTime": {{ toString .Values.genesis.cancunTime | replace "\"" "" }}, {{- end }} - {{- if .Values.genesis.cancunTime }} + {{- if .Values.genesis.pragueTime }} "pragueTime": {{ toString .Values.genesis.pragueTime | replace "\"" "" }}, {{- end }} - {{- if .Values.genesis.cancunTime }} + {{- if .Values.genesis.verkleTime }} "verkleTime": {{ toString .Values.genesis.verkleTime | replace "\"" "" }}, {{- end }} "terminalTotalDifficulty": 0, diff --git a/charts/evm-rollup/files/scripts/init-geth.sh b/charts/evm-rollup/files/scripts/init-geth.sh index 6c01916d98..27357924ac 100755 --- a/charts/evm-rollup/files/scripts/init-geth.sh +++ b/charts/evm-rollup/files/scripts/init-geth.sh @@ -9,9 +9,10 @@ if [ ! -d "$data_dir/" ]; then exec geth \ {{- range $arg := .Values.config.geth.flags -}} - {{- if $arg.condition | default true -}} + {{- $noCondition := not (hasKey $arg "condition") }} + {{- if or ($noCondition) (eq (tpl $arg.condition $) "true") }} --{{ $arg.name }}{{ if $arg.value }}={{ tpl $arg.value $ }}{{ end }} \ - {{ end }} + {{- end }} {{- end -}} init $home_dir/genesis.json elif ! 
cmp -s "/scripts/geth-genesis.json" "$home_dir/genesis.json"; then diff --git a/charts/evm-rollup/templates/statefulsets.yaml b/charts/evm-rollup/templates/statefulsets.yaml index 408638328a..57733e35b2 100644 --- a/charts/evm-rollup/templates/statefulsets.yaml +++ b/charts/evm-rollup/templates/statefulsets.yaml @@ -67,7 +67,8 @@ spec: command: [ "geth" ] args: {{- range $arg := .Values.config.geth.flags }} - {{- if $arg.condition | default true }} + {{- $noCondition := not (hasKey $arg "condition") }} + {{- if or ($noCondition) (eq (tpl $arg.condition $) "true") }} - --{{ $arg.name }}{{ if $arg.value }}={{ tpl $arg.value $ }}{{ end }} {{- end }} {{- end }} diff --git a/charts/evm-rollup/values.yaml b/charts/evm-rollup/values.yaml index a2b3b96e0d..ec9cd7a9c4 100644 --- a/charts/evm-rollup/values.yaml +++ b/charts/evm-rollup/values.yaml @@ -174,13 +174,13 @@ config: - name: history.state value: "{{- if .Values.config.geth.archiveNode -}} 0 {{- else -}} 540000 {{- end }}" - name: metrics - condition: .Values.metrics.enabled + condition: "{{ .Values.metrics.enabled }}" - name: metrics.addr value: 0.0.0.0 - condition: .Values.metrics.enabled + condition: "{{ .Values.metrics.enabled }}" - name: metrics.port value: "{{ .Values.ports.metrics }}" - condition: .Values.metrics.enabled + condition: "{{ .Values.metrics.enabled }}" - name: txpool.nolocals value: "true" diff --git a/charts/evm-stack/Chart.lock b/charts/evm-stack/Chart.lock index 297cbc4735..a8e13b2518 100644 --- a/charts/evm-stack/Chart.lock +++ b/charts/evm-stack/Chart.lock @@ -4,10 +4,16 @@ dependencies: version: 0.4.0 - name: evm-rollup repository: file://../evm-rollup - version: 1.1.0 + version: 1.1.2 +- name: flame-rollup + repository: file://../flame-rollup + version: 0.0.2 - name: composer repository: file://../composer version: 1.0.0 +- name: auctioneer + repository: file://../auctioneer + version: 0.0.2 - name: evm-faucet repository: file://../evm-faucet version: 0.1.4 @@ -20,5 +26,5 @@ dependencies: - name: blockscout-stack repository: https://blockscout.github.io/helm-charts version: 1.6.8 -digest: sha256:c437d6967341b9bb6e10a809ce13e81130bfb95fb111c8712088ab443adea3f1 -generated: "2025-01-28T23:46:42.687706-05:00" +digest: sha256:ec55e7e1427dd7af6b3764d7cedc7b5b168ec2443fa140cb3000c8ed68d711ac +generated: "2025-02-20T10:44:04.460576+02:00" diff --git a/charts/evm-stack/Chart.yaml b/charts/evm-stack/Chart.yaml index 17bceddce5..640f1f9658 100644 --- a/charts/evm-stack/Chart.yaml +++ b/charts/evm-stack/Chart.yaml @@ -15,20 +15,28 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.8 - +version: 1.0.12 dependencies: - name: celestia-node version: 0.4.0 repository: "file://../celestia-node" condition: celestia-node.enabled - name: evm-rollup - version: 1.1.0 + version: 1.1.2 repository: "file://../evm-rollup" + condition: evm-rollup.enabled + - name: flame-rollup + version: 0.0.2 + repository: "file://../flame-rollup" + condition: flame-rollup.enabled - name: composer version: 1.0.0 repository: "file://../composer" condition: composer.enabled + - name: auctioneer + version: 0.0.2 + repository: "file://../auctioneer" + condition: auctioneer.enabled - name: evm-faucet version: 0.1.4 repository: "file://../evm-faucet" diff --git a/charts/evm-stack/values.yaml b/charts/evm-stack/values.yaml index e18df9b10c..18ad6e4350 100644 --- a/charts/evm-stack/values.yaml +++ b/charts/evm-stack/values.yaml @@ -41,6 +41,25 @@ evm-rollup: otlpHeaders: "{{ .Values.global.otel.otlpHeaders }}" traceHeaders: "{{ .Values.global.otel.traceHeaders }}" +flame-rollup: + enabled: false + genesis: + rollupName: "{{ .Values.global.rollupName }}" + chainId: "{{ .Values.global.evmChainId }}" + config: + conductor: + sequencerChainId: "{{ .Values.global.sequencerChainId }}" + celestiaChainId: "{{ .Values.global.celestiaChainId }}" + sequencerRpc: "{{ .Values.global.sequencerRpc }}" + sequencerGrpc: "{{ .Values.global.sequencerGrpc }}" + otel: + endpoint: "{{ .Values.global.otel.endpoint }}" + tracesEndpoint: "{{ .Values.global.otel.tracesEndpoint }}" + tracesCompression: "{{ .Values.global.otel.tracesCompression }}" + tracesTimeout: "{{ .Values.global.otel.tracesTimeout }}" + otlpHeaders: "{{ .Values.global.otel.otlpHeaders }}" + traceHeaders: "{{ .Values.global.otel.traceHeaders }}" + celestia-node: enabled: false @@ -61,6 +80,21 @@ composer: otlpHeaders: "{{ .Values.global.otel.otlpHeaders }}" traceHeaders: "{{ .Values.global.otel.traceHeaders }}" +auctioneer: + enabled: false + config: + sequencerGrpcEndpoint: "{{ .Values.global.sequencerGrpc }}" + sequencerAbciEndpoint: "{{ .Values.global.sequencerRpc }}" + sequencerChainId: "{{ .Values.global.sequencerChainId }}" + + otel: + endpoint: "{{ .Values.global.otel.endpoint }}" + tracesEndpoint: "{{ .Values.global.otel.tracesEndpoint }}" + tracesCompression: "{{ .Values.global.otel.tracesCompression }}" + tracesTimeout: "{{ .Values.global.otel.tracesTimeout }}" + otlpHeaders: "{{ .Values.global.otel.otlpHeaders }}" + traceHeaders: "{{ .Values.global.otel.traceHeaders }}" + evm-faucet: enabled: false config: diff --git a/charts/flame-rollup/.helmignore b/charts/flame-rollup/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/charts/flame-rollup/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/flame-rollup/Chart.yaml b/charts/flame-rollup/Chart.yaml new file mode 100644 index 0000000000..36313aa9db --- /dev/null +++ b/charts/flame-rollup/Chart.yaml @@ -0,0 +1,34 @@ +apiVersion: v2 +name: flame-rollup +description: A Helm chart for Flame EVM rollup in k8s deployed on top of Astria Sequencer Network + Celestia. 
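Not part of the diff: a sketch of how the two new subcharts wired into evm-stack above might be switched on together for a local deployment. The `flame-rollup.enabled`, `evm-rollup.enabled`, and `auctioneer.enabled` keys come from the Chart.yaml and values.yaml changes shown earlier; the override file name, release name, and endpoint value are hypothetical.

```bash
# Hypothetical override enabling the new flame-rollup and auctioneer
# subcharts; only the keys shown in the evm-stack diff above are used.
cat > /tmp/flame-overrides.yaml <<'EOF'
flame-rollup:
  enabled: true
evm-rollup:
  enabled: false
auctioneer:
  enabled: true
  config:
    rollupGrpcEndpoint: "http://example-geth-service:50051"  # placeholder
EOF

helm dependency update ./charts/evm-stack > /dev/null
helm install flame-chain-chart ./charts/evm-stack \
  --namespace astria-dev-cluster \
  -f /tmp/flame-overrides.yaml
```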
+ +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) + +version: 0.0.2 +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "0.0.1" + +maintainers: + - name: wafflesvonmaple + url: astria.org + - name: quasystaty1 + url: astria.org + - name: steezeburger + url: astria.org + - name: joroshiba + url: astria.org diff --git a/charts/flame-rollup/files/genesis/geth-genesis.json b/charts/flame-rollup/files/genesis/geth-genesis.json new file mode 100644 index 0000000000..497f2da41e --- /dev/null +++ b/charts/flame-rollup/files/genesis/geth-genesis.json @@ -0,0 +1,55 @@ +{ + "config": { + "chainId": {{ toString ( tpl .Values.genesis.chainId . ) | replace "\"" "" }}, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "shanghaiTime": 0, + {{- if .Values.genesis.cancunTime }} + "cancunTime": {{ toString .Values.genesis.cancunTime | replace "\"" "" }}, + {{- end }} + {{- if .Values.genesis.pragueTime }} + "pragueTime": {{ toString .Values.genesis.pragueTime | replace "\"" "" }}, + {{- end }} + {{- if .Values.genesis.verkleTime }} + "verkleTime": {{ toString .Values.genesis.verkleTime | replace "\"" "" }}, + {{- end }} + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true, + "ethash": {}, + {{- range $key, $value := .Values.genesis.extra }} + "{{ $key }}": {{ toPrettyJson $value | indent 8 | trim }}, + {{- end }} + {{- if .Values.genesis.extraDataOverride }} + "astriaExtraDataOverride": "{{ .Values.genesis.extraDataOverride }}", + {{- end }} + "astriaOverrideGenesisExtraData": {{ .Values.genesis.overrideGenesisExtraData }}, + "astriaSequencerInitialHeight": {{ toString .Values.genesis.sequencerInitialHeight | replace "\"" "" }}, + "astriaRollupName": "{{ tpl .Values.genesis.rollupName . 
}}", + "astriaCelestiaInitialHeight": {{ toString .Values.genesis.celestiaInitialHeight | replace "\"" "" }}, + "astriaCelestiaHeightVariance": {{ toString .Values.genesis.celestiaHeightVariance | replace "\"" "" }}, + "astriaBridgeAddresses": {{ toPrettyJson .Values.genesis.bridgeAddresses | indent 8 | trim }}, + "astriaFeeCollectors": {{ toPrettyJson .Values.genesis.feeCollectors | indent 8 | trim }}, + "astriaEIP1559Params": {{ toPrettyJson .Values.genesis.eip1559Params | indent 8 | trim }}, + "astriaSequencerAddressPrefix": "{{ .Values.genesis.sequencerAddressPrefix }}", + "astriaAuctioneerAddresses": {{ toPrettyJson .Values.genesis.auctioneerAddresses | indent 8 | trim }} + {{- if not .Values.global.dev }} + {{- else }} + {{- end }} + }, + "difficulty": "0", + "gasLimit": "{{ toString .Values.genesis.gasLimit | replace "\"" "" }}", + "alloc": { + {{- range $index, $value := .Values.genesis.alloc }} + {{- if $index }},{{- end }} + "{{ $value.address }}": {{ toPrettyJson $value.value | indent 8 | trim }} + {{- end }} + } +} diff --git a/charts/flame-rollup/files/keys/private_key.txt b/charts/flame-rollup/files/keys/private_key.txt new file mode 100644 index 0000000000..382cf3266f --- /dev/null +++ b/charts/flame-rollup/files/keys/private_key.txt @@ -0,0 +1 @@ +8b3a7999072c9c9314c084044fe705db11714c6c4ed7cddb64da18ea270dd203 \ No newline at end of file diff --git a/charts/flame-rollup/files/scripts/init-geth.sh b/charts/flame-rollup/files/scripts/init-geth.sh new file mode 100755 index 0000000000..77e450c30e --- /dev/null +++ b/charts/flame-rollup/files/scripts/init-geth.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -o errexit -o nounset + +if [ ! -d "$data_dir/" ]; then + echo "Initializing geth db..." + + cp /scripts/geth-genesis.json $home_dir/genesis.json + + exec geth \ + {{- range $arg := .Values.config.geth.flags -}} + {{- if hasKey $arg "condition" -}} + {{- if eq (tpl $arg.condition $) "true" -}} + --{{ $arg.name }}{{ if $arg.value }}={{ tpl $arg.value $ }}{{ end }} \ + {{- end -}} + {{- else }} + --{{ $arg.name }}{{ if $arg.value }}={{ tpl $arg.value $ }}{{ end }} \ + {{- end }} + {{- end -}} + init $home_dir/genesis.json +elif ! cmp -s "/scripts/geth-genesis.json" "$home_dir/genesis.json"; then + echo "Geth DB already initialized, but genesis file upgraded..." + + cp /scripts/geth-genesis.json $home_dir/genesis.json + + exec geth --datadir "$data_dir/" init $home_dir/genesis.json +fi diff --git a/charts/flame-rollup/templates/_helpers.tpl b/charts/flame-rollup/templates/_helpers.tpl new file mode 100644 index 0000000000..b6c1be6443 --- /dev/null +++ b/charts/flame-rollup/templates/_helpers.tpl @@ -0,0 +1,102 @@ +{{/* +Namepsace to deploy elements into. +*/}} +{{- define "rollup.namespace" -}} +{{- default .Release.Namespace .Values.global.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end }} + +{{/* The name of the rollup */}} +{{- define "rollup.name" -}} +{{- tpl .Values.genesis.rollupName . }} +{{- end }} + +{{/* +Expand the name of the chart. +*/}} +{{- define "rollup.appName" -}} +{{- default (include "rollup.name" .) | trunc 63 | trimSuffix "-" }}-astria-dev-cluster +{{- end }} + +{{/* +Common labels +*/}} +{{- define "rollup.labels" -}} +{{ include "rollup.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "rollup.selectorLabels" -}} +app: {{ include "rollup.appName" . 
}} +{{- end }} + +{{/* +The log level represented as a number +*/}} +{{- define "rollup.logLevelNum" -}} +{{- if eq .Values.config.logLevel "error" }} +1 +{{- else if eq .Values.config.logLevel "warn" }} +2 +{{- else if eq .Values.config.logLevel "info" }} +3 +{{- else if eq .Values.config.logLevel "debug" }} +4 +{{- else if eq .Values.config.logLevel "trace" }} +5 +{{- end }} +{{- end }} + +{{/* +Full image paths for Astria built images +*/}} +{{- define "rollup.image" -}} +{{ .Values.images.geth.repo }}:{{ if .Values.global.dev }}{{ .Values.images.geth.devTag }}{{ else }}{{ .Values.images.geth.tag }}{{ end }} +{{- end }} +{{- define "conductor.image" -}} +{{ .Values.images.conductor.repo }}:{{ if .Values.global.dev }}{{ .Values.images.conductor.devTag }}{{ else }}{{ .Values.images.conductor.tag }}{{ end }} +{{- end }} + + +{{/* +Return if ingress is stable. +*/}} +{{- define "rollup.ingress.isStable" -}} +{{- eq (include "rollup.ingress.apiVersion" .) "networking.k8s.io/v1" }} +{{- end }} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "rollup.ingress.supportsIngressClassName" -}} +{{- or (eq (include "rollup.ingress.isStable" .) "true") (and (eq (include "rollup.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "rollup.ingress.supportsPathType" -}} +{{- or (eq (include "rollup.ingress.isStable" .) "true") (and (eq (include "rollup.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "rollup.ingress.apiVersion" -}} +{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }} +{{- print "networking.k8s.io/v1" }} +{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +{{- print "networking.k8s.io/v1beta1" }} +{{- else }} +{{- print "extensions/v1beta1" }} +{{- end }} +{{- end }} + +{{- define "rollup.gethHomeDir" -}} +/home/geth +{{- end }} + +{{- define "rollup.gethDataDir" -}} +{{ include "rollup.gethHomeDir" . }}/{{ include "rollup.name" . }} +{{- end }} diff --git a/charts/flame-rollup/templates/configmap.yaml b/charts/flame-rollup/templates/configmap.yaml new file mode 100644 index 0000000000..bf733912bf --- /dev/null +++ b/charts/flame-rollup/templates/configmap.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "rollup.name" . }}-conductor-env + namespace: {{ include "rollup.namespace" . }} +data: + ASTRIA_CONDUCTOR_LOG: "astria_conductor={{ .Values.config.logLevel }}" + ASTRIA_CONDUCTOR_CELESTIA_NODE_HTTP_URL: "{{ .Values.config.celestia.rpc }}" + ASTRIA_CONDUCTOR_EXPECTED_CELESTIA_CHAIN_ID: "{{ tpl .Values.config.conductor.celestiaChainId . }}" + ASTRIA_CONDUCTOR_CELESTIA_BEARER_TOKEN: "{{ .Values.config.celestia.token }}" + ASTRIA_CONDUCTOR_CELESTIA_BLOCK_TIME_MS: "{{ .Values.config.conductor.celestiaBlockTimeMs }}" + ASTRIA_CONDUCTOR_EXECUTION_RPC_URL: "http://127.0.0.1:{{ .Values.ports.executionGRPC }}" + ASTRIA_CONDUCTOR_EXECUTION_COMMIT_LEVEL: "{{ .Values.config.conductor.executionCommitLevel }}" + ASTRIA_CONDUCTOR_SEQUENCER_GRPC_URL: "{{ tpl .Values.config.conductor.sequencerGrpc . }}" + ASTRIA_CONDUCTOR_SEQUENCER_COMETBFT_URL: "{{ tpl .Values.config.conductor.sequencerRpc . 
}}" + ASTRIA_CONDUCTOR_EXPECTED_SEQUENCER_CHAIN_ID: "{{ tpl .Values.config.conductor.sequencerChainId . }}" + ASTRIA_CONDUCTOR_SEQUENCER_BLOCK_TIME_MS: "{{ .Values.config.conductor.sequencerBlockTimeMs }}" + ASTRIA_CONDUCTOR_NO_METRICS: "{{ not .Values.metrics.enabled }}" + ASTRIA_CONDUCTOR_METRICS_HTTP_LISTENER_ADDR: "0.0.0.0:{{ .Values.ports.conductorMetrics }}" + ASTRIA_CONDUCTOR_SEQUENCER_REQUESTS_PER_SECOND: "{{ .Values.config.conductor.sequencerRequestsPerSecond }}" + ASTRIA_CONDUCTOR_FORCE_STDOUT: "{{ .Values.global.useTTY }}" + ASTRIA_CONDUCTOR_PRETTY_PRINT: "{{ .Values.global.useTTY }}" + NO_COLOR: "{{ .Values.global.useTTY }}" + ASTRIA_CONDUCTOR_NO_OTEL: "{{ not .Values.otel.enabled }}" + ASTRIA_CONDUCTOR_NO_CELESTIA_AUTH: "{{ not .Values.config.celestia.token }}" + OTEL_EXPORTER_OTLP_ENDPOINT: "{{ tpl .Values.otel.endpoint . }}" + OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: "{{ tpl .Values.otel.tracesEndpoint . }}" + OTEL_EXPORTER_OTLP_TRACES_TIMEOUT: "{{ tpl .Values.otel.tracesTimeout . }}" + OTEL_EXPORTER_OTLP_TRACES_COMPRESSION: "{{ tpl .Values.otel.tracesCompression . }}" + OTEL_EXPORTER_OTLP_HEADERS: "{{ tpl .Values.otel.otlpHeaders . }}" + OTEL_EXPORTER_OTLP_TRACE_HEADERS: "{{ tpl .Values.otel.traceHeaders .}}" + OTEL_SERVICE_NAME: "{{ tpl .Values.otel.serviceNamePrefix . }}-conductor" + {{- if not .Values.global.dev }} + {{- else }} + {{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "rollup.name" . }}-geth-env + namespace: {{ include "rollup.namespace" . }} +data: + home_dir: '{{ include "rollup.gethHomeDir" . }}' + data_dir: '{{ include "rollup.gethDataDir" . }}' +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "rollup.name" . }}-executor-scripts + namespace: {{ include "rollup.namespace" . }} +data: + geth-genesis.json: | + {{- tpl (.Files.Get "files/genesis/geth-genesis.json") $ | nindent 4 }} + init-geth.sh: | + {{- tpl (.Files.Get "files/scripts/init-geth.sh") $ | nindent 4 }} +--- diff --git a/charts/flame-rollup/templates/ingress.yaml b/charts/flame-rollup/templates/ingress.yaml new file mode 100644 index 0000000000..efb13ca03a --- /dev/null +++ b/charts/flame-rollup/templates/ingress.yaml @@ -0,0 +1,70 @@ +{{- if .Values.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "rollup.ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "rollup.ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "rollup.ingress.supportsPathType" .) "true" -}} + +{{- range $service, $ingress := .Values.ingress.services }} +{{- if $ingress.enabled -}} +{{- $servicePort := $ingress.service.port -}} +{{- $serviceName := tpl $ingress.service.name $ -}} +{{- $ingressPath := $ingress.path -}} +{{- $ingressPathType := $ingress.pathType -}} +{{- $extraPaths := $ingress.extraPaths }} +--- +apiVersion: {{ include "rollup.ingress.apiVersion" $ }} +kind: Ingress +metadata: + name: {{ include "rollup.name" $ }}-{{ $service }}-ingress + namespace: {{ include "rollup.namespace" $ }} + labels: + {{- with $ingress.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- if not $ingressSupportsIngressClassName }} + kubernetes.io/ingress.class: {{ $.Values.ingress.className }} + {{- end }} + {{- if $ingressApiIsStable }} + {{- range $key, $value := $ingress.annotations }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if $ingressSupportsIngressClassName }} + ingressClassName: {{ $.Values.ingress.className }} + {{- end -}} + {{- with $ingress.service }} + defaultBackend: + service: + {{- tpl (toYaml .) $ | nindent 6 }} + {{- end }} + rules: + {{- with $ingress.hosts }} + {{- range $host := . }} + - host: {{ tpl $host $ }} + http: + paths: + {{- with $extraPaths }} + {{- toYaml . | nindent 10 }} + {{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + {{- tpl (toYaml $ingress.service) $ | nindent 16 }} + {{- else }} + serviceName: {{ tpl $serviceName $ }} + servicePort: {{ tpl $servicePort $ }} + {{- end }} + {{- end }} + {{- end }} + {{- if $ingress.tls }} + tls: + {{- tpl (toYaml $ingress.tls) $ | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/flame-rollup/templates/prometheusrule.yaml b/charts/flame-rollup/templates/prometheusrule.yaml new file mode 100644 index 0000000000..225805dd51 --- /dev/null +++ b/charts/flame-rollup/templates/prometheusrule.yaml @@ -0,0 +1,20 @@ +{{- if .Values.alerting.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "rollup.appName" . }} +{{- if .Values.alerting.prometheusRule.namespace }} + namespace: {{ .Values.alerting.prometheusRule.namespace | quote }} +{{- end }} + labels: + {{- include "rollup.labels" . | nindent 4 }} + {{- if .Values.alerting.prometheusRule.additionalLabels }} + {{- toYaml .Values.alerting.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.alerting.prometheusRule.rules }} + groups: + - name: {{ template "rollup.name" . }} + rules: {{- toYaml .Values.alerting.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/charts/flame-rollup/templates/service.yaml b/charts/flame-rollup/templates/service.yaml new file mode 100644 index 0000000000..eee3a07c5e --- /dev/null +++ b/charts/flame-rollup/templates/service.yaml @@ -0,0 +1,38 @@ +kind: Service +apiVersion: v1 +metadata: + name: {{ include "rollup.name" . }}-evm-service + namespace: {{ include "rollup.namespace" . }} +spec: + selector: + app: {{ include "rollup.appName" . }} + ports: + - name: json-rpc-svc + port: {{ .Values.ports.jsonRPC }} + targetPort: json-rpc + - name: ws-rpc-svc + port: {{ .Values.ports.wsRPC }} + targetPort: ws-rpc + - name: exec-grpc-svc + port: {{ .Values.ports.executionGRPC }} + targetPort: execution-grpc +--- +{{- if .Values.metrics.enabled }} +kind: Service +apiVersion: v1 +metadata: + name: {{ include "rollup.name" . }}-metrics + namespace: {{ include "rollup.namespace" . }} + labels: + app: {{ include "rollup.appName" . }} +spec: + selector: + app: {{ include "rollup.appName" . 
}} + ports: + - name: geth-metr + port: {{ .Values.ports.metrics }} + targetPort: geth-metr + - name: conductor-metr + port: {{ .Values.ports.conductorMetrics }} + targetPort: conductor-metr +{{- end }} diff --git a/charts/flame-rollup/templates/servicemonitor.yaml b/charts/flame-rollup/templates/servicemonitor.yaml new file mode 100644 index 0000000000..e7b6ad4493 --- /dev/null +++ b/charts/flame-rollup/templates/servicemonitor.yaml @@ -0,0 +1,36 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{include "rollup.name" . }}-geth-metrics + labels: + {{- include "rollup.labels" . | nindent 4 }} + {{- with .Values.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: geth-metrics + namespaceSelector: + matchNames: + - {{ include "rollup.namespace" . }} + selector: + matchLabels: + app: {{ include "rollup.appName" . }} + endpoints: + - port: geth-metr + path: /debug/metrics/prometheus + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + - port: conductor-metr + path: / + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} +{{- end }} diff --git a/charts/flame-rollup/templates/statefulsets.yaml b/charts/flame-rollup/templates/statefulsets.yaml new file mode 100644 index 0000000000..6b5c070073 --- /dev/null +++ b/charts/flame-rollup/templates/statefulsets.yaml @@ -0,0 +1,109 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rollup.name" . }}-geth + labels: + app: {{ include "rollup.appName" . }} + namespace: {{ include "rollup.namespace" . }} +spec: + replicas : {{ .Values.global.replicaCount }} + selector: + matchLabels: + app: {{ include "rollup.appName" . }} + template: + metadata: + name: {{ include "rollup.name" . }}-execution-chain + labels: + app: {{ include "rollup.appName" . }} + spec: + initContainers: + {{- if .Values.config.geth.purgeMempool }} + - name: purge-mempool + image: {{ include "rollup.image" . }} + imagePullPolicy: {{ .Values.images.geth.pullPolicy }} + command: [ "sh", "-c", "rm" ] + args: + - -f + - {{ include "rollup.gethDataDir" . }}/geth/transactions.rlp + volumeMounts: + - mountPath: /home/geth + name: {{ include "rollup.name" $ }}-rollup-shared-storage-vol + subPath: {{ include "rollup.name" . }}/executor + {{- end }} + - name: init-geth + command: [ "/scripts/init-geth.sh" ] + image: {{ include "rollup.image" . }} + imagePullPolicy: {{ .Values.images.geth.pullPolicy }} + envFrom: + - configMapRef: + name: {{ include "rollup.name" . }}-geth-env + volumeMounts: + - mountPath: /scripts/ + name: {{ include "rollup.name" . }}-executor-scripts-volume + - mountPath: /home/geth + name: {{ include "rollup.name" $ }}-rollup-shared-storage-vol + subPath: {{ include "rollup.name" . }}/executor + containers: + - name: geth + command: [ "geth" ] + args: + {{- range $arg := .Values.config.geth.flags }} + {{- if hasKey $arg "condition" }} + {{- if eq (tpl $arg.condition $) "true" }} + - --{{ $arg.name }}{{ if $arg.value }}={{ tpl $arg.value $ }}{{ end }} + {{- end }} + {{- else }} + - --{{ $arg.name }}{{ if $arg.value }}={{ tpl $arg.value $ }}{{ end }} + {{- end }} + {{- end }} + image: {{ include "rollup.image" . 
}} + imagePullPolicy: {{ .Values.images.geth.pullPolicy }} + volumeMounts: + - mountPath: /scripts/ + name: {{ include "rollup.name" . }}-executor-scripts-volume + readOnly: true + - mountPath: /home/geth + name: {{ include "rollup.name" . }}-rollup-shared-storage-vol + subPath: {{ include "rollup.name" . }}/executor + ports: + - containerPort: {{ .Values.ports.jsonRPC }} + name: json-rpc + - containerPort: {{ .Values.ports.wsRPC }} + name: ws-rpc + - containerPort: {{ .Values.ports.executionGRPC }} + name: execution-grpc + {{- if .Values.metrics.enabled }} + - containerPort: {{ .Values.ports.metrics }} + name: geth-metr + {{- end }} + resources: + {{- toYaml .Values.resources.geth | trim | nindent 12 }} + - name: conductor + image: {{ include "conductor.image" . }} + imagePullPolicy: {{ .Values.images.conductor.pullPolicy }} + command: [ "/usr/local/bin/astria-conductor" ] + stdin: {{ .Values.global.useTTY }} + tty: {{ .Values.global.useTTY }} + envFrom: + - configMapRef: + name: {{ include "rollup.name" . }}-conductor-env + resources: + {{- toYaml .Values.resources.conductor | trim | nindent 12 }} + {{- if .Values.metrics.enabled }} + ports: + - containerPort: {{ .Values.ports.conductorMetrics }} + name: conductor-metr + {{- end }} + volumes: + - name: {{ include "rollup.name" . }}-executor-scripts-volume + configMap: + name: {{ include "rollup.name" . }}-executor-scripts + defaultMode: 0500 + - name: {{ include "rollup.name" $ }}-rollup-shared-storage-vol + {{- if .Values.storage.enabled }} + persistentVolumeClaim: + claimName: {{ include "rollup.name" $ }}-rollup-shared-storage-pvc-geth + {{- else }} + emptyDir: {} + {{- end }} +--- diff --git a/charts/flame-rollup/templates/storageclasses.yaml b/charts/flame-rollup/templates/storageclasses.yaml new file mode 100644 index 0000000000..ab93a6ece6 --- /dev/null +++ b/charts/flame-rollup/templates/storageclasses.yaml @@ -0,0 +1,13 @@ +{{/* We only want to create a storage class if we are local. */}} +{{/* For production, you need to create a StorageClass on GKE. */}} +{{- if and .Values.storage.enabled .Values.storage.local }} + {{- range $key, $value := .Values.storage.entities }} +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ include "rollup.name" $ }}-{{ $value.persistentVolumeName }}-geth-local +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: Retain + {{- end }} +{{- end }} diff --git a/charts/flame-rollup/templates/volumes.yaml b/charts/flame-rollup/templates/volumes.yaml new file mode 100644 index 0000000000..7aa5e3658f --- /dev/null +++ b/charts/flame-rollup/templates/volumes.yaml @@ -0,0 +1,54 @@ +{{/* We need to manually create a PersistentVolume when local. */}} +{{/* In prod, a PV will be created by the StorageClass' provisioner using the dynamic provisioning feature.
*/}} +{{- if and .Values.storage.enabled }} + {{- range $key, $value := .Values.storage.entities }} + {{- if $.Values.storage.local }} +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ include "rollup.name" $ }}-{{ $value.persistentVolumeName }}-geth-pv +spec: + capacity: + storage: {{ $value.size }} + volumeMode: Filesystem + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + storageClassName: {{ include "rollup.name" $ }}-{{ $value.persistentVolumeName }}-geth-local + local: + path: {{ $value.path }} + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - astria-dev-cluster-control-plane + - astria-dev-cluster-worker +--- + {{- end }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "rollup.name" $ }}-{{ $value.persistentVolumeName }}-pvc-geth + namespace: {{ include "rollup.namespace" $ }} + labels: + "app.kubernetes.io/name": "{{ include "rollup.name" $ }}-{{ $.Chart.Name }}" + "app.kubernetes.io/managed-by": {{ $.Release.Service | quote }} + "helm.sh/chart": {{ $.Chart.Name }}-{{ $.Chart.Version | replace "+" "_" }} +spec: + {{- if $.Values.storage.local }} + storageClassName: {{ include "rollup.name" $ }}-{{ $value.persistentVolumeName }}-geth-local + {{- end }} + {{- if $value.storageClassName }} + storageClassName: {{ $value.storageClassName }} + {{- end }} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ $value.size }} +--- + {{- end }} +{{- end }} diff --git a/charts/flame-rollup/values.yaml b/charts/flame-rollup/values.yaml new file mode 100644 index 0000000000..7ed440cf49 --- /dev/null +++ b/charts/flame-rollup/values.yaml @@ -0,0 +1,313 @@ +global: + namespaceOverride: "" + replicaCount: 1 + # Whether to use tty readable logging for astria services, when false use json. + # Best to be false in production environments, true for clean logs on local dev. + useTTY: false + dev: false + +images: + geth: + repo: ghcr.io/astriaorg/flame + pullPolicy: IfNotPresent + tag: sha-457e1f9 + devTag: sha-457e1f9 + conductor: + repo: ghcr.io/astriaorg/conductor + pullPolicy: IfNotPresent + tag: 1.0.0 + devTag: latest + + +genesis: + ## These values are used to configure the genesis block of the rollup chain + ## no defaults as they are unique to each chain + + # The name of the rollup chain, used to generate the Rollup ID + rollupName: "" + # Block height to start syncing rollup from, lowest possible is 2 + sequencerInitialHeight: "" + # The first Celestia height to utilize when looking for rollup data + celestiaInitialHeight: "" + # The variance in Celestia height to allow before halting the chain + celestiaHeightVariance: "" + # Will fill the extra data in each block, can be left empty + # can also fill with something unique for your chain. + extraDataOverride: "" + + ## These are general configuration values with some recommended defaults + + # Configure the gas Limit + gasLimit: "50000000" + # If set to true the genesis block will contain extra data + overrideGenesisExtraData: true + # The hrp for bech32m addresses, unlikely to be changed + sequencerAddressPrefix: "astria" + + ## These values are used to configure astria native bridging + ## Many of the fields have commented out example fields + + # Configure the sequencer bridge addresses and allowed assets if using + # the astria canonical bridge. Recommend removing alloc values if so. 
+ bridgeAddresses: [] + # - address: "684ae50c49a434199199c9c698115391152d7b3f" + # startHeight: 1 + # assetDenom: "nria" + # senderAddress: "0x0000000000000000000000000000000000000000" + # assetPrecision: 9 + + + ## Fee configuration + + # Configure the fee collector for the evm tx fees, activated at block heights. + # If not configured, all tx fees will be burned. + feeCollectors: {} + # 1: "0xaC21B97d35Bf75A7dAb16f35b111a50e78A72F30" + # Configure EIP-1559 params, activated at block heights + eip1559Params: {} + # 1: + # minBaseFee: 0 + # elasticityMultiplier: 2 + # baseFeeChangeDenominator: 8 + auctioneerAddresses: {} + # 1: "astria1ferdmm38w7zr4ankmntst0g0qg8e7ygeu3vxcy" + + ## Standard Eth Genesis config values + # An EVM chain number id, different from the astria rollup name + chainId: "" + # Configuration of Eth forks, setting to 0 will enable from height, + # left as is these forks will not activate. + cancunTime: "" + pragueTime: "" + verkleTime: "" + # Can configure the genesis allocs for the chain + alloc: + # Deploying the deterministic deploy proxy contract in genesis + # Forge and other tools use this for their CREATE2 usage, but + # can only be included through the genesis block after EIP-155 + # https://github.com/Arachnid/deterministic-deployment-proxy + - address: "0x4e59b44847b379578588920cA78FbF26c0B4956C" + value: + balance: "0" + code: "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3" + # Example of simple genesis account funding + # - address: "0xaC21B97d35Bf75A7dAb16f35b111a50e78A72F30" + # value: + # # The balance must be a string due to size + # balance: "1000000000000000000000000000" + +config: + # The level at which core astria components will log out + # Options are: error, warn, info, and debug + logLevel: "debug" + + geth: + # Set to true to keep history of all blocks + archiveNode: false + # Set to true to enable auctioneer functionality + auctioneer: false + # Set to true to clear the mempool on startup/restart + purgeMempool: false + flags: + - name: datadir + value: '{{ include "rollup.gethDataDir" . }}' + - name: networkid + value: "{{ tpl .Values.genesis.chainId . 
}}" + - name: http + - name: http.addr + value: 0.0.0.0 + - name: http.port + value: "{{ .Values.ports.jsonRPC }}" + - name: http.corsdomain + value: "*" + - name: http.vhosts + value: "*" + - name: http.api + value: eth,net,web3,debug,txpool + - name: ws + - name: ws.addr + value: 0.0.0.0 + - name: ws.port + value: "{{ .Values.ports.wsRPC }}" + - name: ws.origins + value: "*" + - name: grpc + - name: grpc.addr + value: 0.0.0.0 + - name: grpc.port + value: "{{ .Values.ports.executionGRPC }}" + - name: db.engine + value: pebble # Can be set to 'pebble' or 'leveldb' + - name: maxpeers + value: "0" + - name: rpc.gascap + value: "{{ .Values.genesis.gasLimit }}" + - name: rpc.txfeecap + value: "0" + - name: gcmode + value: "{{- if .Values.config.geth.archiveNode -}} archive {{- else -}} full {{- end }}" + - name: state.scheme + value: "{{- if .Values.config.geth.archiveNode -}} hash {{- else -}} path {{- end }}" + - name: history.transactions + value: "{{- if .Values.config.geth.archiveNode -}} 0 {{- else -}} 2350000 {{- end }}" + - name: history.state + value: "{{- if .Values.config.geth.archiveNode -}} 0 {{- else -}} 540000 {{- end }}" + - name: metrics + condition: "{{- if .Values.metrics.enabled -}} true {{- else -}} false {{- end }}" + - name: metrics.addr + value: 0.0.0.0 + condition: "{{- if .Values.metrics.enabled -}} true {{- else -}} false {{- end }}" + - name: metrics.port + value: "{{ .Values.ports.metrics }}" + condition: "{{- if .Values.metrics.enabled -}} true {{- else -}} false {{- end }}" + - name: txpool.nolocals + value: "true" + - name: auctioneer + condition: "{{- if .Values.config.geth.auctioneer -}} true {{- else -}} false {{- end }}" + + conductor: + # Determines what will drive block execution, options are: + # - "SoftOnly" -> blocks are only pulled from the sequencer + # - "FirmOnly" -> blocks are only pulled from DA + # - "SoftAndFirm" -> blocks are pulled from both the sequencer and DA + executionCommitLevel: 'SoftAndFirm' + # The chain id of the Astria sequencer chain conductor communicates with + sequencerChainId: "" + # The expected fastest block time possible from sequencer, determines polling + # rate. + sequencerBlockTimeMs: 2000 + # The expected fastest block time possible from DA, determines polling rate. + celestiaBlockTimeMs: 6000 + # URL path for the sequencer + sequencerRpc: "" + # gRPC path for the sequencer + sequencerGrpc: "" + # The maximum number of requests to make to the sequencer per second + sequencerRequestsPerSecond: 500 + # The chain id of the celestia network the conductor communicates with + celestiaChainId: "" + + celestia: + # if config.rollup.executionLevel is NOT 'SoftOnly' AND celestia-node is not enabled + # the rpc, ws, and token fields must be set to access celestia network. + rpc: "" + token: "" + +metrics: + # set to enable prometheus metrics + enabled: false + +otel: + enabled: false + serviceNamePrefix: '{{ include "rollup.name" . 
}}' + endpoint: "" + tracesEndpoint: "" + tracesCompression: "gzip" + tracesTimeout: "10" + otlpHeaders: "" + traceHeaders: "" + +serviceMonitor: + # set to enable port svc and service monitor + enabled: false + port: 6060 + additionalLabels: + release: kube-prometheus-stack +alerting: + enabled: false + interval: "" + additionalLabels: + release: kube-prometheus-stack + annotations: {} + # scrapeTimeout: 10s + # path: /metrics + prometheusRule: + enabled: true + additionalLabels: + release: kube-prometheus-stack + namespace: monitoring + rules: + - alert: Geth_Slow_Finalized + expr: increase(chain_head_finalized{namespace="astria-dusk3-cluster"}[5m]) < 20 # Insert your query Expression + for: 5m # Rough number but should be enough to init warn + labels: + severity: critical + annotations: + summary: Slow Chain Head Finalized (instance {{ $labels.instance }}) + description: "Chain Head Finalized on {{ $labels.namespace }} is increasing at a rate slower than 20 blocks per 5 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + +ingress: + enabled: false + labels: {} + hostname: localdev.me + className: nginx + services: + rpc: + enabled: true + hosts: + - 'executor.{{ include "rollup.name" . }}.{{ .Values.ingress.hostname }}' + path: / + pathType: Prefix + service: + name: '{{ include "rollup.name" . }}-evm-service' + port: + name: json-rpc-svc + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + tls: {} + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + ws: + enabled: true + hosts: + - ws-executor.{{ include "rollup.name" . }}.{{ .Values.ingress.hostname }} + path: / + pathType: Prefix + service: + name: '{{ include "rollup.name" . }}-evm-service' + port: + name: ws-rpc-svc + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + tls: {} + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +# Default persistent storage values +# NOTE - `rollupName` will be used with `persistentVolumeName` to generate names for kubernetes resources. +# e.g. astria-executor-pv, astria-executor-pvc +resources: + conductor: + requests: + cpu: 100m + memory: 200Mi + limits: + cpu: 1000m + memory: 2Gi + geth: + requests: + cpu: 16000m + memory: 32Gi + +storage: + enabled: false + local: true + entities: + rollupSharedStorage: + size: "5Gi" + persistentVolumeName: "rollup-shared-storage" + path: "/data/rollup-data" + +# Default service ports +ports: + jsonRPC: 8545 + wsRPC: 8546 + executionGRPC: 50051 + metrics: 6060 + conductorMetrics: 9000 diff --git a/charts/hermes/Chart.yaml b/charts/hermes/Chart.yaml index d7fe9a429e..c1907a915a 100644 --- a/charts/hermes/Chart.yaml +++ b/charts/hermes/Chart.yaml @@ -15,13 +15,13 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.5.2 +version: 0.6.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "0.3.0" +appVersion: "0.4.0" maintainers: - name: wafflesvonmaple diff --git a/charts/hermes/values.yaml b/charts/hermes/values.yaml index 7e27fcea9b..3393e4a31b 100644 --- a/charts/hermes/values.yaml +++ b/charts/hermes/values.yaml @@ -3,7 +3,7 @@ global: replicaCount: 1 logLevel: debug -image: ghcr.io/astriaorg/hermes:0.3.0 +image: ghcr.io/astriaorg/hermes:0.4.0 imagePullPolicy: IfNotPresent fullnameOverride: "" diff --git a/charts/sequencer/Chart.yaml b/charts/sequencer/Chart.yaml index 5107f393d6..9e83eb2094 100644 --- a/charts/sequencer/Chart.yaml +++ b/charts/sequencer/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.2 +version: 1.0.5 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/charts/sequencer/files/cometbft/config/config.toml b/charts/sequencer/files/cometbft/config/config.toml index b6d8ae68e1..586227f6e9 100644 --- a/charts/sequencer/files/cometbft/config/config.toml +++ b/charts/sequencer/files/cometbft/config/config.toml @@ -16,7 +16,7 @@ version = "0.38.8" # TCP or UNIX socket address of the ABCI application, # or the name of an ABCI application compiled in with the CometBFT binary -proxy_app = "tcp://127.0.0.1:{{ .Values.ports.sequencerABCI }}" +proxy_app = "{{ include "sequencer.abci_url" . }}" # A custom human readable name for this node moniker = "{{ .Values.moniker }}" diff --git a/charts/sequencer/files/scripts/init-cometbft.sh b/charts/sequencer/files/scripts/init-cometbft.sh index 4c9ddb4389..2cd23ae3f1 100644 --- a/charts/sequencer/files/scripts/init-cometbft.sh +++ b/charts/sequencer/files/scripts/init-cometbft.sh @@ -39,5 +39,3 @@ else echo "Updating config directory..." cp /config/* /cometbft/config/ fi - -chmod -R 0777 /cometbft diff --git a/charts/sequencer/templates/_helpers.tpl b/charts/sequencer/templates/_helpers.tpl index 2dc1da25ab..109bf5cd2a 100644 --- a/charts/sequencer/templates/_helpers.tpl +++ b/charts/sequencer/templates/_helpers.tpl @@ -69,9 +69,23 @@ name: {{ .Values.moniker }}-sequencer-metrics {{- end }} {{/* New sequencer address */}} -{{- define "sequencer.address"}}{ "bech32m": "{{ . }}" } +{{- define "sequencer.address" -}} +{ "bech32m": "{{ . }}" } {{- end }} {{/* uint64 fee converted to a astria proto Uint128 with only lo set */}} -{{- define "sequencer.toUint128Proto"}}{ "lo": {{ . }} } +{{- define "sequencer.toUint128Proto" -}} +{ "lo": {{ . }} } +{{- end }} + +{{- define "sequencer.socket_directory" -}} +/sockets/ +{{- end }} + +{{- define "sequencer.abci_url" -}} +{{- if and .Values.global.dev .Values.sequencer.abciUDS -}} +unix://{{- include "sequencer.socket_directory" . }}abci.sock +{{- else -}} +tcp://127.0.0.1:{{ .Values.ports.sequencerABCI }} +{{- end }} {{- end }} diff --git a/charts/sequencer/templates/configmaps.yaml b/charts/sequencer/templates/configmaps.yaml index 89f9deedea..db6199451c 100644 --- a/charts/sequencer/templates/configmaps.yaml +++ b/charts/sequencer/templates/configmaps.yaml @@ -54,8 +54,7 @@ metadata: name: {{ .Values.moniker }}-sequencer-env namespace: {{ include "sequencer.namespace" . 
}} data: - ASTRIA_SEQUENCER_LOG: "astria_sequencer=debug" - ASTRIA_SEQUENCER_LISTEN_ADDR: "127.0.0.1:{{ .Values.ports.sequencerABCI }}" + ASTRIA_SEQUENCER_LOG: "info" ASTRIA_SEQUENCER_DB_FILEPATH: "/sequencer/penumbra.db" ASTRIA_SEQUENCER_MEMPOOL_PARKED_MAX_TX_COUNT: "{{ .Values.sequencer.mempool.parked.maxTxCount }}" # Socket address for GRPC server @@ -74,6 +73,9 @@ data: OTEL_EXPORTER_OTLP_TRACE_HEADERS: "{{ .Values.sequencer.otel.traceHeaders }}" OTEL_SERVICE_NAME: "{{ tpl .Values.sequencer.otel.serviceName . }}" {{- if not .Values.global.dev }} + ASTRIA_SEQUENCER_LISTEN_ADDR: "127.0.0.1:{{ .Values.ports.sequencerABCI }}" {{- else }} + ASTRIA_SEQUENCER_ABCI_LISTEN_URL: "{{ include "sequencer.abci_url" . }}" + ASTRIA_SEQUENCER_NO_OPTIMISTIC_BLOCKS: "{{ not .Values.sequencer.optimisticBlockApis.enabled }}" {{- end }} --- diff --git a/charts/sequencer/templates/statefulsets.yaml b/charts/sequencer/templates/statefulsets.yaml index aa1fdf79d0..6e412d662a 100644 --- a/charts/sequencer/templates/statefulsets.yaml +++ b/charts/sequencer/templates/statefulsets.yaml @@ -16,6 +16,9 @@ spec: labels: app: {{ .Values.moniker }}-sequencer spec: + securityContext: + runAsUser: 1000 + fsGroup: 2000 initContainers: - command: [ "/scripts/init-cometbft.sh" ] name: config-cometbft @@ -51,6 +54,8 @@ spec: - mountPath: /sequencer name: sequencer-shared-storage-vol subPath: {{ .Values.moniker }}/sequencer + - mountPath: {{ include "sequencer.socket_directory" . }} + name: socket-volume ports: - containerPort: {{ .Values.ports.sequencerABCI }} name: sequencer-abci @@ -84,6 +89,8 @@ spec: - mountPath: /secrets readOnly: true name: sequencer-secret-keys-vol + - mountPath: {{ include "sequencer.socket_directory" . }} + name: socket-volume ports: - containerPort: {{ .Values.ports.cometbftP2P }} name: cometbft-p2p @@ -101,6 +108,8 @@ spec: cpu: {{ .Values.resources.cometbft.limits.cpu }} memory: {{ .Values.resources.cometbft.limits.memory }} volumes: + - name: socket-volume + emptyDir: {} - name: cometbft-config-volume configMap: name: {{ .Values.moniker }}-cometbft-config diff --git a/charts/sequencer/values.yaml b/charts/sequencer/values.yaml index 2a23f01e26..e3e21f3741 100644 --- a/charts/sequencer/values.yaml +++ b/charts/sequencer/values.yaml @@ -28,8 +28,8 @@ images: cometBFT: repo: docker.io/cometbft/cometbft pullPolicy: IfNotPresent - tag: v0.38.8 - devTag: v0.38.8 + tag: v0.38.17 + devTag: v0.38.17 sequencer: repo: ghcr.io/astriaorg/sequencer pullPolicy: IfNotPresent @@ -115,6 +115,7 @@ genesis: # pubKey: lV57+rGs2vac7mvkGHP1oBFGHPJM3a+WoAzeFDCJDNU= sequencer: + abciUDS: true mempool: parked: maxTxCount: 200 @@ -130,6 +131,9 @@ sequencer: tracesTimeout: 10 otlpHeaders: traceHeaders: + optimisticBlockApis: + # set to true to enable optimistic block APIs + enabled: false cometbft: config: @@ -321,7 +325,7 @@ storage: local: true entities: sequencerSharedStorage: - size: "5Gi" + size: "50Gi" persistentVolumeName: "sequencer-shared-storage" path: "/data/sequencer-data" diff --git a/crates/astria-auctioneer/CHANGELOG.md b/crates/astria-auctioneer/CHANGELOG.md new file mode 100644 index 0000000000..0d3836732c --- /dev/null +++ b/crates/astria-auctioneer/CHANGELOG.md @@ -0,0 +1,14 @@ + + +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +### Added + +- Initial release [#1839](https://github.com/astriaorg/astria/pull/1839). diff --git a/crates/astria-auctioneer/Cargo.toml b/crates/astria-auctioneer/Cargo.toml new file mode 100644 index 0000000000..9b9cfe9f8c --- /dev/null +++ b/crates/astria-auctioneer/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "astria-auctioneer" +version = "0.0.1" +edition = "2021" +rust-version = "1.81" +license = "MIT OR Apache-2.0" +readme = "README.md" +repository = "https://github.com/astriaorg/astria" +homepage = "https://astria.org" + +[dependencies] +astria-build-info = { path = "../astria-build-info", features = ["runtime"] } +astria-core = { path = "../astria-core", features = ["serde", "client"] } +astria-eyre = { path = "../astria-eyre" } +astria-telemetry = { path = "../astria-telemetry", features = ["display"] } +config = { package = "astria-config", path = "../astria-config" } +sequencer_client = { package = "astria-sequencer-client", path = "../astria-sequencer-client" } + +base64 = { workspace = true } +bytes = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +humantime = { workspace = true } +hyper = { workspace = true } +itertools = { workspace = true } +pbjson-types = { workspace = true } +prost = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = [ + "macros", + "rt-multi-thread", + "sync", + "time", + "signal", +] } +tokio-util = { workspace = true, features = ["rt"] } +tracing = { workspace = true, features = ["attributes"] } +tryhard = { workspace = true } +tonic = { workspace = true } +tokio-stream = { workspace = true, features = ["sync"] } + +http = "0.2.11" +http-body = "0.4.5" +pin-project-lite = "0.2.15" +tower = { version = "0.5.1", features = ["util"] } +tower-http = { version = "0.4.4", features = ["map-response-body", "trace"] } + +[dev-dependencies] +config = { package = "astria-config", path = "../astria-config", features = [ + "tests", +] } + +[build-dependencies] +astria-build-info = { path = "../astria-build-info", features = ["build"] } diff --git a/crates/astria-auctioneer/README.md b/crates/astria-auctioneer/README.md new file mode 100644 index 0000000000..33936060e0 --- /dev/null +++ b/crates/astria-auctioneer/README.md @@ -0,0 +1,35 @@ +# Astria Auctioneer + +TODO: Add a description of the binary. + +## Running The Auctioneer + +### Dependencies + +We use [just](https://just.systems/man/en/chapter_4.html) for convenient project +specific commands. + +### Configuration + +The Auctioneer is configured via environment variables. An example configuration +can be seen in `local.env.example`. 
+ +To copy a configuration to your `.env` file run: + +```sh + +# Can specify an environment +just copy-env <type> + +# By default will copy `local.env.example` +just copy-env +``` + +### Running locally + +After creating a `.env` file either manually or by copying as above, `just` will +load it and run locally: + +```bash +just run +``` diff --git a/crates/astria-auctioneer/build.rs b/crates/astria-auctioneer/build.rs new file mode 100644 index 0000000000..7347735f49 --- /dev/null +++ b/crates/astria-auctioneer/build.rs @@ -0,0 +1,4 @@ +pub fn main() -> Result<(), Box<dyn std::error::Error>> { + astria_build_info::emit("auctioneer-v")?; + Ok(()) +} diff --git a/crates/astria-auctioneer/justfile b/crates/astria-auctioneer/justfile new file mode 100644 index 0000000000..e5c9ef9653 --- /dev/null +++ b/crates/astria-auctioneer/justfile @@ -0,0 +1,12 @@ +default: + @just --list + +set dotenv-load +set fallback + +default_env := 'local' +copy-env type=default_env: + cp {{ type }}.env.example .env + +run: + cargo run diff --git a/crates/astria-auctioneer/local.env.example b/crates/astria-auctioneer/local.env.example new file mode 100644 index 0000000000..914d96ecf3 --- /dev/null +++ b/crates/astria-auctioneer/local.env.example @@ -0,0 +1,75 @@ +# Configuration options of Astria Auctioneer. + +# Address of the gRPC server for the sequencer chain +ASTRIA_AUCTIONEER_SEQUENCER_GRPC_ENDPOINT="http://127.0.0.1:8080" + +# Address of the ABCI server for the sequencer chain +ASTRIA_AUCTIONEER_SEQUENCER_ABCI_ENDPOINT="http://127.0.0.1:26657" + +# Chain ID of the sequencer chain which transactions are submitted to. +ASTRIA_AUCTIONEER_SEQUENCER_CHAIN_ID="astria-dev-1" + +# The path to the file storing the private key for the sequencer account used for signing +# transactions. The file should contain a hex-encoded Ed25519 secret key. +ASTRIA_AUCTIONEER_SEQUENCER_PRIVATE_KEY_PATH=/path/to/priv_sequencer_key.json + +# The fee asset denomination that will be used in the submitted sequencer transactions. +ASTRIA_AUCTIONEER_FEE_ASSET_DENOMINATION="nria" + +# The prefix that will be used to construct bech32m sequencer addresses. +ASTRIA_AUCTIONEER_SEQUENCER_ADDRESS_PREFIX=astria + +# Address of the gRPC server for the rollup's Auction and Optimistic Execution services. +ASTRIA_AUCTIONEER_ROLLUP_GRPC_ENDPOINT="http://127.0.0.1:50051" + +# The rollup ID to post the auction result to +ASTRIA_AUCTIONEER_ROLLUP_ID="astriachain" + +# The amount of time in milliseconds to wait between opening the auction and closing it to +# submit the result to the sequencer. +ASTRIA_AUCTIONEER_LATENCY_MARGIN_MS=1000 + +# Log level. One of debug, info, warn, or error +ASTRIA_AUCTIONEER_LOG="info" + +# If true disables tty detection and forces writing telemetry to stdout. +# If false span data is written to stdout only if it is connected to a tty. +ASTRIA_AUCTIONEER_FORCE_STDOUT=false + +# If true uses an exceedingly pretty human readable format to write to stdout. +# If false uses JSON formatted OTEL traces. +# This does nothing unless stdout is connected to a tty or +# `ASTRIA_AUCTIONEER_FORCE_STDOUT` is set to `true`. +ASTRIA_AUCTIONEER_PRETTY_PRINT=false + +# If set to any non-empty value removes ANSI escape characters from the pretty +# printed output. Note that this does nothing unless `ASTRIA_AUCTIONEER_PRETTY_PRINT` +# is set to `true`. +NO_COLOR= + +# Set to true to disable prometheus metrics. +ASTRIA_AUCTIONEER_NO_METRICS=true + +# The address at which the prometheus HTTP listener will bind if enabled. 
+ASTRIA_AUCTIONEER_METRICS_HTTP_LISTENER_ADDR="127.0.0.1:9000" + +# If true disables writing to the opentelemetry OTLP endpoint. +ASTRIA_AUCTIONEER_NO_OTEL=false + +# The OTEL specific config options follow the OpenTelemetry Protocol Exporter v1 +# specification as defined here: +# https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/protocol/exporter.md + +# Sets the general OTLP endpoint. +OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:4317" +# Sets the OTLP endpoint for trace data. This takes precedence over `OTEL_EXPORTER_OTLP_ENDPOINT` if set. +OTEL_EXPORTER_OTLP_TRACES_ENDPOINT="http://localhost:4317/v1/traces" +# The duration in seconds that the OTEL exporter will wait for each batch export. +OTEL_EXPORTER_OTLP_TRACES_TIMEOUT=10 +# The compression format to use for exporting. Only `"gzip"` is supported. +# Don't set the env var if no compression is required. +OTEL_EXPORTER_OTLP_TRACES_COMPRESSION="gzip" +# The HTTP headers that will be set when sending gRPC requests. +OTEL_EXPORTER_OTLP_HEADERS="key1=value1,key2=value2" +# The HTTP headers that will be set when sending gRPC requests. This takes precedence over `OTEL_EXPORTER_OTLP_HEADERS` if set. +OTEL_EXPORTER_OTLP_TRACE_HEADERS="key1=value1,key2=value2" diff --git a/crates/astria-auctioneer/src/auctioneer/auction/allocation_rule.rs b/crates/astria-auctioneer/src/auctioneer/auction/allocation_rule.rs new file mode 100644 index 0000000000..afcca676d2 --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/auction/allocation_rule.rs @@ -0,0 +1,57 @@ +//! The allocation rule is the mechanism by which the auction processes incoming bids and determines +//! the winner. +use std::sync::Arc; + +use tracing::{ + info, + instrument, +}; + +use super::Bid; + +pub(super) struct FirstPrice { + highest_bid: Option<Arc<Bid>>, + bids_seen: usize, +} + +impl FirstPrice { + pub(super) fn new() -> Self { + Self { + highest_bid: None, + bids_seen: 0, + } + } + + /// Submit a candidate bid. + /// + /// The candidate replaces the current highest bid if it is strictly higher. + // TODO: identify the incumbent and candidate by their hash? + #[instrument(skip_all, fields( + current_winner.bid = self.highest_bid.as_ref().map(|bid| bid.bid()), + candidate.bid = candidate.bid(), + ))] + pub(super) fn bid(&mut self, candidate: &Arc<Bid>) { + self.bids_seen = self.bids_seen.saturating_add(1); + let winner = if let Some(current) = self.highest_bid.as_mut() { + if candidate.bid() > current.bid() { + *current = candidate.clone(); + "candidate" + } else { + "incumbent" + } + } else { + self.highest_bid = Some(candidate.clone()); + "candidate" + }; + info!("highest bidder is {winner}"); + } + + pub(super) fn bids_seen(&self) -> usize { + self.bids_seen + } + + /// Returns the winner of the auction, if one exists. + pub(super) fn take_winner(&mut self) -> Option<Arc<Bid>> { + self.highest_bid.take() + } +} diff --git a/crates/astria-auctioneer/src/auctioneer/auction/factory.rs b/crates/astria-auctioneer/src/auctioneer/auction/factory.rs new file mode 100644 index 0000000000..6852f3c7e2 --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/auction/factory.rs @@ -0,0 +1,95 @@ +//! The auction [`Factory`] to start new auctions. 
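+//!
+//! As an illustrative sketch only (not normative; `factory` is assumed to be a fully
+//! configured [`Factory`] and `block` a `FilteredSequencerBlock` received from the
+//! proposed-block stream), the factory is driven like this:
+//!
+//! ```ignore
+//! // Spawn a new background auction for the proposed block.
+//! let auction = factory.start_new(&block);
+//! // Later, once the auction winner was submitted to Sequencer with nonce `n`,
+//! // cache it so subsequent auctions can fall back to `n + 1`.
+//! factory.set_last_successful_nonce(n);
+//! ```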
+use astria_core::{ + primitive::v1::{ + asset, + RollupId, + }, + sequencerblock::v1::block::FilteredSequencerBlock, +}; +use tokio::sync::{ + mpsc, + oneshot, +}; +use tokio_util::sync::CancellationToken; + +use super::{ + Auction, + SequencerKey, + Worker, +}; +use crate::sequencer_channel::SequencerChannel; + +/// The auction `Factory` is used to spawn a new auction. +/// +/// It exposes two methods, `Factory::start_new` to start an +/// [`Auction`] given a `FilteredSequencerBlock`, and +/// `Factory::set_last_successful_nonce` to record the +/// nonce used to submit a winning bid to Sequencer. +/// This last successful nonce is passed to the auction and +/// acts as a cached value in case Sequencer does not return +/// the current pending nonce of the auctioneer account. +pub(in crate::auctioneer) struct Factory { + pub(in crate::auctioneer) sequencer_abci_client: sequencer_client::HttpClient, + pub(in crate::auctioneer) sequencer_channel: SequencerChannel, + pub(in crate::auctioneer) latency_margin: std::time::Duration, + pub(in crate::auctioneer) sequencer_key: SequencerKey, + pub(in crate::auctioneer) fee_asset_denomination: asset::Denom, + pub(in crate::auctioneer) sequencer_chain_id: String, + pub(in crate::auctioneer) rollup_id: RollupId, + pub(in crate::auctioneer) cancellation_token: CancellationToken, + /// `last_successful_nonce + 1` is used for submitting an auction winner to Sequencer + /// if an auction worker was not able to receive the last pending + /// nonce from Sequencer in time. Starts unset at the beginning of the program and + /// is set externally via `Factory::set_last_successful_nonce`. + pub(in crate::auctioneer) last_successful_nonce: Option<u32>, + pub(in crate::auctioneer) metrics: &'static crate::Metrics, +} + +impl Factory { + pub(in crate::auctioneer) fn start_new(&self, block: &FilteredSequencerBlock) -> Auction { + let id = super::Id::from_sequencer_block_hash(block.block_hash()); + let block_hash = *block.block_hash(); + let height = block.height().into(); + + // TODO: get the capacities from config or something instead of using a magic number + let (start_bids_tx, start_bids_rx) = oneshot::channel(); + let (start_timer_tx, start_timer_rx) = oneshot::channel(); + let (bids_tx, bids_rx) = mpsc::unbounded_channel(); + + let cancellation_token = self.cancellation_token.child_token(); + let auction = Worker { + sequencer_abci_client: self.sequencer_abci_client.clone(), + sequencer_channel: self.sequencer_channel.clone(), + start_bids: Some(start_bids_rx), + start_timer: Some(start_timer_rx), + bids: bids_rx, + latency_margin: self.latency_margin, + id, + sequencer_key: self.sequencer_key.clone(), + fee_asset_denomination: self.fee_asset_denomination.clone(), + sequencer_chain_id: self.sequencer_chain_id.clone(), + rollup_id: self.rollup_id, + cancellation_token: cancellation_token.clone(), + last_successful_nonce: self.last_successful_nonce, + metrics: self.metrics, + }; + + Auction { + id, + block_hash, + height, + hash_of_executed_block_on_rollup: None, + start_bids: Some(start_bids_tx), + start_timer: Some(start_timer_tx), + bids: bids_tx, + cancellation_token, + worker: tokio::task::spawn(auction.run()), + metrics: self.metrics, + started_at: std::time::Instant::now(), + } + } + + pub(in crate::auctioneer) fn set_last_successful_nonce(&mut self, nonce: u32) { + self.last_successful_nonce.replace(nonce); + } +} diff --git a/crates/astria-auctioneer/src/auctioneer/auction/mod.rs b/crates/astria-auctioneer/src/auctioneer/auction/mod.rs new file mode 100644 
index 0000000000..a066f596e4 --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/auction/mod.rs @@ -0,0 +1,292 @@ +use std::{ + fmt::Display, + sync::Arc, +}; + +use astria_core::{ + sequencerblock, + sequencerblock::v1::block, +}; +use astria_eyre::eyre::{ + self, + bail, + ensure, + eyre, + WrapErr as _, +}; +use futures::{ + Future, + FutureExt as _, +}; +use sequencer_client::tendermint_rpc::endpoint::broadcast::tx_sync; +use tokio::{ + sync::{ + mpsc, + oneshot, + }, + task::JoinHandle, +}; +use tokio_util::sync::CancellationToken; +use tracing::instrument; + +use crate::{ + bid::{ + Bid, + RollupBlockHash, + }, + sequencer_key::SequencerKey, +}; + +pub(super) mod factory; +pub(super) use factory::Factory; +mod allocation_rule; +mod worker; +use worker::Worker; + +/// Used to uniquely identify an auction. +/// +/// Currently the same as the hash of the proposed sequencer block. +#[derive(Hash, Eq, PartialEq, Clone, Copy, Debug)] +pub(super) struct Id([u8; 32]); + +impl Id { + pub(super) fn from_sequencer_block_hash(block_hash: &block::Hash) -> Self { + Self(block_hash.get()) + } +} + +impl std::fmt::Display for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use base64::{ + display::Base64Display, + engine::general_purpose::STANDARD, + }; + Base64Display::new(self.0.as_ref(), &STANDARD).fmt(f) + } +} + +/// The frontend to interact with a running auction. +pub(super) struct Auction { + /// The identifier of the current auction. + id: Id, + /// The block hash of the proposed Sequencer block that triggered the creation of this auction. + block_hash: block::Hash, + /// The height of the proposed Sequencer block that triggered this auction. + height: u64, + /// The hash of the rollup block that was executed and on which all bids will be based. + hash_of_executed_block_on_rollup: Option<RollupBlockHash>, + /// A oneshot channel to trigger the running auction to start accepting bids. + start_bids: Option<oneshot::Sender<()>>, + /// A oneshot channel to trigger the running auction to start its auction timer. + start_timer: Option<oneshot::Sender<()>>, + /// A channel to forward bids from Auctioneer's stream connected to its Rollup to the + /// background auction task. + bids: mpsc::UnboundedSender<Arc<Bid>>, + /// Used to cancel the worker task. + cancellation_token: CancellationToken, + /// The actual event loop running in the background that receives bids, times the + /// auction, and submits the winner to Sequencer. 
+ worker: JoinHandle<Result<Summary, worker::Error>>, + metrics: &'static crate::Metrics, + started_at: std::time::Instant, +} + +impl Auction { + pub(super) fn abort(&self) { + self.worker.abort(); + } + + pub(super) fn cancel(&self) { + self.cancellation_token.cancel(); + } + + pub(in crate::auctioneer) fn id(&self) -> &Id { + &self.id + } + + // TODO: identify the commitment in span fields + #[instrument(skip_all, fields(id = %self.id), err)] + pub(super) fn start_timer( + &mut self, + commitment: sequencerblock::optimistic::v1alpha1::SequencerBlockCommit, + ) -> eyre::Result<()> { + ensure!( + &self.block_hash == commitment.block_hash() && self.height == commitment.height(), + "commitment does not match auction; auction.block_hash = `{}`, auction.height = `{}`, \ + commitment.block_hash = `{}`, commitment.height = `{}`", + self.block_hash, + self.height, + commitment.block_hash(), + commitment.height(), + ); + if let Some(start_timer) = self.start_timer.take() { + start_timer + .send(()) + .map_err(|()| eyre!("the auction worker's start timer channel was already dropped")) + } else { + Err(eyre!( + "a previous commitment already triggered the start timer of the auction" + )) + } + } + + // TODO: identify the executed block in the span fields + #[instrument(skip_all, fields(id = %self.id), err)] + pub(in crate::auctioneer) fn start_bids( + &mut self, + block: crate::block::Executed, + ) -> eyre::Result<()> { + ensure!( + &self.block_hash == block.sequencer_block_hash(), + "executed block does not match auction; auction.block_hash = `{}`, \ + executed.block_hash = `{}`", + &self.block_hash, + block.sequencer_block_hash(), + ); + + if let Some(start_bids) = self.start_bids.take() { + start_bids.send(()).map_err(|()| { + eyre!("the auction worker's start bids channel was already dropped") + })?; + } else { + bail!("a previous executed block already triggered the auction to start bids"); + } + + let prev_block = self + .hash_of_executed_block_on_rollup + .replace(block.rollup_block_hash()); + debug_assert!(prev_block.is_none()); + + Ok(()) + } + + // TODO: Use a refinement type for the parent rollup block hash + #[instrument(skip_all, fields( + id = %self.id, + bid.sequencer_block_hash = %bid.sequencer_parent_block_hash(), + bid.rollup_parent_block_hash = %bid.rollup_parent_block_hash(), + ), err)] + pub(in crate::auctioneer) fn forward_bid_to_auction( + &mut self, + bid: Arc<Bid>, + ) -> eyre::Result<()> { + if let err @ Err(_) = self.does_bid_match_auction(&bid) { + self.metrics + .increment_auction_bids_without_matching_auction(); + return err; + } + + self.metrics + .record_auction_bid_delay_since_start(self.started_at.elapsed()); + + self.bids + .send(bid) + .wrap_err("failed to submit bid to auction; the bid is lost") + } + + fn does_bid_match_auction(&self, bid: &Bid) -> eyre::Result<()> { + ensure!( + &self.block_hash == bid.sequencer_parent_block_hash() + && self.hash_of_executed_block_on_rollup.as_ref() + == Some(bid.rollup_parent_block_hash()), + "bid does not match auction; auction.sequencer_parent_block_hash = `{}`, \ + auction.rollup_parent_block_hash = `{}`, bid.sequencer_parent_block_hash = `{}`, \ + bid.rollup_parent_block_hash = `{}`", + self.block_hash, + fmt_none_as_msg( + self.hash_of_executed_block_on_rollup.as_ref(), + "" + ), + bid.sequencer_parent_block_hash(), + bid.rollup_parent_block_hash(), + ); + Ok(()) + } +} + +fn fmt_none_as_msg<'a, T: Display>( + val: Option<&'a T>, + default: &'static str, +) -> FmtNoneAsMsg<'a, T> { + FmtNoneAsMsg { + val, + default, + } +} + +/// Utility to implement the 
[`Display`] trait on a type `Option<T>`. +/// +/// Writes `default` if `val` is `None`, or the set value if `Some`. +struct FmtNoneAsMsg<'a, T> { + val: Option<&'a T>, + default: &'static str, +} +impl<T> Display for FmtNoneAsMsg<'_, T> +where + T: Display, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.val { + Some(val) => val.fmt(f), + None => f.write_str(self.default), + } + } +} + +#[derive(Debug, thiserror::Error)] +pub(super) enum Error { + #[error("the task running the auction panicked")] + Panicked { source: tokio::task::JoinError }, + #[error("the auction failed")] + Failed { source: worker::Error }, +} + +impl Future for Auction { + type Output = (Id, Result<Summary, Error>); + + fn poll( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll<Self::Output> { + let res = match std::task::ready!(self.worker.poll_unpin(cx)) { + Ok(Ok(summary)) => Ok(summary), + Ok(Err(source)) => Err(Error::Failed { + source, + }), + Err(source) => Err(Error::Panicked { + source, + }), + }; + std::task::Poll::Ready((self.id, res)) + } +} + +pub(super) enum Summary { + CancelledDuringAuction, + NoBids, + Submitted { + response: tx_sync::Response, + nonce_used: u32, + }, +} + +impl Display for Summary { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Summary::CancelledDuringAuction => { + f.write_str("received cancellation signal during auction loop") + } + Summary::NoBids => f.write_str("auction finished without bids"), + Summary::Submitted { + response, + nonce_used, + } => write!( + f, + "auction winner submitted using nonce `{nonce_used}`; Sequencer responded with \ + ABCI code `{}`, log `{}`", + response.code.value(), + response.log, + ), + } + } +} diff --git a/crates/astria-auctioneer/src/auctioneer/auction/worker.rs b/crates/astria-auctioneer/src/auctioneer/auction/worker.rs new file mode 100644 index 0000000000..c5783f2871 --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/auction/worker.rs @@ -0,0 +1,379 @@ +//! The event loop that runs an auction. +use std::{ + future::Future, + pin::pin, + sync::Arc, + time::Duration, +}; + +use astria_core::{ + primitive::v1::{ + asset, + Address, + RollupId, + }, + protocol::transaction::v1::Transaction, +}; +use futures::FutureExt as _; +use sequencer_client::{ + tendermint_rpc::endpoint::broadcast::tx_sync, + SequencerClientExt as _, +}; +use tokio::{ + select, + sync::oneshot, + task::{ + JoinError, + JoinHandle, + }, + time::{ + sleep, + Sleep, + }, +}; +use tokio_util::sync::CancellationToken; +use tracing::{ + error, + info, + instrument, + Instrument as _, + Level, +}; + +use super::{ + allocation_rule::FirstPrice, + Summary, +}; +use crate::{ + bid::Bid, + sequencer_channel::SequencerChannel, + sequencer_key::SequencerKey, +}; + +const SUBMISSION_TIMEOUT: Duration = Duration::from_secs(30); + +pub(super) struct Worker { + /// The sequencer's ABCI client, used for submitting transactions + pub(super) sequencer_abci_client: sequencer_client::HttpClient, + pub(super) sequencer_channel: SequencerChannel, + pub(super) start_bids: Option<oneshot::Receiver<()>>, + pub(super) start_timer: Option<oneshot::Receiver<()>>, + /// Channel for receiving new bids. 
+ pub(super) bids: tokio::sync::mpsc::UnboundedReceiver<Arc<Bid>>, + /// The time between receiving a block commitment and closing the auction to submit the result + pub(super) latency_margin: Duration, + /// The ID of the auction + pub(super) id: super::Id, + /// The key used to sign transactions on the sequencer + pub(super) sequencer_key: SequencerKey, + /// Fee asset for submitting transactions + pub(super) fee_asset_denomination: asset::Denom, + /// The chain ID used for sequencer transactions + pub(super) sequencer_chain_id: String, + /// Rollup ID to submit the auction result to + pub(super) rollup_id: RollupId, + pub(super) cancellation_token: CancellationToken, + /// `last_successful_nonce + 1` is used for submitting an auction winner to + /// Sequencer if the worker was not able to receive the last pending nonce + /// from Sequencer in time (in time = by the time the winner was ready to be + /// submitted). Is usually only unset if no auction was yet submitted (for example + /// at the beginning of the program). + pub(super) last_successful_nonce: Option<u32>, + pub(super) metrics: &'static crate::Metrics, +} + +impl Worker { + // FIXME: consider using Valuable for the return case. + // See this discussion: https://github.com/tokio-rs/tracing/discussions/1906 + #[instrument( + skip_all, + fields(id = %self.id), + err(level = Level::WARN, Display), + ret(Display), + )] + pub(super) async fn run(mut self) -> Result<Summary, Error> { + let Some(auction_result) = self + .cancellation_token + .clone() + .run_until_cancelled(self.run_auction_loop()) + .await + else { + return Ok(Summary::CancelledDuringAuction); + }; + let AuctionItems { + winner, + nonce_fetch, + } = auction_result?; + let Some(winner) = winner else { + return Ok(Summary::NoBids); + }; + + let nonce_fetch = nonce_fetch.expect( + "if the auction loop produced a winner, then a nonce fetch must have been spawned", + ); + + let pending_nonce = match nonce_fetch.now_or_never() { + Some(Ok(nonce)) => nonce, + Some(Err(source)) => { + return Err(Error::NonceFetchPanicked { + source, + }); + } + None if self.last_successful_nonce.is_some() => { + let nonce = self + .last_successful_nonce + .expect("in arm that checks for last_successful_nonce == Some") + .saturating_add(1); + info!( + "request for latest pending nonce did not return in time; using `{nonce}` + instead (last successful nonce + 1)" + ); + nonce + } + None => return Err(Error::NoNonce), + }; + + // TODO: report the pending nonce that we ended up using. + let transaction = Arc::unwrap_or_clone(winner) + .into_transaction_body( + pending_nonce, + self.rollup_id, + &self.sequencer_key, + self.fee_asset_denomination.clone(), + self.sequencer_chain_id, + ) + .sign(self.sequencer_key.signing_key()); + + // NOTE: Submit fire-and-forget style. If the submission didn't make it in time, + // it's likely lost. + // TODO: We can consider providing a very tight retry mechanism. Maybe resubmit once + // if the response didn't take too long? But it's probably a bad idea to even try. + // Can we detect if a submission failed due to a bad nonce? In this case, we could + // immediately ("optimistically") submit with the most recent pending nonce (if the + // publisher updated it in the meantime) or just nonce + 1 (if it didn't yet update)? 
+ + let submission_fut = + submit_winner_with_timeout(self.sequencer_abci_client.clone(), transaction); + tokio::pin!(submission_fut); + + let submission_start = std::time::Instant::now(); + loop { + select!( + () = self.cancellation_token.clone().cancelled_owned(), + if !self.cancellation_token.is_cancelled() => + { + info!( + "received cancellation token while waiting for Sequencer to respond to \ + transaction submission; still waiting for submission until timeout" + ); + } + + res = &mut submission_fut => { + break match res + { + Ok(response) => { + self.metrics.record_auction_winner_submission_success_latency(submission_start.elapsed()); + Ok(Summary::Submitted { nonce_used: pending_nonce, response, }) + } + Err(err) => { + self.metrics.record_auction_winner_submission_error_latency(submission_start.elapsed()); + Err(err) + } + } + } + ); + } + } + + async fn run_auction_loop(&mut self) -> Result<AuctionItems, Error> { + let mut latency_margin_timer = pin!(None::<Sleep>); + // TODO: do we want to make this configurable to allow for more complex allocation rules? + let mut allocation_rule = FirstPrice::new(); + let mut auction_is_open = false; + + let mut nonce_fetch = None; + + #[expect( + clippy::semicolon_if_nothing_returned, + reason = "we want to pattern match on the latency timer's return value" + )] + loop { + select! { + biased; + + () = async { + Option::as_pin_mut(latency_margin_timer.as_mut()) + .unwrap() + .await + }, if latency_margin_timer.is_some() => { + info!("timer is up; bids left unprocessed: {}", self.bids.len()); + + self.metrics.record_bids_per_auction_dropped_histogram(self.bids.len()); + self.metrics.record_bids_per_auction_processed_histogram(allocation_rule.bids_seen()); + + let winner = allocation_rule.take_winner(); + if let Some(winner) = &winner { + self.metrics.record_auction_winning_bid_histogram(winner.bid()); + } + + break Ok(AuctionItems { + winner, + nonce_fetch, + }) + } + + Ok(()) = async { + self.start_bids.as_mut().unwrap().await + }, if self.start_bids.is_some() => { + let mut channel = self + .start_bids + .take() + .expect("inside an arm that checks start_bids == Some"); + channel.close(); + // TODO: if the timer is already running, report how much time is left for the bids + auction_is_open = true; + } + + Ok(()) = async { + self.start_timer.as_mut().unwrap().await + }, if self.start_timer.is_some() => { + let mut channel = self + .start_timer + .take() + .expect("inside an arm that checks start_timer == Some"); + channel.close(); + if !auction_is_open { + info!( + "received signal to start the auction timer before signal to start \ + processing bids; that's ok but eats into the time allotment of the \ + auction" + ); + } + + latency_margin_timer.set(Some(sleep(self.latency_margin))); + nonce_fetch = Some(spawn_aborting(get_pending_nonce( + self.sequencer_channel.clone(), + *self.sequencer_key.address(), + ).in_current_span())); + info!( + duration = %humantime::format_duration(self.latency_margin), + "started auction timer and request for latest nonce", + ); + } + + // TODO: this is an unbounded channel. Can we process multiple bids at a time? + Some(bid) = self.bids.recv(), if auction_is_open => { + allocation_rule.bid(&bid); + } + + else => { + break Err(Error::ChannelsClosed); + } + } + } + } +} + +#[derive(Debug, thiserror::Error)] +pub(in crate::auctioneer) enum Error { + #[error("all channels to the auction worker are closed; the auction cannot continue")] + ChannelsClosed, + // TODO: Is there a way to identify the winning bid? Do we need it? 
+ #[error( + "selected winning bid, but latest nonce was not yet initialized (should only be the case \ + at start of service) and Sequencer did not return the latest nonce in time" + )] + NoNonce, + #[error("task fetching nonce from Sequencer panicked")] + NonceFetchPanicked { source: tokio::task::JoinError }, + #[error( + "submission of winner to Sequencer elapsed after {}", + humantime::format_duration(SUBMISSION_TIMEOUT) + )] + SubmissionElapsed { source: tokio::time::error::Elapsed }, + #[error("encountered an error when sending the winning bid to Sequencer")] + SubmissionFailed { + source: sequencer_client::extension_trait::Error, + }, +} + +fn spawn_aborting<F>(fut: F) -> AbortJoinHandle<F::Output> +where + F: Future + Send + 'static, + F::Output: Send + 'static, +{ + AbortJoinHandle(tokio::spawn(fut)) +} + +struct AuctionItems { + winner: Option<Arc<Bid>>, + nonce_fetch: Option<AbortJoinHandle<u32>>, +} + +/// A wrapper around [`JoinHandle`] that aborts the task rather than disassociating. +#[derive(Debug)] +pub(crate) struct AbortJoinHandle<T>(JoinHandle<T>); + +impl<T> Drop for AbortJoinHandle<T> { + fn drop(&mut self) { + self.0.abort(); + } +} + +impl<T> From<JoinHandle<T>> for AbortJoinHandle<T> { + fn from(handle: JoinHandle<T>) -> Self { + AbortJoinHandle(handle) + } +} + +impl<T> Future for AbortJoinHandle<T> { + type Output = Result<T, JoinError>; + + fn poll( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll<Self::Output> { + self.0.poll_unpin(cx) + } +} + +/// Fetches the pending nonce for `address` with aggressive retries. +/// +/// On failure this method will attempt to immediately refetch the nonce in an +/// infinite loop. It is expected that this future is run in a tokio task, relatively +/// short lived (no longer than the margin timer of the auction), and killed/aborted +/// if not ready by the time an auction result is expected to be available. +#[instrument(skip_all, fields(%address), ret(Display))] +async fn get_pending_nonce(sequencer_channel: SequencerChannel, address: Address) -> u32 { + loop { + match sequencer_channel.get_pending_nonce(address).await { + Ok(nonce) => return nonce, + Err(error) => { + error!(%error, "fetching nonce failed; immediately scheduling next fetch"); + } + } + } +} + +async fn submit_winner_with_timeout( + client: sequencer_client::HttpClient, + transaction: Transaction, +) -> Result<tx_sync::Response, Error> { + // TODO(janis): starting from v0.35.0, tendermint-rpc provides a + // mechanism to timeout requests on its http client, so that this + // explicit timeout can be removed. + match tokio::time::timeout( + SUBMISSION_TIMEOUT, + client.submit_transaction_sync(transaction), + ) + .await + { + Ok(Ok(rsp)) => Ok(rsp), + Ok(Err(source)) => Err(Error::SubmissionFailed { + source, + }), + Err(source) => Err(Error::SubmissionElapsed { + source, + }), + } +} diff --git a/crates/astria-auctioneer/src/auctioneer/mod.rs b/crates/astria-auctioneer/src/auctioneer/mod.rs new file mode 100644 index 0000000000..ee4d8786e6 --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/mod.rs @@ -0,0 +1,353 @@ +//! The Astria Auctioneer business logic. 
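+//!
+//! A rough, non-normative sketch of the event flow implemented below (the exact
+//! wiring lives in `Auctioneer::handle_event`):
+//!
+//! ```text
+//! proposed Sequencer block -> start a new auction, cancelling any running one
+//! block commitment         -> start the running auction's latency-margin timer
+//! executed rollup block    -> let the running auction start accepting bids
+//! incoming bid             -> forward it to the running auction
+//! ```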
+use std::{ + sync::Arc, + time::Duration, +}; + +use astria_core::{ + primitive::v1::RollupId, + sequencerblock::{ + optimistic::v1alpha1::SequencerBlockCommit, + v1::block::FilteredSequencerBlock, + }, +}; +use astria_eyre::eyre::{ + self, + OptionExt as _, + WrapErr as _, +}; +use futures::{ + stream::FuturesUnordered, + StreamExt as _, +}; +use tokio::select; +use tokio_util::sync::CancellationToken; +use tracing::{ + error, + field, + info, + instrument, + Level, + Span, +}; + +use crate::{ + rollup_channel::{ + BidStream, + ExecuteOptimisticBlockStream, + }, + sequencer_channel::{ + BlockCommitmentStream, + ProposedBlockStream, + }, + sequencer_key::SequencerKey, + Config, +}; + +mod auction; + +/// The implementation of the auctioneer business logic. +pub(super) struct Auctioneer { + auction_factory: auction::Factory, + block_commitments: BlockCommitmentStream, + bids: BidStream, + cancelled_auctions: FuturesUnordered<auction::Auction>, + metrics: &'static crate::Metrics, + executed_blocks: ExecuteOptimisticBlockStream, + running_auction: Option<auction::Auction>, + proposed_blocks: ProposedBlockStream, + rollup_id: RollupId, + shutdown_token: CancellationToken, +} + +impl Auctioneer { + /// Creates an [`Auctioneer`] service from a [`Config`]. + pub(super) fn new( + config: Config, + metrics: &'static crate::Metrics, + shutdown_token: CancellationToken, + ) -> eyre::Result<Self> { + let Config { + sequencer_grpc_endpoint, + sequencer_abci_endpoint, + latency_margin_ms, + rollup_grpc_endpoint, + rollup_id, + sequencer_chain_id, + sequencer_private_key_path, + sequencer_address_prefix, + fee_asset_denomination, + .. + } = config; + + let rollup_id = RollupId::from_unhashed_bytes(rollup_id); + let rollup_channel = crate::rollup_channel::open(&rollup_grpc_endpoint)?; + let sequencer_channel = crate::sequencer_channel::open(&sequencer_grpc_endpoint)?; + + let sequencer_key = SequencerKey::builder() + .path(sequencer_private_key_path) + .prefix(sequencer_address_prefix) + .try_build() + .wrap_err("failed to load sequencer private key")?; + info!(address = %sequencer_key.address(), "loaded sequencer signer"); + + let sequencer_abci_client = + sequencer_client::HttpClient::new(sequencer_abci_endpoint.as_str()) + .wrap_err("failed constructing sequencer abci client")?; + + let auction_factory = auction::Factory { + sequencer_abci_client, + sequencer_channel: sequencer_channel.clone(), + latency_margin: Duration::from_millis(latency_margin_ms), + sequencer_key: sequencer_key.clone(), + fee_asset_denomination, + sequencer_chain_id, + rollup_id, + cancellation_token: shutdown_token.child_token(), + last_successful_nonce: None, + metrics, + }; + + Ok(Self { + auction_factory, + block_commitments: sequencer_channel.open_get_block_commitment_stream(), + bids: rollup_channel.open_bid_stream(), + cancelled_auctions: FuturesUnordered::new(), + executed_blocks: rollup_channel.open_execute_optimistic_block_stream(), + metrics, + proposed_blocks: sequencer_channel.open_get_proposed_block_stream(rollup_id), + rollup_id, + running_auction: None, + shutdown_token, + }) + } + + /// Runs the [`Auctioneer`] service until it receives an exit signal, or one of the constituent + /// tasks either ends unexpectedly or returns an error. + pub(super) async fn run(mut self) -> eyre::Result<()> { + let reason: eyre::Result<&str> = { + // This is a long running loop. Errors are emitted inside the handlers. + loop { + select! 
{ + biased; + + () = self.shutdown_token.clone().cancelled_owned() => { + break Ok("received shutdown signal"); + }, + + res = self.handle_event() => { + if let Err(err) = res { + break Err(err); + } + } + } + } + }; + + self.shutdown(reason).await + } + + async fn handle_event(&mut self) -> eyre::Result<()> { + select!( + res = self.proposed_blocks.next() => { + let res = res.ok_or_eyre("proposed block stream closed")?; + let _ = self.handle_proposed_block(res); + }, + + res = self.block_commitments.next() => { + let res = res.ok_or_eyre("block commitment stream closed")?; + let _ = self.handle_block_commitment(res); + }, + + res = self.executed_blocks.next() => { + let res = res.ok_or_eyre("executed block stream closed")?; + let _ = self.handle_executed_block(res); + } + + (id, res) = async { self.running_auction.as_mut().unwrap().await }, if self.running_auction.is_some() => { + let _ = self.handle_completed_auction(id, res); + } + + Some(res) = self.bids.next() => { + let _ = self.handle_bids(res); + } + + Some((id, res)) = self.cancelled_auctions.next() => { + let _ = self.handle_cancelled_auction(id, res); + } + ); + Ok(()) + } + + /// Handles the result of an auction running to completion. + /// + /// This method exists to ensure that panicking auctions receive an event. + /// It is assumed that auctions that ran to completion (returning a success or failure) + /// will emit an event in their own span. + #[instrument(skip_all, fields(%auction_id), err)] + fn handle_completed_auction( + &mut self, + auction_id: auction::Id, + res: Result<auction::Summary, auction::Error>, + ) -> Result<auction::Summary, auction::Error> { + if let Ok(auction::Summary::Submitted { + nonce_used, .. + }) = &res + { + self.auction_factory.set_last_successful_nonce(*nonce_used); + } + let _ = self.running_auction.take(); + res + } + + /// Handles the result of cancelled auctions. + /// + /// This method only exists to ensure that panicking auctions receive an event. + /// It is assumed that auctions that ran to completion (returning a success or failure) + /// will emit an event in their own span. 
+ #[instrument(skip_all, fields(%auction_id), err(level = Level::INFO))] + fn handle_cancelled_auction( + &self, + auction_id: auction::Id, + res: Result<auction::Summary, auction::Error>, + ) -> Result<auction::Summary, auction::Error> { + res + } + + #[instrument(skip_all, fields(block_hash = field::Empty), err)] + fn handle_proposed_block( + &mut self, + proposed_block: eyre::Result<FilteredSequencerBlock>, + ) -> eyre::Result<()> { + let proposed_block = + proposed_block.wrap_err("encountered problem receiving proposed block message")?; + Span::current().record("block_hash", field::display(proposed_block.block_hash())); + + self.metrics.increment_proposed_blocks_received_counter(); + + let new_auction = self.auction_factory.start_new(&proposed_block); + info!(auction_id = %new_auction.id(), "started new auction"); + + if let Some(old_auction) = self.running_auction.replace(new_auction) { + old_auction.cancel(); + self.metrics.increment_auctions_cancelled_count(); + info!(auction_id = %old_auction.id(), "cancelled running auction"); + self.cancelled_auctions.push(old_auction); + } + + // TODO: do conversion && sending in one operation + let base_block = crate::block::Proposed::new(proposed_block) + .try_into_base_block(self.rollup_id) + // FIXME: give these their proper wire names + .wrap_err("failed to create BaseBlock from FilteredSequencerBlock")?; + self.executed_blocks + .try_send(base_block) + .wrap_err("failed to forward block to execution stream")?; + + Ok(()) + } + + #[instrument(skip_all, fields(block_hash = field::Empty), err)] + fn handle_block_commitment( + &mut self, + commitment: eyre::Result<SequencerBlockCommit>, + ) -> eyre::Result<()> { + let block_commitment = commitment.wrap_err("failed to receive block commitment")?; + Span::current().record("block_hash", field::display(block_commitment.block_hash())); + + self.metrics.increment_block_commitments_received_counter(); + + if let Some(running_auction) = &mut self.running_auction { + running_auction + .start_timer(block_commitment) + .wrap_err("failed to start timer")?; + info!(auction_id = %running_auction.id(), "started auction timer"); + } else { + info!( + "received a block commitment but did not start auction timer because no auction \ + was running" + ); + } + + Ok(()) + } + + #[instrument(skip_all, fields(block_hash = field::Empty), err)] + fn handle_executed_block( + &mut self, + executed_block: eyre::Result<crate::block::Executed>, + ) -> eyre::Result<()> { + let executed_block = executed_block.wrap_err("failed to receive executed block")?; + Span::current().record( + "block_hash", + field::display(executed_block.sequencer_block_hash()), + ); + + self.metrics.increment_executed_blocks_received_counter(); + + if let Some(running_auction) = &mut self.running_auction { + running_auction + .start_bids(executed_block) + .wrap_err("failed to start processing bids")?; + info!( + auction_id = %running_auction.id(), + "set auction to start processing bids based on executed block", + ); + } else { + info!( + "received an executed block but did not set auction to start processing bids \ + because no auction was running" + ); + } + Ok(()) + } + + #[instrument(skip_all, fields(block_hash = field::Empty), err)] + fn handle_bids(&mut self, bid: eyre::Result<crate::bid::Bid>) -> eyre::Result<()> { + let bid = Arc::new(bid.wrap_err("received problematic bid")?); + Span::current().record( + "block_hash", + field::display(bid.sequencer_parent_block_hash()), + ); + + self.metrics.increment_auction_bids_received_counter(); + + if let Some(running_auction) = &mut self.running_auction { + running_auction + .forward_bid_to_auction(bid) + .wrap_err("failed to forward bid to auction")?; + 
+            info!(
+                auction_id = %running_auction.id(),
+                "forwarded bid to auction"
+            );
+        } else {
+            info!(
+                "received a bid but did not forward it to the auction because no auction was \
+                 running",
+            );
+        }
+        Ok(())
+    }
+
+    #[instrument(skip_all)]
+    async fn shutdown(mut self, reason: eyre::Result<&'static str>) -> eyre::Result<()> {
+        const WAIT_BEFORE_ABORT: Duration = Duration::from_secs(25);
+
+        // Necessary if we got here for a reason other than receiving an external
+        // shutdown signal.
+        self.shutdown_token.cancel();
+
+        let message = format!(
+            "waiting {} for all constituent tasks to shut down before aborting",
+            humantime::format_duration(WAIT_BEFORE_ABORT),
+        );
+        match &reason {
+            Ok(reason) => info!(%reason, message),
+            Err(reason) => error!(%reason, message),
+        };
+        if let Some(running_auction) = self.running_auction.take() {
+            running_auction.abort();
+        }
+        reason.map(|_| ())
+    }
+}
diff --git a/crates/astria-auctioneer/src/bid/mod.rs b/crates/astria-auctioneer/src/bid/mod.rs
new file mode 100644
index 0000000000..363fa2723e
--- /dev/null
+++ b/crates/astria-auctioneer/src/bid/mod.rs
@@ -0,0 +1,188 @@
+use std::fmt::Display;
+
+use astria_core::{
+    crypto::{
+        Signature,
+        VerificationKey,
+    },
+    generated::astria::auction::v1alpha1 as raw,
+    primitive::v1::{
+        asset,
+        RollupId,
+    },
+    protocol::transaction::v1::{
+        action::RollupDataSubmission,
+        TransactionBody,
+    },
+    sequencerblock::v1::block,
+};
+use astria_eyre::eyre::{
+    self,
+    WrapErr as _,
+};
+use bytes::Bytes;
+use prost::{
+    Message as _,
+    Name,
+};
+
+use crate::sequencer_key::SequencerKey;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct RollupBlockHash(Bytes);
+
+impl RollupBlockHash {
+    #[must_use]
+    pub(crate) fn new(inner: Bytes) -> Self {
+        Self(inner)
+    }
+
+    #[must_use]
+    pub(crate) fn as_bytes(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+impl From<Bytes> for RollupBlockHash {
+    fn from(value: Bytes) -> Self {
+        Self::new(value)
+    }
+}
+
+impl Display for RollupBlockHash {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        use base64::{
+            display::Base64Display,
+            engine::general_purpose::STANDARD,
+        };
+
+        if f.alternate() {
+            Base64Display::new(&self.0, &STANDARD).fmt(f)?;
+        } else {
+            for byte in &self.0 {
+                write!(f, "{byte:02x}")?;
+            }
+        }
+        Ok(())
+    }
+}
+
+// TODO: this should probably be moved to astria_core::auction?
+#[derive(Debug, Clone)]
+pub(crate) struct Bid {
+    /// The fee that will be charged for this bid.
+    fee: u64,
+    /// The byte list of transactions to be included.
+    transactions: Vec<Bytes>,
+    /// The hash of the rollup block that this bid is based on.
+    rollup_parent_block_hash: RollupBlockHash,
+    /// The hash of the sequencer block used to derive the rollup block that this bid is based
+    /// on.
+    sequencer_parent_block_hash: block::Hash,
+}
+
+impl Bid {
+    pub(crate) fn try_from_raw(raw: raw::Bid) -> eyre::Result<Self> {
+        let raw::Bid {
+            fee,
+            transactions,
+            sequencer_parent_block_hash,
+            rollup_parent_block_hash,
+        } = raw;
+        Ok(Self {
+            fee,
+            transactions,
+            rollup_parent_block_hash: rollup_parent_block_hash.into(),
+            sequencer_parent_block_hash: sequencer_parent_block_hash
+                .as_ref()
+                .try_into()
+                .wrap_err("invalid field .sequencer_parent_block_hash")?,
+        })
+    }
+
+    fn into_raw(self) -> raw::Bid {
+        raw::Bid {
+            fee: self.fee,
+            transactions: self.transactions,
+            sequencer_parent_block_hash: Bytes::copy_from_slice(
+                self.sequencer_parent_block_hash.as_bytes(),
+            ),
+            rollup_parent_block_hash: Bytes::copy_from_slice(
+                self.rollup_parent_block_hash.as_bytes(),
+            ),
+        }
+    }
+
+    pub(crate) fn into_transaction_body(
+        self,
+        nonce: u32,
+        rollup_id: RollupId,
+        sequencer_key: &SequencerKey,
+        fee_asset: asset::Denom,
+        chain_id: String,
+    ) -> TransactionBody {
+        let allocation = Allocation::new(self, sequencer_key);
+        let allocation_data = allocation.into_raw().encode_to_vec();
+
+        TransactionBody::builder()
+            .actions(vec![RollupDataSubmission {
+                rollup_id,
+                data: allocation_data.into(),
+                fee_asset,
+            }
+            .into()])
+            .nonce(nonce)
+            .chain_id(chain_id)
+            .try_build()
+            .expect("failed to build transaction body")
+    }
+
+    pub(crate) fn bid(&self) -> u64 {
+        self.fee
+    }
+
+    pub(crate) fn rollup_parent_block_hash(&self) -> &RollupBlockHash {
+        &self.rollup_parent_block_hash
+    }
+
+    pub(crate) fn sequencer_parent_block_hash(&self) -> &block::Hash {
+        &self.sequencer_parent_block_hash
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct Allocation {
+    signature: Signature,
+    verification_key: VerificationKey,
+    bid_bytes: pbjson_types::Any,
+}
+
+impl Allocation {
+    fn new(bid: Bid, sequencer_key: &SequencerKey) -> Self {
+        let bid_bytes = pbjson_types::Any {
+            type_url: raw::Bid::type_url(),
+            value: bid.into_raw().encode_to_vec().into(),
+        };
+        let signature = sequencer_key.signing_key().sign(&bid_bytes.value);
+        let verification_key = sequencer_key.signing_key().verification_key();
+        Self {
+            signature,
+            verification_key,
+            bid_bytes,
+        }
+    }
+
+    fn into_raw(self) -> raw::Allocation {
+        let Self {
+            signature,
+            verification_key,
+            bid_bytes,
+        } = self;
+
+        raw::Allocation {
+            signature: Bytes::copy_from_slice(&signature.to_bytes()),
+            public_key: Bytes::copy_from_slice(&verification_key.to_bytes()),
+            bid: Some(bid_bytes),
+        }
+    }
+}
diff --git a/crates/astria-auctioneer/src/block/mod.rs b/crates/astria-auctioneer/src/block/mod.rs
new file mode 100644
index 0000000000..53475a69d1
--- /dev/null
+++ b/crates/astria-auctioneer/src/block/mod.rs
@@ -0,0 +1,134 @@
+use astria_core::{
+    execution,
+    generated::astria::{
+        optimistic_execution::v1alpha1 as optimistic_execution,
+        sequencerblock::v1 as raw_sequencer_block,
+    },
+    primitive::v1::RollupId,
+    sequencerblock::v1::{
+        block::{
+            self,
+            FilteredSequencerBlock,
+            FilteredSequencerBlockParts,
+        },
+        RollupTransactions,
+    },
+    Protobuf,
+};
+use astria_eyre::eyre::{
+    self,
+    eyre,
+    Context,
+};
+use bytes::Bytes;
+use prost::Message as _;
+
+use crate::bid::RollupBlockHash;
+
+/// Converts a [`tendermint::Time`] to a [`pbjson_types::Timestamp`].
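+/// Both timestamp types carry the same `seconds`/`nanos` pair, so the
+/// conversion is lossless. A hedged doc sketch (`Time::unix_epoch` is
+/// tendermint-rs's constructor for 1970-01-01T00:00:00Z):
+///
+/// ```ignore
+/// let ts = convert_tendermint_time_to_protobuf_timestamp(
+///     sequencer_client::tendermint::Time::unix_epoch(),
+/// );
+/// assert_eq!((ts.seconds, ts.nanos), (0, 0));
+/// ```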
+fn convert_tendermint_time_to_protobuf_timestamp(
+    value: sequencer_client::tendermint::Time,
+) -> pbjson_types::Timestamp {
+    let sequencer_client::tendermint_proto::google::protobuf::Timestamp {
+        seconds,
+        nanos,
+    } = value.into();
+    pbjson_types::Timestamp {
+        seconds,
+        nanos,
+    }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct Proposed {
+    /// The proposed block data, filtered for a rollup id.
+    filtered_sequencer_block: FilteredSequencerBlock,
+}
+
+impl Proposed {
+    pub(crate) fn new(filtered_sequencer_block: FilteredSequencerBlock) -> Self {
+        Self {
+            filtered_sequencer_block,
+        }
+    }
+
+    /// Converts this [`Proposed`] into a [`BaseBlock`] for the given `rollup_id`.
+    /// If there are no transactions for the given `rollup_id`, this will return a `BaseBlock`
+    /// with no transactions.
+    ///
+    /// # Errors
+    /// Invalid `RollupData` included in the proposed block data will result in an error.
+    // TODO: add typed errors here?
+    pub(crate) fn try_into_base_block(
+        self,
+        rollup_id: RollupId,
+    ) -> eyre::Result<optimistic_execution::BaseBlock> {
+        let FilteredSequencerBlockParts {
+            block_hash,
+            header,
+            mut rollup_transactions,
+            ..
+        } = self.filtered_sequencer_block.into_parts();
+
+        let maybe_serialized_transactions = rollup_transactions
+            .swap_remove(&rollup_id)
+            .map(RollupTransactions::into_parts);
+
+        let transactions =
+            maybe_serialized_transactions.map_or(Ok(vec![]), |serialized_transactions| {
+                serialized_transactions
+                    .transactions
+                    .into_iter()
+                    .map(raw_sequencer_block::RollupData::decode)
+                    .collect::<Result<Vec<_>, _>>()
+                    .wrap_err("failed to decode RollupData")
+            })?;
+
+        let timestamp = Some(convert_tendermint_time_to_protobuf_timestamp(header.time()));
+
+        Ok(optimistic_execution::BaseBlock {
+            sequencer_block_hash: Bytes::copy_from_slice(block_hash.as_bytes()),
+            transactions,
+            timestamp,
+        })
+    }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct Executed {
+    /// The rollup block metadata that resulted from executing a proposed Sequencer block.
+    block: execution::v1::Block,
+    /// The hash of the sequencer block that was executed optimistically.
+    sequencer_block_hash: block::Hash,
+}
+
+impl Executed {
+    pub(crate) fn try_from_raw(
+        raw: optimistic_execution::ExecuteOptimisticBlockStreamResponse,
+    ) -> eyre::Result<Self> {
+        let block = if let Some(raw_block) = raw.block {
+            execution::v1::Block::try_from_raw(raw_block).wrap_err("invalid rollup block")?
+        } else {
+            return Err(eyre!("missing block"));
+        };
+
+        let sequencer_block_hash = raw
+            .base_sequencer_block_hash
+            .as_ref()
+            .try_into()
+            .wrap_err("invalid block hash")?;
+
+        Ok(Self {
+            block,
+            sequencer_block_hash,
+        })
+    }
+
+    pub(crate) fn sequencer_block_hash(&self) -> &block::Hash {
+        &self.sequencer_block_hash
+    }
+
+    pub(crate) fn rollup_block_hash(&self) -> RollupBlockHash {
+        RollupBlockHash::new(self.block.hash().clone())
+    }
+}
diff --git a/crates/astria-auctioneer/src/build_info.rs b/crates/astria-auctioneer/src/build_info.rs
new file mode 100644
index 0000000000..2996fcab96
--- /dev/null
+++ b/crates/astria-auctioneer/src/build_info.rs
@@ -0,0 +1,3 @@
+use astria_build_info::BuildInfo;
+
+pub const BUILD_INFO: BuildInfo = astria_build_info::get!();
diff --git a/crates/astria-auctioneer/src/config.rs b/crates/astria-auctioneer/src/config.rs
new file mode 100644
index 0000000000..53fe529acb
--- /dev/null
+++ b/crates/astria-auctioneer/src/config.rs
@@ -0,0 +1,66 @@
+use astria_core::primitive::v1::asset;
+use serde::{
+    Deserialize,
+    Serialize,
+};
+
+// Allowed `struct_excessive_bools` because this is used as a container
+// for deserialization. Making this a builder-pattern is not actionable.
+#[expect(
+    clippy::struct_excessive_bools,
+    reason = "represents a config with flags"
+)]
+#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
+/// The single config for creating an astria-auctioneer service.
+pub struct Config {
+    /// The endpoint for the sequencer gRPC service used for the proposed block stream.
+    pub sequencer_grpc_endpoint: String,
+    /// The endpoint for the sequencer ABCI service used for submitting the auction winner
+    /// transaction.
+    pub sequencer_abci_endpoint: String,
+    /// The chain ID for the sequencer network.
+    pub sequencer_chain_id: String,
+    /// The file path for the private key used to sign sequencer transactions with the auction
+    /// results.
+    pub sequencer_private_key_path: String,
+    /// The address prefix to use when constructing sequencer addresses using the signing key.
+    pub sequencer_address_prefix: String,
+    /// The fee asset denomination to use for the sequencer transactions.
+    pub fee_asset_denomination: asset::Denom,
+    /// The endpoint for the rollup gRPC service used for the optimistic execution and bundle
+    /// streams.
+    pub rollup_grpc_endpoint: String,
+    /// The rollup ID used to filter the proposed blocks stream.
+    pub rollup_id: String,
+    /// The amount of time in milliseconds to wait after a commit before closing the auction for
+    /// bids and submitting the result to the sequencer.
+    pub latency_margin_ms: u64,
+    /// Log level for the service.
+    pub log: String,
+    /// Forces writing trace data to stdout no matter if connected to a tty or not.
+    pub force_stdout: bool,
+    /// Disables writing trace data to an opentelemetry endpoint.
+    pub no_otel: bool,
+    /// Set to true to disable the metrics server.
+    pub no_metrics: bool,
+    /// The endpoint which will be listened on for serving prometheus metrics.
+    pub metrics_http_listener_addr: String,
+    /// Writes a human readable format to stdout instead of JSON formatted OTEL trace data.
+    pub pretty_print: bool,
+}
+
+impl config::Config for Config {
+    const PREFIX: &'static str = "ASTRIA_AUCTIONEER_";
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Config;
+
+    const EXAMPLE_ENV: &str = include_str!("../local.env.example");
+
+    #[test]
+    fn example_env_config_is_up_to_date() {
+        config::tests::example_env_config_is_up_to_date::<Config>(EXAMPLE_ENV);
+    }
+}
diff --git a/crates/astria-auctioneer/src/lib.rs b/crates/astria-auctioneer/src/lib.rs
new file mode 100644
index 0000000000..9158081d66
--- /dev/null
+++ b/crates/astria-auctioneer/src/lib.rs
@@ -0,0 +1,147 @@
+//! Astria Auctioneer auctions bids for the top slot of a rollup's block.
+//!
+//! Auctioneer connects to a Sequencer node's
+//! `astria.sequencerblock.optimistic.v1alpha1.OptimisticBlock`
+//! gRPC interface, a Rollup's
+//! `astria.auction.v1alpha1.OptimisticExecutionService`, and
+//! a Rollup's `astria.auction.v1alpha1.AuctionService`.
+//!
+//! # Starting an auction
+//!
+//! Every new proposed sequencer block (that is, a block created
+//! during Sequencer's CometBFT prepare and process proposal phase)
+//! triggers Auctioneer to cancel a still running auction and start
+//! a new one.
+//!
+//! Auctioneer forwards the block it received from Sequencer to
+//! the Rollup for (optimistic) execution, and then selects a winner
+//! among the bids that are on top of this optimistically constructed
+//! block. The winner is submitted to Sequencer to be included in
+//! the next Sequencer block.
+//!
+//! # How a single auction works
+//!
+//! Once started, an auction waits for two signals:
+//!
+//! 1. one to open the auction for bids.
+//! 2. the other to start the auction timer.
+//!
+//! The signal to open the auction for bids is usually given after
+//! Auctioneer receives the executed block hash from its connected
+//! rollup. Afterwards the running auction starts processing its received
+//! bids given an allocation rule (right now first price only).
+//!
+//! The signal to start the auction timer is usually given after
+//! Auctioneer receives a commit message from Sequencer. Once the
+//! timer is up, the winner of the auction (if any) is submitted
+//! to Sequencer.
+//!
+//! # Submitting an Auction to Sequencer
+//!
+//! An Auction is submitted to Sequencer using an ABCI
+//! `broadcast_tx_sync` RPC. The payload is a regular
+//! `astria.protocol.Transaction` signed by the auctioneer's
+//! private ED25519 signing key.
+//!
+//! The moment an auction task starts its timer, it also requests
+//! the latest nonce for auctioneer's account from Sequencer. If
+//! Sequencer answers within the timer's duration, that nonce is
+//! used to submit the winning allocation. If Sequencer does not
+//! answer, then the auction worker submits the winning bid using
+//! the cached nonce of the last successful submission.
+
+use std::{
+    future::Future,
+    task::Poll,
+};
+
+mod auctioneer;
+mod bid;
+mod block;
+mod build_info;
+pub mod config;
+pub(crate) mod metrics;
+mod rollup_channel;
+mod sequencer_channel;
+mod sequencer_key;
+mod streaming_utils;
+
+use astria_eyre::eyre::{
+    self,
+    WrapErr as _,
+};
+pub use build_info::BUILD_INFO;
+pub use config::Config;
+pub use metrics::Metrics;
+use tokio::task::{
+    JoinError,
+    JoinHandle,
+};
+use tokio_util::sync::CancellationToken;
+use tracing::instrument;
+
+/// The [`Auctioneer`] service returned by [`Auctioneer::spawn`].
+pub struct Auctioneer {
+    shutdown_token: CancellationToken,
+    task: Option<JoinHandle<eyre::Result<()>>>,
+}
+
+impl Auctioneer {
+    /// Spawns the [`Auctioneer`] service.
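+    ///
+    /// A minimal usage sketch; `cfg` and `metrics` are assumed to come from
+    /// `config::get()` and the telemetry setup shown in `main.rs`:
+    ///
+    /// ```ignore
+    /// let mut auctioneer = Auctioneer::spawn(cfg, metrics)?;
+    /// // ... later, e.g. on SIGTERM ...
+    /// auctioneer.shutdown().await?;
+    /// ```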
+    ///
+    /// # Errors
+    /// Returns an error if the Auctioneer cannot be initialized.
+    pub fn spawn(cfg: Config, metrics: &'static Metrics) -> eyre::Result<Self> {
+        let shutdown_token = CancellationToken::new();
+        let inner = auctioneer::Auctioneer::new(cfg, metrics, shutdown_token.child_token())?;
+        let task = tokio::spawn(inner.run());
+
+        Ok(Self {
+            shutdown_token,
+            task: Some(task),
+        })
+    }
+
+    /// Shuts down Auctioneer, in turn waiting for its components to shut down.
+    ///
+    /// # Errors
+    /// Returns an error if an error occurred during shutdown.
+    ///
+    /// # Panics
+    /// Panics if called twice.
+    #[instrument(skip_all, err)]
+    pub async fn shutdown(&mut self) -> eyre::Result<()> {
+        self.shutdown_token.cancel();
+        flatten_join_result(
+            self.task
+                .take()
+                .expect("shutdown must not be called twice")
+                .await,
+        )
+    }
+}
+
+impl Future for Auctioneer {
+    type Output = eyre::Result<()>;
+
+    fn poll(
+        mut self: std::pin::Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> Poll<Self::Output> {
+        use futures::future::FutureExt as _;
+
+        let task = self
+            .task
+            .as_mut()
+            .expect("auctioneer must not be polled after shutdown");
+        task.poll_unpin(cx).map(flatten_join_result)
+    }
+}
+
+fn flatten_join_result<T>(res: Result<eyre::Result<T>, JoinError>) -> eyre::Result<T> {
+    match res {
+        Ok(Ok(val)) => Ok(val),
+        Ok(Err(err)) => Err(err).wrap_err("task returned with error"),
+        Err(err) => Err(err).wrap_err("task panicked"),
+    }
+}
diff --git a/crates/astria-auctioneer/src/main.rs b/crates/astria-auctioneer/src/main.rs
new file mode 100644
index 0000000000..7cdbf9447d
--- /dev/null
+++ b/crates/astria-auctioneer/src/main.rs
@@ -0,0 +1,111 @@
+use std::process::ExitCode;
+
+use astria_auctioneer::{
+    Auctioneer,
+    Config,
+    BUILD_INFO,
+};
+use astria_eyre::eyre::{
+    self,
+    eyre,
+    WrapErr as _,
+};
+use tokio::{
+    select,
+    signal::unix::{
+        signal,
+        SignalKind,
+    },
+};
+use tracing::{
+    error,
+    info,
+    instrument,
+    warn,
+};
+
+#[tokio::main]
+async fn main() -> ExitCode {
+    astria_eyre::install().expect("astria eyre hook must be the first hook installed");
+
+    eprintln!("{}", astria_telemetry::display::json(&BUILD_INFO));
+
+    let cfg: Config = match config::get() {
+        Err(err) => {
+            eprintln!("failed to read configuration:\n{err:?}");
+            return ExitCode::FAILURE;
+        }
+        Ok(cfg) => cfg,
+    };
+    eprintln!(
+        "starting with configuration:\n{}",
+        astria_telemetry::display::json(&cfg),
+    );
+
+    let mut astria_telemetry_conf = astria_telemetry::configure()
+        .set_no_otel(cfg.no_otel)
+        .set_force_stdout(cfg.force_stdout)
+        .set_pretty_print(cfg.pretty_print)
+        .set_filter_directives(&cfg.log);
+
+    if !cfg.no_metrics {
+        astria_telemetry_conf = astria_telemetry_conf
+            .set_metrics(&cfg.metrics_http_listener_addr, env!("CARGO_PKG_NAME"));
+    }
+
+    let (metrics, _astria_telemetry_guard) = match astria_telemetry_conf
+        .try_init(&())
+        .wrap_err("failed to setup astria_telemetry")
+    {
+        Err(e) => {
+            eprintln!("initializing auctioneer failed:\n{e:?}");
+            return ExitCode::FAILURE;
+        }
+        Ok(metrics_and_guard) => metrics_and_guard,
+    };
+
+    info!(
+        config = serde_json::to_string(&cfg).expect("serializing to a string cannot fail"),
+        "initializing auctioneer"
+    );
+
+    let mut auctioneer = match Auctioneer::spawn(cfg, metrics) {
+        Ok(auctioneer) => auctioneer,
+        Err(error) => {
+            error!(%error, "failed initializing auctioneer");
+            return ExitCode::FAILURE;
+        }
+    };
+
+    let mut sigterm = signal(SignalKind::terminate())
+        .expect("setting a SIGTERM listener should always work on Unix");
+
+    let exit_reason = select!
{ + _ = sigterm.recv() => Ok("received shutdown signal"), + res = &mut auctioneer => { + res.and_then(|()| Err(eyre!("auctioneer task exited unexpectedly"))) + } + }; + + shutdown(exit_reason, auctioneer).await +} + +#[instrument(skip_all)] +async fn shutdown(reason: eyre::Result<&'static str>, mut service: Auctioneer) -> ExitCode { + let message = "shutting down"; + let exit_code = match reason { + Ok(reason) => { + info!(reason, message); + if let Err(error) = service.shutdown().await { + warn!(%error, "encountered errors during shutdown"); + }; + ExitCode::SUCCESS + } + Err(reason) => { + error!(%reason, message); + ExitCode::FAILURE + } + }; + info!("shutdown target reached"); + exit_code +} diff --git a/crates/astria-auctioneer/src/metrics.rs b/crates/astria-auctioneer/src/metrics.rs new file mode 100644 index 0000000000..e944800cd4 --- /dev/null +++ b/crates/astria-auctioneer/src/metrics.rs @@ -0,0 +1,206 @@ +use astria_telemetry::{ + metric_names, + metrics::{ + self, + Counter, + Histogram, + IntoF64, + RegisteringBuilder, + }, +}; + +const BIDS_PER_AUCTIONLABEL: &str = "kind"; +const AUCTION_BIDS_PROCESSED: &str = "processed"; +const AUCTION_BIDS_DROPPED: &str = "dropped"; + +const AUCTION_WINNER_SUBMISSION_LATENCY_LABEL: &str = "auction_winner_submission_latency"; +const AUCTION_WINNER_ERROR: &str = "error"; +const AUCTION_WINNER_SUCCESS: &str = "success"; + +pub struct Metrics { + auction_bid_delay_since_start: Histogram, + bids_per_auction_dropped_histogram: Histogram, + bids_per_auction_processed_histogram: Histogram, + auction_bids_received_count: Counter, + auction_bids_without_matching_auction: Counter, + auction_winner_submission_error_latency: Histogram, + auction_winner_submission_success_latency: Histogram, + auction_winning_bid_histogram: Histogram, + auctions_cancelled_count: Counter, + block_commitments_received_count: Counter, + executed_blocks_received_count: Counter, + proposed_blocks_received_count: Counter, +} + +impl Metrics { + pub(crate) fn increment_auction_bids_received_counter(&self) { + self.auction_bids_received_count.increment(1); + } + + pub(crate) fn increment_auctions_cancelled_count(&self) { + self.auctions_cancelled_count.increment(1); + } + + pub(crate) fn increment_auction_bids_without_matching_auction(&self) { + self.auction_bids_without_matching_auction.increment(1); + } + + pub(crate) fn increment_block_commitments_received_counter(&self) { + self.block_commitments_received_count.increment(1); + } + + pub(crate) fn increment_executed_blocks_received_counter(&self) { + self.executed_blocks_received_count.increment(1); + } + + pub(crate) fn increment_proposed_blocks_received_counter(&self) { + self.proposed_blocks_received_count.increment(1); + } + + pub(crate) fn record_bids_per_auction_dropped_histogram(&self, val: impl IntoF64) { + self.bids_per_auction_dropped_histogram.record(val); + } + + pub(crate) fn record_bids_per_auction_processed_histogram(&self, val: impl IntoF64) { + self.bids_per_auction_processed_histogram.record(val); + } + + pub(crate) fn record_auction_bid_delay_since_start(&self, val: impl IntoF64) { + self.auction_bid_delay_since_start.record(val); + } + + pub(crate) fn record_auction_winning_bid_histogram(&self, val: impl IntoF64) { + self.auction_winning_bid_histogram.record(val); + } + + pub(crate) fn record_auction_winner_submission_error_latency(&self, val: impl IntoF64) { + self.auction_winner_submission_error_latency.record(val); + } + + pub(crate) fn record_auction_winner_submission_success_latency(&self, val: impl 
IntoF64) {
+        self.auction_winner_submission_success_latency.record(val);
+    }
+}
+
+impl astria_telemetry::metrics::Metrics for Metrics {
+    type Config = ();
+
+    fn register(
+        builder: &mut RegisteringBuilder,
+        _config: &Self::Config,
+    ) -> Result<Self, metrics::Error> {
+        let block_commitments_received_count = builder
+            .new_counter_factory(
+                BLOCK_COMMITMENTS_RECEIVED,
+                "the number of block commitments received from the Sequencer node",
+            )?
+            .register()?;
+
+        let executed_blocks_received_count = builder
+            .new_counter_factory(
+                EXECUTED_BLOCKS_RECEIVED,
+                "the number of executed blocks received from the Rollup node",
+            )?
+            .register()?;
+
+        let proposed_blocks_received_count = builder
+            .new_counter_factory(
+                PROPOSED_BLOCKS_RECEIVED,
+                "the number of proposed blocks received from the Sequencer node",
+            )?
+            .register()?;
+
+        let auction_bids_received_count = builder
+            .new_counter_factory(
+                AUCTION_BIDS_RECEIVED,
+                "the number of auction bids received from the Rollup node (total number over the \
+                 runtime of auctioneer)",
+            )?
+            .register()?;
+
+        let mut auction_bids_factory = builder.new_histogram_factory(
+            BIDS_PER_AUCTION,
+            "the number of auction bids received during an auction (either admitted or dropped \
+             because the time was up or due to some other issue)",
+        )?;
+        let bids_per_auction_processed_histogram = auction_bids_factory
+            .register_with_labels(&[(BIDS_PER_AUCTIONLABEL, AUCTION_BIDS_PROCESSED.to_string())])?;
+        let bids_per_auction_dropped_histogram = auction_bids_factory
+            .register_with_labels(&[(BIDS_PER_AUCTIONLABEL, AUCTION_BIDS_DROPPED.to_string())])?;
+
+        let auctions_cancelled_count = builder
+            .new_counter_factory(
+                AUCTIONS_CANCELLED,
+                "the number of auctions that were cancelled due to a proposed block pre-empting a \
+                 prior proposed block",
+            )?
+            .register()?;
+
+        let auction_winning_bid_histogram = builder
+            .new_histogram_factory(AUCTION_WINNING_BID, "the amount bid by the auction winner")?
+            .register()?;
+
+        let auction_bid_delay_since_start = builder
+            .new_histogram_factory(
+                AUCTION_BID_DELAY_SINCE_START,
+                "the duration from the start of an auction to when a bid for that auction was \
+                 received",
+            )?
+            .register()?;
+
+        let mut auction_winner_submission_latency_factory = builder.new_histogram_factory(
+            AUCTION_WINNER_SUBMISSION_LATENCY,
+            "the duration for Sequencer to respond to an auction submission",
+        )?;
+
+        let auction_winner_submission_error_latency = auction_winner_submission_latency_factory
+            .register_with_labels(&[(
+                AUCTION_WINNER_SUBMISSION_LATENCY_LABEL,
+                AUCTION_WINNER_ERROR.to_string(),
+            )])?;
+
+        let auction_winner_submission_success_latency = auction_winner_submission_latency_factory
+            .register_with_labels(&[(
+                AUCTION_WINNER_SUBMISSION_LATENCY_LABEL,
+                AUCTION_WINNER_SUCCESS.to_string(),
+            )])?;
+
+        let auction_bids_without_matching_auction = builder
+            .new_counter_factory(
+                AUCTION_BIDS_WITHOUT_MATCHING_AUCTION,
+                "auction bids that were received by auctioneer but that contained a sequencer or \
+                 rollup parent block hash that did not match a currently running auction (this \
+                 includes bids that arrived after an auction completed and bids that arrived \
+                 before the optimistically executed block was received by auctioneer from the \
+                 rollup)",
+            )?
+            .register()?;
+        Ok(Self {
+            auction_bid_delay_since_start,
+            bids_per_auction_dropped_histogram,
+            bids_per_auction_processed_histogram,
+            auction_bids_received_count,
+            auction_bids_without_matching_auction,
+            auction_winner_submission_error_latency,
+            auction_winner_submission_success_latency,
+            auction_winning_bid_histogram,
+            auctions_cancelled_count,
+            block_commitments_received_count,
+            executed_blocks_received_count,
+            proposed_blocks_received_count,
+        })
+    }
+}
+
+metric_names!(const METRICS_NAMES:
+    BLOCK_COMMITMENTS_RECEIVED,
+    EXECUTED_BLOCKS_RECEIVED,
+    PROPOSED_BLOCKS_RECEIVED,
+    AUCTIONS_CANCELLED,
+    AUCTION_BID_DELAY_SINCE_START,
+    BIDS_PER_AUCTION,
+    AUCTION_BIDS_RECEIVED,
+    AUCTION_BIDS_WITHOUT_MATCHING_AUCTION,
+    AUCTION_WINNING_BID,
+    AUCTION_WINNER_SUBMISSION_LATENCY,
+);
diff --git a/crates/astria-auctioneer/src/rollup_channel.rs b/crates/astria-auctioneer/src/rollup_channel.rs
new file mode 100644
index 0000000000..d1daf64bfc
--- /dev/null
+++ b/crates/astria-auctioneer/src/rollup_channel.rs
@@ -0,0 +1,227 @@
+use std::{
+    pin::Pin,
+    task::{
+        ready,
+        Context,
+        Poll,
+    },
+};
+
+use astria_core::generated::astria::{
+    auction::v1alpha1::GetBidStreamResponse,
+    optimistic_execution::v1alpha1::{
+        BaseBlock,
+        ExecuteOptimisticBlockStreamResponse,
+    },
+};
+use astria_eyre::eyre::{
+    self,
+    eyre,
+    WrapErr as _,
+};
+use futures::{
+    stream::BoxStream,
+    Stream,
+    StreamExt,
+};
+use prost::Name as _;
+use tokio::sync::broadcast;
+use tokio_stream::wrappers::BroadcastStream;
+use tracing::{
+    info_span,
+    warn,
+    Instrument as _,
+};
+
+use crate::{
+    bid::Bid,
+    streaming_utils::{
+        make_instrumented_channel,
+        restarting_stream,
+        InstrumentedChannel,
+    },
+};
+
+pub(crate) fn open(endpoint: &str) -> eyre::Result<RollupChannel> {
+    RollupChannel::create(endpoint)
+        .wrap_err_with(|| format!("failed to create a gRPC channel to rollup at `{endpoint}`"))
+}
+
+#[derive(Clone)]
+pub(crate) struct RollupChannel {
+    inner: InstrumentedChannel,
+}
+
+impl RollupChannel {
+    fn create(uri: &str) -> eyre::Result<Self> {
+        Ok(Self {
+            inner: make_instrumented_channel(uri)?,
+        })
+    }
+
+    pub(crate) fn open_bid_stream(&self) -> BidStream {
+        use astria_core::generated::astria::auction::v1alpha1::{
+            auction_service_client::AuctionServiceClient,
+            GetBidStreamRequest,
+        };
+        let chan = self.inner.clone();
+        let inner = restarting_stream(move || {
+            let chan = chan.clone();
+            async move {
+                let inner = AuctionServiceClient::new(chan)
+                    .get_bid_stream(GetBidStreamRequest {})
+                    .await
+                    .wrap_err("failed to open bid stream")
+                    .inspect_err(|error| warn!(%error))?
+                    .into_inner();
+                Ok(InnerBidStream {
+                    inner,
+                })
+            }
+            .instrument(info_span!("request bid stream"))
+        })
+        .boxed();
+        BidStream {
+            inner,
+        }
+    }
+
+    pub(crate) fn open_execute_optimistic_block_stream(&self) -> ExecuteOptimisticBlockStream {
+        use astria_core::generated::astria::optimistic_execution::v1alpha1::{
+            optimistic_execution_service_client::OptimisticExecutionServiceClient,
+            ExecuteOptimisticBlockStreamRequest,
+        };
+
+        // NOTE: this implementation uses a broadcast channel instead of an mpsc because
+        // one can get new readers by using Sender::subscribe. This is important for the
+        // restart mechanism. The mpsc channel (or rather the tokio stream ReceiverStream wrapper)
+        // would need something ugly like an Arc<Mutex<Receiver>>, but where
+        // we'd need to also implement Stream for some wrapper around it.... It's a mess.
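+        // A self-contained illustration of the property relied on here: every
+        // `Sender::subscribe` call yields a fresh, independent receiver, so each
+        // restart of the outgoing stream can create its own reader (capacity and
+        // element type below are illustrative only):
+        //
+        //     let (tx, _) = tokio::sync::broadcast::channel::<u8>(16);
+        //     let rx_first_attempt = tx.subscribe();
+        //     let rx_after_restart = tx.subscribe(); // independent cursor into the channel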
+        let (to_server, _) = broadcast::channel(16);
+        let chan = self.inner.clone();
+        let to_server_2 = to_server.clone();
+        let incoming = restarting_stream(move || {
+            let chan = chan.clone();
+            let out_stream = BroadcastStream::new(to_server_2.subscribe())
+                // TODO: emit some kind of event when the stream actually starts
+                // lagging behind instead of quietly discarding the issue.
+                .filter_map(|maybe_lagged| std::future::ready(maybe_lagged.ok()))
+                .map(|base_block| ExecuteOptimisticBlockStreamRequest {
+                    base_block: Some(base_block),
+                });
+
+            async move {
+                let inner = OptimisticExecutionServiceClient::new(chan)
+                    .execute_optimistic_block_stream(out_stream)
+                    .await
+                    .wrap_err("failed to open execute optimistic block stream")
+                    .inspect_err(|error| warn!(%error))?
+                    .into_inner();
+                Ok(InnerExecuteOptimisticBlockStream {
+                    inner,
+                })
+            }
+            .instrument(info_span!("request execute optimistic block stream"))
+        })
+        .boxed();
+
+        ExecuteOptimisticBlockStream {
+            incoming,
+            outgoing: to_server,
+        }
+    }
+}
+
+pub(crate) struct BidStream {
+    inner: BoxStream<'static, eyre::Result<Bid>>,
+}
+
+impl Stream for BidStream {
+    type Item = eyre::Result<Bid>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.inner.poll_next_unpin(cx)
+    }
+}
+
+struct InnerBidStream {
+    inner: tonic::Streaming<GetBidStreamResponse>,
+}
+
+impl Stream for InnerBidStream {
+    type Item = eyre::Result<Bid>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let Some(res) = ready!(self.inner.poll_next_unpin(cx)) else {
+            return Poll::Ready(None);
+        };
+
+        let raw = res
+            .wrap_err("error while receiving streamed message from server")?
+            .bid
+            .ok_or_else(|| {
+                eyre!(
+                    "message field not set: `{}.bid`",
+                    GetBidStreamResponse::full_name()
+                )
+            })?;
+
+        let bid = Bid::try_from_raw(raw).wrap_err_with(|| {
+            format!(
+                "failed to validate received message `{}`",
+                astria_core::generated::astria::auction::v1alpha1::Bid::full_name()
+            )
+        })?;
+
+        Poll::Ready(Some(Ok(bid)))
+    }
+}
+
+pub(crate) struct ExecuteOptimisticBlockStream {
+    incoming: BoxStream<'static, eyre::Result<crate::block::Executed>>,
+    outgoing: broadcast::Sender<BaseBlock>,
+}
+
+impl ExecuteOptimisticBlockStream {
+    /// Immediately sends `base_block` to the connected server. Fails if
+    /// no receiver is currently subscribed, i.e. no (restarted) outgoing
+    /// stream is forwarding requests to the server.
+    // NOTE: just leak the tokio error for now. It's crate private anyway
+    // and we'd just end up wrapping the same variants.
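+    // Hedged call-site sketch (`stream` is a hypothetical handle to this type):
+    //
+    //     stream.try_send(base_block)?;
+    //
+    // The send only errors while no restarted outgoing stream is subscribed; a
+    // full broadcast channel instead causes lagging receivers to drop old items.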
+    pub(crate) fn try_send(
+        &mut self,
+        base_block: BaseBlock,
+    ) -> Result<(), broadcast::error::SendError<BaseBlock>> {
+        self.outgoing.send(base_block).map(|_| ())
+    }
+}
+
+impl Stream for ExecuteOptimisticBlockStream {
+    type Item = eyre::Result<crate::block::Executed>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.incoming.poll_next_unpin(cx)
+    }
+}
+
+pub(crate) struct InnerExecuteOptimisticBlockStream {
+    inner: tonic::Streaming<ExecuteOptimisticBlockStreamResponse>,
+}
+
+impl Stream for InnerExecuteOptimisticBlockStream {
+    type Item = eyre::Result<crate::block::Executed>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let Some(message) = ready!(self.inner.poll_next_unpin(cx)) else {
+            return Poll::Ready(None);
+        };
+
+        let message = message.wrap_err("failed receiving message over stream")?;
+        let executed_block = crate::block::Executed::try_from_raw(message).wrap_err_with(|| {
+            format!(
+                "failed to validate `{}`",
+                astria_core::generated::astria::optimistic_execution::v1alpha1::ExecuteOptimisticBlockStreamResponse::full_name(),
+            )
+        })?;
+        Poll::Ready(Some(Ok(executed_block)))
+    }
+}
diff --git a/crates/astria-auctioneer/src/sequencer_channel.rs b/crates/astria-auctioneer/src/sequencer_channel.rs
new file mode 100644
index 0000000000..c286f55fac
--- /dev/null
+++ b/crates/astria-auctioneer/src/sequencer_channel.rs
@@ -0,0 +1,243 @@
+use std::{
+    pin::Pin,
+    task::{
+        ready,
+        Context,
+        Poll,
+    },
+};
+
+use astria_core::{
+    generated::astria::sequencerblock::optimistic::v1alpha1::{
+        GetBlockCommitmentStreamRequest,
+        GetBlockCommitmentStreamResponse,
+        GetOptimisticBlockStreamRequest,
+        GetOptimisticBlockStreamResponse,
+    },
+    primitive::v1::{
+        Address,
+        RollupId,
+    },
+    sequencerblock::{
+        optimistic::v1alpha1::SequencerBlockCommit,
+        v1::block::FilteredSequencerBlock,
+    },
+    Protobuf as _,
+};
+use astria_eyre::eyre::{
+    self,
+    eyre,
+    WrapErr as _,
+};
+use futures::{
+    stream::BoxStream,
+    Future,
+    Stream,
+    StreamExt as _,
+};
+use prost::Name as _;
+use tracing::{
+    info_span,
+    warn,
+    Instrument as _,
+};
+
+use crate::streaming_utils::{
+    restarting_stream,
+    InstrumentedChannel,
+};
+
+pub(crate) fn open(endpoint: &str) -> eyre::Result<SequencerChannel> {
+    SequencerChannel::create(endpoint)
+        .wrap_err_with(|| format!("failed to create a gRPC channel to Sequencer at `{endpoint}`"))
+}
+
+#[derive(Clone)]
+pub(crate) struct SequencerChannel {
+    inner: InstrumentedChannel,
+}
+
+impl SequencerChannel {
+    fn create(uri: &str) -> eyre::Result<Self> {
+        Ok(Self {
+            inner: crate::streaming_utils::make_instrumented_channel(uri)?,
+        })
+    }
+
+    pub(crate) fn get_pending_nonce(
+        &self,
+        address: Address,
+    ) -> impl Future<Output = eyre::Result<u32>> {
+        use astria_core::generated::astria::sequencerblock::v1::{
+            sequencer_service_client::SequencerServiceClient,
+            GetPendingNonceRequest,
+        };
+
+        let mut client = SequencerServiceClient::new(self.inner.clone());
+        async move {
+            let nonce = client
+                .get_pending_nonce(GetPendingNonceRequest {
+                    address: Some(address.into_raw()),
+                })
+                .await
+                .wrap_err("failed to fetch most recent pending nonce")?
+                .into_inner()
+                .inner;
+            Ok(nonce)
+        }
+    }
+
+    pub(crate) fn open_get_block_commitment_stream(&self) -> BlockCommitmentStream {
+        use astria_core::generated::astria::sequencerblock::optimistic::v1alpha1::optimistic_block_service_client::OptimisticBlockServiceClient;
+        let chan = self.inner.clone();
+        let inner = restarting_stream(move || {
+            let chan = chan.clone();
+            async move {
+                let inner = OptimisticBlockServiceClient::new(chan)
+                    .get_block_commitment_stream(GetBlockCommitmentStreamRequest {})
+                    .await
+                    .wrap_err("failed to open block commitment stream")
+                    .inspect_err(|error| warn!(%error))?
+                    .into_inner();
+                Ok(InnerBlockCommitmentStream {
+                    inner,
+                })
+            }
+            .instrument(info_span!("request block commitment stream"))
+        })
+        .boxed();
+        BlockCommitmentStream {
+            inner,
+        }
+    }
+
+    pub(crate) fn open_get_proposed_block_stream(
+        &self,
+        rollup_id: RollupId,
+    ) -> ProposedBlockStream {
+        use astria_core::generated::astria::sequencerblock::optimistic::v1alpha1::{
+            optimistic_block_service_client::OptimisticBlockServiceClient,
+            GetOptimisticBlockStreamRequest,
+        };
+
+        let chan = self.inner.clone();
+        let inner = restarting_stream(move || {
+            let chan = chan.clone();
+            async move {
+                let mut client = OptimisticBlockServiceClient::new(chan);
+                let inner = client
+                    .get_optimistic_block_stream(GetOptimisticBlockStreamRequest {
+                        rollup_id: Some(rollup_id.into_raw()),
+                    })
+                    .await
+                    .wrap_err("failed to open optimistic block stream")
+                    .inspect_err(|error| warn!(%error))?
+                    .into_inner();
+                Ok(InnerOptimisticBlockStream {
+                    inner,
+                })
+            }
+            .instrument(info_span!("request optimistic block stream"))
+        })
+        .boxed();
+        ProposedBlockStream {
+            inner,
+        }
+    }
+}
+
+/// A stream for receiving committed blocks from the sequencer.
+pub(crate) struct BlockCommitmentStream {
+    inner: BoxStream<'static, eyre::Result<SequencerBlockCommit>>,
+}
+
+impl Stream for BlockCommitmentStream {
+    type Item = eyre::Result<SequencerBlockCommit>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.inner.poll_next_unpin(cx)
+    }
+}
+
+struct InnerBlockCommitmentStream {
+    inner: tonic::Streaming<GetBlockCommitmentStreamResponse>,
+}
+
+impl Stream for InnerBlockCommitmentStream {
+    type Item = eyre::Result<SequencerBlockCommit>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        use astria_core::generated::astria::sequencerblock::optimistic::v1alpha1 as raw;
+
+        let Some(res) = std::task::ready!(self.inner.poll_next_unpin(cx)) else {
+            return Poll::Ready(None);
+        };
+
+        let raw = res
+            .wrap_err("failed receiving message over stream")?
+            .commitment
+            .ok_or_else(|| {
+                eyre!(
+                    "expected field `{}.commitment` was not set",
+                    GetBlockCommitmentStreamResponse::full_name()
+                )
+            })?;
+
+        let commitment = SequencerBlockCommit::try_from_raw_ref(&raw).wrap_err_with(|| {
+            format!(
+                "failed to validate message `{}` received from server",
+                raw::SequencerBlockCommit::full_name()
+            )
+        })?;
+
+        Poll::Ready(Some(Ok(commitment)))
+    }
+}
+
+pub(crate) struct ProposedBlockStream {
+    inner: BoxStream<'static, eyre::Result<FilteredSequencerBlock>>,
+}
+
+impl Stream for ProposedBlockStream {
+    type Item = eyre::Result<FilteredSequencerBlock>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.inner.poll_next_unpin(cx)
+    }
+}
+
+struct InnerOptimisticBlockStream {
+    inner: tonic::Streaming<GetOptimisticBlockStreamResponse>,
+}
+
+impl Stream for InnerOptimisticBlockStream {
+    type Item = eyre::Result<FilteredSequencerBlock>;
+
+    fn poll_next(
+        mut self: Pin<&mut Self>,
+        cx: &mut Context,
+    ) -> std::task::Poll<Option<Self::Item>> {
+        let Some(item) = ready!(self.inner.poll_next_unpin(cx)) else {
+            return Poll::Ready(None);
+        };
+        let raw = item
+            .wrap_err("failed receiving message over stream")?
+            .block
+            .ok_or_else(|| {
+                eyre!(
+                    "expected field `{}.block` was not set",
+                    GetOptimisticBlockStreamResponse::full_name()
+                )
+            })?;
+
+        let optimistic_block = FilteredSequencerBlock::try_from_raw(raw).wrap_err_with(|| {
+            format!(
+                "failed to validate `{}`",
+                FilteredSequencerBlock::full_name()
+            )
+        })?;
+
+        std::task::Poll::Ready(Some(Ok(optimistic_block)))
+    }
+}
diff --git a/crates/astria-auctioneer/src/sequencer_key.rs b/crates/astria-auctioneer/src/sequencer_key.rs
new file mode 100644
index 0000000000..463f77fc27
--- /dev/null
+++ b/crates/astria-auctioneer/src/sequencer_key.rs
@@ -0,0 +1,108 @@
+use std::{
+    fs,
+    path::{
+        Path,
+        PathBuf,
+    },
+};
+
+use astria_core::{
+    crypto::SigningKey,
+    primitive::v1::Address,
+};
+use astria_eyre::eyre::{
+    self,
+    bail,
+    eyre,
+    Context,
+};
+
+#[derive(Clone)]
+pub(crate) struct SequencerKey {
+    address: Address,
+    signing_key: SigningKey,
+}
+
+pub(crate) struct SequencerKeyBuilder {
+    path: Option<PathBuf>,
+    prefix: Option<String>,
+}
+
+impl SequencerKeyBuilder {
+    /// Sets the path from which the sequencer key is read.
+    ///
+    /// The file at `path` should contain a hex-encoded ed25519 secret key.
+    pub(crate) fn path<P: AsRef<Path>>(self, path: P) -> Self {
+        Self {
+            path: Some(path.as_ref().to_path_buf()),
+            ..self
+        }
+    }
+
+    /// Sets the prefix for constructing a bech32m sequencer address.
+    ///
+    /// The prefix must be a valid bech32 human-readable-prefix (Hrp).
+    pub(crate) fn prefix<S: AsRef<str>>(self, prefix: S) -> Self {
+        Self {
+            prefix: Some(prefix.as_ref().to_string()),
+            ..self
+        }
+    }
+
+    pub(crate) fn try_build(self) -> eyre::Result<SequencerKey> {
+        let Some(path) = self.path else {
+            bail!("path to sequencer key file must be set");
+        };
+        let Some(prefix) = self.prefix else {
+            bail!(
+                "a prefix to construct bech32m compliant astria addresses from the signing key \
+                 must be set"
+            );
+        };
+        let hex = fs::read_to_string(&path).wrap_err_with(|| {
+            format!("failed to read sequencer key from path: {}", path.display())
+        })?;
+        let bytes: [u8; 32] = hex::decode(hex.trim())
+            .wrap_err_with(|| format!("failed to decode hex: {}", path.display()))?
+            .try_into()
+            .map_err(|_| {
+                eyre!(
+                    "invalid private key length; must be 32 bytes: {}",
+                    path.display()
+                )
+            })?;
+        let signing_key = SigningKey::from(bytes);
+        let address = Address::builder()
+            .array(signing_key.address_bytes())
+            .prefix(&prefix)
+            .try_build()
+            .wrap_err_with(|| {
+                format!(
+                    "failed constructing valid sequencer address using the provided prefix \
+                     `{prefix}`"
+                )
+            })?;
+
+        Ok(SequencerKey {
+            address,
+            signing_key,
+        })
+    }
+}
+
+impl SequencerKey {
+    pub(crate) fn builder() -> SequencerKeyBuilder {
+        SequencerKeyBuilder {
+            path: None,
+            prefix: None,
+        }
+    }
+
+    pub(crate) fn address(&self) -> &Address {
+        &self.address
+    }
+
+    pub(crate) fn signing_key(&self) -> &SigningKey {
+        &self.signing_key
+    }
+}
diff --git a/crates/astria-auctioneer/src/streaming_utils.rs b/crates/astria-auctioneer/src/streaming_utils.rs
new file mode 100644
index 0000000000..dae348728a
--- /dev/null
+++ b/crates/astria-auctioneer/src/streaming_utils.rs
@@ -0,0 +1,157 @@
+use std::{
+    marker::PhantomData,
+    pin::Pin,
+    task::{
+        ready,
+        Poll,
+    },
+};
+
+use astria_eyre::eyre::{
+    self,
+    WrapErr as _,
+};
+use bytes::Bytes;
+use futures::{
+    Future,
+    FutureExt as _,
+    Stream,
+    StreamExt as _,
+};
+use http::{
+    Request,
+    Response,
+};
+use http_body::combinators::UnsyncBoxBody;
+use pin_project_lite::pin_project;
+use tonic::{
+    transport::Channel,
+    Status,
+};
+use tower::{
+    util::BoxCloneService,
+    ServiceBuilder,
+};
+use tower_http::{
+    map_response_body::MapResponseBodyLayer,
+    trace::{
+        DefaultMakeSpan,
+        TraceLayer,
+    },
+};
+
+pub(crate) type InstrumentedChannel = BoxCloneService<
+    Request<UnsyncBoxBody<Bytes, Status>>,
+    Response<UnsyncBoxBody<Bytes, Status>>,
+    tonic::transport::Error,
+>;
+
+pub(crate) fn make_instrumented_channel(uri: &str) -> eyre::Result<InstrumentedChannel> {
+    // TODO(janis): understand what an appropriate setting for timeout or connect_timeout would be.
+    //
+    // We *do not* set timeouts because it's not clear what the correct behavior with streams is
+    // intended to be.
+    //
+    // On a first connection to the auction service of the rollup, it can take several seconds
+    // before any bids are received (because auctioneer first has to forward proposed blocks,
+    // followed by an optimistic execution, and finally geth receiving bids based on that
+    // optimistically executed block which it can then forward to auctioneer).
+    //
+    // We *do* set connect_timeout because that is for establishing the stream in the first place
+    // irrespective of receiving over it. We rely on the reconnect logic of the underlying tonic
+    // channel for things like network failure or geth not yet being online.
+    let channel = Channel::from_shared(uri.to_string())
+        .wrap_err("failed to create a channel to the provided uri")?
+        .connect_timeout(std::time::Duration::from_secs(5))
+        .connect_lazy();
+
+    let channel = ServiceBuilder::new()
+        .layer(MapResponseBodyLayer::new(UnsyncBoxBody::new))
+        .layer(
+            TraceLayer::new_for_grpc().make_span_with(DefaultMakeSpan::new().include_headers(true)),
+        )
+        .service(channel);
+
+    Ok(InstrumentedChannel::new(channel))
+}
+
+pub(crate) fn restarting_stream(f: F) -> RestartingStream
+where
+    F: Fn() -> Fut,
+    Fut: Future>,
+    S: Stream>,
+{
+    let opening_stream = Some(f());
+    RestartingStream {
+        f,
+        opening_stream,
+        running_stream: None,
+        _phantom_data: PhantomData,
+    }
+}
+
+// TODO: Add logs.
+//
+// Specifically explain why Fut returns Option, and how to return
+// an error to the user (tracing).
+pin_project!
{ + pub(crate) struct RestartingStream { + f: F, + #[pin] + opening_stream: Option, + #[pin] + running_stream: Option, + _phantom_data: PhantomData>, + } +} + +impl Stream for RestartingStream +where + F: Fn() -> Fut, + Fut: Future>, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll> { + let mut this = self.project(); + + if this.opening_stream.is_some() { + debug_assert!(this.running_stream.is_none()); + + let open_output = ready!(this + .opening_stream + .as_mut() + .as_pin_mut() + .expect("inside a branch that checks opening_stream == Some") + .poll_unpin(cx)); + + // The future has completed, unset it so it will not be polled again. + Pin::set(&mut this.opening_stream, None); + match open_output { + Ok(stream) => Pin::set(&mut this.running_stream, Some(stream)), + Err(err) => return Poll::Ready(Some(Err(err))), + } + } + + if this.running_stream.is_some() { + debug_assert!(this.opening_stream.is_none()); + + if let Some(item) = ready!(this + .running_stream + .as_mut() + .as_pin_mut() + .expect("inside a branch that checks running_stream == Some") + .poll_next_unpin(cx)) + { + return Poll::Ready(Some(item)); + } + + Pin::set(&mut this.running_stream, None); + Pin::set(&mut this.opening_stream, Some((*this.f)())); + return Poll::Pending; + } + + Poll::Ready(None) + } +} diff --git a/crates/astria-core/CHANGELOG.md b/crates/astria-core/CHANGELOG.md index 33db19f036..28527b351e 100644 --- a/crates/astria-core/CHANGELOG.md +++ b/crates/astria-core/CHANGELOG.md @@ -15,7 +15,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add method `TracePrefixed::leading_channel` to read the left-most channel of a trace prefixed ICS20 asset [#1768](https://github.com/astriaorg/astria/pull/1768). - Add `impl Protobuf for Address` [#1802](https://github.com/astriaorg/astria/pull/1802). -- Add `BridgeTransfer` action and `BridgeTransfer` variant to `FeeChange`. +- Add module `sequencerblock::optimistic::v1alpha1` + and `impl Protobuf for FilteredSequencerBlock` [#1839](https://github.com/astriaorg/astria/pull/1839). +- Add `BridgeTransfer` action and `BridgeTransfer` variant to `FeeChange` + [#1934](https://github.com/astriaorg/astria/pull/1934). ### Changed diff --git a/crates/astria-core/src/generated/astria.auction.v1alpha1.rs b/crates/astria-core/src/generated/astria.auction.v1alpha1.rs new file mode 100644 index 0000000000..7be04b78c5 --- /dev/null +++ b/crates/astria-core/src/generated/astria.auction.v1alpha1.rs @@ -0,0 +1,395 @@ +/// The Allocation message is submitted by the Auctioneer to the rollup as a +/// `RollupDataSubmission` on the sequencer. +/// The rollup will verify the signature and public key against its configuration, +/// then unbundle the body into rollup transactions and execute them first in the +/// block. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Allocation { + /// The Ed25519 signature of the Auctioneer, to be verified against config by the + /// rollup. + #[prost(bytes = "bytes", tag = "1")] + pub signature: ::prost::bytes::Bytes, + /// The Ed25519 public key of the Auctioneer, to be verified against config by the + /// rollup. + #[prost(bytes = "bytes", tag = "2")] + pub public_key: ::prost::bytes::Bytes, + /// The bid that was allocated the winning slot by the Auctioneer. 
This is a + /// google.protobuf.Any to avoid decoding and re-encoding after receiving an Allocation + /// over the wire and checking if signature and public key match the signed bid. + /// Implementors are expected to read and write an encoded Bid into this field. + #[prost(message, optional, tag = "3")] + pub bid: ::core::option::Option<::pbjson_types::Any>, +} +impl ::prost::Name for Allocation { + const NAME: &'static str = "Allocation"; + const PACKAGE: &'static str = "astria.auction.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.auction.v1alpha1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBidStreamRequest {} +impl ::prost::Name for GetBidStreamRequest { + const NAME: &'static str = "GetBidStreamRequest"; + const PACKAGE: &'static str = "astria.auction.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.auction.v1alpha1.{}", Self::NAME) + } +} +/// A bid is a bundle of transactions that was submitted to the auctioneer's rollup node. +/// The rollup node will verify that the bundle is valid and pays the fee, and will stream +/// it to the auctioneer for participation in the auction for a given block. +/// The sequencer block hash and the rollup parent block hash are used by the auctioneer +/// to identify the block for which the bundle is intended (i.e. which auction the bid is for). +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Bid { + /// The hash of previous rollup block, on top of which the bundle will be executed as ToB. + #[prost(bytes = "bytes", tag = "1")] + pub rollup_parent_block_hash: ::prost::bytes::Bytes, + /// The hash of the previous sequencer block, identifying the auction for which the bid is intended. + /// This is the hash of the sequencer block on top of which the bundle will be executed as ToB. + #[prost(bytes = "bytes", tag = "2")] + pub sequencer_parent_block_hash: ::prost::bytes::Bytes, + /// The fee paid by the bundle submitter. The auctioneer's rollup node calculates this based + /// on the bundles submitted by users. For example, this can be the sum of the coinbase transfers + /// in the bundle's transactions. + #[prost(uint64, tag = "3")] + pub fee: u64, + /// The list of serialized rollup transactions from the bundle. + #[prost(bytes = "bytes", repeated, tag = "4")] + pub transactions: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>, +} +impl ::prost::Name for Bid { + const NAME: &'static str = "Bid"; + const PACKAGE: &'static str = "astria.auction.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.auction.v1alpha1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBidStreamResponse { + #[prost(message, optional, tag = "1")] + pub bid: ::core::option::Option, +} +impl ::prost::Name for GetBidStreamResponse { + const NAME: &'static str = "GetBidStreamResponse"; + const PACKAGE: &'static str = "astria.auction.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.auction.v1alpha1.{}", Self::NAME) + } +} +/// Generated client implementations. 
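+/// A hedged usage sketch for this generated client (the endpoint URL is an
+/// assumption; `connect` and `get_bid_stream` are defined in the module below,
+/// behind the `client` feature):
+///
+/// ```ignore
+/// let mut client = auction_service_client::AuctionServiceClient::connect(
+///     "http://rollup-node:50051",
+/// )
+/// .await?;
+/// let mut bids = client.get_bid_stream(GetBidStreamRequest {}).await?.into_inner();
+/// while let Some(response) = bids.message().await? {
+///     // `response.bid` is an `Option<Bid>` carrying fee, transactions,
+///     // and the parent block hashes identifying the auction.
+/// }
+/// ```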
+#[cfg(feature = "client")] +pub mod auction_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct AuctionServiceClient { + inner: tonic::client::Grpc, + } + impl AuctionServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AuctionServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AuctionServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AuctionServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// An auctioneer will initiate this long running stream to receive bids from the rollup node, + /// until either a timeout or the connection is closed by the client. + pub async fn get_bid_stream( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/astria.auction.v1alpha1.AuctionService/GetBidStream", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "astria.auction.v1alpha1.AuctionService", + "GetBidStream", + ), + ); + self.inner.server_streaming(req, path, codec).await + } + } +} +/// Generated server implementations. +#[cfg(feature = "server")] +pub mod auction_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with AuctionServiceServer. 
+ #[async_trait] + pub trait AuctionService: Send + Sync + 'static { + /// Server streaming response type for the GetBidStream method. + type GetBidStreamStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + Send + + 'static; + /// An auctioneer will initiate this long running stream to receive bids from the rollup node, + /// until either a timeout or the connection is closed by the client. + async fn get_bid_stream( + self: std::sync::Arc, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct AuctionServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl AuctionServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for AuctionServiceServer + where + T: AuctionService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/astria.auction.v1alpha1.AuctionService/GetBidStream" => { + #[allow(non_camel_case_types)] + struct GetBidStreamSvc(pub Arc); + impl< + T: AuctionService, + > tonic::server::ServerStreamingService + for GetBidStreamSvc { + type Response = super::GetBidStreamResponse; + type ResponseStream = T::GetBidStreamStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_bid_stream(inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetBidStreamSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for AuctionServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for AuctionServiceServer { + const NAME: &'static str = "astria.auction.v1alpha1.AuctionService"; + } +} diff --git a/crates/astria-core/src/generated/astria.auction.v1alpha1.serde.rs b/crates/astria-core/src/generated/astria.auction.v1alpha1.serde.rs new file mode 100644 index 0000000000..b2479178d1 --- /dev/null +++ b/crates/astria-core/src/generated/astria.auction.v1alpha1.serde.rs @@ -0,0 +1,450 @@ +impl serde::Serialize for Allocation { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.signature.is_empty() { + len += 1; + } 
+ if !self.public_key.is_empty() { + len += 1; + } + if self.bid.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.auction.v1alpha1.Allocation", len)?; + if !self.signature.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("signature", pbjson::private::base64::encode(&self.signature).as_str())?; + } + if !self.public_key.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("publicKey", pbjson::private::base64::encode(&self.public_key).as_str())?; + } + if let Some(v) = self.bid.as_ref() { + struct_ser.serialize_field("bid", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for Allocation { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "signature", + "public_key", + "publicKey", + "bid", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Signature, + PublicKey, + Bid, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "signature" => Ok(GeneratedField::Signature), + "publicKey" | "public_key" => Ok(GeneratedField::PublicKey), + "bid" => Ok(GeneratedField::Bid), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = Allocation; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.auction.v1alpha1.Allocation") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut signature__ = None; + let mut public_key__ = None; + let mut bid__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::Signature => { + if signature__.is_some() { + return Err(serde::de::Error::duplicate_field("signature")); + } + signature__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + GeneratedField::PublicKey => { + if public_key__.is_some() { + return Err(serde::de::Error::duplicate_field("publicKey")); + } + public_key__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + GeneratedField::Bid => { + if bid__.is_some() { + return Err(serde::de::Error::duplicate_field("bid")); + } + bid__ = map_.next_value()?; + } + } + } + Ok(Allocation { + signature: signature__.unwrap_or_default(), + public_key: public_key__.unwrap_or_default(), + bid: bid__, + }) + } + } + deserializer.deserialize_struct("astria.auction.v1alpha1.Allocation", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for Bid { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.rollup_parent_block_hash.is_empty() { + len += 1; + } + if !self.sequencer_parent_block_hash.is_empty() { + len += 1; + } + if self.fee != 0 { + len += 1; + } + if !self.transactions.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.auction.v1alpha1.Bid", len)?; + if !self.rollup_parent_block_hash.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("rollupParentBlockHash", pbjson::private::base64::encode(&self.rollup_parent_block_hash).as_str())?; + } + if !self.sequencer_parent_block_hash.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("sequencerParentBlockHash", pbjson::private::base64::encode(&self.sequencer_parent_block_hash).as_str())?; + } + if self.fee != 0 { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("fee", ToString::to_string(&self.fee).as_str())?; + } + if !self.transactions.is_empty() { + struct_ser.serialize_field("transactions", &self.transactions.iter().map(pbjson::private::base64::encode).collect::>())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for Bid { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "rollup_parent_block_hash", + "rollupParentBlockHash", + "sequencer_parent_block_hash", + "sequencerParentBlockHash", + "fee", + "transactions", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + RollupParentBlockHash, + SequencerParentBlockHash, + Fee, + Transactions, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "rollupParentBlockHash" | "rollup_parent_block_hash" => Ok(GeneratedField::RollupParentBlockHash), + "sequencerParentBlockHash" | "sequencer_parent_block_hash" => Ok(GeneratedField::SequencerParentBlockHash), + "fee" => Ok(GeneratedField::Fee), + "transactions" => Ok(GeneratedField::Transactions), + _ => 
Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = Bid; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.auction.v1alpha1.Bid") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut rollup_parent_block_hash__ = None; + let mut sequencer_parent_block_hash__ = None; + let mut fee__ = None; + let mut transactions__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::RollupParentBlockHash => { + if rollup_parent_block_hash__.is_some() { + return Err(serde::de::Error::duplicate_field("rollupParentBlockHash")); + } + rollup_parent_block_hash__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + GeneratedField::SequencerParentBlockHash => { + if sequencer_parent_block_hash__.is_some() { + return Err(serde::de::Error::duplicate_field("sequencerParentBlockHash")); + } + sequencer_parent_block_hash__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + GeneratedField::Fee => { + if fee__.is_some() { + return Err(serde::de::Error::duplicate_field("fee")); + } + fee__ = + Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) + ; + } + GeneratedField::Transactions => { + if transactions__.is_some() { + return Err(serde::de::Error::duplicate_field("transactions")); + } + transactions__ = + Some(map_.next_value::>>()? + .into_iter().map(|x| x.0).collect()) + ; + } + } + } + Ok(Bid { + rollup_parent_block_hash: rollup_parent_block_hash__.unwrap_or_default(), + sequencer_parent_block_hash: sequencer_parent_block_hash__.unwrap_or_default(), + fee: fee__.unwrap_or_default(), + transactions: transactions__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("astria.auction.v1alpha1.Bid", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for GetBidStreamRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let len = 0; + let struct_ser = serializer.serialize_struct("astria.auction.v1alpha1.GetBidStreamRequest", len)?; + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for GetBidStreamRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + Err(serde::de::Error::unknown_field(value, FIELDS)) + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GetBidStreamRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) 
-> std::fmt::Result { + formatter.write_str("struct astria.auction.v1alpha1.GetBidStreamRequest") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + while map_.next_key::()?.is_some() { + let _ = map_.next_value::()?; + } + Ok(GetBidStreamRequest { + }) + } + } + deserializer.deserialize_struct("astria.auction.v1alpha1.GetBidStreamRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for GetBidStreamResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.bid.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.auction.v1alpha1.GetBidStreamResponse", len)?; + if let Some(v) = self.bid.as_ref() { + struct_ser.serialize_field("bid", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for GetBidStreamResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "bid", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Bid, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "bid" => Ok(GeneratedField::Bid), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GetBidStreamResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.auction.v1alpha1.GetBidStreamResponse") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut bid__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::Bid => { + if bid__.is_some() { + return Err(serde::de::Error::duplicate_field("bid")); + } + bid__ = map_.next_value()?; + } + } + } + Ok(GetBidStreamResponse { + bid: bid__, + }) + } + } + deserializer.deserialize_struct("astria.auction.v1alpha1.GetBidStreamResponse", FIELDS, GeneratedVisitor) + } +} diff --git a/crates/astria-core/src/generated/astria.bundle.v1alpha1.rs b/crates/astria-core/src/generated/astria.bundle.v1alpha1.rs deleted file mode 100644 index 44265dd03d..0000000000 --- a/crates/astria-core/src/generated/astria.bundle.v1alpha1.rs +++ /dev/null @@ -1,762 +0,0 @@ -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetBundleStreamRequest {} -impl ::prost::Name for GetBundleStreamRequest { - const NAME: &'static str = "GetBundleStreamRequest"; - const PACKAGE: &'static str = "astria.bundle.v1alpha1"; - fn full_name() -> ::prost::alloc::string::String { - ::prost::alloc::format!("astria.bundle.v1alpha1.{}", Self::NAME) - } -} -/// Information for the bundle submitter to know how to submit the bundle. 
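[Editorial sketch: what the pbjson-generated impls in `astria.auction.v1alpha1.serde.rs` above produce on the wire, using `Bid`, the auction-package successor of the `Bundle` type whose deletion begins here. The module path is an assumption; the encoding rules (bytes as base64 under camelCase keys, `u64` as a decimal string, snake_case accepted on input) are read directly from the impls shown.]

use astria_core::generated::astria::auction::v1alpha1::Bid;

fn main() -> Result<(), serde_json::Error> {
    let bid = Bid {
        rollup_parent_block_hash: vec![0xde, 0xad].into(),
        sequencer_parent_block_hash: vec![0xbe, 0xef].into(),
        fee: 42,
        transactions: vec![vec![0x01].into()],
    };
    let json = serde_json::to_string(&bid)?;
    // Serializes as:
    // {"rollupParentBlockHash":"3q0=","sequencerParentBlockHash":"vu8=",
    //  "fee":"42","transactions":["AQ=="]}
    // The deserializer also accepts "rollup_parent_block_hash" etc., and
    // rejects duplicate fields, per the visitor code above.
    let round_tripped: Bid = serde_json::from_str(&json)?;
    assert_eq!(bid, round_tripped);
    Ok(())
}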
-/// The fee and base_sequencer_block_hash are not necessarily strictly necessary -/// it allows for the case where the server doesn't always send the highest fee -/// bundles after the previous but could just stream any confirmed bundles. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Bundle { - /// The fee that can be expected to be received for submitting this bundle. - /// This allows the bundle producer to stream any confirmed bundles they would be ok - /// with submitting. Used to avoid race conditions in received bundle packets. Could - /// also be used by a bundle submitter to allow multiple entities to submit bundles. - #[prost(uint64, tag = "1")] - pub fee: u64, - /// The byte list of transactions to be included. - #[prost(bytes = "bytes", repeated, tag = "2")] - pub transactions: ::prost::alloc::vec::Vec<::prost::bytes::Bytes>, - /// The base_sequencer_block_hash is the hash from the base block this bundle - /// is based on. This is used to verify that the bundle is based on the correct - /// Sequencer block. - #[prost(bytes = "bytes", tag = "3")] - pub base_sequencer_block_hash: ::prost::bytes::Bytes, - /// The hash of previous rollup block, on top of which the bundle will be executed as ToB. - #[prost(bytes = "bytes", tag = "4")] - pub prev_rollup_block_hash: ::prost::bytes::Bytes, -} -impl ::prost::Name for Bundle { - const NAME: &'static str = "Bundle"; - const PACKAGE: &'static str = "astria.bundle.v1alpha1"; - fn full_name() -> ::prost::alloc::string::String { - ::prost::alloc::format!("astria.bundle.v1alpha1.{}", Self::NAME) - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetBundleStreamResponse { - #[prost(message, optional, tag = "1")] - pub bundle: ::core::option::Option, -} -impl ::prost::Name for GetBundleStreamResponse { - const NAME: &'static str = "GetBundleStreamResponse"; - const PACKAGE: &'static str = "astria.bundle.v1alpha1"; - fn full_name() -> ::prost::alloc::string::String { - ::prost::alloc::format!("astria.bundle.v1alpha1.{}", Self::NAME) - } -} -/// Generated client implementations. -#[cfg(feature = "client")] -pub mod bundle_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct BundleServiceClient { - inner: tonic::client::Grpc, - } - impl BundleServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl BundleServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> BundleServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - BundleServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// A bundle submitter requests bundles given a new optimistic Sequencer block, - /// and receives a stream of potential bundles for submission, until either a timeout - /// or the connection is closed by the client. - pub async fn get_bundle_stream( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response>, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/astria.bundle.v1alpha1.BundleService/GetBundleStream", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "astria.bundle.v1alpha1.BundleService", - "GetBundleStream", - ), - ); - self.inner.server_streaming(req, path, codec).await - } - } -} -/// Generated server implementations. -#[cfg(feature = "server")] -pub mod bundle_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with BundleServiceServer. - #[async_trait] - pub trait BundleService: Send + Sync + 'static { - /// Server streaming response type for the GetBundleStream method. 
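[Editorial sketch: consuming a server-streaming RPC through a generated tonic client. Since `BundleServiceClient` is deleted by this diff, the example targets its auction-package successor; the module path and endpoint are assumptions.]

use astria_core::generated::astria::auction::v1alpha1::{
    auction_service_client::AuctionServiceClient, GetBidStreamRequest,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `connect` is the generated convenience constructor; `new` and
    // `with_origin` accept a pre-built channel instead.
    let mut client = AuctionServiceClient::connect("http://127.0.0.1:50051").await?;
    let mut stream = client
        .get_bid_stream(GetBidStreamRequest {})
        .await?
        .into_inner();
    // `tonic::codec::Streaming::message` yields `None` once the server ends
    // the stream or the connection is closed.
    while let Some(response) = stream.message().await? {
        println!("bid: {:?}", response.bid);
    }
    Ok(())
}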
- type GetBundleStreamStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > - + Send - + 'static; - /// A bundle submitter requests bundles given a new optimistic Sequencer block, - /// and receives a stream of potential bundles for submission, until either a timeout - /// or the connection is closed by the client. - async fn get_bundle_stream( - self: std::sync::Arc, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - #[derive(Debug)] - pub struct BundleServiceServer { - inner: _Inner, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - struct _Inner(Arc); - impl BundleServiceServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for BundleServiceServer - where - T: BundleService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); - match req.uri().path() { - "/astria.bundle.v1alpha1.BundleService/GetBundleStream" => { - #[allow(non_camel_case_types)] - struct GetBundleStreamSvc(pub Arc); - impl< - T: BundleService, - > tonic::server::ServerStreamingService< - super::GetBundleStreamRequest, - > for GetBundleStreamSvc { - type Response = super::GetBundleStreamResponse; - type ResponseStream = T::GetBundleStreamStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_bundle_stream(inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = GetBundleStreamSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.server_streaming(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } - } - } - } - impl Clone for BundleServiceServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for BundleServiceServer { - const NAME: &'static str = "astria.bundle.v1alpha1.BundleService"; - } -} -/// The "BaseBlock" is the information needed to simulate bundles on top of -/// a Sequencer block which may not have been committed yet. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BaseBlock { - /// This is the block hash for the proposed block. - #[prost(bytes = "bytes", tag = "1")] - pub sequencer_block_hash: ::prost::bytes::Bytes, - /// List of transactions to include in the new block. 
- #[prost(message, repeated, tag = "2")] - pub transactions: ::prost::alloc::vec::Vec< - super::super::sequencerblock::v1::RollupData, - >, - /// Timestamp to be used for new block. - #[prost(message, optional, tag = "3")] - pub timestamp: ::core::option::Option<::pbjson_types::Timestamp>, -} -impl ::prost::Name for BaseBlock { - const NAME: &'static str = "BaseBlock"; - const PACKAGE: &'static str = "astria.bundle.v1alpha1"; - fn full_name() -> ::prost::alloc::string::String { - ::prost::alloc::format!("astria.bundle.v1alpha1.{}", Self::NAME) - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecuteOptimisticBlockStreamRequest { - #[prost(message, optional, tag = "1")] - pub base_block: ::core::option::Option, -} -impl ::prost::Name for ExecuteOptimisticBlockStreamRequest { - const NAME: &'static str = "ExecuteOptimisticBlockStreamRequest"; - const PACKAGE: &'static str = "astria.bundle.v1alpha1"; - fn full_name() -> ::prost::alloc::string::String { - ::prost::alloc::format!("astria.bundle.v1alpha1.{}", Self::NAME) - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecuteOptimisticBlockStreamResponse { - /// Metadata identifying the block resulting from executing a block. Includes number, hash, - /// parent hash and timestamp. - #[prost(message, optional, tag = "1")] - pub block: ::core::option::Option, - /// The base_sequencer_block_hash is the hash from the base sequencer block this block - /// is based on. This is used to associate an optimistic execution result with the hash - /// received once a sequencer block is committed. - #[prost(bytes = "bytes", tag = "2")] - pub base_sequencer_block_hash: ::prost::bytes::Bytes, -} -impl ::prost::Name for ExecuteOptimisticBlockStreamResponse { - const NAME: &'static str = "ExecuteOptimisticBlockStreamResponse"; - const PACKAGE: &'static str = "astria.bundle.v1alpha1"; - fn full_name() -> ::prost::alloc::string::String { - ::prost::alloc::format!("astria.bundle.v1alpha1.{}", Self::NAME) - } -} -/// Generated client implementations. -#[cfg(feature = "client")] -pub mod optimistic_execution_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct OptimisticExecutionServiceClient { - inner: tonic::client::Grpc, - } - impl OptimisticExecutionServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl OptimisticExecutionServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> OptimisticExecutionServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - OptimisticExecutionServiceClient::new( - InterceptedService::new(inner, interceptor), - ) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Stream blocks from the Auctioneer to Geth for optimistic execution. Geth will stream back - /// metadata from the executed blocks. - pub async fn execute_optimistic_block_stream( - &mut self, - request: impl tonic::IntoStreamingRequest< - Message = super::ExecuteOptimisticBlockStreamRequest, - >, - ) -> std::result::Result< - tonic::Response< - tonic::codec::Streaming, - >, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/astria.bundle.v1alpha1.OptimisticExecutionService/ExecuteOptimisticBlockStream", - ); - let mut req = request.into_streaming_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "astria.bundle.v1alpha1.OptimisticExecutionService", - "ExecuteOptimisticBlockStream", - ), - ); - self.inner.streaming(req, path, codec).await - } - } -} -/// Generated server implementations. -#[cfg(feature = "server")] -pub mod optimistic_execution_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with OptimisticExecutionServiceServer. - #[async_trait] - pub trait OptimisticExecutionService: Send + Sync + 'static { - /// Server streaming response type for the ExecuteOptimisticBlockStream method. 
- type ExecuteOptimisticBlockStreamStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result< - super::ExecuteOptimisticBlockStreamResponse, - tonic::Status, - >, - > - + Send - + 'static; - /// Stream blocks from the Auctioneer to Geth for optimistic execution. Geth will stream back - /// metadata from the executed blocks. - async fn execute_optimistic_block_stream( - self: std::sync::Arc, - request: tonic::Request< - tonic::Streaming, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - #[derive(Debug)] - pub struct OptimisticExecutionServiceServer { - inner: _Inner, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - struct _Inner(Arc); - impl OptimisticExecutionServiceServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> - for OptimisticExecutionServiceServer - where - T: OptimisticExecutionService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); - match req.uri().path() { - "/astria.bundle.v1alpha1.OptimisticExecutionService/ExecuteOptimisticBlockStream" => { - #[allow(non_camel_case_types)] - struct ExecuteOptimisticBlockStreamSvc< - T: OptimisticExecutionService, - >( - pub Arc, - ); - impl< - T: OptimisticExecutionService, - > tonic::server::StreamingService< - super::ExecuteOptimisticBlockStreamRequest, - > for ExecuteOptimisticBlockStreamSvc { - type Response = super::ExecuteOptimisticBlockStreamResponse; - type ResponseStream = T::ExecuteOptimisticBlockStreamStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - tonic::Streaming, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::execute_optimistic_block_stream( - inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = ExecuteOptimisticBlockStreamSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.streaming(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } - } - } - } - impl Clone for OptimisticExecutionServiceServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService - for OptimisticExecutionServiceServer { - const NAME: &'static str = "astria.bundle.v1alpha1.OptimisticExecutionService"; - } -} diff --git a/crates/astria-core/src/generated/astria.bundle.v1alpha1.serde.rs b/crates/astria-core/src/generated/astria.bundle.v1alpha1.serde.rs deleted file mode 100644 index 0ae7a566fd..0000000000 --- 
a/crates/astria-core/src/generated/astria.bundle.v1alpha1.serde.rs +++ /dev/null @@ -1,651 +0,0 @@ -impl serde::Serialize for BaseBlock { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if !self.sequencer_block_hash.is_empty() { - len += 1; - } - if !self.transactions.is_empty() { - len += 1; - } - if self.timestamp.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("astria.bundle.v1alpha1.BaseBlock", len)?; - if !self.sequencer_block_hash.is_empty() { - #[allow(clippy::needless_borrow)] - struct_ser.serialize_field("sequencerBlockHash", pbjson::private::base64::encode(&self.sequencer_block_hash).as_str())?; - } - if !self.transactions.is_empty() { - struct_ser.serialize_field("transactions", &self.transactions)?; - } - if let Some(v) = self.timestamp.as_ref() { - struct_ser.serialize_field("timestamp", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for BaseBlock { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "sequencer_block_hash", - "sequencerBlockHash", - "transactions", - "timestamp", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - SequencerBlockHash, - Transactions, - Timestamp, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "sequencerBlockHash" | "sequencer_block_hash" => Ok(GeneratedField::SequencerBlockHash), - "transactions" => Ok(GeneratedField::Transactions), - "timestamp" => Ok(GeneratedField::Timestamp), - _ => Err(serde::de::Error::unknown_field(value, FIELDS)), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = BaseBlock; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct astria.bundle.v1alpha1.BaseBlock") - } - - fn visit_map(self, mut map_: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut sequencer_block_hash__ = None; - let mut transactions__ = None; - let mut timestamp__ = None; - while let Some(k) = map_.next_key()? 
{ - match k { - GeneratedField::SequencerBlockHash => { - if sequencer_block_hash__.is_some() { - return Err(serde::de::Error::duplicate_field("sequencerBlockHash")); - } - sequencer_block_hash__ = - Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) - ; - } - GeneratedField::Transactions => { - if transactions__.is_some() { - return Err(serde::de::Error::duplicate_field("transactions")); - } - transactions__ = Some(map_.next_value()?); - } - GeneratedField::Timestamp => { - if timestamp__.is_some() { - return Err(serde::de::Error::duplicate_field("timestamp")); - } - timestamp__ = map_.next_value()?; - } - } - } - Ok(BaseBlock { - sequencer_block_hash: sequencer_block_hash__.unwrap_or_default(), - transactions: transactions__.unwrap_or_default(), - timestamp: timestamp__, - }) - } - } - deserializer.deserialize_struct("astria.bundle.v1alpha1.BaseBlock", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for Bundle { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.fee != 0 { - len += 1; - } - if !self.transactions.is_empty() { - len += 1; - } - if !self.base_sequencer_block_hash.is_empty() { - len += 1; - } - if !self.prev_rollup_block_hash.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("astria.bundle.v1alpha1.Bundle", len)?; - if self.fee != 0 { - #[allow(clippy::needless_borrow)] - struct_ser.serialize_field("fee", ToString::to_string(&self.fee).as_str())?; - } - if !self.transactions.is_empty() { - struct_ser.serialize_field("transactions", &self.transactions.iter().map(pbjson::private::base64::encode).collect::>())?; - } - if !self.base_sequencer_block_hash.is_empty() { - #[allow(clippy::needless_borrow)] - struct_ser.serialize_field("baseSequencerBlockHash", pbjson::private::base64::encode(&self.base_sequencer_block_hash).as_str())?; - } - if !self.prev_rollup_block_hash.is_empty() { - #[allow(clippy::needless_borrow)] - struct_ser.serialize_field("prevRollupBlockHash", pbjson::private::base64::encode(&self.prev_rollup_block_hash).as_str())?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for Bundle { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "fee", - "transactions", - "base_sequencer_block_hash", - "baseSequencerBlockHash", - "prev_rollup_block_hash", - "prevRollupBlockHash", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Fee, - Transactions, - BaseSequencerBlockHash, - PrevRollupBlockHash, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "fee" => Ok(GeneratedField::Fee), - "transactions" => Ok(GeneratedField::Transactions), - "baseSequencerBlockHash" | "base_sequencer_block_hash" => Ok(GeneratedField::BaseSequencerBlockHash), - "prevRollupBlockHash" | "prev_rollup_block_hash" => Ok(GeneratedField::PrevRollupBlockHash), - _ => 
Err(serde::de::Error::unknown_field(value, FIELDS)), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = Bundle; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct astria.bundle.v1alpha1.Bundle") - } - - fn visit_map(self, mut map_: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut fee__ = None; - let mut transactions__ = None; - let mut base_sequencer_block_hash__ = None; - let mut prev_rollup_block_hash__ = None; - while let Some(k) = map_.next_key()? { - match k { - GeneratedField::Fee => { - if fee__.is_some() { - return Err(serde::de::Error::duplicate_field("fee")); - } - fee__ = - Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) - ; - } - GeneratedField::Transactions => { - if transactions__.is_some() { - return Err(serde::de::Error::duplicate_field("transactions")); - } - transactions__ = - Some(map_.next_value::>>()? - .into_iter().map(|x| x.0).collect()) - ; - } - GeneratedField::BaseSequencerBlockHash => { - if base_sequencer_block_hash__.is_some() { - return Err(serde::de::Error::duplicate_field("baseSequencerBlockHash")); - } - base_sequencer_block_hash__ = - Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) - ; - } - GeneratedField::PrevRollupBlockHash => { - if prev_rollup_block_hash__.is_some() { - return Err(serde::de::Error::duplicate_field("prevRollupBlockHash")); - } - prev_rollup_block_hash__ = - Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) - ; - } - } - } - Ok(Bundle { - fee: fee__.unwrap_or_default(), - transactions: transactions__.unwrap_or_default(), - base_sequencer_block_hash: base_sequencer_block_hash__.unwrap_or_default(), - prev_rollup_block_hash: prev_rollup_block_hash__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("astria.bundle.v1alpha1.Bundle", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for ExecuteOptimisticBlockStreamRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.base_block.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("astria.bundle.v1alpha1.ExecuteOptimisticBlockStreamRequest", len)?; - if let Some(v) = self.base_block.as_ref() { - struct_ser.serialize_field("baseBlock", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for ExecuteOptimisticBlockStreamRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "base_block", - "baseBlock", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - BaseBlock, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "baseBlock" | "base_block" => Ok(GeneratedField::BaseBlock), - _ => 
Err(serde::de::Error::unknown_field(value, FIELDS)), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = ExecuteOptimisticBlockStreamRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct astria.bundle.v1alpha1.ExecuteOptimisticBlockStreamRequest") - } - - fn visit_map(self, mut map_: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut base_block__ = None; - while let Some(k) = map_.next_key()? { - match k { - GeneratedField::BaseBlock => { - if base_block__.is_some() { - return Err(serde::de::Error::duplicate_field("baseBlock")); - } - base_block__ = map_.next_value()?; - } - } - } - Ok(ExecuteOptimisticBlockStreamRequest { - base_block: base_block__, - }) - } - } - deserializer.deserialize_struct("astria.bundle.v1alpha1.ExecuteOptimisticBlockStreamRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for ExecuteOptimisticBlockStreamResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.block.is_some() { - len += 1; - } - if !self.base_sequencer_block_hash.is_empty() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("astria.bundle.v1alpha1.ExecuteOptimisticBlockStreamResponse", len)?; - if let Some(v) = self.block.as_ref() { - struct_ser.serialize_field("block", v)?; - } - if !self.base_sequencer_block_hash.is_empty() { - #[allow(clippy::needless_borrow)] - struct_ser.serialize_field("baseSequencerBlockHash", pbjson::private::base64::encode(&self.base_sequencer_block_hash).as_str())?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for ExecuteOptimisticBlockStreamResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "block", - "base_sequencer_block_hash", - "baseSequencerBlockHash", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - Block, - BaseSequencerBlockHash, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "block" => Ok(GeneratedField::Block), - "baseSequencerBlockHash" | "base_sequencer_block_hash" => Ok(GeneratedField::BaseSequencerBlockHash), - _ => Err(serde::de::Error::unknown_field(value, FIELDS)), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = ExecuteOptimisticBlockStreamResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct astria.bundle.v1alpha1.ExecuteOptimisticBlockStreamResponse") - } - - fn visit_map(self, mut map_: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut block__ = None; - let mut 
base_sequencer_block_hash__ = None; - while let Some(k) = map_.next_key()? { - match k { - GeneratedField::Block => { - if block__.is_some() { - return Err(serde::de::Error::duplicate_field("block")); - } - block__ = map_.next_value()?; - } - GeneratedField::BaseSequencerBlockHash => { - if base_sequencer_block_hash__.is_some() { - return Err(serde::de::Error::duplicate_field("baseSequencerBlockHash")); - } - base_sequencer_block_hash__ = - Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) - ; - } - } - } - Ok(ExecuteOptimisticBlockStreamResponse { - block: block__, - base_sequencer_block_hash: base_sequencer_block_hash__.unwrap_or_default(), - }) - } - } - deserializer.deserialize_struct("astria.bundle.v1alpha1.ExecuteOptimisticBlockStreamResponse", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for GetBundleStreamRequest { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let len = 0; - let struct_ser = serializer.serialize_struct("astria.bundle.v1alpha1.GetBundleStreamRequest", len)?; - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for GetBundleStreamRequest { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - Err(serde::de::Error::unknown_field(value, FIELDS)) - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GetBundleStreamRequest; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct astria.bundle.v1alpha1.GetBundleStreamRequest") - } - - fn visit_map(self, mut map_: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - while map_.next_key::()?.is_some() { - let _ = map_.next_value::()?; - } - Ok(GetBundleStreamRequest { - }) - } - } - deserializer.deserialize_struct("astria.bundle.v1alpha1.GetBundleStreamRequest", FIELDS, GeneratedVisitor) - } -} -impl serde::Serialize for GetBundleStreamResponse { - #[allow(deprecated)] - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - use serde::ser::SerializeStruct; - let mut len = 0; - if self.bundle.is_some() { - len += 1; - } - let mut struct_ser = serializer.serialize_struct("astria.bundle.v1alpha1.GetBundleStreamResponse", len)?; - if let Some(v) = self.bundle.as_ref() { - struct_ser.serialize_field("bundle", v)?; - } - struct_ser.end() - } -} -impl<'de> serde::Deserialize<'de> for GetBundleStreamResponse { - #[allow(deprecated)] - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - const FIELDS: &[&str] = &[ - "bundle", - ]; - - #[allow(clippy::enum_variant_names)] - enum GeneratedField { - 
Bundle, - } - impl<'de> serde::Deserialize<'de> for GeneratedField { - fn deserialize(deserializer: D) -> std::result::Result - where - D: serde::Deserializer<'de>, - { - struct GeneratedVisitor; - - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GeneratedField; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "expected one of: {:?}", &FIELDS) - } - - #[allow(unused_variables)] - fn visit_str(self, value: &str) -> std::result::Result - where - E: serde::de::Error, - { - match value { - "bundle" => Ok(GeneratedField::Bundle), - _ => Err(serde::de::Error::unknown_field(value, FIELDS)), - } - } - } - deserializer.deserialize_identifier(GeneratedVisitor) - } - } - struct GeneratedVisitor; - impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { - type Value = GetBundleStreamResponse; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - formatter.write_str("struct astria.bundle.v1alpha1.GetBundleStreamResponse") - } - - fn visit_map(self, mut map_: V) -> std::result::Result - where - V: serde::de::MapAccess<'de>, - { - let mut bundle__ = None; - while let Some(k) = map_.next_key()? { - match k { - GeneratedField::Bundle => { - if bundle__.is_some() { - return Err(serde::de::Error::duplicate_field("bundle")); - } - bundle__ = map_.next_value()?; - } - } - } - Ok(GetBundleStreamResponse { - bundle: bundle__, - }) - } - } - deserializer.deserialize_struct("astria.bundle.v1alpha1.GetBundleStreamResponse", FIELDS, GeneratedVisitor) - } -} diff --git a/crates/astria-core/src/generated/astria.optimistic_execution.v1alpha1.rs b/crates/astria-core/src/generated/astria.optimistic_execution.v1alpha1.rs new file mode 100644 index 0000000000..606000e263 --- /dev/null +++ b/crates/astria-core/src/generated/astria.optimistic_execution.v1alpha1.rs @@ -0,0 +1,392 @@ +/// The "BaseBlock" is the information needed to simulate bundles on top of +/// a Sequencer block which may not have been committed yet. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BaseBlock { + /// This is the block hash for the proposed block. + #[prost(bytes = "bytes", tag = "1")] + pub sequencer_block_hash: ::prost::bytes::Bytes, + /// List of transactions to include in the new block. + #[prost(message, repeated, tag = "2")] + pub transactions: ::prost::alloc::vec::Vec< + super::super::sequencerblock::v1::RollupData, + >, + /// Timestamp to be used for new block. 
+ #[prost(message, optional, tag = "3")] + pub timestamp: ::core::option::Option<::pbjson_types::Timestamp>, +} +impl ::prost::Name for BaseBlock { + const NAME: &'static str = "BaseBlock"; + const PACKAGE: &'static str = "astria.optimistic_execution.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.optimistic_execution.v1alpha1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExecuteOptimisticBlockStreamRequest { + #[prost(message, optional, tag = "1")] + pub base_block: ::core::option::Option, +} +impl ::prost::Name for ExecuteOptimisticBlockStreamRequest { + const NAME: &'static str = "ExecuteOptimisticBlockStreamRequest"; + const PACKAGE: &'static str = "astria.optimistic_execution.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.optimistic_execution.v1alpha1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExecuteOptimisticBlockStreamResponse { + /// Metadata identifying the block resulting from executing a block. Includes number, hash, + /// parent hash and timestamp. + #[prost(message, optional, tag = "1")] + pub block: ::core::option::Option, + /// The base_sequencer_block_hash is the hash from the base sequencer block this block + /// is based on. This is used to associate an optimistic execution result with the hash + /// received once a sequencer block is committed. + #[prost(bytes = "bytes", tag = "2")] + pub base_sequencer_block_hash: ::prost::bytes::Bytes, +} +impl ::prost::Name for ExecuteOptimisticBlockStreamResponse { + const NAME: &'static str = "ExecuteOptimisticBlockStreamResponse"; + const PACKAGE: &'static str = "astria.optimistic_execution.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.optimistic_execution.v1alpha1.{}", Self::NAME) + } +} +/// Generated client implementations. +#[cfg(feature = "client")] +pub mod optimistic_execution_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct OptimisticExecutionServiceClient { + inner: tonic::client::Grpc, + } + impl OptimisticExecutionServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl OptimisticExecutionServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> OptimisticExecutionServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + OptimisticExecutionServiceClient::new( + InterceptedService::new(inner, interceptor), + ) + } + /// Compress requests with the given encoding. 
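[Editorial note: the `::prost::Name` impls above re-home the former `astria.bundle.v1alpha1` types under `astria.optimistic_execution.v1alpha1`, which also changes the fully-qualified gRPC method paths baked into the client and server code in this file. A minimal check, assuming the same hypothetical module path as the earlier sketches:]

use astria_core::generated::astria::optimistic_execution::v1alpha1::BaseBlock;

fn main() {
    // `full_name` is overridden by the generated impl above.
    assert_eq!(
        <BaseBlock as prost::Name>::full_name(),
        "astria.optimistic_execution.v1alpha1.BaseBlock",
    );
}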
+ /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Stream blocks from the Auctioneer to Geth for optimistic execution. Geth will stream back + /// metadata from the executed blocks. + pub async fn execute_optimistic_block_stream( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::ExecuteOptimisticBlockStreamRequest, + >, + ) -> std::result::Result< + tonic::Response< + tonic::codec::Streaming, + >, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/astria.optimistic_execution.v1alpha1.OptimisticExecutionService/ExecuteOptimisticBlockStream", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "astria.optimistic_execution.v1alpha1.OptimisticExecutionService", + "ExecuteOptimisticBlockStream", + ), + ); + self.inner.streaming(req, path, codec).await + } + } +} +/// Generated server implementations. +#[cfg(feature = "server")] +pub mod optimistic_execution_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with OptimisticExecutionServiceServer. + #[async_trait] + pub trait OptimisticExecutionService: Send + Sync + 'static { + /// Server streaming response type for the ExecuteOptimisticBlockStream method. + type ExecuteOptimisticBlockStreamStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result< + super::ExecuteOptimisticBlockStreamResponse, + tonic::Status, + >, + > + + Send + + 'static; + /// Stream blocks from the Auctioneer to Geth for optimistic execution. Geth will stream back + /// metadata from the executed blocks. 
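An editorial aside: a minimal sketch of how a caller might drive the generated client above, assuming astria-core is built with its `client` feature and using a hypothetical local endpoint (the module path follows the `generated/mod.rs` layout added later in this diff):

    use astria_core::generated::astria::optimistic_execution::v1alpha1::{
        optimistic_execution_service_client::OptimisticExecutionServiceClient,
        BaseBlock,
        ExecuteOptimisticBlockStreamRequest,
    };

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Hypothetical address; the real endpoint is the rollup's execution node.
        let mut client =
            OptimisticExecutionServiceClient::connect("http://127.0.0.1:50051").await?;

        // A one-shot request stream; a real caller would keep feeding proposed
        // blocks as the sequencer produces them.
        let requests = tokio_stream::iter([ExecuteOptimisticBlockStreamRequest {
            base_block: Some(BaseBlock {
                sequencer_block_hash: vec![0u8; 32].into(),
                transactions: vec![],
                timestamp: None,
            }),
        }]);

        let mut responses = client
            .execute_optimistic_block_stream(requests)
            .await?
            .into_inner();

        while let Some(resp) = responses.message().await? {
            println!("executed on top of {:?}", resp.base_sequencer_block_hash);
        }
        Ok(())
    }

tonic accepts any `Stream` of request messages here, so a long-lived caller would more likely hand in a channel-backed stream than `tokio_stream::iter`.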
+ async fn execute_optimistic_block_stream( + self: std::sync::Arc, + request: tonic::Request< + tonic::Streaming, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct OptimisticExecutionServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl OptimisticExecutionServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> + for OptimisticExecutionServiceServer + where + T: OptimisticExecutionService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/astria.optimistic_execution.v1alpha1.OptimisticExecutionService/ExecuteOptimisticBlockStream" => { + #[allow(non_camel_case_types)] + struct ExecuteOptimisticBlockStreamSvc< + T: OptimisticExecutionService, + >( + pub Arc, + ); + impl< + T: OptimisticExecutionService, + > tonic::server::StreamingService< + super::ExecuteOptimisticBlockStreamRequest, + > for ExecuteOptimisticBlockStreamSvc { + type Response = super::ExecuteOptimisticBlockStreamResponse; + type ResponseStream = T::ExecuteOptimisticBlockStreamStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::execute_optimistic_block_stream( + inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ExecuteOptimisticBlockStreamSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for OptimisticExecutionServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService + for OptimisticExecutionServiceServer { + const NAME: &'static str = "astria.optimistic_execution.v1alpha1.OptimisticExecutionService"; + } +} diff --git a/crates/astria-core/src/generated/astria.optimistic_execution.v1alpha1.serde.rs b/crates/astria-core/src/generated/astria.optimistic_execution.v1alpha1.serde.rs new file mode 100644 index 
0000000000..42b72d4e0f --- /dev/null +++ b/crates/astria-core/src/generated/astria.optimistic_execution.v1alpha1.serde.rs @@ -0,0 +1,333 @@ +impl serde::Serialize for BaseBlock { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.sequencer_block_hash.is_empty() { + len += 1; + } + if !self.transactions.is_empty() { + len += 1; + } + if self.timestamp.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.optimistic_execution.v1alpha1.BaseBlock", len)?; + if !self.sequencer_block_hash.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("sequencerBlockHash", pbjson::private::base64::encode(&self.sequencer_block_hash).as_str())?; + } + if !self.transactions.is_empty() { + struct_ser.serialize_field("transactions", &self.transactions)?; + } + if let Some(v) = self.timestamp.as_ref() { + struct_ser.serialize_field("timestamp", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for BaseBlock { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "sequencer_block_hash", + "sequencerBlockHash", + "transactions", + "timestamp", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + SequencerBlockHash, + Transactions, + Timestamp, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "sequencerBlockHash" | "sequencer_block_hash" => Ok(GeneratedField::SequencerBlockHash), + "transactions" => Ok(GeneratedField::Transactions), + "timestamp" => Ok(GeneratedField::Timestamp), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = BaseBlock; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.optimistic_execution.v1alpha1.BaseBlock") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut sequencer_block_hash__ = None; + let mut transactions__ = None; + let mut timestamp__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::SequencerBlockHash => { + if sequencer_block_hash__.is_some() { + return Err(serde::de::Error::duplicate_field("sequencerBlockHash")); + } + sequencer_block_hash__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + GeneratedField::Transactions => { + if transactions__.is_some() { + return Err(serde::de::Error::duplicate_field("transactions")); + } + transactions__ = Some(map_.next_value()?); + } + GeneratedField::Timestamp => { + if timestamp__.is_some() { + return Err(serde::de::Error::duplicate_field("timestamp")); + } + timestamp__ = map_.next_value()?; + } + } + } + Ok(BaseBlock { + sequencer_block_hash: sequencer_block_hash__.unwrap_or_default(), + transactions: transactions__.unwrap_or_default(), + timestamp: timestamp__, + }) + } + } + deserializer.deserialize_struct("astria.optimistic_execution.v1alpha1.BaseBlock", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for ExecuteOptimisticBlockStreamRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.base_block.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.optimistic_execution.v1alpha1.ExecuteOptimisticBlockStreamRequest", len)?; + if let Some(v) = self.base_block.as_ref() { + struct_ser.serialize_field("baseBlock", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for ExecuteOptimisticBlockStreamRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "base_block", + "baseBlock", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + BaseBlock, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "baseBlock" | "base_block" => Ok(GeneratedField::BaseBlock), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = ExecuteOptimisticBlockStreamRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.optimistic_execution.v1alpha1.ExecuteOptimisticBlockStreamRequest") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut base_block__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::BaseBlock => { + if base_block__.is_some() { + return Err(serde::de::Error::duplicate_field("baseBlock")); + } + base_block__ = map_.next_value()?; + } + } + } + Ok(ExecuteOptimisticBlockStreamRequest { + base_block: base_block__, + }) + } + } + deserializer.deserialize_struct("astria.optimistic_execution.v1alpha1.ExecuteOptimisticBlockStreamRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for ExecuteOptimisticBlockStreamResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.block.is_some() { + len += 1; + } + if !self.base_sequencer_block_hash.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.optimistic_execution.v1alpha1.ExecuteOptimisticBlockStreamResponse", len)?; + if let Some(v) = self.block.as_ref() { + struct_ser.serialize_field("block", v)?; + } + if !self.base_sequencer_block_hash.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("baseSequencerBlockHash", pbjson::private::base64::encode(&self.base_sequencer_block_hash).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for ExecuteOptimisticBlockStreamResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "block", + "base_sequencer_block_hash", + "baseSequencerBlockHash", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Block, + BaseSequencerBlockHash, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "block" => Ok(GeneratedField::Block), + "baseSequencerBlockHash" | "base_sequencer_block_hash" => Ok(GeneratedField::BaseSequencerBlockHash), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = ExecuteOptimisticBlockStreamResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.optimistic_execution.v1alpha1.ExecuteOptimisticBlockStreamResponse") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut block__ = None; + let mut base_sequencer_block_hash__ = None; + while let Some(k) = map_.next_key()? 
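Stepping outside the generated code for a moment: these pbjson impls give the messages a JSON form with camelCase keys (the original snake_case names are also accepted on input, per the `FIELDS` aliases) and base64-encoded bytes. A small illustration, assuming astria-core's `serde` feature:

    use astria_core::generated::astria::optimistic_execution::v1alpha1::BaseBlock;

    fn main() {
        // "AQID" is base64 for the bytes [1, 2, 3].
        let a: BaseBlock = serde_json::from_str(r#"{"sequencerBlockHash":"AQID"}"#).unwrap();
        let b: BaseBlock = serde_json::from_str(r#"{"sequencer_block_hash":"AQID"}"#).unwrap();
        assert_eq!(a, b);
        assert_eq!(&a.sequencer_block_hash[..], &[1, 2, 3]);

        // Unset and empty fields are skipped on output, so only the hash is emitted.
        assert_eq!(
            serde_json::to_string(&a).unwrap(),
            r#"{"sequencerBlockHash":"AQID"}"#
        );
    }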
{ + match k { + GeneratedField::Block => { + if block__.is_some() { + return Err(serde::de::Error::duplicate_field("block")); + } + block__ = map_.next_value()?; + } + GeneratedField::BaseSequencerBlockHash => { + if base_sequencer_block_hash__.is_some() { + return Err(serde::de::Error::duplicate_field("baseSequencerBlockHash")); + } + base_sequencer_block_hash__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + } + } + Ok(ExecuteOptimisticBlockStreamResponse { + block: block__, + base_sequencer_block_hash: base_sequencer_block_hash__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("astria.optimistic_execution.v1alpha1.ExecuteOptimisticBlockStreamResponse", FIELDS, GeneratedVisitor) + } +} diff --git a/crates/astria-core/src/generated/mod.rs b/crates/astria-core/src/generated/mod.rs index 1fef28c5e7..4c1c37e154 100644 --- a/crates/astria-core/src/generated/mod.rs +++ b/crates/astria-core/src/generated/mod.rs @@ -41,14 +41,14 @@ pub mod astria_vendored { #[path = ""] pub mod astria { #[path = ""] - pub mod bundle { + pub mod auction { pub mod v1alpha1 { - include!("astria.bundle.v1alpha1.rs"); + include!("astria.auction.v1alpha1.rs"); #[cfg(feature = "serde")] mod _serde_impl { use super::*; - include!("astria.bundle.v1alpha1.serde.rs"); + include!("astria.auction.v1alpha1.serde.rs"); } } } @@ -66,6 +66,18 @@ pub mod astria { } } + pub mod optimistic_execution { + pub mod v1alpha1 { + include!("astria.optimistic_execution.v1alpha1.rs"); + + #[cfg(feature = "serde")] + mod _serde_impl { + use super::*; + include!("astria.optimistic_execution.v1alpha1.serde.rs"); + } + } + } + #[path = ""] pub mod primitive { pub mod v1 { diff --git a/crates/astria-core/src/sequencerblock/mod.rs b/crates/astria-core/src/sequencerblock/mod.rs index a3a6d96c3f..748868c71a 100644 --- a/crates/astria-core/src/sequencerblock/mod.rs +++ b/crates/astria-core/src/sequencerblock/mod.rs @@ -1 +1,2 @@ +pub mod optimistic; pub mod v1; diff --git a/crates/astria-core/src/sequencerblock/optimistic/mod.rs b/crates/astria-core/src/sequencerblock/optimistic/mod.rs new file mode 100644 index 0000000000..32a5a9d4fd --- /dev/null +++ b/crates/astria-core/src/sequencerblock/optimistic/mod.rs @@ -0,0 +1 @@ +pub mod v1alpha1; diff --git a/crates/astria-core/src/sequencerblock/optimistic/v1alpha1.rs b/crates/astria-core/src/sequencerblock/optimistic/v1alpha1.rs new file mode 100644 index 0000000000..c1e8d5b09c --- /dev/null +++ b/crates/astria-core/src/sequencerblock/optimistic/v1alpha1.rs @@ -0,0 +1,84 @@ +use bytes::Bytes; + +use crate::{ + generated::astria::sequencerblock::optimistic::v1alpha1 as raw, + sequencerblock::v1::block, + Protobuf, +}; + +#[derive(Debug, thiserror::Error)] +#[error(transparent)] +pub struct SequencerBlockCommitError(SequencerBlockCommitErrorKind); + +impl SequencerBlockCommitError { + fn block_hash(source: block::HashFromSliceError) -> Self { + Self(SequencerBlockCommitErrorKind::BlockHash { + source, + }) + } +} + +#[derive(Debug, thiserror::Error)] +enum SequencerBlockCommitErrorKind { + #[error("failed to read .block_hash field as sequencer block hash")] + BlockHash { source: block::HashFromSliceError }, +} + +#[derive(Clone, Debug)] +pub struct SequencerBlockCommit { + height: u64, + block_hash: block::Hash, +} + +impl SequencerBlockCommit { + #[must_use] + pub fn new(height: u64, block_hash: block::Hash) -> Self { + Self { + height, + block_hash, + } + } + + #[must_use] + pub fn height(&self) -> u64 { + self.height + } + + #[must_use] + pub fn 
block_hash(&self) -> &block::Hash { + &self.block_hash + } +} + +impl From for raw::SequencerBlockCommit { + fn from(value: SequencerBlockCommit) -> Self { + value.to_raw() + } +} + +impl Protobuf for SequencerBlockCommit { + type Error = SequencerBlockCommitError; + type Raw = raw::SequencerBlockCommit; + + fn try_from_raw_ref(raw: &Self::Raw) -> Result { + let Self::Raw { + height, + block_hash, + } = raw; + + let block_hash = + block::Hash::try_from(&**block_hash).map_err(SequencerBlockCommitError::block_hash)?; + + Ok(SequencerBlockCommit { + height: *height, + block_hash, + }) + } + + fn to_raw(&self) -> Self::Raw { + raw::SequencerBlockCommit { + height: self.height(), + block_hash: Bytes::copy_from_slice(self.block_hash.as_bytes()), + } + } +} diff --git a/crates/astria-core/src/sequencerblock/v1/block.rs b/crates/astria-core/src/sequencerblock/v1/block.rs index bbed12291d..692b602094 100644 --- a/crates/astria-core/src/sequencerblock/v1/block.rs +++ b/crates/astria-core/src/sequencerblock/v1/block.rs @@ -38,7 +38,7 @@ use crate::{ Transaction, TransactionError, }, - Protobuf as _, + Protobuf, }; #[derive(Debug, thiserror::Error)] @@ -1358,6 +1358,27 @@ impl FilteredSequencerBlock { } } +impl Protobuf for FilteredSequencerBlock { + type Error = FilteredSequencerBlockError; + type Raw = raw::FilteredSequencerBlock; + + fn try_from_raw_ref(raw: &Self::Raw) -> Result { + Self::try_from_raw(raw.clone()) + } + + fn to_raw(&self) -> Self::Raw { + self.clone().into_raw() + } + + fn try_from_raw(raw: Self::Raw) -> Result { + Self::try_from_raw(raw) + } + + fn into_raw(self) -> Self::Raw { + self.into_raw() + } +} + #[derive(Debug, thiserror::Error)] #[error(transparent)] pub struct FilteredSequencerBlockError(FilteredSequencerBlockErrorKind); diff --git a/crates/astria-sequencer/CHANGELOG.md b/crates/astria-sequencer/CHANGELOG.md index 80192e36fe..8335c864f1 100644 --- a/crates/astria-sequencer/CHANGELOG.md +++ b/crates/astria-sequencer/CHANGELOG.md @@ -9,6 +9,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- Implement `astria.sequencerblock.optimistic.v1alpha1.OptimisticBlockService` [#1839](https://github.com/astriaorg/astria/pull/1839). + ### Changed - Bump MSRV to 1.83.0 [#1857](https://github.com/astriaorg/astria/pull/1857). @@ -22,6 +26,14 @@ of signer [#1905](https://github.com/astriaorg/astria/pull/1905). - Add more thorough unit tests for all actions [#1916](https://github.com/astriaorg/astria/pull/1916). - Implement `BridgeTransfer` action [#1934](https://github.com/astriaorg/astria/pull/1934). 
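Stepping back to the new `SequencerBlockCommit` wrapper introduced above, a quick round-trip sketch of its `Protobuf` impl (type paths as added in this diff; `block::Hash::new` taking a 32-byte array is assumed from its use elsewhere in the patch):

    use astria_core::{
        generated::astria::sequencerblock::optimistic::v1alpha1 as raw,
        sequencerblock::{optimistic::v1alpha1::SequencerBlockCommit, v1::block},
        Protobuf as _,
    };

    fn main() {
        // A stand-in 32-byte hash; real values come from committed CometBFT blocks.
        let commit = SequencerBlockCommit::new(42, block::Hash::new([7u8; 32]));

        let raw: raw::SequencerBlockCommit = commit.into();
        assert_eq!(raw.height, 42);
        assert_eq!(&raw.block_hash[..], &[7u8; 32][..]);

        // Conversion back only fails if the hash is not exactly 32 bytes long.
        let commit = SequencerBlockCommit::try_from_raw_ref(&raw).expect("32-byte hash");
        assert_eq!(commit.height(), 42);
    }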
+### Removed
+
+- Remove ASTRIA_SEQUENCER_LISTEN_ADDR config variable [#1877](https://github.com/astriaorg/astria/pull/1877).
+
+### Added
+
+- Add ASTRIA_SEQUENCER_ABCI_LISTEN_URL config variable [#1877](https://github.com/astriaorg/astria/pull/1877).
+
 ## [1.0.0] - 2024-10-25
 
 ### Changed
diff --git a/crates/astria-sequencer/Cargo.toml b/crates/astria-sequencer/Cargo.toml
index 18458c8907..a8f1ac36bd 100644
--- a/crates/astria-sequencer/Cargo.toml
+++ b/crates/astria-sequencer/Cargo.toml
@@ -42,6 +42,7 @@ tower = "0.4"
 tower-abci = "0.12.0"
 tower-actor = "0.1.0"
 tower-http = { version = "0.4", features = ["cors"] }
+url = "2.5.4"
 
 async-trait = { workspace = true }
 base64 = { workspace = true }
@@ -50,6 +51,7 @@ divan = { workspace = true, optional = true }
 futures = { workspace = true }
 hex = { workspace = true, features = ["serde"] }
 ibc-types = { workspace = true, features = ["with_serde"] }
+itertools = { workspace = true }
 penumbra-ibc = { workspace = true, features = ["component", "rpc"] }
 penumbra-proto = { workspace = true }
 penumbra-tower-trace = { workspace = true }
@@ -64,6 +66,7 @@ tendermint-proto = { workspace = true }
 tendermint = { workspace = true }
 thiserror = { workspace = true }
 tokio = { workspace = true, features = ["rt", "tracing"] }
+tokio-util = { workspace = true, features = ["rt"] }
 tonic = { workspace = true }
 tracing = { workspace = true }
diff --git a/crates/astria-sequencer/local.env.example b/crates/astria-sequencer/local.env.example
index 7a794e616a..e1ba1aa382 100644
--- a/crates/astria-sequencer/local.env.example
+++ b/crates/astria-sequencer/local.env.example
@@ -1,7 +1,9 @@
 # Socket address to listen for ABCI requests from cometbft.
-# This address corresponds to the `--proxy_app "tcp://<host>:<port>"`,
-# where `tcp://127.0.0.1:26658` is comebft's default.
-ASTRIA_SEQUENCER_LISTEN_ADDR="127.0.0.1:26658"
+# This address corresponds to the `--proxy_app "<url>"`,
+# where `tcp://127.0.0.1:26658` is cometbft's default. Can also be configured to
+# use a unix address, i.e. `unix:///socket/astria_abci.sock`. A unix socket will
+# generally give much higher performance.
+ASTRIA_SEQUENCER_ABCI_LISTEN_URL="tcp://127.0.0.1:26658"
 
 # Path to rocksdb
 ASTRIA_SEQUENCER_DB_FILEPATH="/tmp/astria_db"
@@ -33,6 +35,9 @@ ASTRIA_SEQUENCER_METRICS_HTTP_LISTENER_ADDR="127.0.0.1:9000"
 # `ASTRIA_SEQUENCER_FORCE_STDOUT` is set to `true`.
 ASTRIA_SEQUENCER_PRETTY_PRINT=false
 
+# Disables streaming optimistic blocks to clients.
+ASTRIA_SEQUENCER_NO_OPTIMISTIC_BLOCKS=false
+
 # If set to any non-empty value removes ANSI escape characters from the pretty
 # printed output. Note that this does nothing unless `ASTRIA_SEQUENCER_PRETTY_PRINT`
 # is set to `true`.
diff --git a/crates/astria-sequencer/src/app/event_bus.rs b/crates/astria-sequencer/src/app/event_bus.rs
new file mode 100644
index 0000000000..f9aa189c51
--- /dev/null
+++ b/crates/astria-sequencer/src/app/event_bus.rs
@@ -0,0 +1,156 @@
+use std::sync::Arc;
+
+use astria_core::sequencerblock::v1::SequencerBlock;
+use astria_eyre::eyre::WrapErr as _;
+use tendermint::abci::request::FinalizeBlock;
+use tokio::sync::watch::{
+    Receiver,
+    Sender,
+};
+
+/// `EventReceiver` contains the receiver side of the events sent by the Sequencer App.
+/// The listeners of the events can receive the latest value of the event by calling the
+/// `receive` method.
+#[derive(Clone)]
+pub(crate) struct EventReceiver<T> {
+    /// The receiver side of the watch which is read for the latest value of the event.
+    /// We receive an `Option<T>` because the sender side of the watch is designed to send
+    /// `Option<T>` values. This allows the sender to send objects which do not have a `Default`
+    /// implementation.
+    inner: Receiver<Option<T>>,
+}
+
+impl<T> EventReceiver<T>
+where
+    T: Clone,
+{
+    /// Returns the latest value of the event, waiting for the value to change if it hasn't
+    /// already.
+    pub(crate) async fn receive(&mut self) -> astria_eyre::Result<T> {
+        // We want to only receive the latest value through the receiver, so we wait for the
+        // current value in the watch to change before we return it.
+        self.inner
+            .changed()
+            .await
+            .wrap_err("error waiting for latest event")?;
+        Ok(self.inner.borrow_and_update().clone().expect(
+            "receivers are only created through tokio::sync::watch::Sender::subscribe, which
+            means that the initial value of None is marked as seen. Subsequent updates always
+            set the value to Some(T). If this panic message is seen it means that either:
+            1) the receiver in the watch::channel call was used instead of being dropped;
+            2) the sender illegally set the value to None;
+            3) the value in the channel was marked as changed/unseen; or
+            4) the behavior of the tokio watch channel changed fundamentally.
+            All of these would violate the invariants of the event bus.",
+        ))
+    }
+}
+
+/// `EventSender` contains the sender side of the events sent by the Sequencer App.
+struct EventSender<T> {
+    // A watch channel that always starts unset. Once set, the value in the channel will never
+    // be unset.
+    inner: Sender<Option<T>>,
+}
+
+impl<T> EventSender<T> {
+    /// Creates a new sender for an event `T`.
+    fn new() -> Self {
+        // XXX: the receiver must be dropped so that the only entrypoint to the subscription is
+        // Sender::subscribe. This is to ensure that a value of `Option<T>` can be unwrapped
+        // and the `Option` remains an implementation detail.
+        let (sender, _) = tokio::sync::watch::channel(None);
+        Self {
+            inner: sender,
+        }
+    }
+
+    /// Creates a receiver for events `T`.
+    fn subscribe(&self) -> EventReceiver<T> {
+        EventReceiver {
+            inner: self.inner.subscribe(),
+        }
+    }
+
+    /// Sends the event to all subscribers.
+    fn send(&self, event: T) {
+        self.inner.send_replace(Some(event));
+    }
+}
+
+/// A subscription to the event bus.
+///
+/// Allows subscribing to specific events like [`Self::process_proposal_blocks`]
+/// and [`Self::finalized_blocks`].
+pub(crate) struct EventBusSubscription {
+    process_proposal_blocks: EventReceiver<Arc<SequencerBlock>>,
+    finalized_blocks: EventReceiver<Arc<FinalizeBlock>>,
+}
+
+impl EventBusSubscription {
+    /// Receive sequencer blocks after the process proposal phase.
+    ///
+    /// The returned [`EventReceiver`] only yields events sent after this call; an event
+    /// already sitting in the channel is ignored.
+    pub(crate) fn process_proposal_blocks(&self) -> EventReceiver<Arc<SequencerBlock>> {
+        let mut receiver = self.process_proposal_blocks.clone();
+        receiver.inner.mark_unchanged();
+        receiver
+    }
+
+    /// Receive finalized blocks.
+    ///
+    /// The returned [`EventReceiver`] only yields events sent after this call; an event
+    /// already sitting in the channel is ignored.
+    pub(crate) fn finalized_blocks(&self) -> EventReceiver<Arc<FinalizeBlock>> {
+        let mut receiver = self.finalized_blocks.clone();
+        receiver.inner.mark_unchanged();
+        receiver
+    }
+}
+
+/// The Sequencer `EventBus` is used to send and receive events between different components of the
+/// sequencer.
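The `Option`-in-a-watch-channel trick above is subtle, so here is the same pattern in miniature with plain tokio (a standalone sketch, not the sequencer's types):

    use tokio::sync::watch;

    #[tokio::main]
    async fn main() {
        // Like EventSender::new: create the channel unset and immediately drop
        // the initial receiver, so subscribers can only be minted via
        // subscribe(), which marks the initial None as already seen.
        let (tx, _) = watch::channel::<Option<u64>>(None);
        let mut rx = tx.subscribe();

        // Like EventSender::send: the value is only ever replaced with Some.
        tx.send_replace(Some(1));

        // Like EventReceiver::receive: wait for a change, then unwrap the Some.
        rx.changed().await.expect("sender is alive");
        assert_eq!(rx.borrow_and_update().clone(), Some(1));
    }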
Components of Sequencer can subscribe to the `EventBus` via the `subscribe` method +/// which returns a [`EventBusSubscription`] objects that contains receivers of various events which +/// are of type [`EventReceiver`]. +/// +/// The `EventBus` is implemented using [`tokio::sync::watch`] which allows for multiple receivers +/// to receive the event at any given time. +pub(super) struct EventBus { + // Sends a process proposal block event to the subscribers. The event is sent in the form of a + // sequencer block which is created during the process proposal block phase. + process_proposal_block_sender: EventSender>, + // Sends a finalized block event to the subscribers. The event is sent in the form of the + // finalize block abci request. + finalized_block_sender: EventSender>, +} + +impl EventBus { + /// Instantiates a new event bus. + pub(super) fn new() -> Self { + let process_proposal_block_sender = EventSender::new(); + let finalized_block_sender = EventSender::new(); + + Self { + process_proposal_block_sender, + finalized_block_sender, + } + } + + /// Subscribe to the event bus. + pub(super) fn subscribe(&self) -> EventBusSubscription { + EventBusSubscription { + process_proposal_blocks: self.process_proposal_block_sender.subscribe(), + finalized_blocks: self.finalized_block_sender.subscribe(), + } + } + + /// Sends a process proposal block event over the event bus. + pub(super) fn send_process_proposal_block(&self, sequencer_block: Arc) { + self.process_proposal_block_sender.send(sequencer_block); + } + + /// Sends a finalized block event over the event bus. + pub(super) fn send_finalized_block(&self, sequencer_block_commit: Arc) { + self.finalized_block_sender.send(sequencer_block_commit); + } +} diff --git a/crates/astria-sequencer/src/app/mod.rs b/crates/astria-sequencer/src/app/mod.rs index 3dd9e29c49..ec0ebe60be 100644 --- a/crates/astria-sequencer/src/app/mod.rs +++ b/crates/astria-sequencer/src/app/mod.rs @@ -2,6 +2,7 @@ pub(crate) mod benchmark_and_test_utils; #[cfg(feature = "benchmark")] mod benchmarks; +pub(crate) mod event_bus; mod state_ext; pub(crate) mod storage; #[cfg(test)] @@ -97,6 +98,10 @@ use crate::{ ActionHandler as _, }, address::StateWriteExt as _, + app::event_bus::{ + EventBus, + EventBusSubscription, + }, assets::StateWriteExt as _, authority::{ component::{ @@ -233,6 +238,9 @@ pub(crate) struct App { )] app_hash: AppHash, + // the sequencer event bus, used to send and receive events between components within the app + event_bus: EventBus, + metrics: &'static Metrics, } @@ -259,6 +267,8 @@ impl App { // there should be no unexpected copies elsewhere. let state = Arc::new(StateDelta::new(snapshot)); + let event_bus = EventBus::new(); + Ok(Self { state, mempool, @@ -267,10 +277,15 @@ impl App { recost_mempool: false, write_batch: None, app_hash, + event_bus, metrics, }) } + pub(crate) fn subscribe_to_events(&self) -> EventBusSubscription { + self.event_bus.subscribe() + } + #[instrument(name = "App:init_chain", skip_all, err)] pub(crate) async fn init_chain( &mut self, @@ -435,16 +450,21 @@ impl App { bail!("execution results must be present after executing transactions") }; - self.post_execute_transactions( - process_proposal.hash, - process_proposal.height, - process_proposal.time, - process_proposal.proposer_address, - process_proposal.txs, - tx_results, - ) - .await - .wrap_err("failed to run post execute transactions handler")?; + // FIXME - avoid duplicate calls to post_execute_transactions. 
refer to: https://github.com/astriaorg/astria/issues/1835 + let sequencer_block = self + .post_execute_transactions( + process_proposal.hash, + process_proposal.height, + process_proposal.time, + process_proposal.proposer_address, + process_proposal.txs, + tx_results, + ) + .await + .wrap_err("failed to run post execute transactions handler")?; + + self.event_bus + .send_process_proposal_block(Arc::new(sequencer_block)); return Ok(()); } @@ -535,16 +555,22 @@ impl App { ); self.executed_proposal_hash = process_proposal.hash; - self.post_execute_transactions( - process_proposal.hash, - process_proposal.height, - process_proposal.time, - process_proposal.proposer_address, - process_proposal.txs, - tx_results, - ) - .await - .wrap_err("failed to run post execute transactions handler")?; + + // FIXME - avoid duplicate calls to post_execute_transactions. refer to: https://github.com/astriaorg/astria/issues/1835 + let sequencer_block = self + .post_execute_transactions( + process_proposal.hash, + process_proposal.height, + process_proposal.time, + process_proposal.proposer_address, + process_proposal.txs, + tx_results, + ) + .await + .wrap_err("failed to run post execute transactions handler")?; + + self.event_bus + .send_process_proposal_block(Arc::new(sequencer_block)); Ok(()) } @@ -748,6 +774,7 @@ impl App { /// `SequencerBlock`. /// /// this must be called after a block's transactions are executed. + /// FIXME: don't return sequencer block but grab the block from state delta https://github.com/astriaorg/astria/issues/1436 #[instrument(name = "App::post_execute_transactions", skip_all, err(level = Level::WARN))] async fn post_execute_transactions( &mut self, @@ -757,7 +784,7 @@ impl App { proposer_address: account::Id, txs: Vec, tx_results: Vec, - ) -> Result<()> { + ) -> Result { let Hash::Sha256(block_hash) = block_hash else { bail!("block hash is empty; this should not occur") }; @@ -796,6 +823,7 @@ impl App { finalize_block_tx_results.extend(std::iter::repeat(ExecTxResult::default()).take(2)); finalize_block_tx_results.extend(tx_results); + // FIXME - avoid duplicate calls to post_execute_transactions. refer to: https://github.com/astriaorg/astria/issues/1835 let sequencer_block = SequencerBlock::try_from_block_info_and_data( block_hash, chain_id, @@ -807,7 +835,7 @@ impl App { ) .wrap_err("failed to convert block info and data to SequencerBlock")?; state_tx - .put_sequencer_block(sequencer_block) + .put_sequencer_block(sequencer_block.clone()) .wrap_err("failed to write sequencer block to state")?; let result = PostTransactionExecutionResult { @@ -823,7 +851,7 @@ impl App { // there should be none anyways. let _ = self.apply(state_tx); - Ok(()) + Ok(sequencer_block) } /// Executes the given block, but does not write it to disk. @@ -850,18 +878,20 @@ impl App { rollup IDs commitment" ); + // FIXME: refactor to avoid cloning the finalize block + let finalize_block_arc = Arc::new(finalize_block.clone()); + // When the hash is not empty, we have already executed and cached the results if self.executed_proposal_hash.is_empty() { // convert tendermint id to astria address; this assumes they are // the same address, as they are both ed25519 keys let proposer_address = finalize_block.proposer_address; - let height = finalize_block.height; let time = finalize_block.time; // we haven't executed anything yet, so set up the state for execution. 
let block_data = BlockData { misbehavior: finalize_block.misbehavior, - height, + height: finalize_block.height, time, next_validators_hash: finalize_block.next_validators_hash, proposer_address, @@ -905,7 +935,7 @@ impl App { self.post_execute_transactions( finalize_block.hash, - height, + finalize_block.height, time, proposer_address, finalize_block.txs, @@ -935,7 +965,7 @@ impl App { .prepare_commit(storage) .await .wrap_err("failed to prepare commit")?; - let finalize_block = abci::response::FinalizeBlock { + let finalize_block_response = abci::response::FinalizeBlock { events: post_transaction_execution_result.events, validator_updates: post_transaction_execution_result.validator_updates, consensus_param_updates: post_transaction_execution_result.consensus_param_updates, @@ -943,7 +973,9 @@ impl App { tx_results: post_transaction_execution_result.tx_results, }; - Ok(finalize_block) + self.event_bus.send_finalized_block(finalize_block_arc); + + Ok(finalize_block_response) } #[instrument(skip_all, err(level = Level::WARN))] diff --git a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_at_genesis.snap b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_at_genesis.snap index 9bdacbc6f9..15352db3e9 100644 --- a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_at_genesis.snap +++ b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_at_genesis.snap @@ -3,36 +3,36 @@ source: crates/astria-sequencer/src/app/tests_breaking_changes.rs expression: app.app_hash.as_bytes() --- [ - 236, - 212, - 100, - 47, - 191, - 2, - 11, - 43, - 159, - 43, - 239, - 162, - 79, - 57, - 36, - 115, - 251, - 145, - 205, - 230, - 115, 163, - 142, - 124, - 154, - 22, - 225, - 211, - 113, - 50, - 182, - 221 + 247, + 139, + 47, + 78, + 129, + 169, + 19, + 217, + 165, + 120, + 82, + 190, + 249, + 77, + 186, + 153, + 51, + 213, + 253, + 37, + 38, + 99, + 100, + 91, + 245, + 28, + 150, + 61, + 214, + 212, + 12 ] diff --git a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_execute_every_action.snap b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_execute_every_action.snap index cfb9ba5103..2006530974 100644 --- a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_execute_every_action.snap +++ b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_execute_every_action.snap @@ -3,36 +3,36 @@ source: crates/astria-sequencer/src/app/tests_breaking_changes.rs expression: app.app_hash.as_bytes() --- [ - 44, - 87, - 189, - 6, - 158, - 176, - 254, - 184, - 219, - 17, - 65, - 250, + 195, + 205, 225, - 216, - 59, - 145, - 9, - 240, - 203, - 167, - 55, - 202, - 64, - 54, + 173, 118, - 241, - 219, - 128, - 230, - 125, - 104, - 164 + 201, + 149, + 122, + 173, + 117, + 237, + 146, + 148, + 114, + 152, + 59, + 68, + 60, + 33, + 65, + 41, + 154, + 249, + 85, + 76, + 183, + 32, + 108, + 175, + 88, + 197, + 63 ] diff --git a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_finalize_block.snap b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_finalize_block.snap index 5f4f794991..218d82f1f6 100644 --- 
a/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_finalize_block.snap +++ b/crates/astria-sequencer/src/app/snapshots/astria_sequencer__app__tests_breaking_changes__app_hash_finalize_block.snap @@ -3,36 +3,36 @@ source: crates/astria-sequencer/src/app/tests_breaking_changes.rs expression: app.app_hash.as_bytes() --- [ - 121, - 120, - 171, - 97, - 36, - 10, - 69, - 50, - 212, - 147, - 154, - 63, - 94, - 165, - 137, - 32, + 48, + 214, + 34, + 61, + 4, + 228, + 103, + 148, + 143, + 144, + 228, + 158, + 243, + 185, + 202, + 88, + 179, + 89, + 99, + 98, 113, - 38, - 52, - 70, - 119, - 110, + 240, + 167, + 127, + 88, + 153, + 200, 213, - 112, - 77, - 113, - 3, + 136, 197, - 212, - 95, - 149, - 52 + 103, + 12 ] diff --git a/crates/astria-sequencer/src/config.rs b/crates/astria-sequencer/src/config.rs index 00db5f637b..148ff9fd19 100644 --- a/crates/astria-sequencer/src/config.rs +++ b/crates/astria-sequencer/src/config.rs @@ -1,9 +1,14 @@ -use std::path::PathBuf; +use std::{ + net::SocketAddr, + path::PathBuf, + str::FromStr, +}; use serde::{ Deserialize, Serialize, }; +use url::Url; #[expect( clippy::struct_excessive_bools, @@ -13,7 +18,7 @@ use serde::{ #[derive(Debug, Deserialize, Serialize)] pub struct Config { /// The endpoint on which Sequencer will listen for ABCI requests - pub listen_addr: String, + pub abci_listen_url: AbciListenUrl, /// The path to penumbra storage db. pub db_filepath: PathBuf, /// Log level: debug, info, warn, or error @@ -32,20 +37,186 @@ pub struct Config { pub pretty_print: bool, /// The maximum number of transactions that can be parked in the mempool. pub mempool_parked_max_tx_count: usize, + /// Disables streaming optimistic blocks over grpc. + pub no_optimistic_blocks: bool, } impl config::Config for Config { const PREFIX: &'static str = "ASTRIA_SEQUENCER_"; } +#[derive(Debug)] +pub enum AbciListenUrl { + Tcp(SocketAddr), + Unix(PathBuf), +} + +impl std::fmt::Display for AbciListenUrl { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AbciListenUrl::Tcp(socket_addr) => write!(f, "tcp://{socket_addr}"), + AbciListenUrl::Unix(path) => write!(f, "unix://{}", path.display()), + } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum AbciListenUrlParseError { + #[error( + "parsed input as a tcp address `{parsed}`, but could not turn it into a socket address" + )] + TcpButBadSocketAddr { parsed: Url, source: std::io::Error }, + #[error( + "parsed input as a unix domain socket URL `{parsed}`, but could not turn it into a path" + )] + UnixButBadPath { parsed: Url }, + #[error( + "parsed input as `{parsed}`, but scheme `scheme` is not suppported; supported schemes are \ + tcp, unix" + )] + UnsupportedScheme { parsed: Url, scheme: String }, + #[error("failed parsing input as URL")] + Url { + #[from] + source: url::ParseError, + }, +} + +impl FromStr for AbciListenUrl { + type Err = AbciListenUrlParseError; + + fn from_str(s: &str) -> std::result::Result { + let abci_url = Url::parse(s)?; + + match abci_url.scheme() { + "tcp" => match abci_url.socket_addrs(|| None) { + Ok(mut socket_addrs) => { + let socket_addr = socket_addrs.pop().expect( + "the url crate is guaranteed to return vec with exactly one element \ + because it relies on std::net::ToSocketAddrs::to_socket_addr; if this is \ + no longer the case there was a breaking change in the url crate", + ); + Ok(Self::Tcp(socket_addr)) + } + Err(source) => Err(Self::Err::TcpButBadSocketAddr { + parsed: abci_url, + source, + }), + }, 
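An aside on the `tcp` arm above: it leans on the url crate resolving a tcp URL's authority through `std::net::ToSocketAddrs`, which yields exactly one address for an IP literal. A small illustration using only the url crate:

    use url::Url;

    fn main() {
        let url = Url::parse("tcp://127.0.0.1:26658").unwrap();
        // The expect() in the tcp arm relies on at least one address coming back.
        let addrs = url.socket_addrs(|| None).unwrap();
        assert_eq!(addrs[0].to_string(), "127.0.0.1:26658");
    }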
+ "unix" => { + if let Ok(path) = abci_url.to_file_path() { + Ok(Self::Unix(path)) + } else { + Err(Self::Err::UnixButBadPath { + parsed: abci_url, + }) + } + } + // If more options are added here will also need to update the server startup + // immediately below to support more than two protocols. + other => Err(Self::Err::UnsupportedScheme { + parsed: abci_url.clone(), + scheme: other.to_string(), + }), + } + } +} + +impl<'de> Deserialize<'de> for AbciListenUrl { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + let s = std::borrow::Cow::<'_, str>::deserialize(deserializer)?; + FromStr::from_str(&s).map_err(serde::de::Error::custom) + } +} + +impl Serialize for AbciListenUrl { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + serializer.collect_str(self) + } +} + #[cfg(test)] mod tests { use super::Config; const EXAMPLE_ENV: &str = include_str!("../local.env.example"); + use super::AbciListenUrl; + #[test] fn example_env_config_is_up_to_date() { config::tests::example_env_config_is_up_to_date::(EXAMPLE_ENV); } + + #[test] + fn unix_input_is_parsed_as_abci_listen_url() { + let expected = "/path/to/unix.sock"; + #[expect( + clippy::match_wildcard_for_single_variants, + reason = "intended to match all future variants because the test is only valid for a \ + single variant" + )] + match format!("unix://{expected}") + .parse::() + .unwrap() + { + AbciListenUrl::Unix(actual) => { + assert_eq!(AsRef::::as_ref(expected), actual.as_path(),); + } + other => panic!("expected AbciListenUrl::Unix, got {other:?}"), + } + } + + #[test] + fn tcp_input_is_parsed_as_abci_listen_url() { + let expected = "127.0.0.1:0"; + #[expect( + clippy::match_wildcard_for_single_variants, + reason = "intended to match all future variants because the test is only valid for a \ + single variant" + )] + match format!("tcp://{expected}") + .parse::() + .unwrap() + { + AbciListenUrl::Tcp(actual) => { + assert_eq!(expected, actual.to_string()); + } + other => panic!("expected AbciListenUrl, got {other:?}"), + } + } + + #[test] + fn tcp_listen_addr_format() { + assert_eq!( + "tcp://127.0.0.1:0", + &AbciListenUrl::Tcp(([127, 0, 0, 1], 0).into()).to_string() + ); + } + + #[test] + fn unix_listen_addr_format() { + assert_eq!( + "unix:///path/to/unix.sock", + &AbciListenUrl::Unix("/path/to/unix.sock".into()).to_string(), + ); + } + + // NOTE: the only genuine new error variant is AbciListenUrl. Tests for other error paths are + // not provided because they are fundamentally wrappers of url crate errors. + #[test] + fn http_is_not_valid_abci_listen_scheme() { + match "http://astria.org".parse::().unwrap_err() { + super::AbciListenUrlParseError::UnsupportedScheme { + scheme, .. 
+ } => assert_eq!("http", scheme), + other => panic!("expected AbciListenUrlParseError::UnsupportedScheme, got `{other:?}`"), + } + } } diff --git a/crates/astria-sequencer/src/fees/storage/values.rs b/crates/astria-sequencer/src/fees/storage/values.rs index 967ec0a248..d174da3204 100644 --- a/crates/astria-sequencer/src/fees/storage/values.rs +++ b/crates/astria-sequencer/src/fees/storage/values.rs @@ -39,7 +39,6 @@ enum ValueImpl { InitBridgeAccountFees(FeeComponents), BridgeLockFees(FeeComponents), BridgeUnlockFees(FeeComponents), - BridgeTransferFees(FeeComponents), BridgeSudoChangeFees(FeeComponents), IbcRelayFees(FeeComponents), ValidatorUpdateFees(FeeComponents), @@ -48,6 +47,7 @@ enum ValueImpl { IbcRelayerChangeFees(FeeComponents), IbcSudoChangeFees(FeeComponents), SudoAddressChangeFees(FeeComponents), + BridgeTransferFees(FeeComponents), } macro_rules! impl_from_for_fee_storage { diff --git a/crates/astria-sequencer/src/grpc/mod.rs b/crates/astria-sequencer/src/grpc/mod.rs index 2de987dd92..2ffbe7ad1b 100644 --- a/crates/astria-sequencer/src/grpc/mod.rs +++ b/crates/astria-sequencer/src/grpc/mod.rs @@ -1,8 +1,213 @@ -pub(crate) mod sequencer; -mod state_ext; -pub(crate) mod storage; +use std::{ + future::Future, + time::Duration, +}; +use astria_core::generated::astria::sequencerblock::v1::sequencer_service_server::SequencerServiceServer; +use astria_eyre::eyre; pub(crate) use state_ext::{ StateReadExt, StateWriteExt, }; +use tokio::{ + sync::oneshot, + task::JoinError, +}; +use tokio_util::{ + sync::CancellationToken, + task::JoinMap, +}; +use tracing::{ + error, + error_span, + info, + info_span, + Instrument as _, +}; + +use crate::{ + app::event_bus::EventBusSubscription, + grpc::sequencer::SequencerServer, + ibc::host_interface::AstriaHost, + mempool::Mempool, +}; + +pub(crate) mod optimistic; +pub(crate) mod sequencer; +mod state_ext; +pub(crate) mod storage; + +/// Time for the background tasks supporting gRPC services to shutdown gracefully before being +/// aborted. 
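To round off the config change: the accepted forms of `ASTRIA_SEQUENCER_ABCI_LISTEN_URL`, mirroring the unit tests above. The import path is an assumption; it only holds if the type is reachable from outside the crate.

    use astria_sequencer::config::AbciListenUrl; // path assumed

    fn main() {
        let tcp: AbciListenUrl = "tcp://127.0.0.1:26658".parse().unwrap();
        assert_eq!(tcp.to_string(), "tcp://127.0.0.1:26658");

        let unix: AbciListenUrl = "unix:///socket/astria_abci.sock".parse().unwrap();
        assert_eq!(unix.to_string(), "unix:///socket/astria_abci.sock");

        // Anything other than tcp:// or unix:// is rejected.
        assert!("http://astria.org".parse::<AbciListenUrl>().is_err());
    }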
+const SHUTDOWN_TIMEOUT: Duration = Duration::from_millis(1500); +const SHUTDOWN_SPAN: &str = "grpc_server_shutdown"; + +struct BackgroundTasks { + tasks: JoinMap<&'static str, ()>, + cancellation_token: CancellationToken, +} + +impl BackgroundTasks { + fn new() -> Self { + Self { + tasks: JoinMap::new(), + cancellation_token: CancellationToken::new(), + } + } + + fn abort_all(&mut self) { + self.tasks.abort_all(); + } + + fn cancel_all(&self) { + self.cancellation_token.cancel(); + } + + fn cancellation_token(&self) -> CancellationToken { + self.cancellation_token.child_token() + } + + fn display_running_tasks(&self) -> String { + use itertools::Itertools as _; + format!("[{}]", self.tasks.keys().format(",")) + } + + fn spawn(&mut self, key: &'static str, task: F) + where + F: Future, + F: Send + 'static, + { + self.tasks.spawn(key, task); + } + + async fn join_next(&mut self) -> Option<(&'static str, Result<(), JoinError>)> { + self.tasks.join_next().await + } +} + +pub(crate) async fn serve( + storage: cnidarium::Storage, + mempool: Mempool, + grpc_addr: std::net::SocketAddr, + no_optimistic_blocks: bool, + event_bus_subscription: EventBusSubscription, + shutdown_rx: oneshot::Receiver<()>, +) -> eyre::Result<(), tonic::transport::Error> { + use ibc_proto::ibc::core::{ + channel::v1::query_server::QueryServer as ChannelQueryServer, + client::v1::query_server::QueryServer as ClientQueryServer, + connection::v1::query_server::QueryServer as ConnectionQueryServer, + }; + use penumbra_tower_trace::remote_addr; + use tower_http::cors::CorsLayer; + + let ibc = penumbra_ibc::component::rpc::IbcQuery::::new(storage.clone()); + let sequencer_api = SequencerServer::new(storage.clone(), mempool); + let cors_layer: CorsLayer = CorsLayer::permissive(); + + let mut background_tasks = BackgroundTasks::new(); + let optimistic_block_service = if no_optimistic_blocks { + None + } else { + let (service, task) = optimistic::new( + event_bus_subscription, + background_tasks.cancellation_token(), + ); + background_tasks.spawn("OPTIMISTIC", task.run()); + Some(service) + }; + + // TODO: setup HTTPS? + let grpc_server = tonic::transport::Server::builder() + .trace_fn(|req| { + if let Some(remote_addr) = remote_addr(req) { + let addr = remote_addr.to_string(); + tracing::error_span!("grpc", addr) + } else { + tracing::error_span!("grpc") + } + }) + // (from Penumbra) Allow HTTP/1, which will be used by grpc-web connections. + // This is particularly important when running locally, as gRPC + // typically uses HTTP/2, which requires HTTPS. Accepting HTTP/2 + // allows local applications such as web browsers to talk to pd. + .accept_http1(true) + // (from Penumbra) Add permissive CORS headers, so pd's gRPC services are accessible + // from arbitrary web contexts, including from localhost. + .layer(cors_layer) + .add_service(ClientQueryServer::new(ibc.clone())) + .add_service(ChannelQueryServer::new(ibc.clone())) + .add_service(ConnectionQueryServer::new(ibc.clone())) + .add_service(SequencerServiceServer::new(sequencer_api)) + .add_optional_service(optimistic_block_service); + + info!(grpc_addr = grpc_addr.to_string(), "starting grpc server"); + + grpc_server + .serve_with_shutdown(grpc_addr, trigger_shutdown(background_tasks, shutdown_rx)) + .await +} + +async fn trigger_shutdown( + mut background_tasks: BackgroundTasks, + mut shutdown_rx: oneshot::Receiver<()>, +) { + let shutdown_span; + loop { + tokio::select! 
{
+            biased;
+
+            _ = &mut shutdown_rx => {
+                shutdown_span = info_span!(SHUTDOWN_SPAN);
+                shutdown_span.in_scope(|| {
+                    info!("grpc server received shutdown signal and will shutdown all of its background tasks");
+                });
+                break;
+            }
+
+            Some((task, res)) = background_tasks.join_next() => {
+                let panic_msg = res.err().map(eyre::Report::new).map(tracing::field::display);
+                error_span!("grpc_background_task_failed").in_scope(|| {
+                    error!(
+                        panic_msg,
+                        task,
+                        "background task supporting a grpc service ended unexpectedly; Sequencer will \
+                         keep responding to gRPC requests, but there is currently no way to recover \
+                         functionality of this service until Sequencer is restarted"
+                    );
+                });
+            }
+        }
+    }
+    perform_shutdown(background_tasks)
+        .instrument(shutdown_span)
+        .await;
+}
+
+async fn perform_shutdown(mut background_tasks: BackgroundTasks) {
+    background_tasks.cancel_all();
+
+    if let Ok(()) = tokio::time::timeout(SHUTDOWN_TIMEOUT, async {
+        while let Some((task, res)) = background_tasks.join_next().await {
+            let error = res
+                .err()
+                .map(eyre::Report::new)
+                .map(tracing::field::display);
+            info!(
+                error,
+                task, "background task exited while awaiting shutdown"
+            );
+        }
+    })
+    .await
+    {
+        info!("all background tasks exited during shutdown window");
+    } else {
+        error!(
+            tasks = background_tasks.display_running_tasks(),
+            "background tasks did not finish during shutdown window and will be aborted",
+        );
+        background_tasks.abort_all();
+    };
+
+    info!("reached shutdown target");
+}
diff --git a/crates/astria-sequencer/src/grpc/optimistic.rs b/crates/astria-sequencer/src/grpc/optimistic.rs
new file mode 100644
index 0000000000..ea32d2f7f9
--- /dev/null
+++ b/crates/astria-sequencer/src/grpc/optimistic.rs
@@ -0,0 +1,422 @@
+use std::{
+    pin::Pin,
+    sync::Arc,
+    time::Duration,
+};
+
+use astria_core::{
+    generated::astria::sequencerblock::optimistic::v1alpha1::{
+        optimistic_block_service_server::{
+            OptimisticBlockService,
+            OptimisticBlockServiceServer,
+        },
+        GetBlockCommitmentStreamRequest,
+        GetBlockCommitmentStreamResponse,
+        GetOptimisticBlockStreamRequest,
+        GetOptimisticBlockStreamResponse,
+    },
+    primitive::v1::RollupId,
+    sequencerblock::{
+        optimistic::v1alpha1::SequencerBlockCommit,
+        v1::{
+            block,
+            SequencerBlock,
+        },
+    },
+    Protobuf as _,
+};
+use astria_eyre::{
+    eyre,
+    eyre::WrapErr as _,
+};
+use tendermint::{
+    abci::request::FinalizeBlock,
+    Hash,
+};
+use tokio::{
+    sync::mpsc,
+    task::JoinSet,
+};
+use tokio_util::sync::CancellationToken;
+use tonic::{
+    codegen::tokio_stream::{
+        wrappers::ReceiverStream,
+        Stream,
+    },
+    Request,
+    Response,
+    Status,
+};
+use tracing::{
+    error,
+    info,
+    info_span,
+    instrument,
+    trace,
+    warn,
+};
+
+use crate::app::event_bus::{
+    EventBusSubscription,
+    EventReceiver,
+};
+
+const STREAM_TASKS_SHUTDOWN_DURATION: Duration = Duration::from_secs(1);
+const OPTIMISTIC_STREAM_SPAN: &str = "optimistic_stream";
+const BLOCK_COMMITMENT_STREAM_SPAN: &str = "block_commitment_stream";
+
+type GrpcStream<T> = Pin<Box<dyn Stream<Item = Result<T, Status>> + Send>>;
+
+/// Create a new optimistic block service.
+///
+/// The service is split into a frontend and a backend part,
+/// where [`Facade`], wrapped in an [`OptimisticBlockServiceServer`], is
+/// to be passed to a [`tonic::transport::Server`], while [`Runner`]
+/// should be spawned as a separate task.
+///
+/// The [`Runner`] keeps track of all streams that are requested on
+/// the gRPC server and forwarded to it via the [`Facade`].
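Looping back to `perform_shutdown` above: the cancel, wait, then abort pattern in isolation, built from the same tokio-util primitives (a sketch under the same `rt` feature assumption as the Cargo.toml change):

    use std::time::Duration;

    use tokio_util::{sync::CancellationToken, task::JoinMap};

    #[tokio::main]
    async fn main() {
        let mut tasks: JoinMap<&'static str, ()> = JoinMap::new();
        let root = CancellationToken::new();

        // A well-behaved background task exits once its child token is cancelled.
        let child = root.child_token();
        tasks.spawn("OPTIMISTIC", async move { child.cancelled().await });

        // Cancel everything, wait out a grace period, then abort stragglers.
        root.cancel();
        let graceful = tokio::time::timeout(Duration::from_millis(1500), async {
            while let Some((task, res)) = tasks.join_next().await {
                println!("{task} exited: {res:?}");
            }
        })
        .await;
        if graceful.is_err() {
            tasks.abort_all();
        }
    }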
+pub(super) fn new( + event_bus_subscription: EventBusSubscription, + cancellation_token: CancellationToken, +) -> (OptimisticBlockServiceServer, Runner) { + let (tx, rx) = mpsc::channel(128); + + let facade = Facade::new(tx); + let runner = Runner::new(event_bus_subscription, rx, cancellation_token); + let server = OptimisticBlockServiceServer::new(facade); + (server, runner) +} + +struct StartOptimisticBlockStreamRequest { + rollup_id: RollupId, + response: mpsc::Sender>, +} + +struct StartBlockCommitmentStreamRequest { + response: mpsc::Sender>, +} + +enum NewStreamRequest { + OptimisticBlockStream(StartOptimisticBlockStreamRequest), + BlockCommitmentStream(StartBlockCommitmentStreamRequest), +} + +pub(super) struct Runner { + event_bus_subscription: EventBusSubscription, + stream_request_receiver: mpsc::Receiver, + stream_tasks: JoinSet>, + cancellation_token: CancellationToken, +} + +impl Runner { + fn new( + event_bus_subscription: EventBusSubscription, + stream_request_receiver: mpsc::Receiver, + cancellation_token: CancellationToken, + ) -> Self { + Self { + event_bus_subscription, + stream_request_receiver, + stream_tasks: JoinSet::new(), + cancellation_token, + } + } + + fn handle_optimistic_block_stream_request( + &mut self, + request: StartOptimisticBlockStreamRequest, + ) { + let StartOptimisticBlockStreamRequest { + rollup_id, + response, + } = request; + + let process_proposal_blocks = self.event_bus_subscription.process_proposal_blocks(); + self.stream_tasks.spawn(optimistic_stream( + process_proposal_blocks, + rollup_id, + response, + self.cancellation_token.child_token(), + )); + } + + fn handle_block_commitment_stream_request( + &mut self, + request: StartBlockCommitmentStreamRequest, + ) { + let StartBlockCommitmentStreamRequest { + response, + } = request; + + let finalized_blocks = self.event_bus_subscription.finalized_blocks(); + self.stream_tasks.spawn(block_commitment_stream( + finalized_blocks, + response, + self.cancellation_token.child_token(), + )); + } + + pub(super) async fn run(mut self) { + loop { + tokio::select! 
+    pub(super) async fn run(mut self) {
+        loop {
+            tokio::select! {
+                biased;
+                () = self.cancellation_token.cancelled() => {
+                    break;
+                },
+                Some(inner_stream_request) = self.stream_request_receiver.recv() => {
+                    match inner_stream_request {
+                        NewStreamRequest::OptimisticBlockStream(request) => {
+                            self.handle_optimistic_block_stream_request(request);
+                        }
+                        NewStreamRequest::BlockCommitmentStream(request) => {
+                            self.handle_block_commitment_stream_request(request);
+                        }
+                    }
+                },
+                Some(joined_task) = self.stream_tasks.join_next() => {
+                    match joined_task {
+                        Ok(Ok(())) => {
+                            trace!("stream task has been joined successfully");
+                        },
+                        Ok(Err(error)) => {
+                            warn!(%error, "stream task has been joined with an error");
+                        },
+                        Err(error) => {
+                            warn!(%error, "stream task has panicked");
+                        }
+                    }
+                }
+            }
+        }
+
+        self.shutdown().await;
+    }
+
+    #[instrument(skip_all)]
+    async fn shutdown(&mut self) {
+        match tokio::time::timeout(STREAM_TASKS_SHUTDOWN_DURATION, async {
+            while let Some(joined_task) = self.stream_tasks.join_next().await {
+                match joined_task {
+                    Ok(Ok(())) => {
+                        trace!("stream task has been joined successfully");
+                    }
+                    Ok(Err(error)) => {
+                        warn!(%error, "stream task has been joined with an error");
+                    }
+                    Err(error) => {
+                        warn!(%error, "stream task has panicked");
+                    }
+                }
+            }
+        })
+        .await
+        {
+            Ok(()) => {
+                info!("all stream tasks have been joined successfully");
+            }
+            Err(error) => {
+                error!(%error, "stream tasks failed to shut down in time");
+                self.stream_tasks.abort_all();
+            }
+        }
+    }
+}
+
+pub(super) struct Facade {
+    stream_request_sender: mpsc::Sender<NewStreamRequest>,
+}
+
+impl Facade {
+    fn new(stream_request_sender: mpsc::Sender<NewStreamRequest>) -> Self {
+        Self {
+            stream_request_sender,
+        }
+    }
+
+    #[instrument(skip_all)]
+    async fn spawn_optimistic_block_stream(
+        &self,
+        get_optimistic_block_stream_request: GetOptimisticBlockStreamRequest,
+    ) -> tonic::Result<Response<GrpcStream<GetOptimisticBlockStreamResponse>>> {
+        let rollup_id = {
+            let rollup_id = get_optimistic_block_stream_request
+                .rollup_id
+                .ok_or_else(|| Status::invalid_argument("rollup id is required"))?;
+
+            RollupId::try_from_raw(rollup_id)
+                .map_err(|e| Status::invalid_argument(e.to_string()))?
+        };
+
+        let (tx, rx) =
+            tokio::sync::mpsc::channel::<tonic::Result<GetOptimisticBlockStreamResponse>>(128);
+
+        let request = NewStreamRequest::OptimisticBlockStream(StartOptimisticBlockStreamRequest {
+            rollup_id,
+            response: tx,
+        });
+
+        self.stream_request_sender
+            .send(request)
+            .await
+            .map_err(|e| {
+                Status::internal(format!("failed to create optimistic block stream: {e}"))
+            })?;
+
+        Ok(Response::new(
+            Box::pin(ReceiverStream::new(rx)) as GrpcStream<GetOptimisticBlockStreamResponse>
+        ))
+    }
+
+    #[instrument(skip_all)]
+    async fn spawn_block_commitment_stream_request(
+        &self,
+    ) -> tonic::Result<Response<GrpcStream<GetBlockCommitmentStreamResponse>>> {
+        let (tx, rx) =
+            tokio::sync::mpsc::channel::<tonic::Result<GetBlockCommitmentStreamResponse>>(128);
+
+        let request = NewStreamRequest::BlockCommitmentStream(StartBlockCommitmentStreamRequest {
+            response: tx,
+        });
+
+        self.stream_request_sender
+            .send(request)
+            .await
+            .map_err(|e| {
+                Status::internal(format!("failed to create block commitment stream: {e}"))
+            })?;
+
+        Ok(Response::new(
+            Box::pin(ReceiverStream::new(rx)) as GrpcStream<GetBlockCommitmentStreamResponse>
+        ))
+    }
+}
+
+#[async_trait::async_trait]
+impl OptimisticBlockService for Facade {
+    type GetBlockCommitmentStreamStream = GrpcStream<GetBlockCommitmentStreamResponse>;
+    type GetOptimisticBlockStreamStream = GrpcStream<GetOptimisticBlockStreamResponse>;
+
+    #[instrument(skip_all)]
+    async fn get_optimistic_block_stream(
+        self: Arc<Self>,
+        request: Request<GetOptimisticBlockStreamRequest>,
+    ) -> tonic::Result<Response<Self::GetOptimisticBlockStreamStream>> {
+        let get_optimistic_block_stream_request = request.into_inner();
+
+        self.spawn_optimistic_block_stream(get_optimistic_block_stream_request)
+            .await
+    }
+
+    #[instrument(skip_all)]
+    async fn get_block_commitment_stream(
+        self: Arc<Self>,
+        _request: Request<GetBlockCommitmentStreamRequest>,
+    ) -> tonic::Result<Response<Self::GetBlockCommitmentStreamStream>> {
+        self.spawn_block_commitment_stream_request().await
+    }
+}
+
+async fn block_commitment_stream(
+    mut finalized_blocks_receiver: EventReceiver<Arc<FinalizeBlock>>,
+    tx: mpsc::Sender<tonic::Result<GetBlockCommitmentStreamResponse>>,
+    cancellation_token: CancellationToken,
+) -> Result<(), eyre::Report> {
+    match cancellation_token
+        .run_until_cancelled(async move {
+            loop {
+                match finalized_blocks_receiver.receive().await {
+                    Ok(finalized_block) => {
+                        if let Err(error) =
+                            info_span!(BLOCK_COMMITMENT_STREAM_SPAN).in_scope(|| {
+                                let Hash::Sha256(block_hash) = finalized_block.hash else {
+                                    warn!("block hash is empty; this should not occur");
+                                    return Ok(());
+                                };
+
+                                let sequencer_block_commit = SequencerBlockCommit::new(
+                                    finalized_block.height.value(),
+                                    block::Hash::new(block_hash),
+                                );
+
+                                let get_block_commitment_stream_response =
+                                    GetBlockCommitmentStreamResponse {
+                                        commitment: Some(sequencer_block_commit.to_raw()),
+                                    };
+
+                                match tx
+                                    .try_send(Ok(get_block_commitment_stream_response))
+                                    .wrap_err("forwarding block commitment stream to client failed")
+                                {
+                                    Ok(()) => Ok(()),
+                                    Err(error) => {
+                                        error!(%error);
+                                        Err(error)
+                                    }
+                                }
+                            })
+                        {
+                            break Err(error);
+                        }
+                    }
+                    Err(e) => {
+                        break Err(e).wrap_err("failed receiving finalized block from event bus");
+                    }
+                }
+            }
+        })
+        .await
+    {
+        Some(res) => res,
+        None => Ok(()),
+    }
+}
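+
+// Note on backpressure: both stream tasks use `try_send` rather than awaiting
+// `send`. If a client cannot keep up and its buffer of 128 responses fills,
+// forwarding fails and the stream task exits with an error, dropping the
+// sender and thereby ending the client's stream, instead of blocking on the
+// event bus.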
+
+async fn optimistic_stream(
+    mut process_proposal_blocks: EventReceiver<Arc<SequencerBlock>>,
+    rollup_id: RollupId,
+    tx: mpsc::Sender<tonic::Result<GetOptimisticBlockStreamResponse>>,
+    cancellation_token: CancellationToken,
+) -> Result<(), eyre::Report> {
+    match cancellation_token
+        .run_until_cancelled(async move {
+            loop {
+                match process_proposal_blocks.receive().await {
+                    Ok(block) => {
+                        if let Err(e) = info_span!(OPTIMISTIC_STREAM_SPAN).in_scope(|| {
+                            let filtered_optimistic_block =
+                                block.to_filtered_block(vec![rollup_id]);
+                            let raw_filtered_optimistic_block =
+                                filtered_optimistic_block.into_raw();
+
+                            let get_optimistic_block_stream_response =
+                                GetOptimisticBlockStreamResponse {
+                                    block: Some(raw_filtered_optimistic_block),
+                                };
+
+                            match tx
+                                .try_send(Ok(get_optimistic_block_stream_response))
+                                .wrap_err("forwarding optimistic block stream to client failed")
+                            {
+                                Ok(()) => Ok(()),
+                                Err(error) => {
+                                    error!(%error);
+                                    Err(error)
+                                }
+                            }
+                        }) {
+                            break Err(e);
+                        }
+                    }
+                    Err(e) => {
+                        break Err(e).wrap_err("failed receiving proposed block from event bus");
+                    }
+                }
+            }
+        })
+        .await
+    {
+        Some(res) => res,
+        None => Ok(()),
+    }
+}
diff --git a/crates/astria-sequencer/src/sequencer.rs b/crates/astria-sequencer/src/sequencer.rs
index 520cc8f788..9870dae710 100644
--- a/crates/astria-sequencer/src/sequencer.rs
+++ b/crates/astria-sequencer/src/sequencer.rs
@@ -1,7 +1,7 @@
-use astria_core::generated::astria::sequencerblock::v1::sequencer_service_server::SequencerServiceServer;
 use astria_eyre::{
     anyhow_to_eyre,
     eyre::{
+        self,
         eyre,
         OptionExt as _,
         Result,
@@ -38,9 +38,10 @@ use tracing::{
 use crate::{
     app::App,
-    config::Config,
-    grpc::sequencer::SequencerServer,
-    ibc::host_interface::AstriaHost,
+    config::{
+        AbciListenUrl,
+        Config,
+    },
     mempool::Mempool,
     metrics::Metrics,
     service,
@@ -137,30 +138,14 @@ impl Sequencer {
         let snapshot = storage.latest_snapshot();
 
         let mempool = Mempool::new(metrics, config.mempool_parked_max_tx_count);
+
         let app = App::new(snapshot, mempool.clone(), metrics)
             .await
             .wrap_err("failed to initialize app")?;
 
-        let consensus_service = tower::ServiceBuilder::new()
-            .layer(request_span::layer(|req: &ConsensusRequest| {
-                req.create_span()
-            }))
-            .service(tower_actor::Actor::new(10, |queue: _| {
-                let storage = storage.clone();
-                async move { service::Consensus::new(storage, app, queue).run().await }
-            }));
-        let mempool_service = service::Mempool::new(storage.clone(), mempool.clone(), metrics);
-        let info_service =
-            service::Info::new(storage.clone()).wrap_err("failed initializing info service")?;
-        let snapshot_service = service::Snapshot;
+        let event_bus_subscription = app.subscribe_to_events();
 
-        let abci_server = Server::builder()
-            .consensus(consensus_service)
-            .info(info_service)
-            .mempool(mempool_service)
-            .snapshot(snapshot_service)
-            .finish()
-            .ok_or_eyre("server builder didn't return server; are all fields set?")?;
+        let mempool_service = service::Mempool::new(storage.clone(), mempool.clone(), metrics);
 
         let (grpc_shutdown_tx, grpc_shutdown_rx) = tokio::sync::oneshot::channel();
         let (abci_shutdown_tx, abci_shutdown_rx) = tokio::sync::oneshot::channel();
@@ -169,24 +154,28 @@ impl Sequencer {
             .grpc_addr
             .parse()
            .wrap_err("failed to parse grpc_addr address")?;
-        let grpc_server_handle = start_grpc_server(&storage, mempool, grpc_addr, grpc_shutdown_rx);
-
-        debug!(config.listen_addr, "starting sequencer");
+        // TODO(janis): need a mechanism to check and report if the grpc server setup failed;
+        // right now it's fire-and-forget, and the grpc server is only reaped if sequencer
+        // itself is taken down.
+        let grpc_server_handle = tokio::spawn(crate::grpc::serve(
+            storage.clone(),
+            mempool,
+            grpc_addr,
+            config.no_optimistic_blocks,
+            event_bus_subscription,
+            grpc_shutdown_rx,
+        ));
 
-        let listen_addr = config.listen_addr.clone();
-        let abci_server_handle = tokio::spawn(async move {
-            match abci_server.listen_tcp(listen_addr).await {
-                Ok(()) => {
-                    // this shouldn't happen, as there isn't a way for the ABCI server to exit
-                    info_span!("abci_server").in_scope(|| info!("ABCI server exited successfully"));
-                }
-                Err(e) => {
-                    error_span!("abci_server")
-                        .in_scope(|| error!(err = e.as_ref(), "ABCI server exited with error"));
-                }
-            }
-            let _ = abci_shutdown_tx.send(());
-        });
+        debug!(%config.abci_listen_url, "starting sequencer");
+        let abci_server_handle = start_abci_server(
+            &storage,
+            app,
+            mempool_service,
+            config.abci_listen_url,
+            abci_shutdown_tx,
+        )
+        .wrap_err("failed to start ABCI server")?;
 
         let grpc_server = RunningGrpcServer {
             handle: grpc_server_handle,
@@ -201,52 +190,52 @@ impl Sequencer {
     }
 }
 
-fn start_grpc_server(
+fn start_abci_server(
     storage: &cnidarium::Storage,
-    mempool: Mempool,
-    grpc_addr: std::net::SocketAddr,
-    shutdown_rx: oneshot::Receiver<()>,
-) -> JoinHandle<Result<(), tonic::transport::Error>> {
-    use futures::TryFutureExt as _;
-    use ibc_proto::ibc::core::{
-        channel::v1::query_server::QueryServer as ChannelQueryServer,
-        client::v1::query_server::QueryServer as ClientQueryServer,
-        connection::v1::query_server::QueryServer as ConnectionQueryServer,
-    };
-    use penumbra_tower_trace::remote_addr;
-    use tower_http::cors::CorsLayer;
+    app: App,
+    mempool_service: service::Mempool,
+    listen_url: AbciListenUrl,
+    abci_shutdown_tx: oneshot::Sender<()>,
+) -> eyre::Result<JoinHandle<()>> {
+    let consensus_service = tower::ServiceBuilder::new()
+        .layer(request_span::layer(|req: &ConsensusRequest| {
+            req.create_span()
+        }))
+        .service(tower_actor::Actor::new(10, |queue: _| {
+            let storage = storage.clone();
+            async move { service::Consensus::new(storage, app, queue).run().await }
+        }));
+    let info_service =
+        service::Info::new(storage.clone()).wrap_err("failed initializing info service")?;
+    let snapshot_service = service::Snapshot;
 
-    let ibc = penumbra_ibc::component::rpc::IbcQuery::<AstriaHost>::new(storage.clone());
-    let sequencer_api = SequencerServer::new(storage.clone(), mempool);
-    let cors_layer: CorsLayer = CorsLayer::permissive();
+    let server = Server::builder()
+        .consensus(consensus_service)
+        .info(info_service)
+        .mempool(mempool_service)
+        .snapshot(snapshot_service)
+        .finish()
+        .ok_or_eyre("server builder didn't return server; are all fields set?")?;
 
-    // TODO: setup HTTPS?
-    let grpc_server = tonic::transport::Server::builder()
-        .trace_fn(|req| {
-            if let Some(remote_addr) = remote_addr(req) {
-                let addr = remote_addr.to_string();
-                error_span!("grpc", addr)
-            } else {
-                error_span!("grpc")
+    let server_handle = tokio::spawn(async move {
+        let server_listen_result = match listen_url {
+            AbciListenUrl::Tcp(socket_addr) => server.listen_tcp(socket_addr).await,
+            AbciListenUrl::Unix(path) => server.listen_unix(path).await,
+        };
+        match server_listen_result {
+            Ok(()) => {
+                // this shouldn't happen, as there isn't a way for the ABCI server to exit
+                info_span!("abci_server").in_scope(|| info!("ABCI server exited successfully"));
             }
-        })
-        // (from Penumbra) Allow HTTP/1, which will be used by grpc-web connections.
-        // This is particularly important when running locally, as gRPC
-        // typically uses HTTP/2, which requires HTTPS. Accepting HTTP/2
-        // allows local applications such as web browsers to talk to pd.
-        .accept_http1(true)
-        // (from Penumbra) Add permissive CORS headers, so pd's gRPC services are accessible
-        // from arbitrary web contexts, including from localhost.
-        .layer(cors_layer)
-        .add_service(ClientQueryServer::new(ibc.clone()))
-        .add_service(ChannelQueryServer::new(ibc.clone()))
-        .add_service(ConnectionQueryServer::new(ibc.clone()))
-        .add_service(SequencerServiceServer::new(sequencer_api));
+            Err(e) => {
+                error_span!("abci_server")
+                    .in_scope(|| error!(err = e.as_ref(), "ABCI server exited with error"));
+            }
+        }
+        let _ = abci_shutdown_tx.send(());
+    });
 
-    info!(grpc_addr = grpc_addr.to_string(), "starting grpc server");
-    tokio::task::spawn(
-        grpc_server.serve_with_shutdown(grpc_addr, shutdown_rx.unwrap_or_else(|_| ())),
-    )
+    Ok(server_handle)
 }
 
 struct SignalReceiver {
diff --git a/dev/values/auctioneer/values.yaml b/dev/values/auctioneer/values.yaml
new file mode 100644
index 0000000000..b3f98c1ae5
--- /dev/null
+++ b/dev/values/auctioneer/values.yaml
@@ -0,0 +1,28 @@
+global:
+  namespaceOverride: ""
+  replicaCount: 1
+  # Whether to use tty-readable logging for astria services; when false, JSON is used.
+  useTTY: true
+  dev: true
+
+config:
+  sequencerGrpcEndpoint: "http://node0-sequencer-grpc-service.astria-dev-cluster.svc.cluster.local:8080"
+  sequencerAbciEndpoint: "http://node0-sequencer-rpc-service.astria-dev-cluster.svc.cluster.local:26657"
+  sequencerChainId: "sequencer-test-chain-0"
+  sequencerPrivateKey:
+    devContent: "b8da3289343cb92a235af079d74bae435f16272df38b9ab3ab468e62c632e2f8"
+  feeAssetDenomination: "nria"
+  sequencerAddressPrefix: astria
+  rollupGrpcEndpoint: "http://astria-evm-service.astria-dev-cluster.svc.cluster.local:50051"
+  rollupId: "astria"
+  latencyMarginMs: 1000
+  logLevel: "debug"
+
+otel:
+  enabled: false
+
+metrics:
+  enabled: false
+
+serviceMonitor:
+  enabled: false
diff --git a/dev/values/hermes/local.yaml b/dev/values/hermes/local.yaml
index a643cff25f..b9fc0c6fb4 100644
--- a/dev/values/hermes/local.yaml
+++ b/dev/values/hermes/local.yaml
@@ -57,9 +57,8 @@ chains:
       filename: celestia.json
       key: token
     eventSource:
-      mode: push
-      url: ws://celestia-app-service.astria-dev-cluster.svc.cluster.local:26657/websocket
-      batchDelay: 200ms
+      mode: pull
+      interval: 1s
     gasDenom: "utia"
     gasPrice: 0.0026
     gasMultiplier: 1.2
diff --git a/dev/values/rollup/dev.yaml b/dev/values/rollup/dev.yaml
index 0d9be85422..66eb3afb71 100644
--- a/dev/values/rollup/dev.yaml
+++ b/dev/values/rollup/dev.yaml
@@ -56,7 +56,7 @@ evm-rollup:
     #   1:
     #     minBaseFee: 0
    #     elasticityMultiplier: 2
-    #     baseFeeChangeDenominator: 8 
+    #     baseFeeChangeDenominator: 8
 
     ## Standard Eth Genesis config values
     # Configuration of Eth forks, setting to 0 will enable from height,
diff --git a/dev/values/rollup/flame-dev.yaml b/dev/values/rollup/flame-dev.yaml
new file mode 100644
index 0000000000..5eb58554fd
--- /dev/null
+++ b/dev/values/rollup/flame-dev.yaml
@@ -0,0 +1,274 @@
+global:
+  useTTY: true
+  dev: true
+  evmChainId: 1337
+  rollupName: astria
+  sequencerRpc: http://node0-sequencer-rpc-service.astria-dev-cluster.svc.cluster.local:26657
+  sequencerGrpc: http://node0-sequencer-grpc-service.astria-dev-cluster.svc.cluster.local:8080
+  sequencerChainId: sequencer-test-chain-0
+  celestiaChainId: celestia-local-0
+
+evm-rollup:
+  enabled: false
+
+flame-rollup:
+  enabled: true
+  genesis:
+    ## These values are used to configure the genesis block of the rollup chain
+    ## no defaults as they are unique to each chain
+
+    # Block height to start syncing rollup from, lowest possible is 2
+    sequencerInitialHeight: 2
+    # The first Celestia height to utilize when looking for rollup data
+    celestiaInitialHeight: 2
+    # The variance in Celestia height to allow before halting the chain
+    celestiaHeightVariance: 10
+    # Fills the extra data in each block; can be left empty,
+    # or set to something unique for your chain.
+    extraDataOverride: ""
+
+    ## These are general configuration values with some recommended defaults
+
+    # Configure the gas limit
+    gasLimit: "50000000"
+    # If set to true, the genesis block will contain extra data
+    overrideGenesisExtraData: true
+    # The hrp for bech32m addresses, unlikely to be changed
+    sequencerAddressPrefix: "astria"
+
+    ## These values are used to configure astria native bridging
+    ## Many of the fields have commented out example fields
+
+    # Configure the sequencer bridge addresses and allowed assets if using
+    # the astria canonical bridge. Recommend removing alloc values if so.
+    bridgeAddresses:
+      - bridgeAddress: "astria13ahqz4pjqfmynk9ylrqv4fwe4957x2p0h5782u"
+        startHeight: 1
+        senderAddress: "0x0000000000000000000000000000000000000000"
+        assetDenom: "nria"
+        assetPrecision: 9
+
+
+    ## Fee configuration
+
+    # Configure the fee collector for the evm tx fees, activated at block heights.
+    # If not configured, all tx fees will be burned.
+    feeCollectors:
+      1: "0xaC21B97d35Bf75A7dAb16f35b111a50e78A72F30"
+    # Configure EIP-1559 params, activated at block heights
+    eip1559Params: {}
+    #   1:
+    #     minBaseFee: 0
+    #     elasticityMultiplier: 2
+    #     baseFeeChangeDenominator: 8
+    auctioneerAddresses:
+      1: "astria1ferdmm38w7zr4ankmntst0g0qg8e7ygeu3vxcy"
+    ## Standard Eth Genesis config values
+    # Configuration of Eth forks; setting to 0 will enable from height,
+    # left as is, these forks will not activate.
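+    # For example, enabling the Cancun fork from genesis would look like
+    # (illustrative only, not part of this change):
+    #   cancunTime: "0"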
+ cancunTime: "" + pragueTime: "" + verkleTime: "" + # Can configure the genesis allocs for the chain + alloc: + # Deploying the deterministic deploy proxy contract in genesis + # Forge and other tools use this for their CREATE2 usage, but + # can only be included through the genesis block after EIP-155 + # https://github.com/Arachnid/deterministic-deployment-proxy + - address: "0x4e59b44847b379578588920cA78FbF26c0B4956C" + value: + balance: "0" + code: "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3" + - address: "0xA58639fB5458e65E4fA917FF951C390292C24A15" + value: + balance: "0" + code: "0x6080604052600436106100f35760003560e01c8063b6476c7e1161008a578063e74b981b11610059578063e74b981b1461027b578063ebd090541461029b578063f2fde38b146102bb578063fc88d31b146102db57600080fd5b8063b6476c7e1461021c578063bab916d01461023e578063d294f09314610251578063db97dc981461026657600080fd5b80638da5cb5b116100c65780638da5cb5b146101a1578063a7eaa739146101d3578063a996e020146101f3578063ad2282471461020657600080fd5b80636f46384a146100f8578063715018a6146101215780637eb6dec7146101385780638897397914610181575b600080fd5b34801561010457600080fd5b5061010e60035481565b6040519081526020015b60405180910390f35b34801561012d57600080fd5b506101366102f1565b005b34801561014457600080fd5b5061016c7f000000000000000000000000000000000000000000000000000000000000000981565b60405163ffffffff9091168152602001610118565b34801561018d57600080fd5b5061013661019c3660046107a6565b610305565b3480156101ad57600080fd5b506000546001600160a01b03165b6040516001600160a01b039091168152602001610118565b3480156101df57600080fd5b506101366101ee3660046107a6565b610312565b610136610201366004610808565b61031f565b34801561021257600080fd5b5061010e60065481565b34801561022857600080fd5b50610231610414565b6040516101189190610874565b61013661024c3660046108c3565b6104a2565b34801561025d57600080fd5b50610136610588565b34801561027257600080fd5b506102316106b4565b34801561028757600080fd5b50610136610296366004610905565b6106c1565b3480156102a757600080fd5b506005546101bb906001600160a01b031681565b3480156102c757600080fd5b506101366102d6366004610905565b6106eb565b3480156102e757600080fd5b5061010e60045481565b6102f9610729565b6103036000610756565b565b61030d610729565b600455565b61031a610729565b600355565b3460045480821161034b5760405162461bcd60e51b815260040161034290610935565b60405180910390fd5b60007f000000000000000000000000000000000000000000000000000000003b9aca006103788385610998565b61038291906109b1565b1161039f5760405162461bcd60e51b8152600401610342906109d3565b600454600660008282546103b39190610a61565b90915550506004546103c59034610998565b336001600160a01b03167f0c64e29a5254a71c7f4e52b3d2d236348c80e00a00ba2e1961962bd2827c03fb888888886040516104049493929190610a9d565b60405180910390a3505050505050565b6002805461042190610acf565b80601f016020809104026020016040519081016040528092919081815260200182805461044d90610acf565b801561049a5780601f1061046f5761010080835404028352916020019161049a565b820191906000526020600020905b81548152906001019060200180831161047d57829003601f168201915b505050505081565b346003548082116104c55760405162461bcd60e51b815260040161034290610935565b60007f000000000000000000000000000000000000000000000000000000003b9aca006104f28385610998565b6104fc91906109b1565b116105195760405162461bcd60e51b8152600401610342906109d3565b6003546006600082825461052d9190610a61565b909155505060035461053f9034610998565b336001600160a01b03167f0f4961cab7530804898499aa89f5ec81d1a73102e2e4a1f30f88e5ae3513ba2a868660405161057a929190610b09565b60405180910390a350505050565b6005546001600160a01b0316331
46105f45760405162461bcd60e51b815260206004820152602960248201527f41737472696142726964676561626c6545524332303a206f6e6c7920666565206044820152681c9958da5c1a595b9d60ba1b6064820152608401610342565b6005546006546040516000926001600160a01b031691908381818185875af1925050503d8060008114610643576040519150601f19603f3d011682016040523d82523d6000602084013e610648565b606091505b50509050806106ac5760405162461bcd60e51b815260206004820152602a60248201527f41737472696142726964676561626c6545524332303a20666565207472616e7360448201526919995c8819985a5b195960b21b6064820152608401610342565b506000600655565b6001805461042190610acf565b6106c9610729565b600580546001600160a01b0319166001600160a01b0392909216919091179055565b6106f3610729565b6001600160a01b03811661071d57604051631e4fbdf760e01b815260006004820152602401610342565b61072681610756565b50565b6000546001600160a01b031633146103035760405163118cdaa760e01b8152336004820152602401610342565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b6000602082840312156107b857600080fd5b5035919050565b60008083601f8401126107d157600080fd5b50813567ffffffffffffffff8111156107e957600080fd5b60208301915083602082850101111561080157600080fd5b9250929050565b6000806000806040858703121561081e57600080fd5b843567ffffffffffffffff8082111561083657600080fd5b610842888389016107bf565b9096509450602087013591508082111561085b57600080fd5b50610868878288016107bf565b95989497509550505050565b60006020808352835180602085015260005b818110156108a257858101830151858201604001528201610886565b506000604082860101526040601f19601f8301168501019250505092915050565b600080602083850312156108d657600080fd5b823567ffffffffffffffff8111156108ed57600080fd5b6108f9858286016107bf565b90969095509350505050565b60006020828403121561091757600080fd5b81356001600160a01b038116811461092e57600080fd5b9392505050565b6020808252602d908201527f417374726961576974686472617765723a20696e73756666696369656e74207760408201526c69746864726177616c2066656560981b606082015260800190565b634e487b7160e01b600052601160045260246000fd5b818103818111156109ab576109ab610982565b92915050565b6000826109ce57634e487b7160e01b600052601260045260246000fd5b500490565b60208082526062908201527f417374726961576974686472617765723a20696e73756666696369656e74207660408201527f616c75652c206d7573742062652067726561746572207468616e203130202a2a60608201527f20283138202d20424153455f434841494e5f41535345545f505245434953494f6080820152614e2960f01b60a082015260c00190565b808201808211156109ab576109ab610982565b81835281816020850137506000828201602090810191909152601f909101601f19169091010190565b604081526000610ab1604083018688610a74565b8281036020840152610ac4818587610a74565b979650505050505050565b600181811c90821680610ae357607f821691505b602082108103610b0357634e487b7160e01b600052602260045260246000fd5b50919050565b602081526000610b1d602083018486610a74565b94935050505056fea2646970667358221220842bd8104ffc1c611919341f64a8277f2fc808138b97720a6dc1382e5670099064736f6c63430008190033" + + + config: + # The level at which core astria components will log out + # Options are: error, warn, info, and debug + logLevel: "debug" + + geth: + auctioneer: false + + conductor: + # Determines what will drive block execution, options are: + # - "SoftOnly" -> blocks are only pulled from the sequencer + # - "FirmOnly" -> blocks are only pulled from DA + # - "SoftAndFirm" -> blocks are pulled from both the sequencer and DA + executionCommitLevel: 'SoftAndFirm' + # The expected fastest block time possible from sequencer, determines polling + # rate. 
When running with the auctioneer sidecar, decrease this value
+      # in order to avoid race conditions between executions (100ms recommended).
+      sequencerBlockTimeMs: 2000
+      # The maximum number of requests to make to the sequencer per second
+      sequencerRequestsPerSecond: 500
+
+    celestia:
+      rpc: "http://celestia-service.astria-dev-cluster.svc.cluster.local:26658"
+      token: ""
+
+  resources:
+    conductor:
+      requests:
+        cpu: 0.01
+        memory: 1Mi
+      limits:
+        cpu: 0.1
+        memory: 20Mi
+    geth:
+      requests:
+        cpu: 0.25
+        memory: 256Mi
+      limits:
+        cpu: 2
+        memory: 1Gi
+
+  storage:
+    enabled: false
+
+  ingress:
+    enabled: true
+    services:
+      rpc:
+        enabled: true
+      ws:
+        enabled: true
+
+auctioneer:
+  enabled: false
+  config:
+    sequencerPrivateKey:
+      devContent: "b8da3289343cb92a235af079d74bae435f16272df38b9ab3ab468e62c632e2f8"
+    feeAssetDenomination: "nria"
+    sequencerAddressPrefix: astria
+    rollupGrpcEndpoint: "http://astria-evm-service.astria-dev-cluster.svc.cluster.local:50051"
+    rollupId: "astria"
+    latencyMarginMs: 1000
+    logLevel: "debug"
+
+
+celestia-node:
+  enabled: false
+
+composer:
+  enabled: true
+  config:
+    privateKey:
+      devContent: "2bd806c97f0e00af1a1fc3328fa763a9269723c8db8fac4f93af71db186d6e90"
+
+evm-bridge-withdrawer:
+  enabled: true
+  config:
+    minExpectedFeeAssetBalance: "0"
+    sequencerBridgeAddress: "astria13ahqz4pjqfmynk9ylrqv4fwe4957x2p0h5782u"
+    feeAssetDenom: "nria"
+    rollupAssetDenom: "nria"
+    evmContractAddress: "0xA58639fB5458e65E4fA917FF951C390292C24A15"
+    sequencerPrivateKey:
+      devContent: "dfa7108e38ab71f89f356c72afc38600d5758f11a8c337164713e4471411d2e0"
+
+evm-faucet:
+  enabled: true
+  ingress:
+    enabled: true
+  config:
+    privateKey:
+      devContent: "8b3a7999072c9c9314c084044fe705db11714c6c4ed7cddb64da18ea270dd203"
+
+postgresql:
+  enabled: true
+  nameOverride: blockscout-postegres
+  primary:
+    persistence:
+      enabled: false
+    resourcesPreset: "medium"
+  auth:
+    enablePostgresUser: true
+    postgresPassword: bigsecretpassword
+    username: blockscout
+    password: blockscout
+    database: blockscout
+  audit:
+    logHostname: true
+    logConnections: true
+    logDisconnections: true
+blockscout-stack:
+  enabled: true
+  config:
+    network:
+      id: 1337
+      name: Astria
+      shortname: Astria
+      currency:
+        name: RIA
+        symbol: RIA
+        decimals: 18
+    testnet: true
+    prometheus:
+      enabled: false
+  blockscout:
+    extraEnv:
+      - name: ECTO_USE_SSL
+        value: "false"
+      - name: DATABASE_URL
+        value: "postgres://postgres:bigsecretpassword@astria-chain-chart-blockscout-postegres.astria-dev-cluster.svc.cluster.local:5432/blockscout"
+      - name: ETHEREUM_JSONRPC_VARIANT
+        value: "geth"
+      - name: ETHEREUM_JSONRPC_HTTP_URL
+        value: "http://astria-evm-service.astria-dev-cluster.svc.cluster.local:8545/"
+      - name: ETHEREUM_JSONRPC_INSECURE
+        value: "true"
+      - name: ETHEREUM_JSONRPC_WS_URL
+        value: "ws://astria-evm-service.astria-dev-cluster.svc.cluster.local:8546/"
+      - name: INDEXER_DISABLE_BEACON_BLOB_FETCHER
+        value: "true"
+      - name: NETWORK
+        value: "Astria"
+      - name: SUBNETWORK
+        value: "Local"
+      - name: CONTRACT_VERIFICATION_ALLOWED_SOLIDITY_EVM_VERSIONS
+        value: "homestead,tangerineWhistle,spuriousDragon,byzantium,constantinople,petersburg,istanbul,berlin,london,paris,shanghai,default"
+      - name: CONTRACT_VERIFICATION_ALLOWED_VYPER_EVM_VERSIONS
+        value: "byzantium,constantinople,petersburg,istanbul,berlin,paris,shanghai,default"
+      - name: DISABLE_EXCHANGE_RATES
+        value: "true"
+
+    ingress:
+      enabled: true
+      hostname: explorer.astria.localdev.me
+      paths:
+        - path: /api
+          pathType: Prefix
+        - path: /socket
+          pathType:
Prefix + - path: /sitemap.xml + pathType: ImplementationSpecific + - path: /public-metrics + pathType: Prefix + - path: /auth/auth0 + pathType: Exact + - path: /auth/auth0/callback + pathType: Exact + - path: /auth/logout + pathType: Exact + + frontend: + extraEnv: + - name: NEXT_PUBLIC_NETWORK_VERIFICATION_TYPE + value: "validation" + - name: NEXT_PUBLIC_AD_BANNER_PROVIDER + value: "none" + - name: NEXT_PUBLIC_API_PROTOCOL + value: "http" + - name: NEXT_PUBLIC_API_WEBSOCKET_PROTOCOL + value: "ws" + - name: NEXT_PUBLIC_NETWORK_CURRENCY_WEI_NAME + value: "aRia" + - name: NEXT_PUBLIC_AD_TEXT_PROVIDER + value: "none" + ingress: + enabled: true + hostname: explorer.astria.localdev.me diff --git a/dev/values/validators/all.yml b/dev/values/validators/all.yml index 0b482a4e62..21af3448ed 100644 --- a/dev/values/validators/all.yml +++ b/dev/values/validators/all.yml @@ -37,6 +37,11 @@ genesis: - address: astria1d7zjjljc0dsmxa545xkpwxym86g8uvvwhtezcr balance: "69000000" +sequencer: + optimisticBlockApis: + # set to true to enable optimistic block APIs + enabled: false + resources: cometbft: requests: diff --git a/justfile b/justfile index 39c3f0284f..c80df8de57 100644 --- a/justfile +++ b/justfile @@ -40,6 +40,7 @@ _crate_short_name crate quiet="": #!/usr/bin/env sh set -eu case {{crate}} in + astria-auctioneer) short_name=auctioneer ;; astria-bridge-withdrawer) short_name=bridge-withdrawer ;; astria-cli) short_name=astria-cli ;; astria-composer) short_name=composer ;; diff --git a/proto/executionapis/astria/auction/v1alpha1/allocation.proto b/proto/executionapis/astria/auction/v1alpha1/allocation.proto new file mode 100644 index 0000000000..da7b3644dc --- /dev/null +++ b/proto/executionapis/astria/auction/v1alpha1/allocation.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package astria.auction.v1alpha1; + +import "google/protobuf/any.proto"; + +// The Allocation message is submitted by the Auctioneer to the rollup as a +// `RollupDataSubmission` on the sequencer. +// The rollup will verify the signature and public key against its configuration, +// then unbundle the body into rollup transactions and execute them first in the +// block. +message Allocation { + // The Ed25519 signature of the Auctioneer, to be verified against config by the + // rollup. + bytes signature = 1; + // The Ed25519 public key of the Auctioneer, to be verified against config by the + // rollup. + bytes public_key = 2; + // The bid that was allocated the winning slot by the Auctioneer. This is a + // google.protobuf.Any to avoid decoding and re-encoding after receiving an Allocation + // over the wire and checking if signature and public key match the signed bid. + // Implementors are expected to read and write an encoded Bid into this field. + google.protobuf.Any bid = 3; +} diff --git a/proto/executionapis/astria/auction/v1alpha1/auction_service.proto b/proto/executionapis/astria/auction/v1alpha1/auction_service.proto new file mode 100644 index 0000000000..357f8eedd6 --- /dev/null +++ b/proto/executionapis/astria/auction/v1alpha1/auction_service.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package astria.auction.v1alpha1; + +import "astria/auction/v1alpha1/get_bid_stream_request.proto"; +import "astria/auction/v1alpha1/get_bid_stream_response.proto"; + +service AuctionService { + // An auctioneer will initiate this long running stream to receive bids from the rollup node, + // until either a timeout or the connection is closed by the client. 
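+  //
+  // For example, the stream can be exercised manually with grpcurl (sketch;
+  // the endpoint, plaintext transport, and enabled server reflection are
+  // assumptions about a local setup):
+  //
+  //   grpcurl -plaintext -d '{}' localhost:50051 \
+  //     astria.auction.v1alpha1.AuctionService/GetBidStream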
+  rpc GetBidStream(GetBidStreamRequest) returns (stream GetBidStreamResponse);
+}
diff --git a/proto/executionapis/astria/auction/v1alpha1/bid.proto b/proto/executionapis/astria/auction/v1alpha1/bid.proto
new file mode 100644
index 0000000000..4ac19416e6
--- /dev/null
+++ b/proto/executionapis/astria/auction/v1alpha1/bid.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+
+package astria.auction.v1alpha1;
+
+// A bid is a bundle of transactions that was submitted to the auctioneer's rollup node.
+// The rollup node will verify that the bundle is valid and pays the fee, and will stream
+// it to the auctioneer for participation in the auction for a given block.
+// The sequencer block hash and the rollup parent block hash are used by the auctioneer
+// to identify the block for which the bundle is intended (i.e. which auction the bid is for).
+message Bid {
+  // The hash of the previous rollup block, on top of which the bundle will be executed as ToB.
+  bytes rollup_parent_block_hash = 1;
+  // The hash of the previous sequencer block, identifying the auction for which the bid is intended.
+  // This is the hash of the sequencer block on top of which the bundle will be executed as ToB.
+  bytes sequencer_parent_block_hash = 2;
+  // The fee paid by the bundle submitter. The auctioneer's rollup node calculates this based
+  // on the bundles submitted by users. For example, this can be the sum of the coinbase transfers
+  // in the bundle's transactions.
+  uint64 fee = 3;
+  // The list of serialized rollup transactions from the bundle.
+  repeated bytes transactions = 4;
+}
diff --git a/proto/executionapis/astria/auction/v1alpha1/get_bid_stream_request.proto b/proto/executionapis/astria/auction/v1alpha1/get_bid_stream_request.proto
new file mode 100644
index 0000000000..baaf27b0b3
--- /dev/null
+++ b/proto/executionapis/astria/auction/v1alpha1/get_bid_stream_request.proto
@@ -0,0 +1,5 @@
+syntax = "proto3";
+
+package astria.auction.v1alpha1;
+
+message GetBidStreamRequest {}
diff --git a/proto/executionapis/astria/auction/v1alpha1/get_bid_stream_response.proto b/proto/executionapis/astria/auction/v1alpha1/get_bid_stream_response.proto
new file mode 100644
index 0000000000..282133f996
--- /dev/null
+++ b/proto/executionapis/astria/auction/v1alpha1/get_bid_stream_response.proto
@@ -0,0 +1,9 @@
+syntax = "proto3";
+
+package astria.auction.v1alpha1;
+
+import "astria/auction/v1alpha1/bid.proto";
+
+message GetBidStreamResponse {
+  Bid bid = 1;
+}
diff --git a/proto/executionapis/astria/bundle/v1alpha1/bundle.proto b/proto/executionapis/astria/bundle/v1alpha1/bundle.proto
deleted file mode 100644
index f4d350803f..0000000000
--- a/proto/executionapis/astria/bundle/v1alpha1/bundle.proto
+++ /dev/null
@@ -1,36 +0,0 @@
-syntax = "proto3";
-
-package astria.bundle.v1alpha1;
-
-message GetBundleStreamRequest {}
-
-// Information for the bundle submitter to know how to submit the bundle.
-// The fee and base_sequencer_block_hash are not necessarily strictly necessary
-// it allows for the case where the server doesn't always send the highest fee
-// bundles after the previous but could just stream any confirmed bundles.
-message Bundle {
-  // The fee that can be expected to be received for submitting this bundle.
-  // This allows the bundle producer to stream any confirmed bundles they would be ok
-  // with submitting. Used to avoid race conditions in received bundle packets. Could
-  // also be used by a bundle submitter to allow multiple entities to submit bundles.
- uint64 fee = 1; - // The byte list of transactions to be included. - repeated bytes transactions = 2; - // The base_sequencer_block_hash is the hash from the base block this bundle - // is based on. This is used to verify that the bundle is based on the correct - // Sequencer block. - bytes base_sequencer_block_hash = 3; - // The hash of previous rollup block, on top of which the bundle will be executed as ToB. - bytes prev_rollup_block_hash = 4; -} - -message GetBundleStreamResponse { - Bundle bundle = 1; -} - -service BundleService { - // A bundle submitter requests bundles given a new optimistic Sequencer block, - // and receives a stream of potential bundles for submission, until either a timeout - // or the connection is closed by the client. - rpc GetBundleStream(GetBundleStreamRequest) returns (stream GetBundleStreamResponse); -} diff --git a/proto/executionapis/astria/bundle/v1alpha1/optimistic_execution.proto b/proto/executionapis/astria/bundle/v1alpha1/optimistic_execution.proto deleted file mode 100644 index 426b72798d..0000000000 --- a/proto/executionapis/astria/bundle/v1alpha1/optimistic_execution.proto +++ /dev/null @@ -1,38 +0,0 @@ -syntax = "proto3"; - -package astria.bundle.v1alpha1; - -import "astria/execution/v1/execution.proto"; -import "astria/sequencerblock/v1/block.proto"; -import "google/protobuf/timestamp.proto"; - -// The "BaseBlock" is the information needed to simulate bundles on top of -// a Sequencer block which may not have been committed yet. -message BaseBlock { - // This is the block hash for the proposed block. - bytes sequencer_block_hash = 1; - // List of transactions to include in the new block. - repeated astria.sequencerblock.v1.RollupData transactions = 2; - // Timestamp to be used for new block. - google.protobuf.Timestamp timestamp = 3; -} - -message ExecuteOptimisticBlockStreamRequest { - BaseBlock base_block = 1; -} - -message ExecuteOptimisticBlockStreamResponse { - // Metadata identifying the block resulting from executing a block. Includes number, hash, - // parent hash and timestamp. - astria.execution.v1.Block block = 1; - // The base_sequencer_block_hash is the hash from the base sequencer block this block - // is based on. This is used to associate an optimistic execution result with the hash - // received once a sequencer block is committed. - bytes base_sequencer_block_hash = 2; -} - -service OptimisticExecutionService { - // Stream blocks from the Auctioneer to Geth for optimistic execution. Geth will stream back - // metadata from the executed blocks. - rpc ExecuteOptimisticBlockStream(stream ExecuteOptimisticBlockStreamRequest) returns (stream ExecuteOptimisticBlockStreamResponse); -} diff --git a/proto/executionapis/astria/optimistic_execution/v1alpha1/base_block.proto b/proto/executionapis/astria/optimistic_execution/v1alpha1/base_block.proto new file mode 100644 index 0000000000..79565dc29a --- /dev/null +++ b/proto/executionapis/astria/optimistic_execution/v1alpha1/base_block.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package astria.optimistic_execution.v1alpha1; + +import "astria/sequencerblock/v1/block.proto"; +import "google/protobuf/timestamp.proto"; + +// The "BaseBlock" is the information needed to simulate bundles on top of +// a Sequencer block which may not have been committed yet. +message BaseBlock { + // This is the block hash for the proposed block. + bytes sequencer_block_hash = 1; + // List of transactions to include in the new block. 
+ repeated astria.sequencerblock.v1.RollupData transactions = 2; + // Timestamp to be used for new block. + google.protobuf.Timestamp timestamp = 3; +} diff --git a/proto/executionapis/astria/optimistic_execution/v1alpha1/execute_optimistic_block_stream_request.proto b/proto/executionapis/astria/optimistic_execution/v1alpha1/execute_optimistic_block_stream_request.proto new file mode 100644 index 0000000000..7f6ad35056 --- /dev/null +++ b/proto/executionapis/astria/optimistic_execution/v1alpha1/execute_optimistic_block_stream_request.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package astria.optimistic_execution.v1alpha1; + +import "astria/optimistic_execution/v1alpha1/base_block.proto"; + +message ExecuteOptimisticBlockStreamRequest { + BaseBlock base_block = 1; +} diff --git a/proto/executionapis/astria/optimistic_execution/v1alpha1/execute_optimistic_block_stream_response.proto b/proto/executionapis/astria/optimistic_execution/v1alpha1/execute_optimistic_block_stream_response.proto new file mode 100644 index 0000000000..e3291478ff --- /dev/null +++ b/proto/executionapis/astria/optimistic_execution/v1alpha1/execute_optimistic_block_stream_response.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package astria.optimistic_execution.v1alpha1; + +import "astria/execution/v1/execution.proto"; + +message ExecuteOptimisticBlockStreamResponse { + // Metadata identifying the block resulting from executing a block. Includes number, hash, + // parent hash and timestamp. + astria.execution.v1.Block block = 1; + // The base_sequencer_block_hash is the hash from the base sequencer block this block + // is based on. This is used to associate an optimistic execution result with the hash + // received once a sequencer block is committed. + bytes base_sequencer_block_hash = 2; +} diff --git a/proto/executionapis/astria/optimistic_execution/v1alpha1/optimistic_execution_service.proto b/proto/executionapis/astria/optimistic_execution/v1alpha1/optimistic_execution_service.proto new file mode 100644 index 0000000000..bda829fab8 --- /dev/null +++ b/proto/executionapis/astria/optimistic_execution/v1alpha1/optimistic_execution_service.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package astria.optimistic_execution.v1alpha1; + +import "astria/optimistic_execution/v1alpha1/execute_optimistic_block_stream_request.proto"; +import "astria/optimistic_execution/v1alpha1/execute_optimistic_block_stream_response.proto"; + +service OptimisticExecutionService { + // Stream blocks from the Auctioneer to Geth for optimistic execution. Geth will stream back + // metadata from the executed blocks. + rpc ExecuteOptimisticBlockStream(stream ExecuteOptimisticBlockStreamRequest) returns (stream ExecuteOptimisticBlockStreamResponse); +}
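+
+// A sketch of how an auctioneer-side client might drive this bidirectional
+// stream with tonic-generated Rust bindings. The endpoint, the generated
+// type names, and `base_block` are assumptions; error handling is elided:
+//
+//   let mut client =
+//       OptimisticExecutionServiceClient::connect("http://127.0.0.1:50051").await?;
+//   let (tx, rx) = tokio::sync::mpsc::channel(16);
+//   let mut responses = client
+//       .execute_optimistic_block_stream(ReceiverStream::new(rx))
+//       .await?
+//       .into_inner();
+//   tx.send(ExecuteOptimisticBlockStreamRequest { base_block: Some(base_block) }).await?;
+//   let executed = responses.message().await?; // metadata for the executed block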