diff --git a/.github/workflows/build-docker-image-and-binaries.yaml b/.github/workflows/build-docker-image-and-binaries.yaml index ebd75196..ad2eb7fd 100644 --- a/.github/workflows/build-docker-image-and-binaries.yaml +++ b/.github/workflows/build-docker-image-and-binaries.yaml @@ -40,37 +40,39 @@ jobs: aws s3 ls s3://axelar-releases/tofnd/"$SEMVER" && echo "tag already exists, use a new one" && exit 1 - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: fetch-depth: '0' ref: ${{ github.event.inputs.tag }} submodules: recursive - - name: Install Rust - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + - name: Install stable toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: 1.78.0 + override: true + components: rustfmt, clippy - - name: build darwin binaries + - name: Build Mac OS binaries env: SEMVER: ${{ github.event.inputs.tag }} if: matrix.os == 'macos-latest' run: | OS="darwin" ARCH="${{ matrix.arch }}" + mkdir tofndbin if [ "$ARCH" == "arm64" ] then - ./install-gmp-arm64.sh rustup target add aarch64-apple-darwin - cargo build --release --target aarch64-apple-darwin - mkdir tofndbin + cargo build --release --locked --target aarch64-apple-darwin mv /Users/runner/work/tofnd/tofnd/target/aarch64-apple-darwin/release/tofnd "./tofndbin/tofnd-$OS-$ARCH-$SEMVER" else cargo install --locked --path . 
- mkdir tofndbin mv "/Users/runner/work/tofnd/tofnd/target/release/tofnd" "./tofndbin/tofnd-$OS-$ARCH-$SEMVER" fi - - name: build linux binaries + - name: Build Linux binaries env: SEMVER: ${{ github.event.inputs.tag }} if: matrix.os == 'ubuntu-latest' @@ -147,7 +149,7 @@ jobs: steps: - name: Checkout code for docker image build - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: fetch-depth: '0' ref: ${{ github.event.inputs.tag }} diff --git a/.github/workflows/build-latest-docker-image.yaml b/.github/workflows/build-latest-docker-image.yaml index dd9b8d55..3d8715b3 100644 --- a/.github/workflows/build-latest-docker-image.yaml +++ b/.github/workflows/build-latest-docker-image.yaml @@ -15,7 +15,7 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Checkout code and submodule - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/format.yaml b/.github/workflows/format.yaml index 94b148e6..b0910c5d 100644 --- a/.github/workflows/format.yaml +++ b/.github/workflows/format.yaml @@ -13,7 +13,7 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: recursive @@ -21,9 +21,9 @@ jobs: uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: stable + toolchain: 1.78.0 override: true - components: rustfmt, clippy + components: rustfmt - name: Run cargo fmt uses: actions-rs/cargo@v1 diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 35765d74..abe81278 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -13,7 +13,7 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Checkout code and submodule - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: recursive @@ -21,16 +21,10 @@ jobs: uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: stable + toolchain: 1.78.0 override: true components: rustfmt, clippy - - name: Run cargo clippy - uses: 
actions-rs/cargo@v1 - with: - command: clippy - args: --all-targets -- -D warnings - - name: Run cargo clippy with all features uses: actions-rs/cargo@v1 with: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 0dd02c8c..c1b19ad7 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3.0.1 + uses: actions/checkout@v4 with: fetch-depth: '0' submodules: recursive diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 20ef0323..d9c6c7b7 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -13,7 +13,7 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Checkout code and submodule - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: recursive @@ -21,8 +21,9 @@ jobs: uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: stable + toolchain: 1.78.0 override: true + components: rustfmt - name: Run cargo test run: cargo test --release --all-features diff --git a/Cargo.lock b/Cargo.lock index 8872bc2a..c4a594a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -224,6 +224,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.0" @@ -257,7 +263,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding", "generic-array", ] @@ -270,12 +275,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - 
[[package]] name = "blocking" version = "0.4.7" @@ -477,9 +476,9 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.2.5" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8658c15c5d921ddf980f7fe25b1e82f4b7a4083b2c4985fea4922edb8e43e07d" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -507,16 +506,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "curve25519-dalek" version = "4.0.0" @@ -565,12 +554,6 @@ dependencies = [ "libc", ] -[[package]] -name = "der" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28e98c534e9c8a0483aa01d6f6913bc063de254311bd267c9cf535e9b70e15b2" - [[package]] name = "der" version = "0.7.8" @@ -609,6 +592,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid", "crypto-common", "subtle", ] @@ -635,14 +619,15 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.12.4" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43ee23aa5b4f68c7a092b5c3beb25f50c406adc75e2363634f242f28ab255372" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.4.4", + "der", + "digest 0.10.7", "elliptic-curve", - "hmac 0.11.0", - "signature 1.3.2", + "rfc6979", + "signature", ] [[package]] @@ -652,21 +637,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5fb04eee5d9d907f29e80ee6b0e78f7e2c82342c63e3580d8c4f69d9d5aad963" dependencies = [ 
"pkcs8", - "signature 2.0.0", + "signature", ] [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", "sha2 0.10.7", - "signature 2.0.0", + "signature", + "subtle", "zeroize", ] @@ -678,15 +664,18 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "elliptic-curve" -version = "0.10.4" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83e5c176479da93a0983f0a6fdc3c1b8e7d5be0d7fe3fe05a99f15b96582b9a8" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ + "base16ct", "crypto-bigint", + "digest 0.10.7", "ff", "generic-array", "group", "rand_core 0.6.4", + "sec1", "subtle", "zeroize", ] @@ -708,9 +697,9 @@ dependencies = [ [[package]] name = "ff" -version = "0.10.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f40b2dcd8bc322217a5f6559ae5f9e9d1de202a2ecee2e9eafcbece7562a4f" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ "rand_core 0.6.4", "subtle", @@ -867,12 +856,13 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -913,9 +903,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "group" -version = "0.10.0" +version = "0.13.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c363a5301b8f153d80747126a04b3c82073b9fe3130571a9d170cacdeaf7912" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", "rand_core 0.6.4", @@ -1084,29 +1074,13 @@ dependencies = [ "libc", ] -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - [[package]] name = "hmac" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", -] - -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", + "crypto-mac", "digest 0.9.0", ] @@ -1254,21 +1228,16 @@ dependencies = [ [[package]] name = "k256" -version = "0.9.5" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b0281ca8032567c9711cd48631781c15228301860a39b32deb28d63125e46" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", + "sha2 0.10.7", ] -[[package]] -name = "keccak" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" - [[package]] name = "lazy_static" version = "1.4.0" @@ -1281,19 +1250,6 @@ version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "869d572136620d55835903746bcb5cdc54cb2851fd0aeec53220b4bb65ef3013" -[[package]] -name = "libpaillier" -version = "0.2.1" -source = 
"git+https://github.com/axelarnetwork/paillier-rs#2d965b16d89de6f5d15b054fd5874d0c017c4747" -dependencies = [ - "digest 0.9.0", - "rand 0.8.4", - "serde", - "serde_bare", - "unknown_order", - "zeroize", -] - [[package]] name = "lock_api" version = "0.4.5" @@ -1436,7 +1392,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ "autocfg", - "num-traits 0.2.14", + "num-traits", ] [[package]] @@ -1447,16 +1403,7 @@ checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.14", -] - -[[package]] -name = "num-traits" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" -dependencies = [ - "num-traits 0.2.14", + "num-traits", ] [[package]] @@ -1505,7 +1452,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18869315e81473c951eb56ad5558bbc56978562d3ecfb87abb7a1e944cea4518" dependencies = [ - "num-traits 0.2.14", + "num-traits", ] [[package]] @@ -1571,7 +1518,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" dependencies = [ - "crypto-mac 0.8.0", + "crypto-mac", ] [[package]] @@ -1644,7 +1591,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.8", + "der", "spki", ] @@ -1908,23 +1855,23 @@ dependencies = [ ] [[package]] -name = "rpassword" -version = "5.0.1" +name = "rfc6979" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" +checksum = 
"f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" dependencies = [ - "libc", - "winapi", + "hmac 0.12.1", + "subtle", ] [[package]] -name = "rust-gmp" -version = "0.5.0" +name = "rpassword" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ddf28998d5730b96a9fe188557953de503d77ff403ae175ad1417921e5d906" +checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" dependencies = [ "libc", - "num-traits 0.1.43", + "winapi", ] [[package]] @@ -1987,6 +1934,19 @@ dependencies = [ "sha2 0.10.7", ] +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "subtle", + "zeroize", +] + [[package]] name = "semver" version = "1.0.18" @@ -2008,15 +1968,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde_bare" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adbd09b1e0b45fadbc163e4ea1f4224b451146ba4f01963c69975780c33215fa" -dependencies = [ - "serde", -] - [[package]] name = "serde_derive" version = "1.0.130" @@ -2050,7 +2001,6 @@ dependencies = [ "cpufeatures", "digest 0.9.0", "opaque-debug", - "sha2-asm", ] [[package]] @@ -2062,6 +2012,7 @@ dependencies = [ "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", + "sha2-asm", ] [[package]] @@ -2073,18 +2024,6 @@ dependencies = [ "cc", ] -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug", -] - [[package]] name = "sharded-slab" version = "0.1.4" @@ -2113,16 +2052,6 @@ dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.3.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2807892cfa58e081aa1f1111391c7a0649d4fa127a4ffbe34bcbfb35a1171a4" -dependencies = [ - "digest 0.9.0", - "rand_core 0.6.4", -] - [[package]] name = "signature" version = "2.0.0" @@ -2130,6 +2059,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fe458c98333f9c8152221191a77e2a44e8325d0193484af2e9421a53019e57d" dependencies = [ "digest 0.10.7", + "rand_core 0.6.4", ] [[package]] @@ -2227,7 +2157,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", - "der 0.7.8", + "der", ] [[package]] @@ -2357,29 +2287,29 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tofn" -version = "0.1.0" -source = "git+https://github.com/axelarnetwork/tofn?branch=main#18dc9c3449b6af4fd50c29971d3cd095cd126e41" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f56caeee0bb85743d8b05e84d71877cd2eb40908a2d17456f7aceededade1ab" dependencies = [ "bincode", - "der 0.7.8", + "crypto-bigint", + "der", "ecdsa", "ed25519", "ed25519-dalek", - "hmac 0.11.0", + "hmac 0.12.1", "k256", - "libpaillier", "rand 0.8.4", "rand_chacha 0.3.1", "serde", - "sha2 0.9.8", - "sha3", + "sha2 0.10.7", "tracing", "zeroize", ] [[package]] name = "tofnd" -version = "0.10.2" +version = "0.11.0" dependencies = [ "anyhow", "atty", @@ -2731,19 +2661,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "unknown_order" -version = "0.2.3" -source = "git+https://github.com/axelarnetwork/unknown_order#0f841f810f8bfe440d7768501da186b2426b7afa" -dependencies = [ - "digest 0.9.0", - "hex", - "rand 0.8.4", - "rust-gmp", - "serde", - "zeroize", -] - [[package]] name = "uom" version = "0.30.0" @@ -2751,7 +2668,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e76503e636584f1e10b9b3b9498538279561adcef5412927ba00c2b32c4ce5ed" dependencies = [ "num-rational", - "num-traits 0.2.14", + "num-traits", "typenum", ] @@ -2949,9 +2866,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "zeroize" -version = "1.6.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 89a5d566..af56c97c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "tofnd" -version = "0.10.2" +version = "0.11.0" authors = ["Gus Gutoski ", "Stelios Daveas "] edition = "2018" license = "MIT OR Apache-2.0" [dependencies] tonic = "0.6" -tofn = { git = "https://github.com/axelarnetwork/tofn", branch = "main"} +tofn = { version = "1.0" } # tofn = { path = "../tofn" } sled = {version = "0.34", default-features = false} @@ -63,5 +63,3 @@ panic = "unwind" panic = "unwind" [features] -# when we compile tofnd with malicious build, also use malicious build for tofn -malicious = ["tofn/malicious"] diff --git a/Dockerfile b/Dockerfile index 6b8871ba..997584b9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,7 +43,6 @@ COPY ./entrypoint.sh / VOLUME [ "/.tofnd" ] -ENV UNSAFE "" ENV MNEMONIC_CMD "" ENV NOPASSWORD "" ENV TOFND_HOME "" diff --git a/Makefile b/Makefile index 61ac92dd..454c98df 100644 --- a/Makefile +++ b/Makefile @@ -2,11 +2,6 @@ docker-image: git-submodule-setup @DOCKER_BUILDKIT=1 docker build --ssh default -t axelar/tofnd . -.PHONY: docker-image-malicious -docker-image-malicious: git-submodule-setup - @DOCKER_BUILDKIT=1 docker build --ssh default --build-arg features="malicious" -t axelar/tofnd-malicious . 
- - .PHONY: copy-binary copy-binary-from-image: guard-SEMVER ./scripts/copy-binaries-from-image.sh @@ -20,7 +15,6 @@ upload-binaries-to-s3: guard-S3_PATH .PHONY: docker-image-all docker-image-all: git-submodule-setup make docker-image - make docker-image-malicious .PHONY: git-submodule-setup git-submodule-setup: @@ -28,4 +22,4 @@ git-submodule-setup: git submodule update guard-%: - @ if [ -z '${${*}}' ]; then echo 'Environment variable $* not set' && exit 1; fi \ No newline at end of file + @ if [ -z '${${*}}' ]; then echo 'Environment variable $* not set' && exit 1; fi diff --git a/README.md b/README.md index 85879801..acce6f5a 100644 --- a/README.md +++ b/README.md @@ -1,54 +1,39 @@ # Tofnd: A gRPC threshold signature scheme daemon -Tofnd is a [gRPC](https://grpc.io/) server written in Rust that wraps the [tofn](https://github.com/axelarnetwork/tofn) threshold cryptography library. +Tofnd is a [gRPC](https://grpc.io/) server written in Rust that wraps the [tofn](https://github.com/axelarnetwork/tofn) cryptography library. -# Setup +## Setup The gRPC protobuf file is a separate [submodule](https://github.com/axelarnetwork/grpc-protobuf/). 
To fetch it, please be sure that the `--recursive` flag is enabled: -``` + +```bash git clone git@github.com:axelarnetwork/tofnd.git --recursive ``` -`tofnd` uses the [hyperium/tonic](https://github.com/hyperium/tonic) Rust gRPC implementation, which requires: -* Rust `1.56` or greater - ``` - $ rustup update - ``` -* `rustfmt` to tidy up the code it generates - ``` - $ rustup component add rustfmt - ``` +## Build binaries -`tofnd` depends on `tofn`, which needs the GNU Multiple Precision Arithmetic Library -* MacOS: `brew install gmp` -* Ubuntu: `sudo apt install libgmp-dev` +Pre-built releases can be found [here](https://github.com/axelarnetwork/tofnd/releases) -# Build binaries +To build yourself, run: -The pipeline will build binaries for the following OS/architecures : - -* Linux AMD64 -* MacOS AMD64 -* MacOS ARM64 - -See https://github.com/axelarnetwork/tofnd/releases - -For any other OS/Architecture, binaries should be built locally. +```bash +cargo build --release --locked +``` -# Running the server +## Running the server -``` +```bash # install tofnd at ./target/release/tofnd -$ cargo install --path . && cd ./target/release +cargo install --locked --path . && cd ./target/release # init tofnd -$ ./tofnd -m create +./tofnd -m create # IMPORTANT: store the content of ./.tofnd/export file at a safe, offline place, and then delete the file -$ rm ./.tofnd/export +rm ./.tofnd/export # start tofnd daemon -$ ./tofnd +./tofnd ``` Terminate the server with `ctrl+C`. @@ -59,21 +44,21 @@ By default, `tofnd` prompts for a password from stdin immediately upon launch. Users may automate password entry as they see fit. Some examples follow. These examples are not necessarily secure as written---it's the responsibility of the user to secure password entry. 
-``` +```bash # feed password from MacOS keyring -$ security find-generic-password -a $(whoami) -s "tofnd" -w | ./tofnd +security find-generic-password -a $(whoami) -s "tofnd" -w | ./tofnd # feed password from 1password-cli -$ op get item tofnd --fields password | ./tofnd +op get item tofnd --fields password | ./tofnd # feed password from Pass -$ pass show tofnd | ./tofnd +pass show tofnd | ./tofnd # feed password from environment variable `PASSWORD` -$ echo $PASSWORD | ./tofnd +echo $PASSWORD | ./tofnd # feed password from a file `password.txt` -$ cat ./password.txt | ./tofnd +cat ./password.txt | ./tofnd ``` Sophisticated users may explicitly opt out of password entry via the `--no-password` terminal argument (see below). In this case, on-disk storage is not secure---it is the responsibility of the user to take additional steps to secure on-disk storage. @@ -83,15 +68,15 @@ Sophisticated users may explicitly opt out of password entry via the `--no-passw We use [clap](https://clap.rs/) to manage command line arguments. Users can specify: -1. Tofnd's root folder. Use `--directory` or `-d` to specify a full or a relative path. If no argument is provided, then the environment variable `TOFND_HOME` is used. If no environment variable is set either, the default `./tofnd` directory is used. + +1. Tofnd's root folder. Use `--directory` or `-d` to specify a full or a relative path. If no argument is provided, then the environment variable `TOFND_HOME` is used. If no environment variable is set either, the default `./tofnd` directory is used. 2. The port number of the gRPC server (default is 50051). -3. The option to run in _unsafe_ mode. By default, this option is off, and safe primes are used for keygen. Use the `--unsafe` flag only for testing. -4. `mnemonic` operations for their `tofnd` instance (default is `Existing`). +3. `mnemonic` operations for their `tofnd` instance (default is `Existing`). For more information, see on mnemonic options, see [Mnemonic](#mnemonic). 
-4. The option to run in _unsafe_ mode. By default, this option is off, and safe primes are used for keygen. **Attention: Use the `--unsafe` flag only for testing**. -5. By default, `tofnd` expects a password from the standard input. Users that don't want to use passwords can use the `--no-password` flag. **Attention: Use `--no-password` only for testing .** -``` -A threshold signature scheme daemon +4. By default, `tofnd` expects a password from the standard input. Users that don't want to use passwords can use the `--no-password` flag. **Attention: Use `--no-password` only for testing .** + +```text +A cryptographic signing service USAGE: tofnd [FLAGS] [OPTIONS] @@ -99,8 +84,6 @@ USAGE: FLAGS: --no-password Skip providing a password. Disabled by default. **Important note** If --no-password is set, the a default (and public) password is used to encrypt. - --unsafe Use unsafe primes. Deactivated by default. **Important note** This option should only be used - for testing. -h, --help Prints help information -V, --version Prints version information @@ -111,44 +94,45 @@ OPTIONS: -p, --port [default: 50051]] ``` -# Docker +## Docker -## Setup +### Setup To setup a `tofnd` container, use the `create` mnemonic command: -``` +```bash docker-compose run -e MNEMONIC_CMD=create tofnd ``` This will initialize `tofnd`, and then exit. -## Execution +### Execution To run a `tofnd` daemon inside a container, run: -``` +```bash docker-compose up ``` -## Storage +### Storage We use [data containers](https://docs.docker.com/engine/reference/commandline/volume_create/) to persist data across restarts. To clean up storage, remove all `tofnd` containers, and run -``` +```bash docker volume rm tofnd_tofnd ``` -## Testing +### Testing -For testing purposes, `docker-compose.test.yml` is available, which is equivelent to `./tofnd --no-password --unsafe`. 
To spin up a test `tofnd` container, run +For testing purposes, `docker-compose.test.yml` is available, which is equivalent to `./tofnd --no-password`. To spin up a test -``` +```bash docker-compose -f docker-compose.test.yml up ``` -## The `auto` command +### The `auto` command In containerized environments the `auto` mnemonic command can be used. This command is implemented in `entrypoint.sh` and does the following: + 1. Try to use existing mnemonic. If successful then launch `tofnd` server. 2. Try to import a mnemonic from file. If successful then launch `tofnd` server. 3. Create a new mnemonic. The newly created mnemonic is automatically written to the file `TOFND_HOME/export`---rename this file to `TOFND_HOME/import` so as to unblock future executions of tofnd. Then launch `tofnd` server. @@ -158,13 +142,11 @@ The rationale behind `auto` is that users can frictionlessly launch and restart **Attention:** `auto` leaves the mnemonic on plain text on disk. You should remove the `TOFND_HOME/import` file and store the mnemonic at a safe, offline place. -# Mnemonic - -`Tofnd` uses the [tiny-bip39](https://docs.rs/crate/tiny-bip39/0.8.0) crate to enable users manage mnemonic passphrases. Currently, each party can use only one passphrase. +## Mnemonic -Mnemonic is used to enable _recovery_ of shares in case of unexpected loss. See more about recovery under the [Recover](#Recover) section. +`Tofnd` uses the [tiny-bip39](https://docs.rs/crate/tiny-bip39) crate to enable users to manage mnemonic passphrases. Currently, each party can use only one passphrase. -## Mnemonic options +### Mnemonic options The command line API supports the following commands: @@ -178,226 +160,35 @@ The command line API supports the following commands: ## Zeroization -We use the [zeroize](https://docs.rs/zeroize/1.1.1/zeroize/) crate to clear sensitive info for memory as a good procatie.
The data we clean are related to the mnemonic: +We use the [zeroize](https://docs.rs/zeroize/1.1.1/zeroize/) crate to clear sensitive info for memory as a good practice. The data we clean are related to the mnemonic: + 1. entropy 2. passwords 3. passphrases -Note that, [tiny-bip39](https://docs.rs/crate/tiny-bip39/0.8.0) also uses `zeroize` internally. +Note that, [tiny-bip39](https://docs.rs/crate/tiny-bip39) also uses `zeroize` internally. -# KV Store +## KV Store To persist information between different gRPCs (i.e. _keygen_ and _sign_), we use a key-value storage based on [sled](https://sled.rs/). -`Tofnd` uses two separate KV Stores: -1. `Share KV Store`. Stores all user's shares when `keygen` protocol is completed, and uses them for `sign` protocol. Default path is _./kvstore/shares_. -2. `Mnemonic KV Store`. Stores the entropy of a mnemonic passphrase. This entropy is used to encrypt and decrypt users' sensitive info, i.e. the content of the `Share KV Store`. Default path is _./kvstore/mnemonic_. +`Tofnd` uses an encrypted mnemonic KV Store which stores the entropy of a mnemonic passphrase. This entropy is used to derive user's keys. The KV Store is encrypted with a password provided by the user. The password is used to derive a key that encrypts the KV Store. ## Security **Important note**: Currently, the `mnemonic KV Store` is **not** encrypted. The mnemonic entropy is stored in clear text on disk. Our current security model assumes secure device access. -# Multiple shares - -Multiple shares are handled internally. That is, if a party has 3 shares, the `tofnd` binary spawns 3 protocol execution threads, and each thread invokes `tofn` functions independently. - -When a message is received from the gRPC client, it is broadcasted to all shares. This is done in the [broadcast](https://github.com/axelarnetwork/tofnd/tree/main/src/gg20/broadcast.rs) module. 
- -At the end of the protocol, the outputs of all N party's shares are aggregated and a single result is created and sent to the client. There are separate modules [keygen result](https://github.com/axelarnetwork/tofnd/tree/main/src/gg20/keygen/result.rs) and [sign result](https://github.com/axelarnetwork/tofnd/tree/main/src/gg20/sign/result.rs) that handles the aggregation results for each protocol. - -For `tofn` support on multiple shares, see [here](https://github.com/axelarnetwork/tofn#support-for-multiple-shares-per-party). - -# gRPCs -Tofnd currently supports the following gRPCs: -1. `keygen` -2. `sign` -3. `recover` - -`Keygen` and `sign` use [bidirectional streaming](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc) and `recover` is [unary](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc). - -## Diagrams - -See a generic protocol sequence diagram, [here](https://github.com/axelarnetwork/tofnd/blob/main/diagrams/protocol.pdf). - -See [keygen](https://github.com/axelarnetwork/tofnd/blob/main/diagrams/keygen.svg) and [sign](https://github.com/axelarnetwork/tofnd/blob/main/diagrams/sign.svg) diagrams of detailed message flow of each protocol. By opening the `.svg` files at a new tab (instead of previewing from github), hyperlinks will be available that will point you to the code block in which the underlying operations are implemented. - -## Keygen -The _keygen_ gRPC executes the keygen protocol as implemented in [tofn](https://github.com/axelarnetwork/tofn) and described in [GG20](https://eprint.iacr.org/2020/540.pdf). 
- -The initialization of _keygen_ is actualized by the following message: - -``` -message KeygenInit { - string new_key_uid; // keygen's identifier - repeated string party_uids; - repeated uint32 party_share_counts; - int32 my_party_index; - int32 threshold; -} -``` - -### Successful keygen -On success, the _keygen_ protocol returns a `SecretKeyShare` struct defined by `tofn` -``` -pub struct SecretKeyShare { - group: GroupPublicInfo, - share: ShareSecretInfo, -} -``` - -This struct includes: -1. The information that is needed by the party in order to participate in subsequent _sign_ protocols that are associated with the completed _keygen_. -2. The `public key` of the current _keygen_. - -Since multiple shares per party are supported, _keygen_'s result may produce multiple `SecretKeyShare`s. The collection of `SecretKeyShare`s is stored in the `Share KV Store` as the _value_ with the `key_uid` as _key_. - -Each `SecretKeyShare` is then encrypted using the party's `mnemonic`, and the encrypted data is sent to the client as bytes, along with the `public key`. We send the encrypted `SecretKeyShare`s to facilitate _recovery_ in case of data loss. - -The gRPC message of _keygen_'s data is the following: -``` -message KeygenOutput { - bytes pub_key = 1; // pub_key - repeated bytes share_recovery_infos = 2; // recovery info -} -``` - -### Unsuccessful keygen - -The `tofn` library supports fault detection. That is, if a party does not follow the protocol (e.g. by corrupting zero knowledge proofs, stalling messages etc), a fault detection mechanism is triggered, and the protocol ends prematurely with all honest parties composing a faulter list. - -In this case, instead of the aforementioned result, _keygen_ returns a `Vec`, which is sent over the gRPC stream before closing the connection. 
- -### File structure -_Keygen_ is implemented in [tofnd/src/gg20/keygen](https://github.com/axelarnetwork/tofnd/tree/main/src/gg20/keygen), which has the following file structure: - -``` -├── keygen - ├── mod.rs - ├── init.rs - ├── execute.rs - ├── result.rs - └── types.rs -``` - -* In `mod.rs`, the handlers of protocol initialization, execution and aggregation of results are called. Also, in case of multiple shares, multiple execution threads are spawned. -* In `init.rs`, the verification and sanitization of the `Keygen Init` message is handled. -* In `execute.rs`, the instantiation and execution of the protocol is actualized. -* In `result.rs`, the results of all party shares are aggregated, validated and sent to the gRPC client. -* In `types.rs`, useful structs that are needed in the rest of the modules are defined. - -## Sign -The _sign_ gRPC executes the sign protocol as implemented in [tofn](https://github.com/axelarnetwork/tofn) and described in [GG20](https://eprint.iacr.org/2020/540.pdf). - -The initialization of _sign_ is actualized by the following message: - -``` -message SignInit { - string key_uid; // keygen's identifier - repeated string party_uids; - bytes message_to_sign; -} -``` - -### Successful sign - -On success, the _keygen_ protocol returns a `signature` which is a `Vec`. - -Since multiple shares per party are supported, _sign_'s result may produce multiple `signatures`s which are the same across all shares. Only one copy of the `signature` is sent to the gRPC client. - -### Unsuccessful sign +## Threshold cryptography -Similarly to _keygen_, if faulty parties are detected during the execution of _sign_, the protocol is stopped and a `Vec` is returned to the client. - -### Trigger recovery - -_Sign_ is started with the special gRPC message `SignInit`. -``` -message SignInit { - string key_uid = 1; - repeated string party_uids = 2; - bytes message_to_sign = 3; -} -``` - -`key_uid` indicates the session identifier of an executed _keygen_. 
In order to be able to participate to _sign_, parties need to have their `share` info stored at the `Share KV Store` as _value_, under the _key_ `key_uid`. If this data is not present at the machine of a party (i.e. no `key_uid` exists in `Share KV Store`), a `need_recover` gRPC message is sent to the client and the connection is then closed. In the `need_recover` message, the missing `key_uid` is included. - -``` -message NeedRecover { - string session_id = 1; -} -``` - -The client then proceeds by triggering _recover_ gRPC, and then starts the _sign_ again for the recovered party. Other participants are not affected. - -### File structure -The keygen protocol is implemented in [tofnd/src/gg20/sign](https://github.com/axelarnetwork/tofnd/tree/main/src/gg20/sign), which, similar to _keygen_, has the following file structure: - -``` -├── sign - ├── mod.rs - ├── init.rs - ├── execute.rs - ├── result.rs - └── types.rs -``` - -* In `mod.rs`, the handlers of protocol initialization, execution and aggregation of results are called. Also, in case of multiple shares, multiple execution threads are spawned. -* In `init.rs`, the verification and sanitization of `Sign Init` message is handled. If the absence of shares is discovered, the client sends a `need_recover` and stops. -* In `execute.rs`, the instantiation and execution of the protocol is actualized. -* In `result.rs`, the results of all party shares are aggregated, validated and sent to the gRPC client. -* In `types.rs`, useful structs that are needed in the rest of the modules are defined. - -## Recover - -As discussed in [keygen](#keygen) and [sign](#sign) section, the recovery of lost keys and shares is supported. In case of sudden data loss, for example due to a hard disk crash, parties are able to recover their shares. This is possible because each party sends it's encrypted secret info to the client before storing it inside the `Share KV Store`. 
- -When _keygen_ is completed, the party's information is encryped and sent to the client. When the absence of party's information is detected during _sign_, `Tofnd` sends the `need_recover` message, indicating that recovery must be triggered. - -Recovery is a [unary](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc) gRPC. The client re-sends the `KeygenInit` message and the encrypted recovery info. This allows `Tofnd` to reconstruct the `Share KV Store` by decrypting the recovery info using the party's `mnemonic`. - -``` -message RecoverRequest { - KeygenInit keygen_init = 1; - repeated bytes share_recovery_infos = 2; -} -``` - -If _recovery_ was successful, a `success` message is sent, other wise `Tofnd` sends a `fail` message. - -``` -message RecoverResponse { - enum Response { - success = 0; - fail = 1; - } - Response response = 1; -} -``` - -# Testing - -## Honest behaviours - -Both unit tests and integration tests are provided: -``` -$ cargo test -``` - -## Malicious behaviours - -`Tofn` supports faulty behaviours to test fault detection. These behaviours are only supported under the `malicious` feature. See more for Rust features [here](https://doc.rust-lang.org/cargo/reference/features.html). - -`Tofnd` incorporates the `malicious` feature. You can run malicious tests by: -``` -$ cargo test --all-features -``` +For an implementation of the [GG20](https://eprint.iacr.org/2020/540.pdf) threshold-ECDSA protocol, +see this version of [tofnd](https://github.com/axelarnetwork/tofnd/tree/v0.10.1). The GG20 protocol implementation should not be considered ready for production since it doesn't protect against recently discovered attacks on the protocol implementation. This was removed from `tofnd` as it is not being used in the Axelar protocol. 
-# License +## License All crates licensed under either of - * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - * [MIT license](http://opensource.org/licenses/MIT) +* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) +* [MIT license](http://opensource.org/licenses/MIT) at your option. diff --git a/build.rs b/build.rs index ef673d86..7986f24e 100644 --- a/build.rs +++ b/build.rs @@ -6,6 +6,6 @@ fn main() -> Result<(), Box> { tonic_build::configure() // .build_client(false) // .out_dir(".") // if you want to peek at the generated code - .compile(&["proto/grpc.proto", "proto/multisig.proto"], &["proto"])?; + .compile(&["proto/multisig.proto"], &["proto"])?; Ok(()) } diff --git a/docker-compose.test.yml b/docker-compose.test.yml index 6db3f9af..25464c95 100644 --- a/docker-compose.test.yml +++ b/docker-compose.test.yml @@ -15,8 +15,6 @@ services: volumes: - tofnd:/.tofnd environment: - # Attention! Use UNSAFE=true only for testing - - UNSAFE=true - NOPASSWORD=true - MNEMONIC_CMD=auto - TOFND_HOME=.tofnd diff --git a/entrypoint.sh b/entrypoint.sh index 7fe54379..c29f7d83 100755 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -35,10 +35,10 @@ import_mnemonic() { if [ -n "${NOPASSWORD}" ]; then \ echo "No password" - (cat $IMPORT_PATH | tofnd ${ARGS} -m import) || return $ERR + ( cat $IMPORT_PATH | tofnd ${ARGS} -m import ) || return $ERR else echo "With password" - ((echo $PASSWORD && cat $IMPORT_PATH) | tofnd ${ARGS} -m import) || return $ERR + ( (echo $PASSWORD && cat $IMPORT_PATH) | tofnd ${ARGS} -m import ) || return $ERR fi echo "... 
ok" @@ -66,10 +66,8 @@ echo "Using tofnd root:" $TOFND_HOME # gather user's args -# add '--no-password' and '--unsafe' flags to args if enabled +# add '--no-password' flag to args if enabled ARGS=${NOPASSWORD:+"--no-password"} -# add '--unsafe' flag to args if enabled -ARGS+=${UNSAFE:+" --unsafe"} # add '--address' flag to args if enabled ARGS+=${ADDRESS:+" --address ${ADDRESS}"} # add '--port' flag to args if enabled diff --git a/install-gmp-arm64.sh b/install-gmp-arm64.sh deleted file mode 100755 index 2b3a497c..00000000 --- a/install-gmp-arm64.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/bash - -export HOMEBREW_NO_INSTALL_CLEANUP=TRUE -brew uninstall --ignore-dependencies gmp -ARM_DEPENDENCY=$(brew fetch --force --bottle-tag=arm64_big_sur gmp | grep Downloaded | awk '{print $3}') -brew install "$ARM_DEPENDENCY" \ No newline at end of file diff --git a/proto b/proto index 5fa83fe0..549f6f8a 160000 --- a/proto +++ b/proto @@ -1 +1 @@ -Subproject commit 5fa83fe0dbe1463f756348fa48116c7b69564a2f +Subproject commit 549f6f8a783b586291df82f1c7b14eacaf66014a diff --git a/src/config/malicious.rs b/src/config/malicious.rs deleted file mode 100644 index 0e40f3c0..00000000 --- a/src/config/malicious.rs +++ /dev/null @@ -1,96 +0,0 @@ -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -use tofn::{ - collections::TypedUsize, - gg20::{ - keygen::malicious::Behaviour as KeygenBehaviour, - sign::malicious::Behaviour as SignBehaviour, - }, -}; - -use clap::App; - -pub(super) type Behaviours = crate::gg20::service::malicious::Behaviours; - -pub(super) const AVAILABLE_BEHAVIOURS: [&str; 20] = [ - "Honest", - "R1BadProof", - "R1BadGammaI", - "R2FalseAccusation", - "R2BadMta", - "R2BadMtaWc", - "R3BadSigmaI", - "R3FalseAccusationMta", - "R3FalseAccusationMtaWc", - "R3BadProof", - "R3BadDeltaI", - "R3BadKI", - "R3BadAlpha", - "R3BadBeta", - "R4BadReveal", - "R5BadProof", - "R6FalseAccusation", - "R6BadProof", - "R6FalseFailRandomizer", - "R7BadSI", -]; - -pub fn 
get_behaviour_matches(app: App) -> TofndResult { - // TODO: if we want to read all available behaviours from tofn automatically, - // we should add strum (https://docs.rs/strum) to iterate over enums and - // print their names, but it has to be imported in tofn. - - let matches = app.get_matches(); - - // Set a default behaviour - let mut sign_behaviour = "Honest"; - let mut victim = 0; - if let Some(matches) = matches.subcommand_matches("malicious") { - sign_behaviour = matches - .value_of("behaviour") - .ok_or_else(|| anyhow!("behaviour value"))?; - victim = matches - .value_of("victim") - .ok_or_else(|| anyhow!("victim value"))? - .parse::()?; - } - - // TODO: parse keygen malicious types as well - let keygen = KeygenBehaviour::R1BadCommit; - let sign = match_string_to_behaviour(sign_behaviour, victim); - Ok(Behaviours { keygen, sign }) -} - -fn match_string_to_behaviour(behaviour: &str, victim: usize) -> SignBehaviour { - use SignBehaviour::*; - let victim = TypedUsize::from_usize(victim); - // TODO: some of the behaviours do not demand a victim. In the future, more - // will be added that potentially need different set of arguments. - // Adjust this as needed to support that. 
- match behaviour { - "Honest" => Honest, - "R1BadProof" => R1BadProof { victim }, - "R1BadGammaI" => R1BadGammaI, - "R2FalseAccusation" => R2FalseAccusation { victim }, - "R2BadMta" => R2BadMta { victim }, - "R2BadMtaWc" => R2BadMtaWc { victim }, - "R3BadSigmaI" => R3BadSigmaI, - "R3FalseAccusationMta" => R3FalseAccusationMta { victim }, - "R3FalseAccusationMtaWc" => R3FalseAccusationMtaWc { victim }, - "R3BadProof" => R3BadProof, - "R3BadDeltaI" => R3BadDeltaI, - "R3BadKI" => R3BadKI, - "R3BadAlpha" => R3BadAlpha { victim }, - "R3BadBeta" => R3BadBeta { victim }, - "R4BadReveal" => R4BadReveal, - "R5BadProof" => R5BadProof { victim }, - "R6FalseAccusation" => R6FalseAccusation { victim }, - "R6BadProof" => R6BadProof, - "R6FalseFailRandomizer" => R6FalseType5Claim, - "R7BadSI" => R7BadSI, - "R7FalseFailRandomizer" => R7FalseType7Claim, - _ => Honest, - } -} diff --git a/src/config/mod.rs b/src/config/mod.rs index 6ef23da0..f73344ff 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -14,11 +14,6 @@ const DEFAULT_IP: &str = "127.0.0.1"; const DEFAULT_PORT: u16 = 50051; const AVAILABLE_MNEMONIC_CMDS: &[&str] = &["existing", "create", "import", "export", "rotate"]; -#[cfg(feature = "malicious")] -mod malicious; -#[cfg(feature = "malicious")] -use malicious::*; - // default path is ~/.tofnd fn default_tofnd_dir() -> TofndResult { Ok(dirs::home_dir() @@ -31,12 +26,9 @@ fn default_tofnd_dir() -> TofndResult { pub struct Config { pub ip: String, pub port: u16, - pub safe_keygen: bool, pub mnemonic_cmd: Cmd, pub tofnd_path: PathBuf, pub password_method: PasswordMethod, - #[cfg(feature = "malicious")] - pub behaviours: Behaviours, } pub fn parse_args() -> TofndResult { @@ -49,7 +41,7 @@ pub fn parse_args() -> TofndResult { .ok_or_else(|| anyhow!("can't convert default dir to str"))?; let app = App::new("tofnd") - .about("A threshold signature scheme daemon") + .about("A cryptographic signing service") .version(crate_version!()) .arg( Arg::new("ip") @@ -65,17 +57,6 
@@ pub fn parse_args() -> TofndResult { .required(false) .default_value(port), ) - .arg( - // TODO: change to something like `--unsafe-primes` - Arg::new("unsafe") - .help( - "Use unsafe primes for generation of Pailler encryption keys. (default: deactivated) **Security warning:** This option is intented for use only in tests. Do not use this option to secure real value.", - ) - .long("unsafe") - .required(false) - .takes_value(false) - .display_order(0), - ) .arg( Arg::new("no-password") .help( @@ -103,21 +84,6 @@ pub fn parse_args() -> TofndResult { .default_value(default_dir), ); - #[cfg(feature = "malicious")] - let app = app.subcommand( - App::new("malicious") - .about("Select malicious behaviour") - .arg( - Arg::new("behaviour") - .required(true) - .possible_values(&AVAILABLE_BEHAVIOURS) - .help("malicious behaviour"), - ) - .arg(Arg::new("victim").required(true).help("victim")), - ); - #[cfg(feature = "malicious")] - let behaviours = get_behaviour_matches(app.clone())?; - let matches = app.get_matches(); let ip = matches @@ -128,7 +94,6 @@ pub fn parse_args() -> TofndResult { .value_of("port") .ok_or_else(|| anyhow!("port value"))? .parse::()?; - let safe_keygen = !matches.is_present("unsafe"); let mnemonic_cmd = matches .value_of("mnemonic") .ok_or_else(|| anyhow!("cmd value"))? 
@@ -146,11 +111,8 @@ pub fn parse_args() -> TofndResult { Ok(Config { ip, port, - safe_keygen, mnemonic_cmd, tofnd_path, password_method, - #[cfg(feature = "malicious")] - behaviours, }) } diff --git a/src/encrypted_sled/mod.rs b/src/encrypted_sled/mod.rs index 17c2003d..fcebb350 100644 --- a/src/encrypted_sled/mod.rs +++ b/src/encrypted_sled/mod.rs @@ -10,7 +10,7 @@ mod result; // match the API of sled pub use kv::EncryptedDb as Db; -pub use password::{Password, PasswordMethod, PasswordSalt}; +pub use password::{Password, PasswordMethod}; pub use result::EncryptedDbError as Error; pub use result::EncryptedDbResult as Result; diff --git a/src/encrypted_sled/tests.rs b/src/encrypted_sled/tests.rs index 51412875..d4ea703d 100644 --- a/src/encrypted_sled/tests.rs +++ b/src/encrypted_sled/tests.rs @@ -4,7 +4,7 @@ use testdir::testdir; #[test] fn test_encrypted_sled() { let db_path = testdir!("encrypted_db"); - let db = EncryptedDb::open(&db_path, get_test_password()).unwrap(); + let db = EncryptedDb::open(db_path, get_test_password()).unwrap(); // insert -> returns None let res = db.insert("key", "value").unwrap(); @@ -75,7 +75,7 @@ fn test_password() { fn test_large_input() { let db_path = testdir!("large_input"); - let db = EncryptedDb::open(&db_path, get_test_password()).unwrap(); + let db = EncryptedDb::open(db_path, get_test_password()).unwrap(); let large_value = vec![0; 100000]; let res = db.insert("key", large_value.clone()).unwrap(); diff --git a/src/gg20/broadcast.rs b/src/gg20/broadcast.rs deleted file mode 100644 index 4ddd447a..00000000 --- a/src/gg20/broadcast.rs +++ /dev/null @@ -1,176 +0,0 @@ -//! This module handles the routing of incoming traffic. -//! Receives and validates messages until the connection is closed by the client. -//! The incoming messages come from the gRPC stream and are forwarded to shares' internal channels. 
- -// tonic cruft -use super::proto; -use futures_util::StreamExt; -use tokio::sync::mpsc; -use tonic::Status; - -// logging -use tracing::{debug, info, span, warn, Level, Span}; - -/// Results of routing -#[derive(Debug, PartialEq)] -enum RoutingStatus { - Continue { traffic: proto::TrafficIn }, - Stop, - Skip, -} - -/// Receives incoming from a gRPC stream and broadcasts them to internal channels; -/// Loops until client closes the socket, or a message containing [proto::message_in::Data::Abort] is received -/// Empty and unknown messages are ignored -pub(super) async fn broadcast_messages( - in_grpc_stream: &mut tonic::Streaming, - mut out_internal_channels: Vec>>, - span: Span, -) { - // loop until `stop` is received - loop { - // read message from stream - let msg_data = in_grpc_stream.next().await; - - // check incoming message - let traffic = match open_message(msg_data, span.clone()) { - RoutingStatus::Continue { traffic } => traffic, - RoutingStatus::Stop => break, - RoutingStatus::Skip => continue, - }; - - // send the message to all channels - for out_channel in &mut out_internal_channels { - let _ = out_channel.send(Some(traffic.clone())); - } - } -} - -/// gets a gPRC [proto::MessageIn] and checks the type -/// available messages are: -/// [proto::message_in::Data::Traffic] -> return [RoutingResult::Continue] -/// [proto::message_in::Data::Abort] -> return [RoutingResult::Stop] -/// [proto::message_in::Data::KeygenInit] -> return [RoutingResult::Skip] -/// [proto::message_in::Data::SignInit] -> return [RoutingResult::Skip] -fn open_message(msg: Option>, span: Span) -> RoutingStatus { - // start routing span - let route_span = span!(parent: &span, Level::INFO, "routing"); - let _start = route_span.enter(); - - // we receive MessageIn wrapped in multiple layers. 
We have to unpeel tonic message - - // get result - let msg_result = match msg { - Some(msg_result) => msg_result, - None => { - info!("Stream closed"); - return RoutingStatus::Stop; - } - }; - - // get data option - - // TODO examine why this happens: Sometimes, when the connection is - // closed by the client, instead of a `None` message, we get a `Some` - // message containing an "error reading a body from connection: protocol - // error: not a result of an error" error Removing for now to prevent this - // message from appearing while doing keygen/signs but will need to - // find out why this happens - // https://github.com/axelarnetwork/tofnd/issues/167 - let msg_data_opt = match msg_result { - Ok(msg_in) => msg_in.data, - Err(err) => { - info!("Stream closed"); - debug!("Stream closed with err {}", err); - return RoutingStatus::Stop; - } - }; - - // get message data - let msg_data = match msg_data_opt { - Some(msg_data) => msg_data, - None => { - warn!("ignore incoming msg: missing `data` field"); - return RoutingStatus::Skip; - } - }; - - // match message data to types - let traffic = match msg_data { - proto::message_in::Data::Traffic(t) => t, - proto::message_in::Data::Abort(_) => { - warn!("received abort message"); - return RoutingStatus::Stop; - } - proto::message_in::Data::KeygenInit(_) | proto::message_in::Data::SignInit(_) => { - warn!("ignore incoming msg: expect `data` to be TrafficIn type"); - return RoutingStatus::Skip; - } - }; - - // return traffic - RoutingStatus::Continue { traffic } -} - -#[cfg(test)] -mod tests { - use super::*; - - struct TestCase { - message_in: proto::MessageIn, - expected_result: RoutingStatus, - } - - impl TestCase { - fn new(message_in: proto::MessageIn, expected_result: RoutingStatus) -> Self { - TestCase { - message_in, - expected_result, - } - } - } - - fn new_msg_in(msg_in: proto::message_in::Data) -> proto::MessageIn { - proto::MessageIn { data: Some(msg_in) } - } - - #[test] - fn test_validate_message() { - let 
test_cases = vec![ - TestCase::new( - new_msg_in(proto::message_in::Data::Abort(true)), - RoutingStatus::Stop, - ), - TestCase::new( - new_msg_in(proto::message_in::Data::KeygenInit( - proto::KeygenInit::default(), - )), - RoutingStatus::Skip, - ), - TestCase::new( - new_msg_in(proto::message_in::Data::SignInit(proto::SignInit::default())), - RoutingStatus::Skip, - ), - TestCase::new( - new_msg_in(proto::message_in::Data::Traffic(proto::TrafficIn::default())), - RoutingStatus::Continue { - traffic: proto::TrafficIn::default(), - }, - ), - TestCase::new(proto::MessageIn { data: None }, RoutingStatus::Skip), - ]; - - let span = span!(Level::INFO, "test-span"); - - for test_case in test_cases { - let result = open_message(Some(Ok(test_case.message_in)), span.clone()); - assert_eq!(result, test_case.expected_result); - } - - let result = open_message(Some(Err(tonic::Status::ok("test status"))), span.clone()); - assert_eq!(result, RoutingStatus::Stop); - - let result = open_message(None, span); - assert_eq!(result, RoutingStatus::Stop); - } -} diff --git a/src/gg20/key_presence.rs b/src/gg20/key_presence.rs deleted file mode 100644 index 214511d1..00000000 --- a/src/gg20/key_presence.rs +++ /dev/null @@ -1,36 +0,0 @@ -//! This module handles the key_presence gRPC. -//! Request includes [proto::message_in::Data::KeyPresenceRequest] struct and encrypted recovery info. -//! The recovery info is decrypted by party's mnemonic seed and saved in the KvStore. - -use super::{proto, service::Gg20Service}; - -// logging -use tracing::info; - -// error handling -use crate::TofndResult; - -impl Gg20Service { - pub(super) async fn handle_key_presence( - &self, - request: proto::KeyPresenceRequest, - ) -> TofndResult { - // check if mnemonic is available - let _ = self.kv_manager.seed().await?; - - // check if requested key exists - if self.kv_manager.kv().exists(&request.key_uid).await? 
{ - info!( - "Found session-id {} in kv store during key presence check", - request.key_uid - ); - Ok(proto::key_presence_response::Response::Present) - } else { - info!( - "Did not find session-id {} in kv store during key presence check", - request.key_uid - ); - Ok(proto::key_presence_response::Response::Absent) - } - } -} diff --git a/src/gg20/keygen/execute.rs b/src/gg20/keygen/execute.rs deleted file mode 100644 index ccd3ac9e..00000000 --- a/src/gg20/keygen/execute.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! This module creates and executes the keygen protocol -//! On success it returns [super::TofnKeygenOutput]. A successful [Keygen] can produce either an Ok(SecretKeyShare) of an Err(Vec>). -//! On failure it returns [anyhow!] error if [Keygen] struct cannot be instantiated. - -use super::{ - proto, - types::{Context, PartyShareCounts, TofndKeygenOutput}, - Gg20Service, ProtocolCommunication, -}; - -use crate::gg20::protocol; -use tofn::{ - gg20::keygen::{new_keygen, KeygenProtocol}, - sdk::api::TofnResult, -}; - -// logging -use tracing::{info, Span}; - -// error handling -use anyhow::anyhow; - -impl Gg20Service { - /// create a new keygen. - /// The field of Gg20Service `safe_keygen` dictates whether the new keygen will use big primes of not - /// TODO: support `cfg(feature="unsafe")` in the future instead of matching `gg20.safe_keygen` - async fn new_keygen( - &self, - party_share_counts: PartyShareCounts, - ctx: &Context, - ) -> TofnResult { - new_keygen( - party_share_counts, - ctx.threshold, - ctx.tofnd_index, - ctx.tofnd_subindex, - &ctx.party_keygen_data, - #[cfg(feature = "malicious")] - self.cfg.behaviours.keygen.clone(), - ) - } - - /// create and execute keygen protocol and returning the result. - /// if the protocol cannot be instantiated, return a [anyhow!] 
- pub(super) async fn execute_keygen( - &self, - chans: ProtocolCommunication< - Option, - Result, - >, - ctx: &Context, - execute_span: Span, - ) -> TofndKeygenOutput { - // try to create keygen with context - let party_share_counts = ctx.share_counts()?; - let keygen = self - .new_keygen(party_share_counts, ctx) - .await - .map_err(|_| anyhow!("keygen protocol instantiation failed"))?; - - // execute protocol and wait for completion - let protocol_result = protocol::execute_protocol( - keygen, - chans, - &ctx.uids, - &ctx.share_counts, - execute_span.clone(), - ) - .await; - - let res = protocol_result - .map_err(|err| anyhow!("Keygen was not completed due to error: {}", err))?; - - info!("Keygen completed"); - Ok(res) - } -} diff --git a/src/gg20/keygen/init.rs b/src/gg20/keygen/init.rs deleted file mode 100644 index c621c812..00000000 --- a/src/gg20/keygen/init.rs +++ /dev/null @@ -1,351 +0,0 @@ -//! This module handles the initialization of the Keygen protocol. -//! A [KeygenInitSanitized] struct is created out of the raw incoming [proto::KeygenInit] message and a key is reserved inside the KvStore -//! If [proto::KeygenInit] fails to be parsed, an [InitResult] is returned - -// tonic cruft -use futures_util::StreamExt; - -// spans for logging -use tracing::Span; - -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -use super::{ - proto, - types::{KeygenInitSanitized, MAX_PARTY_SHARE_COUNT, MAX_TOTAL_SHARE_COUNT}, - Gg20Service, -}; -use crate::kv_manager::KeyReservation; - -impl Gg20Service { - /// Receives a message from the stream and tries to handle keygen init operations. - /// On success, it reserves a key in the KVStrore and returns a sanitized struct ready to be used by the protocol. - /// On failure, returns a [KeygenInitError] and no changes are been made in the KvStore. 
- pub(super) async fn handle_keygen_init( - &self, - stream: &mut tonic::Streaming, - keygen_span: Span, - ) -> TofndResult<(KeygenInitSanitized, KeyReservation)> { - // try to receive message - let msg = stream - .next() - .await - .ok_or_else(|| anyhow!("stream closed by client"))? - .map_err(|e| anyhow!("stream closed by server: {}", e))?; - - // try to get message data - let msg_data = msg - .data - .ok_or_else(|| anyhow!("received `None` message from client"))?; - - // check if message is of expected type - let keygen_init = match msg_data { - proto::message_in::Data::KeygenInit(k) => k, - _ => { - return Err(anyhow!( - "wrong message type; expecting KeygenInit, got {:?}", - msg_data - )) - } - }; - - // try to process incoming message - let (keygen_init, key_reservation) = self.process_keygen_init(keygen_init).await?; - - // log keygen init state - keygen_init.log_info(keygen_span); - - // return sanitized key and its KvStore reservation - Ok((keygen_init, key_reservation)) - } - - // makes all needed assertions on incoming data, and create structures that are - // needed for the execution of the protocol - async fn process_keygen_init( - &self, - keygen_init: proto::KeygenInit, - ) -> TofndResult<(KeygenInitSanitized, KeyReservation)> { - // try to sanitize arguments - let keygen_init = Self::keygen_sanitize_args(keygen_init) - .map_err(|err| anyhow!("failed to sanitize KeygenInit: {}", err))?; - - // reserve key - let key_uid_reservation = self - .kv_manager - .kv() - .reserve_key(keygen_init.new_key_uid.clone()) - .await - .map_err(|err| anyhow!("failed to reseve key: {}", err))?; - - // return sanitized keygen init and key reservation - Ok((keygen_init, key_uid_reservation)) - } - - /// This function is pub(crate) because it is also needed in handle_recover - /// sanitize arguments of incoming message. 
- /// Example: - /// input for party 'a': - /// args.party_uids = [c, b, a] - /// args.party_share_counts = [1, 2, 3] - /// args.my_party_index = 2 - /// args.threshold = 1 - /// output for party 'a': - /// keygen_init.party_uids = [a, b, c] <- sorted array - /// keygen_init.party_share_counts = [3, 2, 1] . <- sorted with respect to party_uids - /// keygen_init.my_party_index = 0 . <- index inside sorted array - /// keygen_init.threshold = 1 <- same as in input - pub(crate) fn keygen_sanitize_args( - args: proto::KeygenInit, - ) -> TofndResult { - // convert `u32`s to `usize`s - use std::convert::TryFrom; - let my_index = usize::try_from(args.my_party_index)?; - let threshold = usize::try_from(args.threshold)?; - let mut party_share_counts = args - .party_share_counts - .iter() - .map(|i| usize::try_from(*i)) - .collect::, _>>()?; - - // if share_counts are not provided, fall back to 1 share per party - if party_share_counts.is_empty() { - party_share_counts = vec![1; args.party_uids.len()]; - } - - // assert that uids and party shares are alligned - if args.party_uids.len() != party_share_counts.len() { - return Err(anyhow!( - "uid vector and share counts vector not alligned: {:?}, {:?}", - args.party_uids, - party_share_counts, - )); - } - - // check if my_index is inside party_uids - if my_index >= args.party_uids.len() { - return Err(anyhow!( - "my index is {}, but there are only {} parties.", - my_index, - args.party_uids.len(), - )); - } - - // if party's shares are above max, return error - for party_share_count in &party_share_counts { - if *party_share_count > MAX_PARTY_SHARE_COUNT { - return Err(anyhow!( - "party {} has {} shares, but maximum number of shares per party is {}.", - args.party_uids[my_index], - args.party_share_counts[my_index], - MAX_PARTY_SHARE_COUNT, - )); - } - } - - let total_shares = party_share_counts.iter().sum::(); - if total_shares <= threshold { - return Err(anyhow!( - "threshold is not satisfied: t = {}, total number of shares = 
{}", - threshold, - total_shares, - )); - } else if total_shares > MAX_TOTAL_SHARE_COUNT { - return Err(anyhow!( - "total shares count is {}, but maximum number of share count is {}.", - total_shares, - MAX_PARTY_SHARE_COUNT, - )); - } - - // sort uids and share counts - // we need to sort uids and shares because the caller does not necessarily - // send the same vectors (in terms of order) to all tofnd instances. - let (my_new_index, sorted_uids, sorted_share_counts) = - sort_uids_and_shares(my_index, args.party_uids, party_share_counts)?; - - Ok(KeygenInitSanitized { - new_key_uid: args.new_key_uid, - party_uids: sorted_uids, - party_share_counts: sorted_share_counts, - my_index: my_new_index, - threshold, - }) - } -} - -// helper function to co-sort uids and shares with respect to uids an find new index -fn sort_uids_and_shares( - my_index: usize, - uids: Vec, - share_counts: Vec, -) -> TofndResult<(usize, Vec, Vec)> { - // save my uid - let my_uid = uids - .get(my_index) - .ok_or_else(|| anyhow!("Error: Index out of bounds"))? 
- .clone(); - - // create a vec of (uid, share_count) and sort it - let mut pairs: Vec<(String, usize)> = uids.into_iter().zip(share_counts.into_iter()).collect(); - pairs.sort(); - - // unzip vec and search for duplicates in uids - let (mut sorted_uids, sorted_share_counts): (Vec<_>, Vec<_>) = pairs.into_iter().unzip(); - let old_len = sorted_uids.len(); - sorted_uids.dedup(); - if old_len != sorted_uids.len() { - return Err(anyhow!("Error: party_uid vector contained a duplicate")); - } - - // find my new index - let my_index = sorted_uids - .iter() - .position(|x| x == &my_uid) - .ok_or_else(|| anyhow!("Error: Lost my uid after sorting uids"))?; - - Ok((my_index, sorted_uids, sorted_share_counts)) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_sort_uids_and_shares() { - let in_pairs = vec![ - ("c".to_owned(), 1), - ("b".to_owned(), 2), - ("a".to_owned(), 3), - ]; - let out_pairs = vec![ - ("a".to_owned(), 3), - ("b".to_owned(), 2), - ("c".to_owned(), 1), - ]; - - let (in_keys, in_values): (Vec, Vec) = in_pairs.into_iter().unzip(); - let (out_keys, out_values): (Vec, Vec) = out_pairs.into_iter().unzip(); - - let res = sort_uids_and_shares(0, in_keys.clone(), in_values.clone()).unwrap(); - assert_eq!((2, out_keys.clone(), out_values.clone()), res); - let res = sort_uids_and_shares(1, in_keys.clone(), in_values.clone()).unwrap(); - assert_eq!((1, out_keys.clone(), out_values.clone()), res); - let res = sort_uids_and_shares(2, in_keys.clone(), in_values.clone()).unwrap(); - assert_eq!((0, out_keys, out_values), res); - assert!(sort_uids_and_shares(3, in_keys, in_values).is_err()); // index out of bounds - - let err_pairs = vec![("a".to_owned(), 1), ("a".to_owned(), 2)]; - let (err_keys, err_values): (Vec, Vec) = err_pairs.into_iter().unzip(); - assert!(sort_uids_and_shares(0, err_keys.clone(), err_values.clone()).is_err()); - assert!(sort_uids_and_shares(1, err_keys, err_values).is_err()); - } - - #[test] - fn test_ok_keygen_sanitize_args() { - 
// check sorting of parties and shares - let raw_keygen_init = proto::KeygenInit { - new_key_uid: "test_uid".to_owned(), - party_uids: vec!["party_2".to_owned(), "party_1".to_owned()], // unsorted parties - party_share_counts: vec![2, 1], // unsorted shares - my_party_index: 1, // index of "party_1" - threshold: 1, - }; - let sanitized_keygen_init = KeygenInitSanitized { - new_key_uid: "test_uid".to_owned(), // should be same as in raw keygen init - party_uids: vec!["party_1".to_owned(), "party_2".to_owned()], // parties should be sorted - party_share_counts: vec![1, 2], // shares should be sorted with respect to parties - my_index: 0, // index should track "party_1" in the sorted party_uids - threshold: 1, // threshold should be the same - }; - let res = Gg20Service::keygen_sanitize_args(raw_keygen_init).unwrap(); - assert_eq!(&res.new_key_uid, &sanitized_keygen_init.new_key_uid); - assert_eq!(&res.party_uids, &sanitized_keygen_init.party_uids); - assert_eq!( - &res.party_share_counts, - &sanitized_keygen_init.party_share_counts - ); - assert_eq!(&res.my_index, &sanitized_keygen_init.my_index); - assert_eq!(&res.threshold, &sanitized_keygen_init.threshold); - - // check empty share counts - let raw_keygen_init = proto::KeygenInit { - new_key_uid: "test_uid".to_owned(), - party_uids: vec!["party_1".to_owned(), "party_2".to_owned()], - party_share_counts: vec![], // empty share counts; should default to [1, 1] - my_party_index: 0, - threshold: 1, - }; - let res = Gg20Service::keygen_sanitize_args(raw_keygen_init).unwrap(); - assert_eq!(&res.party_share_counts, &vec![1, 1]); - - let raw_keygen_init = proto::KeygenInit { - new_key_uid: "test_uid".to_owned(), - party_uids: vec!["party_1".to_owned()], - party_share_counts: vec![MAX_PARTY_SHARE_COUNT as u32], // should be ok - my_party_index: 0, - threshold: 1, - }; - let res = Gg20Service::keygen_sanitize_args(raw_keygen_init).unwrap(); - assert_eq!(&res.party_share_counts, &vec![MAX_PARTY_SHARE_COUNT]); - - let 
raw_keygen_init = proto::KeygenInit { - new_key_uid: "test_uid".to_owned(), - party_uids: vec!["party_1".to_owned(), "party_2".to_owned()], - party_share_counts: vec![MAX_TOTAL_SHARE_COUNT as u32 - 1, 1], // should be ok - my_party_index: 0, - threshold: 1, - }; - let res = Gg20Service::keygen_sanitize_args(raw_keygen_init).unwrap(); - assert_eq!(&res.party_share_counts, &vec![MAX_TOTAL_SHARE_COUNT - 1, 1]); - } - - #[test] - fn test_fail_keygen_sanitize_args() { - let raw_keygen_init = proto::KeygenInit { - new_key_uid: "test_uid".to_owned(), - party_uids: vec!["party_1".to_owned(), "party_2".to_owned()], - party_share_counts: vec![1, 1, 1], // counts are not the same number as parties - my_party_index: 0, - threshold: 1, - }; - assert!(Gg20Service::keygen_sanitize_args(raw_keygen_init).is_err()); - - let raw_keygen_init = proto::KeygenInit { - new_key_uid: "test_uid".to_owned(), - party_uids: vec!["party_1".to_owned(), "party_2".to_owned()], - party_share_counts: vec![1, 1], - my_party_index: 0, - threshold: 2, // incorrect threshold - }; - assert!(Gg20Service::keygen_sanitize_args(raw_keygen_init).is_err()); - - let raw_keygen_init = proto::KeygenInit { - new_key_uid: "test_uid".to_owned(), - party_uids: vec!["party_1".to_owned(), "party_2".to_owned()], - party_share_counts: vec![1, 1], - my_party_index: 2, // index out of bounds - threshold: 1, - }; - assert!(Gg20Service::keygen_sanitize_args(raw_keygen_init).is_err()); - - let raw_keygen_init = proto::KeygenInit { - new_key_uid: "test_uid".to_owned(), - party_uids: vec!["party_1".to_owned()], - party_share_counts: vec![(MAX_PARTY_SHARE_COUNT + 1) as u32], // party has more than max number of shares - my_party_index: 0, - threshold: 1, - }; - assert!(Gg20Service::keygen_sanitize_args(raw_keygen_init).is_err()); - - let raw_keygen_init = proto::KeygenInit { - new_key_uid: "test_uid".to_owned(), - party_uids: vec!["party_1".to_owned(), "party_2".to_owned()], - party_share_counts: vec![MAX_TOTAL_SHARE_COUNT as 
u32, 1], // total share count is more than max total shares - my_party_index: 0, - threshold: 1, - }; - assert!(Gg20Service::keygen_sanitize_args(raw_keygen_init).is_err()); - } -} diff --git a/src/gg20/keygen/mod.rs b/src/gg20/keygen/mod.rs deleted file mode 100644 index f40b171c..00000000 --- a/src/gg20/keygen/mod.rs +++ /dev/null @@ -1,154 +0,0 @@ -//! Handles the keygen streaming gRPC for one party. -//! -//! Protocol: -//! 1. [self::init] First, the initialization message [proto::KeygenInit] is received from the client. -//! This message describes the execution of the protocol (i.e. number of participants, share counts, etc). -//! 2. [self::execute] Then, the party starts to generate messages by invoking calls of the [tofn] library until the protocol is completed. -//! These messages are send to the client using the gRPC stream, and are broadcasted to all participating parties by the client. -//! 3. [self::result] Finally, the party receives the result of the protocol, which is also send to the client through the gRPC stream. Afterwards, the stream is closed. -//! -//! Shares: -//! Each party might have multiple shares. A single thread is created for each share. -//! We keep this information agnostic to the client, and we use the [crate::gg20::routing] layer to distribute the messages to each share. -//! The result of the protocol is common across all shares, and unique for each party. We make use of [self::result] layer to aggregate and process the result. -//! -//! 
All relevant helper structs and types are defined in [self::types] - -use super::{ - broadcast::broadcast_messages, proto, service::Gg20Service, types::ProtocolCommunication, -}; - -use tonic::Status; - -use tofn::{ - collections::TypedUsize, - gg20::keygen::{ - create_party_keypair_and_zksetup, create_party_keypair_and_zksetup_unsafe, KeygenPartyId, - }, -}; - -// tonic cruft -use tokio::sync::{mpsc, oneshot}; - -// logging -use tracing::{info, span, Level, Span}; - -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -pub mod types; -use types::*; -mod execute; -mod init; -mod result; - -impl Gg20Service { - /// handle keygen gRPC - pub async fn handle_keygen( - &self, - mut stream_in: tonic::Streaming, - mut stream_out_sender: mpsc::UnboundedSender>, - keygen_span: Span, - ) -> TofndResult<()> { - // 1. Receive KeygenInit, open message, sanitize arguments -> init mod - // 2. Spawn N keygen threads to execute the protocol in parallel; one of each of our shares -> execute mod - // 3. Spawn 1 router thread to route messages from client to the respective keygen thread -> routing mod - // 4. Wait for all keygen threads to finish and aggregate all responses -> result mod - - // 1. - // get KeygenInit message from stream, sanitize arguments and reserve key - let (keygen_init, key_uid_reservation) = self - .handle_keygen_init(&mut stream_in, keygen_span.clone()) - .await?; - - // 2. 
- // find my share count to allocate channel vectors - let my_share_count = keygen_init.my_shares_count(); - if my_share_count == 0 { - return Err(anyhow!( - "Party {} has 0 shares assigned", - keygen_init.my_index - )); - } - - // create in and out channels for each share, and spawn as many threads - let mut keygen_senders = Vec::with_capacity(my_share_count); - let mut aggregator_receivers = Vec::with_capacity(my_share_count); - - // computation of (party_keypair, party_zksetup) is intensive so we compute them here once - let secret_recovery_key = self.kv_manager.seed().await?; - let session_nonce = keygen_init.new_key_uid.as_bytes(); - - info!("Generating keypair for party {} ...", keygen_init.my_index); - - let party_id = TypedUsize::::from_usize(keygen_init.my_index); - - let party_keygen_data = match self.cfg.safe_keygen { - true => create_party_keypair_and_zksetup(party_id, &secret_recovery_key, session_nonce), - false => create_party_keypair_and_zksetup_unsafe( - party_id, - &secret_recovery_key, - session_nonce, - ), - } - .map_err(|_| anyhow!("Party keypair generation failed"))?; - - info!( - "Finished generating keypair for party {}", - keygen_init.my_index - ); - - for my_tofnd_subindex in 0..my_share_count { - // channels for communication between router (sender) and protocol threads (receivers) - let (keygen_sender, keygen_receiver) = mpsc::unbounded_channel(); - keygen_senders.push(keygen_sender); - // channels for communication between protocol threads (senders) and final result aggregator (receiver) - let (aggregator_sender, aggregator_receiver) = oneshot::channel(); - aggregator_receivers.push(aggregator_receiver); - - // wrap channels needed by internal threads; receiver chan for router and sender chan gRPC stream - let chans = ProtocolCommunication::new(keygen_receiver, stream_out_sender.clone()); - // wrap all context data needed for each thread - let ctx = Context::new( - &keygen_init, - keygen_init.my_index, - my_tofnd_subindex, - 
party_keygen_data.clone(), - ); - // clone gg20 service because tokio thread takes ownership - let gg20 = self.clone(); - - // set up log state - let log_info = ctx.log_info(); - let state = log_info.as_str(); - let execute_span = span!(parent: &keygen_span, Level::DEBUG, "execute", state); - - // spawn keygen thread and continue immediately - tokio::spawn(async move { - // wait for keygen's result inside thread - let secret_key_share = gg20.execute_keygen(chans, &ctx, execute_span).await; - // send result to aggregator - let _ = aggregator_sender.send(secret_key_share); - }); - } - - // 3. - // spin up broadcaster thread and return immediately - tokio::spawn(async move { - broadcast_messages(&mut stream_in, keygen_senders, keygen_span).await; - }); - - // 4. - // wait for all keygen threads to end, aggregate their responses, and store data in KV store - self.aggregate_results( - aggregator_receivers, - &mut stream_out_sender, - key_uid_reservation, - keygen_init, - ) - .await?; - - Ok(()) - } -} diff --git a/src/gg20/keygen/result.rs b/src/gg20/keygen/result.rs deleted file mode 100644 index f3c6f0d3..00000000 --- a/src/gg20/keygen/result.rs +++ /dev/null @@ -1,200 +0,0 @@ -//! This module handles the aggregation of process of keygen results. -//! When all keygen threads finish, we aggregate their results and retrieve: -//! 1. the public key - must be the same across all results; stored in KvStore -//! 2. all secret share data - data used to allow parties to participate to future Signs; stored in KvStore -//! 3. 
all secret share recovery info - information used to allow client to issue secret share recovery in case of data loss; sent to client - -use tofn::{gg20::keygen::SecretKeyShare, sdk::api::serialize}; - -use super::{ - proto::{self}, - types::{BytesVec, KeygenInitSanitized, TofnKeygenOutput, TofndKeygenOutput}, - Gg20Service, -}; -use crate::{gg20::types::PartyInfo, kv_manager::KeyReservation}; - -// tonic cruft -use tokio::sync::{ - mpsc, - oneshot::{self, Receiver}, -}; -use tonic::Status; - -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -use std::convert::TryInto; - -impl Gg20Service { - /// aggregate results from all keygen threads, create a record and insert it in the KvStore - pub(super) async fn aggregate_results( - &self, - aggregator_receivers: Vec>, - stream_out_sender: &mut mpsc::UnboundedSender>, - key_uid_reservation: KeyReservation, - keygen_init: KeygenInitSanitized, - ) -> TofndResult<()> { - // wait all keygen threads and aggregate results - // can't use `map_err` because of `.await` func :( - let keygen_outputs = match Self::aggregate_keygen_outputs(aggregator_receivers).await { - Ok(keygen_outputs) => keygen_outputs, - Err(err) => { - self.kv_manager - .kv() - .unreserve_key(key_uid_reservation) - .await; - return Err(anyhow!( - "Error at Keygen output aggregation. 
Unreserving key {}", - err - )); - } - }; - - // try to process keygen outputs - let (pub_key, group_recover_info, secret_key_shares) = - Self::process_keygen_outputs(&keygen_init, keygen_outputs, stream_out_sender)?; - - // try to retrieve private recovery info from all shares - let private_recover_info = - Self::get_private_recovery_data(&secret_key_shares).map_err(|err| anyhow!(err))?; - - // combine responses from all keygen threads to a single struct - let kv_data = PartyInfo::get_party_info( - secret_key_shares, - keygen_init.party_uids.clone(), - keygen_init.party_share_counts.clone(), - keygen_init.my_index, - ); - - // try to put data inside kv store - self.kv_manager - .kv() - .put(key_uid_reservation, kv_data.try_into()?) - .await - .map_err(|err| anyhow!(err))?; - - // try to send result - Ok( - stream_out_sender.send(Ok(proto::MessageOut::new_keygen_result( - &keygen_init.party_uids, - Ok(proto::KeygenOutput { - pub_key, - group_recover_info, - private_recover_info, - }), - )))?, - ) - } - - /// iterate all keygen outputs, and return data that need to be permenantly stored - /// we perform a sanity check that all shares produces the same pubkey and group recovery - /// and then return a single copy of the common info and a vec with `SecretKeyShares` of each party - /// This vec is later used to derive private recovery info - fn process_keygen_outputs( - keygen_init: &KeygenInitSanitized, - keygen_outputs: Vec, - stream_out_sender: &mut mpsc::UnboundedSender>, - ) -> TofndResult<(BytesVec, BytesVec, Vec)> { - // Collect all key shares unless there's a protocol fault - let keygen_outputs = keygen_outputs - .into_iter() - .collect::, _>>(); - - match keygen_outputs { - Ok(secret_key_shares) => { - if secret_key_shares.is_empty() { - return Err(anyhow!( - "Party {} created no secret key shares", - keygen_init.my_index - )); - } - - // check that all shares returned the same public key and group recover info - let share_id = 
secret_key_shares[0].share().index(); - let pub_key = secret_key_shares[0].group().encoded_pubkey(); - let group_info = secret_key_shares[0] - .group() - .all_shares_bytes() - .map_err(|_| anyhow!("unable to call all_shares_bytes()"))?; - - // sanity check: pubkey and group recovery info should be the same across all shares - // Here we check that the first share produced the same info as the i-th. - for secret_key_share in &secret_key_shares[1..] { - // try to get pubkey of i-th share. Each share should produce the same pubkey - if pub_key != secret_key_share.group().encoded_pubkey() { - return Err(anyhow!( - "Party {}'s share {} and {} returned different public key", - keygen_init.my_index, - share_id, - secret_key_share.share().index() - )); - } - - // try to get group recovery info of i-th share. Each share should produce the same group info - let curr_group_info = secret_key_share - .group() - .all_shares_bytes() - .map_err(|_| anyhow!("unable to call all_shares_bytes()"))?; - if group_info != curr_group_info { - return Err(anyhow!( - "Party {}'s share {} and {} returned different group recovery info", - keygen_init.my_index, - share_id, - secret_key_share.share().index() - )); - } - } - - Ok((pub_key, group_info, secret_key_shares)) - } - Err(crimes) => { - // send crimes and exit with an error - stream_out_sender.send(Ok(proto::MessageOut::new_keygen_result( - &keygen_init.party_uids, - Err(crimes.clone()), - )))?; - - Err(anyhow!( - "Party {} found crimes: {:?}", - keygen_init.my_index, - crimes - )) - } - } - } - - /// Create private recovery info out of a vec with all parties' SecretKeyShares - fn get_private_recovery_data(secret_key_shares: &[SecretKeyShare]) -> TofndResult { - // try to retrieve private recovery info from all party's shares - let private_infos = secret_key_shares - .iter() - .enumerate() - .map(|(index, secret_key_share)| { - secret_key_share - .recovery_info() - .map_err(|_| anyhow!("Unable to get recovery info for share {}", index)) - 
}) - .collect::>>()?; - - // We use an additional layer of serialization to simplify the protobuf definition - let private_bytes = serialize(&private_infos) - .map_err(|_| anyhow!("Failed to serialize private recovery infos"))?; - - Ok(private_bytes) - } - - /// wait all keygen threads and get keygen outputs - async fn aggregate_keygen_outputs( - aggregator_receivers: Vec>, - ) -> TofndResult> { - let mut keygen_outputs = Vec::with_capacity(aggregator_receivers.len()); - - for aggregator in aggregator_receivers { - let res = aggregator.await??; - keygen_outputs.push(res); - } - - Ok(keygen_outputs) - } -} diff --git a/src/gg20/keygen/types.rs b/src/gg20/keygen/types.rs deleted file mode 100644 index 24d15b7f..00000000 --- a/src/gg20/keygen/types.rs +++ /dev/null @@ -1,105 +0,0 @@ -//! Helper structs and implementations for [crate::gg20::keygen]. - -use tofn::{ - collections::TypedUsize, - gg20::keygen::{KeygenPartyId, KeygenPartyShareCounts, PartyKeygenData, SecretKeyShare}, - sdk::api::ProtocolOutput, -}; - -pub(super) type PartyShareCounts = KeygenPartyShareCounts; -pub const MAX_PARTY_SHARE_COUNT: usize = tofn::gg20::keygen::MAX_PARTY_SHARE_COUNT; -pub const MAX_TOTAL_SHARE_COUNT: usize = tofn::gg20::keygen::MAX_TOTAL_SHARE_COUNT; - -use crate::TofndResult; -use anyhow::anyhow; -use tracing::{info, span, Level, Span}; - -/// tofn's ProtocolOutput for Keygen -pub type TofnKeygenOutput = ProtocolOutput; -/// tofnd's ProtocolOutput for Keygen -pub type TofndKeygenOutput = TofndResult; -/// type for bytes -pub use tofn::sdk::api::BytesVec; - -/// KeygenInitSanitized holds all arguments needed by Keygen in the desired form; populated by proto::KeygenInit -/// pub because it is also needed by recovery module -pub struct KeygenInitSanitized { - pub new_key_uid: String, // session's UID - pub party_uids: Vec, // vector of party uids; this is alligned with party_share_count vector - pub party_share_counts: Vec, // vector of share counts; this is alligned with party_uids 
vector - pub my_index: usize, // the _tofnd_ index of the party inside party_uids and party_shares_counts - pub threshold: usize, // protocol's threshold -} -impl KeygenInitSanitized { - // get the share count of `my_index`th party - pub(super) fn my_shares_count(&self) -> usize { - self.party_share_counts[self.my_index] as usize - } - - // log KeygenInitSanitized state - pub(super) fn log_info(&self, keygen_span: Span) { - // create log span and display current status - let init_span = span!(parent: &keygen_span, Level::INFO, "init"); - let _enter = init_span.enter(); - info!( - "[uid:{}, shares:{}] starting Keygen with [key: {}, (t,n)=({},{}), participants:{:?}", - self.party_uids[self.my_index], - self.my_shares_count(), - self.new_key_uid, - self.threshold, - self.party_share_counts.iter().sum::(), - self.party_uids, - ); - } -} - -/// Context holds the all arguments that need to be passed from keygen gRPC call into protocol execution -pub struct Context { - pub(super) key_id: String, // session id; used for logs - pub(super) uids: Vec, // all party uids; alligned with `share_counts` - pub(super) share_counts: Vec, // all party share counts; alligned with `uids` - pub(super) threshold: usize, // protocol's threshold - pub(super) tofnd_index: TypedUsize, // tofnd index of party - pub(super) tofnd_subindex: usize, // index of party's share - pub(super) party_keygen_data: PartyKeygenData, -} - -impl Context { - /// create a new Context - pub fn new( - keygen_init: &KeygenInitSanitized, - tofnd_index: usize, - tofnd_subindex: usize, - party_keygen_data: PartyKeygenData, - ) -> Self { - let tofnd_index = TypedUsize::from_usize(tofnd_index); - Context { - key_id: keygen_init.new_key_uid.clone(), - uids: keygen_init.party_uids.clone(), - share_counts: keygen_init.party_share_counts.clone(), - threshold: keygen_init.threshold, - tofnd_index, - tofnd_subindex, - party_keygen_data, - } - } - - /// get share_counts in the form of tofn::PartyShareCounts - pub fn 
share_counts(&self) -> TofndResult { - match PartyShareCounts::from_vec(self.share_counts.clone()) { - Ok(party_share_counts) => Ok(party_share_counts), - Err(_) => Err(anyhow!("failed to create party_share_counts")), - } - } - - /// export state; used for logging - pub fn log_info(&self) -> String { - format!( - "[{}] [uid:{}, share:{}/{}]", - self.key_id, - self.uids[self.tofnd_index.as_usize()], - self.tofnd_subindex + 1, - self.share_counts[self.tofnd_index.as_usize()] - ) - } -} diff --git a/src/gg20/mod.rs b/src/gg20/mod.rs deleted file mode 100644 index 654c7161..00000000 --- a/src/gg20/mod.rs +++ /dev/null @@ -1,134 +0,0 @@ -//! [proto::gg20_server::Gg20] gRPC server API -//! Available gRPCs are: -//! [recover] - Recovers private data of a party provided a mnemonic. -//! [keygen] - Starts keygen. -//! [sign] - Starts sing. - -// tonic cruft -use super::proto; -use tokio::sync::mpsc; -use tokio_stream::wrappers::UnboundedReceiverStream; -use tonic::{Request, Response, Status}; -pub mod proto_helpers; - -// logging -use tracing::{error, info, span, Level}; - -// gRPC -mod broadcast; -mod key_presence; -mod keygen; -mod protocol; -mod recover; -pub mod service; -mod sign; -pub mod types; -use types::*; - -#[tonic::async_trait] -impl proto::gg20_server::Gg20 for service::Gg20Service { - type KeygenStream = UnboundedReceiverStream>; - type SignStream = Self::KeygenStream; - - /// Recover unary gRPC. See [recover]. 
- async fn recover( - &self, - request: tonic::Request, - ) -> Result, Status> { - let request = request.into_inner(); - - let response = self.handle_recover(request).await; - let response = match response { - Ok(()) => { - info!("Recovery completed successfully!"); - proto::recover_response::Response::Success - } - Err(err) => { - error!("Unable to complete recovery: {}", err); - proto::recover_response::Response::Fail - } - }; - - Ok(Response::new(proto::RecoverResponse { - // the prost way to convert enums to i32 https://github.com/danburkert/prost#enumerations - response: response as i32, - })) - } - - /// KeyPresence unary gRPC. See [key_presence]. - async fn key_presence( - &self, - request: tonic::Request, - ) -> Result, Status> { - let request = request.into_inner(); - - let response = match self.handle_key_presence(request).await { - Ok(res) => { - info!("Key presence check completed succesfully!"); - res - } - Err(err) => { - error!("Unable to complete key presence check: {}", err); - proto::key_presence_response::Response::Fail - } - }; - - Ok(Response::new(proto::KeyPresenceResponse { - response: response as i32, - })) - } - - /// Keygen streaming gRPC. See [keygen]. - async fn keygen( - &self, - request: Request>, - ) -> Result, Status> { - let stream_in = request.into_inner(); - let (msg_sender, rx) = mpsc::unbounded_channel(); - - // log span for keygen - let span = span!(Level::INFO, "Keygen"); - let _enter = span.enter(); - let s = span.clone(); - let gg20 = self.clone(); - - tokio::spawn(async move { - // can't return an error from a spawned thread - if let Err(e) = gg20.handle_keygen(stream_in, msg_sender.clone(), s).await { - error!("keygen failure: {:?}", e.to_string()); - // we can't handle errors in tokio threads. Log error if we are unable to send the status code to client. 
- if let Err(e) = msg_sender.send(Err(Status::invalid_argument(e.to_string()))) { - error!("could not send error to client: {}", e.to_string()); - } - } - }); - Ok(Response::new(UnboundedReceiverStream::new(rx))) - } - - /// Sign sreaming gRPC. See [sign]. - async fn sign( - &self, - request: Request>, - ) -> Result, Status> { - let stream = request.into_inner(); - let (msg_sender, rx) = mpsc::unbounded_channel(); - - // log span for sign - let span = span!(Level::INFO, "Sign"); - let _enter = span.enter(); - let s = span.clone(); - let gg20 = self.clone(); - - tokio::spawn(async move { - // can't return an error from a spawned thread - if let Err(e) = gg20.handle_sign(stream, msg_sender.clone(), s).await { - error!("sign failure: {:?}", e.to_string()); - // we can't handle errors in tokio threads. Log error if we are unable to send the status code to client. - if let Err(e) = msg_sender.send(Err(Status::invalid_argument(e.to_string()))) { - error!("could not send error to client: {}", e.to_string()); - } - } - }); - Ok(Response::new(UnboundedReceiverStream::new(rx))) - } -} diff --git a/src/gg20/proto_helpers.rs b/src/gg20/proto_helpers.rs deleted file mode 100644 index 4de75b11..00000000 --- a/src/gg20/proto_helpers.rs +++ /dev/null @@ -1,98 +0,0 @@ -//! 
Wrappers for sending and receiving [proto] messages - -use tofn::{ - collections::FillVecMap, - gg20::{keygen::KeygenPartyId, sign::SignPartyId}, - sdk::api::Fault, -}; - -use crate::proto; -type KeygenFaults = FillVecMap; -type SignFaults = FillVecMap; -type KeygenResultData = Result; -type SignResultData = Result, SignFaults>; -use proto::message_out::criminal_list::criminal::CrimeType as ProtoCrimeType; -use proto::message_out::criminal_list::Criminal as ProtoCriminal; -use proto::message_out::keygen_result::KeygenResultData::Criminals as ProtoKeygenCriminals; -use proto::message_out::keygen_result::KeygenResultData::Data as ProtoKeygenData; -use proto::message_out::sign_result::SignResultData::Criminals as ProtoSignCriminals; -use proto::message_out::sign_result::SignResultData::Signature as ProtoSignature; -use proto::message_out::CriminalList as ProtoCriminalList; - -// convenience constructors -impl proto::MessageOut { - pub(super) fn new_bcast(bcast: &[u8]) -> Self { - Self::new_traffic("", bcast, true) - } - pub(super) fn new_p2p(receiver_id: &str, p2p: &[u8]) -> Self { - Self::new_traffic(receiver_id, p2p, false) - } - pub(super) fn new_traffic(receiver_id: &str, msg: &[u8], is_broadcast: bool) -> Self { - proto::MessageOut { - data: Some(proto::message_out::Data::Traffic(proto::TrafficOut { - to_party_uid: receiver_id.to_string(), - payload: msg.to_vec(), - is_broadcast, - })), - } - } - pub(super) fn need_recover() -> Self { - proto::MessageOut { - data: Some(proto::message_out::Data::NeedRecover(true)), - } - } - - pub(super) fn new_keygen_result(participant_uids: &[String], result: KeygenResultData) -> Self { - let result = match result { - Ok(keygen_output) => ProtoKeygenData(keygen_output), - Err(faults) => ProtoKeygenCriminals(ProtoCriminalList::from_tofn_faults( - faults, - participant_uids, - )), - }; - proto::MessageOut { - data: Some(proto::message_out::Data::KeygenResult( - proto::message_out::KeygenResult { - keygen_result_data: Some(result), 
- }, - )), - } - } - - pub(super) fn new_sign_result(participant_uids: &[String], result: SignResultData) -> Self { - let result = match result { - Err(faults) => ProtoSignCriminals(ProtoCriminalList::from_tofn_faults( - faults, - participant_uids, - )), - Ok(sign_output) => ProtoSignature(sign_output), - }; - proto::MessageOut { - data: Some(proto::message_out::Data::SignResult( - proto::message_out::SignResult { - sign_result_data: Some(result), - }, - )), - } - } -} - -fn fault_to_crime(f: &Fault) -> ProtoCrimeType { - match f { - Fault::MissingMessage | Fault::CorruptedMessage => ProtoCrimeType::NonMalicious, - Fault::ProtocolFault => ProtoCrimeType::Malicious, - } -} - -impl ProtoCriminalList { - fn from_tofn_faults

(faults: FillVecMap, uids: &[String]) -> Self { - let criminals = faults - .into_iter_some() - .map(|(i, fault)| ProtoCriminal { - party_uid: uids[i.as_usize()].clone(), - crime_type: fault_to_crime(&fault) as i32, // why `as i32`? https://github.com/danburkert/prost#enumerations - }) - .collect(); - Self { criminals } - } -} diff --git a/src/gg20/protocol.rs b/src/gg20/protocol.rs deleted file mode 100644 index a92541e1..00000000 --- a/src/gg20/protocol.rs +++ /dev/null @@ -1,188 +0,0 @@ -//! Abstract functionality used by keygen, sign, etc. - -use tofn::{ - collections::TypedUsize, - sdk::api::{Protocol, ProtocolOutput, Round}, -}; - -// tonic cruft -use super::{proto, ProtocolCommunication}; -use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; - -// logging -use tracing::{debug, error, span, warn, Level, Span}; - -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -/// execute gg20 protocol -pub(super) async fn execute_protocol( - mut party: Protocol, - mut chans: ProtocolCommunication< - Option, - Result, - >, - party_uids: &[String], - party_share_counts: &[usize], - span: Span, -) -> TofndResult> -where - K: Clone, -{ - // set up counters for logging - let total_num_of_shares = party_share_counts.iter().fold(0, |acc, s| acc + *s); - let total_round_p2p_msgs = total_num_of_shares * (total_num_of_shares - 1); // total number of messages is n(n-1) - - let mut round_count = 0; - while let Protocol::NotDone(mut round) = party { - round_count += 1; - - // handle outgoing traffic - handle_outgoing(&chans.sender, &round, party_uids, round_count, span.clone())?; - - // collect incoming traffic - handle_incoming( - &mut chans.receiver, - &mut round, - party_uids, - total_round_p2p_msgs, - total_num_of_shares, - round_count, - span.clone(), - ) - .await?; - - // check if everything was ok this round - party = round - .execute_next_round() - .map_err(|_| anyhow!("Error in tofn::execute_next_round"))?; - } - - match party { - Protocol::NotDone(_) 
=> Err(anyhow!("Protocol failed to complete")), - Protocol::Done(result) => Ok(result), - } -} - -fn handle_outgoing( - sender: &UnboundedSender>, - round: &Round, - party_uids: &[String], - round_count: usize, - span: Span, -) -> TofndResult<()> { - let send_span = span!(parent: &span, Level::DEBUG, "outgoing", round = round_count); - let _start = send_span.enter(); - debug!("begin"); - // send outgoing bcasts - if let Some(bcast) = round.bcast_out() { - debug!("generating out bcast"); - // send message to gRPC client - sender.send(Ok(proto::MessageOut::new_bcast(bcast)))? - } - // send outgoing p2ps - if let Some(p2ps_out) = round.p2ps_out() { - let mut p2p_msg_count = 1; - for (i, p2p) in p2ps_out.iter() { - // get tofnd index from tofn - let tofnd_idx = round - .info() - .party_share_counts() - .share_to_party_id(i) - .map_err(|_| anyhow!("Unable to get tofnd index for party {}", i))?; - - debug!( - "out p2p to [{}] ({}/{})", - party_uids[tofnd_idx.as_usize()], - p2p_msg_count, - p2ps_out.len() - 1 - ); - p2p_msg_count += 1; - - // send message to gRPC client - sender.send(Ok(proto::MessageOut::new_p2p( - &party_uids[tofnd_idx.as_usize()], - p2p, - )))? 
- } - } - debug!("finished"); - Ok(()) -} - -async fn handle_incoming( - receiver: &mut UnboundedReceiver>, - round: &mut Round, - party_uids: &[String], - total_round_p2p_msgs: usize, - total_num_of_shares: usize, - round_count: usize, - span: Span, -) -> TofndResult<()> { - let mut p2p_msg_count = 0; - let mut bcast_msg_count = 0; - - // loop until no more messages are needed for this round - while round.expecting_more_msgs_this_round() { - // get internal message from broadcaster - let traffic = receiver.recv().await.ok_or(format!( - "{}: stream closed by client before protocol has completed", - round_count - )); - - // unpeel TrafficIn - let traffic = match traffic { - Ok(traffic_opt) => match traffic_opt { - Some(traffic) => traffic, - None => { - // if data is missing, ignore the message, - warn!("ignore incoming msg: missing `data` field"); - continue; - } - }, - Err(_) => { - // if channel is closed, stop - error!("internal channel closed prematurely"); - break; - } - }; - - // We have to spawn a new span it in each loop because `async` calls don't work well with tracing - // See details on how we need to make spans curve around `.await`s here: - // https://docs.rs/tracing/0.1.25/tracing/span/index.html#entering-a-span - let recv_span = span!(parent: &span, Level::DEBUG, "incoming", round = round_count); - let _start = recv_span.enter(); - - // log incoming message - if traffic.is_broadcast { - bcast_msg_count += 1; - debug!( - "got incoming bcast message {}/{}", - bcast_msg_count, total_num_of_shares - ); - } else { - p2p_msg_count += 1; - debug!( - "got incoming p2p message {}/{}", - p2p_msg_count, total_round_p2p_msgs - ); - } - - // get sender's party index - let from = party_uids - .iter() - .position(|uid| uid == &traffic.from_party_uid) - .ok_or_else(|| anyhow!("from uid does not exist in party uids"))?; - - // try to set a message - if round - .msg_in(TypedUsize::from_usize(from), &traffic.payload) - .is_err() - { - return Err(anyhow!("error calling 
tofn::msg_in with [from: {}]", from)); - }; - } - - Ok(()) -} diff --git a/src/gg20/recover.rs b/src/gg20/recover.rs deleted file mode 100644 index 8d85a57e..00000000 --- a/src/gg20/recover.rs +++ /dev/null @@ -1,170 +0,0 @@ -//! This module handles the recover gRPC. -//! Request includes [proto::message_in::Data::KeygenInit] struct and encrypted recovery info. -//! The recovery info is decrypted by party's mnemonic seed and saved in the KvStore. - -use super::{keygen::types::KeygenInitSanitized, proto, service::Gg20Service, types::PartyInfo}; -use tofn::{ - collections::TypedUsize, - gg20::keygen::{ - recover_party_keypair, recover_party_keypair_unsafe, KeygenPartyId, SecretKeyShare, - SecretRecoveryKey, - }, - sdk::api::{deserialize, BytesVec, PartyShareCounts}, -}; - -// logging -use tracing::{info, warn}; - -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -use std::convert::TryInto; - -impl Gg20Service { - pub(super) async fn handle_recover(&self, request: proto::RecoverRequest) -> TofndResult<()> { - // get keygen init sanitized from request - let keygen_init = { - let keygen_init = request - .keygen_init - .ok_or_else(|| anyhow!("missing keygen_init field in recovery request"))?; - Self::keygen_sanitize_args(keygen_init)? - }; - - let keygen_output = request - .keygen_output - .ok_or_else(|| anyhow!("missing keygen_output field in recovery request"))?; - - // check if key-uid already exists in kv-store. If yes, return success and don't update the kv-store - if self - .kv_manager - .kv() - .exists(&keygen_init.new_key_uid) - .await - .map_err(|err| anyhow!(err))? - { - warn!( - "Request to recover shares for [key {}, party {}] but shares already exist in kv-store. 
Abort request.", - keygen_init.new_key_uid, keygen_init.party_uids[keygen_init.my_index] - ); - return Ok(()); - } - - // recover secret key shares from request - // get mnemonic seed - let secret_recovery_key = self.kv_manager.seed().await?; - let secret_key_shares = self - .recover_secret_key_shares(&secret_recovery_key, &keygen_init, &keygen_output) - .map_err(|err| anyhow!("Failed to acquire secret key share {}", err))?; - - self.update_share_kv_store(keygen_init, secret_key_shares) - .await - } - - /// get recovered secret key shares from serilized share recovery info - fn recover_secret_key_shares( - &self, - secret_recovery_key: &SecretRecoveryKey, - init: &KeygenInitSanitized, - output: &proto::KeygenOutput, - ) -> TofndResult> { - // get my share count safely - let my_share_count = *init.party_share_counts.get(init.my_index).ok_or_else(|| { - anyhow!( - "index {} is out of party_share_counts bounds {}", - init.my_index, - init.party_share_counts.len() - ) - })?; - if my_share_count == 0 { - return Err(anyhow!("Party {} has 0 shares assigned", init.my_index)); - } - - // check party share counts - let party_share_counts = PartyShareCounts::from_vec(init.party_share_counts.to_owned()) - .map_err(|_| { - anyhow!( - "PartyCounts::from_vec() error for {:?}", - init.party_share_counts - ) - })?; - - // check private recovery infos - // use an additional layer of deserialization to simpify the protobuf definition - // deserialize recovery info here to catch errors before spending cycles on keypair recovery - let private_info_vec: Vec = deserialize(&output.private_recover_info) - .ok_or_else(|| anyhow!("Failed to deserialize private recovery infos"))?; - - if private_info_vec.len() != my_share_count { - return Err(anyhow!( - "Party {} has {} shares assigned, but retrieved {} shares from client", - init.my_index, - my_share_count, - private_info_vec.len() - )); - } - - info!("Recovering keypair for party {} ...", init.my_index); - - let party_id = 
TypedUsize::::from_usize(init.my_index); - - // try to recover keypairs - let session_nonce = init.new_key_uid.as_bytes(); - let party_keypair = match self.cfg.safe_keygen { - true => recover_party_keypair(party_id, secret_recovery_key, session_nonce), - false => recover_party_keypair_unsafe(party_id, secret_recovery_key, session_nonce), - } - .map_err(|_| anyhow!("party keypair recovery failed"))?; - - info!("Finished recovering keypair for party {}", init.my_index); - - // try to gather secret key shares from recovery infos - let secret_key_shares = private_info_vec - .iter() - .enumerate() - .map(|(i, share_recovery_info_bytes)| { - SecretKeyShare::recover( - &party_keypair, - share_recovery_info_bytes, // request recovery for ith share - &output.group_recover_info, - &output.pub_key, - party_id, - i, - party_share_counts.clone(), - init.threshold, - ) - .map_err(|_| anyhow!("Cannot recover share [{}] of party [{}]", i, party_id)) - }) - .collect::>()?; - - Ok(secret_key_shares) - } - - /// attempt to write recovered secret key shares to the kv-store - async fn update_share_kv_store( - &self, - keygen_init_sanitized: KeygenInitSanitized, - secret_key_shares: Vec, - ) -> TofndResult<()> { - // try to make a reservation - let reservation = self - .kv_manager - .kv() - .reserve_key(keygen_init_sanitized.new_key_uid) - .await - .map_err(|err| anyhow!("failed to complete reservation: {}", err))?; - // acquire kv-data - let kv_data = PartyInfo::get_party_info( - secret_key_shares, - keygen_init_sanitized.party_uids, - keygen_init_sanitized.party_share_counts, - keygen_init_sanitized.my_index, - ); - // try writing the data to the kv-store - self.kv_manager - .kv() - .put(reservation, kv_data.try_into()?) 
- .await - .map_err(|err| anyhow!("failed to update kv store: {}", err)) - } -} diff --git a/src/gg20/service/malicious.rs b/src/gg20/service/malicious.rs deleted file mode 100644 index 4aec2932..00000000 --- a/src/gg20/service/malicious.rs +++ /dev/null @@ -1,10 +0,0 @@ -use tofn::gg20::{ - keygen::malicious::Behaviour as KeygenBehaviour, sign::malicious::Behaviour as SignBehaviour, -}; - -/// Behaviours are pub because config mod needs access -#[derive(Clone, Debug)] -pub struct Behaviours { - pub keygen: KeygenBehaviour, - pub sign: SignBehaviour, -} diff --git a/src/gg20/service/mod.rs b/src/gg20/service/mod.rs deleted file mode 100644 index db145cdf..00000000 --- a/src/gg20/service/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! This mod includes the service implementation derived from - -use super::proto; -use crate::config::Config; -use crate::kv_manager::KvManager; - -#[cfg(feature = "malicious")] -pub mod malicious; - -/// Gg20Service -#[derive(Clone)] -pub struct Gg20Service { - pub(super) kv_manager: KvManager, - pub(super) cfg: Config, -} - -/// create a new Gg20 gRPC server -pub fn new_service(cfg: Config, kv_manager: KvManager) -> impl proto::gg20_server::Gg20 { - Gg20Service { kv_manager, cfg } -} diff --git a/src/gg20/sign/execute.rs b/src/gg20/sign/execute.rs deleted file mode 100644 index f9d0b598..00000000 --- a/src/gg20/sign/execute.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! This module creates and executes the sign protocol -//! On success it returns [super::TofndSignOutput]. A successful sign execution can produce either an Ok(Vec) of an Err(Vec>). -//! On failure it returns [anyhow!] error if [Sign] struct cannot be instantiated. 
- -use super::{ - proto, - types::{Context, TofndSignOutput}, - Gg20Service, ProtocolCommunication, -}; -use crate::gg20::protocol; -use tofn::gg20::sign::new_sign; - -// logging -use tracing::{info, Span}; - -// error handling -use anyhow::anyhow; - -impl Gg20Service { - /// create and execute sign protocol and returning the result. - /// if the protocol cannot be instantiated, return an [anyhow!] error - pub(super) async fn execute_sign( - &self, - chans: ProtocolCommunication< - Option, - Result, - >, - ctx: &Context, - execute_span: Span, - ) -> TofndSignOutput { - // try to create sign with context - let sign = new_sign( - ctx.group(), - &ctx.share, - &ctx.sign_parties, - ctx.msg_to_sign(), - #[cfg(feature = "malicious")] - self.cfg.behaviours.sign.clone(), - ) - .map_err(|_| anyhow!("sign instantiation failed"))?; - - // execute protocol and wait for completion - let protocol_result = protocol::execute_protocol( - sign, - chans, - // &ctx.sign_init.participant_uids, - &ctx.sign_uids(), - &ctx.sign_share_counts, - execute_span.clone(), - ) - .await; - - let res = protocol_result - .map_err(|err| anyhow!("Sign was not completed due to error: {}", err))?; - - info!("Sign completed"); - Ok(res) - } -} diff --git a/src/gg20/sign/init.rs b/src/gg20/sign/init.rs deleted file mode 100644 index b1593acd..00000000 --- a/src/gg20/sign/init.rs +++ /dev/null @@ -1,166 +0,0 @@ -//! This module handles the initialization of the Sign protocol. -//! A [SignInitSanitized] struct is created out of the raw incoming [proto::SignInit] message and the session key is queried inside from KvStore. -//! If [proto::SignInit] fails to be parsed, or no Keygen has been executed for the current session ID, an [anyhow!] 
error is returned - -// try_into() for MessageDigest -use std::convert::TryInto; - -use super::{proto, types::SignInitSanitized, Gg20Service}; -use crate::gg20::types::PartyInfo; - -// tonic cruft -use futures_util::StreamExt; -use tokio::sync::mpsc; -use tonic::Status; - -// logging -use tracing::Span; - -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -impl Gg20Service { - /// Receives a message from the stream and tries to handle sign init operations. - /// On success, it extracts the PartyInfo from the KVStrore and returns a sanitized struct ready to be used by the protocol. - /// On failure, returns an [anyhow!] error and no changes are been made in the KvStore. - pub(super) async fn handle_sign_init( - &self, - in_stream: &mut tonic::Streaming, - out_stream: &mut mpsc::UnboundedSender>, - sign_span: Span, - ) -> TofndResult<(SignInitSanitized, PartyInfo)> { - let msg_type = in_stream - .next() - .await - .ok_or_else(|| anyhow!("sign: stream closed by client without sending a message"))?? - .data - .ok_or_else(|| anyhow!("sign: missing `data` field in client message"))?; - - let sign_init = match msg_type { - proto::message_in::Data::SignInit(k) => k, - _ => return Err(anyhow!("Expected sign init message")), - }; - - // try to get party info related to session id - let party_info: PartyInfo = match self.kv_manager.kv().get(&sign_init.key_uid).await { - Ok(value) => value.try_into()?, - Err(err) => { - // if no such session id exists, send a message to client that indicates that recovery is needed and stop sign - Self::send_kv_store_failure(out_stream)?; - let err = anyhow!("Unable to find session-id {} in kv store. 
Issuing share recovery and exit sign {:?}", sign_init.key_uid, err); - return Err(err); - } - }; - - // try to sanitize arguments - let sign_init = Self::sign_sanitize_args(sign_init, &party_info.tofnd.party_uids)?; - - // log SignInitSanitized state - party_info.log_info(&sign_init.new_sig_uid, sign_span); - - Ok((sign_init, party_info)) - } - - /// send "need recover" message to client - fn send_kv_store_failure( - out_stream: &mut mpsc::UnboundedSender>, - ) -> TofndResult<()> { - Ok(out_stream.send(Ok(proto::MessageOut::need_recover()))?) - } - - /// sanitize arguments of incoming message. - /// Example: - /// input for party 'a': - /// (from keygen) party_uids = [a, b, c] - /// (from keygen) party_share_counts = [3, 2, 1] - /// proto::SignInit.party_uids = [c, a] - /// output for party 'a': - /// SignInitSanitized.party_uids = [2, 0] <- index of c, a in party_uids - fn sign_sanitize_args( - sign_init: proto::SignInit, - all_party_uids: &[String], - ) -> TofndResult { - // create a vector of the tofnd indices of the participant uids - let participant_indices = sign_init - .party_uids - .iter() - .map(|s| { - all_party_uids.iter().position(|k| k == s).ok_or_else(|| { - anyhow!( - "participant [{}] not found in key [{}]", - s, - sign_init.key_uid - ) - }) - }) - .collect::, _>>()?; - - Ok(SignInitSanitized { - new_sig_uid: sign_init.new_sig_uid, - participant_uids: sign_init.party_uids, - participant_indices, - message_to_sign: sign_init.message_to_sign.as_slice().try_into()?, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_ok_sign_sanitize_args() { - let all_party_uids = vec![ - "party_0".to_owned(), // party 0 has index 0 - "party_1".to_owned(), // party 1 has index 1 - "party_2".to_owned(), // party 2 has index 2 - ]; - - let raw_sign_init = proto::SignInit { - new_sig_uid: "test_uid".to_owned(), - key_uid: "test_uid".to_owned(), - party_uids: vec!["party_2".to_owned(), "party_1".to_owned()], - message_to_sign: vec![42; 32], - }; 
- let sanitized_sign_init = SignInitSanitized { - new_sig_uid: "test_uid".to_owned(), // new sig uid should be the same - participant_uids: vec!["party_2".to_owned(), "party_1".to_owned()], // party 2 has index 2, party 1 has index 1 - participant_indices: vec![2, 1], // indices should be [2, 1] - message_to_sign: vec![42; 32].as_slice().try_into().unwrap(), // msg of 32 bytes should be successfully converted to MessageDigest - }; - - let res = Gg20Service::sign_sanitize_args(raw_sign_init, &all_party_uids).unwrap(); - assert_eq!(&res.new_sig_uid, &sanitized_sign_init.new_sig_uid); - assert_eq!(&res.participant_uids, &sanitized_sign_init.participant_uids); - assert_eq!( - &res.participant_indices, - &sanitized_sign_init.participant_indices - ); - assert_eq!(&res.message_to_sign, &sanitized_sign_init.message_to_sign); - } - - #[test] - fn test_fail_sign_sanitize_args() { - let all_party_uids = vec![ - "party_0".to_owned(), - "party_1".to_owned(), - "party_2".to_owned(), - ]; - let raw_sign_init = proto::SignInit { - new_sig_uid: "test_uid".to_owned(), - key_uid: "test_uid".to_owned(), - party_uids: vec!["party_4".to_owned(), "party_1".to_owned()], // party 4 does not exist - message_to_sign: vec![42; 32], - }; - assert!(Gg20Service::sign_sanitize_args(raw_sign_init, &all_party_uids).is_err()); - - let raw_sign_init = proto::SignInit { - new_sig_uid: "test_uid".to_owned(), - key_uid: "test_uid".to_owned(), - party_uids: vec!["party_2".to_owned(), "party_1".to_owned()], - message_to_sign: vec![42; 33], // message is not 32 bytes - }; - assert!(Gg20Service::sign_sanitize_args(raw_sign_init, &all_party_uids).is_err()); - } -} diff --git a/src/gg20/sign/mod.rs b/src/gg20/sign/mod.rs deleted file mode 100644 index 58e0cb06..00000000 --- a/src/gg20/sign/mod.rs +++ /dev/null @@ -1,118 +0,0 @@ -//! Handles the sign streaming gRPC for one party. -//! -//! Protocol: -//! 1. [self::init] First, the initialization message [proto::SignInit] is received from the client. -//! 
This message describes the execution of the protocol (i.e. number of sign participants, message-to-sign, etc). -//! 2. [self::execute] Then, the party starts to generate messages by invoking calls of the [tofn] library until the protocol is completed. -//! These messages are send to the client using the gRPC stream, and are broadcasted to all participating parties by the client. -//! 3. [self::result] Finally, the party receives the result of the protocol, which is also send to the client through the gRPC stream. Afterwards, the stream is closed. -//! -//! Shares: -//! Each party might have multiple shares. A single thread is created for each share. -//! We keep this information agnostic to the client, and we use the [crate::gg20::routing] layer to distribute the messages to each share. -//! The result of the protocol is common across all shares, and unique for each party. We make use of [self::result] layer to aggregate and process the result. -//! -//! All relevant helper structs and types are defined in [self::types] - -use super::{broadcast::broadcast_messages, proto, service::Gg20Service, ProtocolCommunication}; - -// tonic cruft -use tokio::sync::{mpsc, oneshot}; -use tonic::Status; - -// logging -use tracing::{span, Level, Span}; - -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -pub mod types; -use types::*; -mod execute; -mod init; -mod result; - -impl Gg20Service { - // we wrap the functionality of sign gRPC here because we can't handle errors - // conveniently when spawning theads. - pub async fn handle_sign( - &self, - mut stream_in: tonic::Streaming, - mut stream_out_sender: mpsc::UnboundedSender>, - sign_span: Span, - ) -> TofndResult<()> { - // 1. Receive SignInit, open message, sanitize arguments -> init mod - // 2. Spawn N sign threads to execute the protocol in parallel; one of each of our shares -> execute mod - // 3. Spawn 1 router thread to route messages from client to the respective sign thread -> routing mod - // 4. 
Wait for all sign threads to finish and aggregate all responses -> result mod - - // 1. - // get SignInit message from stream and sanitize arguments - let mut stream_out = stream_out_sender.clone(); - let (sign_init, party_info) = self - .handle_sign_init(&mut stream_in, &mut stream_out, sign_span.clone()) - .await?; - - // 2. - // find my share count to allocate channel vectors - let my_share_count = party_info.shares.len(); - if my_share_count == 0 { - return Err(anyhow!( - "Party {} has 0 shares assigned", - party_info.tofnd.index - )); - } - - // create in and out channels for each share, and spawn as many threads - let mut sign_senders = Vec::with_capacity(my_share_count); - let mut aggregator_receivers = Vec::with_capacity(my_share_count); - - for my_tofnd_subindex in 0..my_share_count { - // channels for communication between router (sender) and protocol threads (receivers) - let (sign_sender, sign_receiver) = mpsc::unbounded_channel(); - sign_senders.push(sign_sender); - // channels for communication between protocol threads (senders) and final result aggregator (receiver) - let (aggregator_sender, aggregator_receiver) = oneshot::channel(); - aggregator_receivers.push(aggregator_receiver); - - // wrap channels needed by internal threads; receiver chan for router and sender chan gRPC stream - let chans = ProtocolCommunication::new(sign_receiver, stream_out_sender.clone()); - // wrap all context data needed for each thread - let ctx = Context::new(sign_init.clone(), party_info.clone(), my_tofnd_subindex)?; - // clone gg20 service because tokio thread takes ownership - let gg20 = self.clone(); - - // set up log state - let log_info = ctx.log_info(); - let state = log_info.as_str(); - let execute_span = span!(parent: &sign_span, Level::INFO, "execute", state); - - // spawn sign threads - tokio::spawn(async move { - // get result of sign - let signature = gg20.execute_sign(chans, &ctx, execute_span.clone()).await; - // send result to aggregator - let _ = 
aggregator_sender.send(signature); - }); - } - - // 3. - // spin up broadcaster thread and return immediately - let span = sign_span.clone(); - tokio::spawn(async move { - broadcast_messages(&mut stream_in, sign_senders, span).await; - }); - - // 4. - // wait for all sign threads to end, get responses, and return signature - Self::handle_results( - aggregator_receivers, - &mut stream_out_sender, - &sign_init.participant_uids, - ) - .await?; - - Ok(()) - } -} diff --git a/src/gg20/sign/result.rs b/src/gg20/sign/result.rs deleted file mode 100644 index 8316ecef..00000000 --- a/src/gg20/sign/result.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! This module handles the aggregation and process of sign results. -//! When all sign threads finish, we aggregate their results and retrieve the signature of the message. The signature must be the same across all results. - -use super::{proto, types::TofnSignOutput, Gg20Service}; - -// tonic cruft -use tokio::sync::mpsc; -use tokio::sync::oneshot; -use tonic::Status; - -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -impl Gg20Service { - /// handle results from all shares - /// if all shares return a valid output, send the result to client - /// if a share does not return a valid output, return an [anyhow!] - pub(super) async fn handle_results( - aggregator_receivers: Vec>>, - stream_out_sender: &mut mpsc::UnboundedSender>, - participant_uids: &[String], - ) -> TofndResult<()> { - // create vec to store all sign outputs - // cannot use aggregator_receivers.map(|aggr| aggr.await??) 
because map() does not support async funcs - let mut sign_outputs = Vec::with_capacity(aggregator_receivers.len()); - - // wait all sign threads and get signature - for aggregator in aggregator_receivers { - let sign_output = aggregator.await??; - sign_outputs.push(sign_output); - } - - // sanity check: check if all shares produced the same signature - let first_sign_output = &sign_outputs[0]; - // skip() first element of sign outputs to avoid extra loop - for (i, sign_output) in sign_outputs.iter().enumerate().skip(1) { - if sign_output != first_sign_output { - let mut error_msg = format!( - "Signature mismatch between shares [{}] and [{}]. More mismatches may exist.", - 0, i - ); - error_msg = format!( - "{}\nReceived signatures: {:#?}", - error_msg, - sign_output.iter().enumerate().collect::>() - ); - return Err(anyhow!(error_msg)); - } - } - - // send signature to client - stream_out_sender.send(Ok(proto::MessageOut::new_sign_result( - participant_uids, - sign_outputs[0].clone(), - )))?; - Ok(()) - } -} diff --git a/src/gg20/sign/types.rs b/src/gg20/sign/types.rs deleted file mode 100644 index a6138ddc..00000000 --- a/src/gg20/sign/types.rs +++ /dev/null @@ -1,245 +0,0 @@ -//! Helper structs and implementations for [crate::gg20::sign]. 
- -// error handling -use crate::TofndResult; -use anyhow::anyhow; - -// tofn types -use super::super::MessageDigest; -use tofn::collections::{Subset, TypedUsize}; -use tofn::gg20::keygen::{GroupPublicInfo, KeygenPartyId, ShareSecretInfo}; -use tofn::gg20::sign::{SignParties, SignPartyId}; -use tofn::sdk::api::ProtocolOutput; - -/// tofn's ProtocolOutput for Sign -pub type TofnSignOutput = ProtocolOutput, SignPartyId>; -/// tofnd's ProtocolOutput for Sign -pub type TofndSignOutput = TofndResult; - -#[derive(Clone, Debug)] -pub(super) struct SignInitSanitized { - pub(super) new_sig_uid: String, // this is only used for logging - // pub(super) key_uid: String, - pub(super) participant_uids: Vec, - pub(super) participant_indices: Vec, - pub(super) message_to_sign: MessageDigest, -} - -use crate::gg20::types::PartyInfo; - -pub(super) struct Context { - pub(super) sign_init: SignInitSanitized, - pub(super) party_info: PartyInfo, - pub(super) sign_share_counts: Vec, - pub(super) tofnd_subindex: usize, - pub(super) share: ShareSecretInfo, - pub(super) sign_parties: Subset, -} - -impl Context { - /// create a new signing context - pub(super) fn new( - sign_init: SignInitSanitized, - party_info: PartyInfo, - tofnd_subindex: usize, - ) -> TofndResult { - // retrieve sign_share_couts and secret_key_shares here instead of adding - // getters to immediatelly dicover potential errors - let sign_share_counts = Self::get_sign_share_counts( - &party_info.tofnd.party_uids, - &party_info.tofnd.share_counts, - &sign_init.participant_uids, - )?; - - let sign_parties = Self::get_sign_parties( - party_info.tofnd.party_uids.len(), - &sign_init.participant_indices, - )?; - - let share = Self::get_share(&party_info, tofnd_subindex)?; - Ok(Self { - sign_init, - party_info, - sign_share_counts, - tofnd_subindex, - share, - sign_parties, - }) - } - - pub(super) fn group(&self) -> &GroupPublicInfo { - &self.party_info.common - } - - /// from keygen we have - /// party uids: [A, B, C, D] - /// 
share counts: [1, 2, 3, 4] - /// in sign we receive - /// sign uids: [D, B] - /// we need to construct an array of share counts that is alligned with sign uids - /// sign share counts: [4, 2] - fn get_sign_share_counts( - keygen_uids: &[String], - keygen_share_counts: &[usize], - sign_uids: &[String], - ) -> TofndResult> { - if keygen_uids.len() != keygen_share_counts.len() { - return Err(anyhow!("misalligned keygen uids and keygen share counts")); - } - let mut sign_share_counts = vec![]; - for sign_uid in sign_uids { - let keygen_index = keygen_uids - .iter() - .position(|uid| uid == sign_uid) - .ok_or_else(|| anyhow!("Sign uid was not found"))?; - let sign_share_count = *keygen_share_counts - .get(keygen_index) - .ok_or_else(|| anyhow!("invalid index"))?; - sign_share_counts.push(sign_share_count); - } - Ok(sign_share_counts) - } - - fn get_share(party_info: &PartyInfo, tofnd_subindex: usize) -> TofndResult { - Ok(party_info - .shares - .get(tofnd_subindex) - .ok_or_else(|| anyhow!("failed to get ShareSecretInfo from PartyInfo"))? 
- .clone()) - } - - pub(super) fn msg_to_sign(&self) -> &MessageDigest { - &self.sign_init.message_to_sign - } - - /// create a `Subset` of sign parties - /// Example: - /// from keygen init we have: - /// keygen_party_uids: [a, b, c, d] - /// keygen_party_indices: [0, 1, 2, 3] - /// from sign init we have: - /// sign_party_uids: [d, b] - /// sign_party_indices: [3, 1] - /// result: - /// sign_parties: [None -> party a with index 0 is not a signer - /// Some(()) -> party b with index 1 is a signer - /// None -> party c with index 2 is not a signer - /// Some(())] -> party d with index 3 is a signer - pub(super) fn get_sign_parties( - length: usize, - sign_indices: &[usize], - ) -> TofndResult { - let mut sign_parties = Subset::with_max_size(length); - for signer_idx in sign_indices.iter() { - if sign_parties - .add(TypedUsize::from_usize(*signer_idx)) - .is_err() - { - return Err(anyhow!("failed to call Subset::add")); - } - } - Ok(sign_parties) - } - - /// get signers' uids with respect to keygen uids ordering - /// Example: - /// from keygen init we have: - /// keygen_party_uids: [a, b, c, d] - /// from sign init we have: - /// sign_party_uids: [d, c, a] - /// result: - /// sign_parties: [a, c, d] - pub(super) fn sign_uids(&self) -> Vec { - let mut sign_uids = vec![]; - for uid in self.party_info.tofnd.party_uids.iter() { - if self - .sign_init - .participant_uids - .iter() - .any(|s_uid| s_uid == uid) - { - sign_uids.push(uid.clone()); - } - } - sign_uids - } - - /// export state; used for logging - pub(super) fn log_info(&self) -> String { - format!( - "[{}] [uid:{}, share:{}/{}]", - self.sign_init.new_sig_uid, - self.party_info.tofnd.party_uids[self.party_info.tofnd.index], - self.party_info.shares[self.tofnd_subindex] - .index() - .as_usize() - + 1, - self.party_info.common.share_count(), - ) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_sign_parties() {} - - #[test] - fn test_sign_share_counts() { - struct TestCase { - 
keygen_uids: Vec, - keygen_share_counts: Vec, - sign_uids: Vec, - result: Vec, - } - - let ok_test_cases = vec![ - TestCase { - keygen_uids: vec!["a".to_owned(), "b".to_owned()], - keygen_share_counts: vec![1, 2], - sign_uids: vec!["a".to_owned(), "b".to_owned()], - result: vec![1, 2], - }, - TestCase { - keygen_uids: vec!["b".to_owned(), "a".to_owned()], - keygen_share_counts: vec![1, 2], - sign_uids: vec!["a".to_owned()], - result: vec![2], - }, - ]; - - let fail_test_cases = vec![ - TestCase { - keygen_uids: vec!["a".to_owned(), "b".to_owned()], - keygen_share_counts: vec![1, 2], - sign_uids: vec!["c".to_owned()], // party "c" does not exist - result: vec![], - }, - TestCase { - keygen_uids: vec!["a".to_owned(), "b".to_owned()], - keygen_share_counts: vec![1, 2, 3], // keygen shares not alligned with uids - sign_uids: vec!["a".to_owned()], - result: vec![], - }, - ]; - - for t in ok_test_cases { - let res = Context::get_sign_share_counts( - &t.keygen_uids, - &t.keygen_share_counts, - &t.sign_uids, - ); - assert_eq!(res.unwrap(), t.result); - } - for t in fail_test_cases { - let res = Context::get_sign_share_counts( - &t.keygen_uids, - &t.keygen_share_counts, - &t.sign_uids, - ); - assert!(res.is_err()); - } - } -} diff --git a/src/gg20/types.rs b/src/gg20/types.rs deleted file mode 100644 index 977078a1..00000000 --- a/src/gg20/types.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! Helper structs and implementations for [crate::gg20]. 
- -// zeroize Entropy and Password -use zeroize::Zeroize; - -use tracing::{info, span, Level, Span}; - -pub(super) type MessageDigest = tofn::gg20::sign::MessageDigest; - -/// Mnemonic type needs to be known globaly to create/access the mnemonic kv store -#[derive(Zeroize, Debug, Clone, Serialize, Deserialize)] -#[zeroize(drop)] -pub struct Entropy(pub Vec); - -#[derive(Zeroize, Clone)] -#[zeroize(drop)] -pub struct Password(pub String); - -use tokio::sync::mpsc; -/// define the input and output channels of generic execute_protocol worker -pub(super) struct ProtocolCommunication { - pub(super) receiver: mpsc::UnboundedReceiver, - pub(super) sender: mpsc::UnboundedSender, -} -impl ProtocolCommunication { - pub fn new( - receiver: mpsc::UnboundedReceiver, - sender: mpsc::UnboundedSender, - ) -> Self { - Self { receiver, sender } - } -} - -use serde::{Deserialize, Serialize}; -use tofn::gg20::keygen::{GroupPublicInfo, SecretKeyShare, ShareSecretInfo}; - -/// Struct to hold `tonfd` info. This consists of information we need to -/// store in the KV store that is not relevant to `tofn` -#[derive(Debug, Clone, Serialize, Deserialize)] -pub(super) struct TofndInfo { - pub(super) party_uids: Vec, - pub(super) share_counts: Vec, - pub(super) index: usize, -} - -/// `KeyShareKv` record -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PartyInfo { - pub(super) common: GroupPublicInfo, - pub(super) shares: Vec, - pub(super) tofnd: TofndInfo, -} - -impl PartyInfo { - /// Get GroupPublicInfo and ShareSecretInfo from tofn to create PartyInfo - /// Also needed in recovery - pub(super) fn get_party_info( - secret_key_shares: Vec, - uids: Vec, - share_counts: Vec, - tofnd_index: usize, - ) -> Self { - // grap the first share to acquire common data - let common = secret_key_shares[0].group().clone(); - - // aggregate share data into a vector - let shares = secret_key_shares - .into_iter() - .map(|share| share.share().clone()) - .collect(); - - // add tofnd data - let tofnd 
= TofndInfo { - party_uids: uids, - share_counts, - index: tofnd_index, - }; - - PartyInfo { - common, - shares, - tofnd, - } - } - - /// log PartyInfo state - pub(super) fn log_info(&self, session_id: &str, sign_span: Span) { - let init_span = span!(parent: &sign_span, Level::INFO, "init"); - let _enter = init_span.enter(); - - info!( - "[uid:{}, shares:{}] starting Sign with [key: {}, (t,n)=({},{}), participants:{:?}", - self.tofnd.party_uids[self.tofnd.index], - self.tofnd.share_counts[self.tofnd.index], - session_id, - self.common.threshold(), - self.tofnd.share_counts.iter().sum::(), - self.tofnd.party_uids, - ); - } -} diff --git a/src/kv_manager/kv.rs b/src/kv_manager/kv.rs index 30c8b018..1d9ebb04 100644 --- a/src/kv_manager/kv.rs +++ b/src/kv_manager/kv.rs @@ -63,6 +63,7 @@ where } /// Unreserves an existing reservation + #[allow(dead_code)] pub async fn unreserve_key(&self, reservation: KeyReservation) { let _ = self.sender.send(UnreserveKey { reservation }); } @@ -150,12 +151,10 @@ pub fn get_kv_store( } // private handler function to process commands as per the "actor" pattern (see above) -async fn kv_cmd_handler( +async fn kv_cmd_handler( mut rx: mpsc::UnboundedReceiver>, kv: encrypted_sled::Db, -) where - V: Serialize + DeserializeOwned, -{ +) { // if resp.send() fails then log a warning and continue // see discussion https://github.com/axelarnetwork/tofnd/pull/15#discussion_r595426775 while let Some(cmd) = rx.recv().await { diff --git a/src/kv_manager/tests.rs b/src/kv_manager/tests.rs index defa0d88..f47a22b7 100644 --- a/src/kv_manager/tests.rs +++ b/src/kv_manager/tests.rs @@ -153,7 +153,7 @@ fn get_failure() { #[test] fn test_exists() { let kv_name = testdir!(); - let kv = open_with_test_password(&kv_name).unwrap(); + let kv = open_with_test_password(kv_name).unwrap(); let key: String = "key".to_string(); let value: String = "value".to_string(); diff --git a/src/kv_manager/value.rs b/src/kv_manager/value.rs index 527d989e..849c2c47 100644 --- 
a/src/kv_manager/value.rs +++ b/src/kv_manager/value.rs @@ -3,8 +3,7 @@ use tofn::sdk::api::{deserialize, serialize}; use crate::{ encrypted_sled::Password, - gg20::types::{Entropy, PartyInfo}, - mnemonic::FileIo, + mnemonic::{Entropy, FileIo}, }; use super::{ @@ -37,22 +36,6 @@ impl KvManager { /// Value type stored in the kv-store type KvValue = Vec; -/// Create PartyInfo from KvValue -impl TryFrom for PartyInfo { - type Error = InnerKvError; - fn try_from(v: KvValue) -> Result { - deserialize(&v).ok_or(InnerKvError::DeserializationErr) - } -} - -/// Create KvValue from PartyInfo -impl TryFrom for KvValue { - type Error = InnerKvError; - fn try_from(v: PartyInfo) -> Result { - serialize(&v).map_err(|_| InnerKvError::SerializationErr) - } -} - /// Create Entropy from KvValue impl TryFrom for Entropy { type Error = InnerKvError; diff --git a/src/main.rs b/src/main.rs index de49d43a..86f15870 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,7 +3,6 @@ use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; mod encrypted_sled; -mod gg20; mod kv_manager; mod mnemonic; mod multisig; @@ -36,17 +35,6 @@ fn set_up_logs() { .init(); } -#[cfg(feature = "malicious")] -pub fn warn_for_malicious_build() { - use tracing::warn; - warn!("WARNING: THIS tofnd BINARY AS COMPILED IN 'MALICIOUS' MODE. MALICIOUS BEHAVIOUR IS INTENTIONALLY INSERTED INTO SOME MESSAGES. THIS BEHAVIOUR WILL CAUSE OTHER tofnd PROCESSES TO IDENTIFY THE CURRENT PROCESS AS MALICIOUS."); -} - -fn warn_for_unsafe_execution() { - use tracing::warn; - warn!("WARNING: THIS tofnd BINARY IS NOT SAFE: SAFE PRIMES ARE NOT USED BECAUSE '--unsafe' FLAG IS ENABLED. 
USE '--unsafe' FLAG ONLY FOR TESTING."); -} - /// worker_threads defaults to the number of cpus on the system /// https://docs.rs/tokio/1.2.0/tokio/attr.main.html#multi-threaded-runtime #[tokio::main(flavor = "multi_thread")] @@ -58,13 +46,6 @@ async fn main() -> TofndResult<()> { // immediately read an encryption password from stdin let password = cfg.password_method.execute()?; - // print config warnings - #[cfg(feature = "malicious")] - warn_for_malicious_build(); - if !cfg.safe_keygen { - warn_for_unsafe_execution(); - } - // set up span for logs let main_span = span!(Level::INFO, "main"); let _enter = main_span.enter(); @@ -75,7 +56,6 @@ async fn main() -> TofndResult<()> { .handle_mnemonic(&cfg.mnemonic_cmd) .await?; - let gg20_service = gg20::service::new_service(cfg, kv_manager.clone()); let multisig_service = multisig::service::new_service(kv_manager); if cmd.exit_after_cmd() { @@ -83,7 +63,6 @@ async fn main() -> TofndResult<()> { return Ok(()); } - let gg20_service = proto::gg20_server::Gg20Server::new(gg20_service); let multisig_service = proto::multisig_server::MultisigServer::new(multisig_service); let incoming = TcpListener::bind(socket_address).await?; @@ -93,7 +72,6 @@ async fn main() -> TofndResult<()> { ); tonic::transport::Server::builder() - .add_service(gg20_service) .add_service(multisig_service) .serve_with_incoming_shutdown(TcpListenerStream::new(incoming), shutdown_signal()) .await?; diff --git a/src/mnemonic/bip39_bindings.rs b/src/mnemonic/bip39_bindings.rs index 9fc017fe..f5a0ed20 100644 --- a/src/mnemonic/bip39_bindings.rs +++ b/src/mnemonic/bip39_bindings.rs @@ -8,7 +8,7 @@ //! 
[crate::gg20::Password], [crate::gg20::Entropy], [bip39::Mnemonic], [bip39::Seed] use super::results::bip39::{Bip39Error::*, Bip39Result}; -use crate::gg20::types::{Entropy, Password}; +use super::types::{Entropy, Password}; use bip39::{Language, Mnemonic, Seed}; // TODO: we can enrich the API so that users can decide which language they want to use diff --git a/src/mnemonic/cmd_handler.rs b/src/mnemonic/cmd_handler.rs index 2d783b44..da5f7dd8 100644 --- a/src/mnemonic/cmd_handler.rs +++ b/src/mnemonic/cmd_handler.rs @@ -5,18 +5,13 @@ use super::{ results::mnemonic::{ InnerMnemonicError::*, InnerMnemonicResult, MnemonicError::*, MnemonicResult, SeedResult, }, + types::{Entropy, Password}, }; -use crate::{ - gg20::types::{Entropy, Password}, // TODO: move from gg20::types - kv_manager::{ - error::{InnerKvError, KvError}, - KeyReservation, KvManager, - }, -}; -use tofn::{ - gg20::keygen::SecretRecoveryKey, - sdk::api::{deserialize, serialize}, +use crate::kv_manager::{ + error::{InnerKvError, KvError}, + KeyReservation, KvManager, }; +use tofn::sdk::api::{deserialize, serialize, SecretRecoveryKey}; use rpassword::read_password; use std::convert::TryInto; diff --git a/src/mnemonic/file_io.rs b/src/mnemonic/file_io.rs index 06edf93a..3e348aa0 100644 --- a/src/mnemonic/file_io.rs +++ b/src/mnemonic/file_io.rs @@ -4,8 +4,8 @@ use std::{io::Write, path::PathBuf}; use tracing::info; +use super::types::Entropy; use super::{bip39_bindings::bip39_from_entropy, results::file_io::FileIoError::Exists}; -use crate::gg20::types::Entropy; /// name of export file const EXPORT_FILE: &str = "export"; @@ -48,7 +48,7 @@ impl FileIo { // if there is an existing exported file raise an error self.check_if_not_exported()?; - let mut file = std::fs::File::create(&self.export_path())?; + let mut file = std::fs::File::create(self.export_path())?; file.write_all(phrase.as_bytes())?; file.sync_all()?; diff --git a/src/mnemonic/mod.rs b/src/mnemonic/mod.rs index c8a23787..00de07f5 100644 --- 
a/src/mnemonic/mod.rs +++ b/src/mnemonic/mod.rs @@ -10,6 +10,8 @@ mod bip39_bindings; mod cmd_handler; mod file_io; mod results; +mod types; pub use cmd_handler::Cmd; pub use file_io::FileIo; +pub use types::Entropy; diff --git a/src/mnemonic/types.rs b/src/mnemonic/types.rs new file mode 100644 index 00000000..ab61d0a5 --- /dev/null +++ b/src/mnemonic/types.rs @@ -0,0 +1,13 @@ +//! Mnemonic types + +use serde::{Deserialize, Serialize}; +use zeroize::Zeroize; + +/// Mnemonic type needs to be known globaly to create/access the mnemonic kv store +#[derive(Zeroize, Debug, Clone, Serialize, Deserialize)] +#[zeroize(drop)] +pub struct Entropy(pub Vec); + +#[derive(Zeroize, Clone)] +#[zeroize(drop)] +pub struct Password(pub String); diff --git a/src/multisig/keypair.rs b/src/multisig/keypair.rs index 836df969..e407665d 100644 --- a/src/multisig/keypair.rs +++ b/src/multisig/keypair.rs @@ -2,7 +2,7 @@ use crate::{proto::Algorithm, TofndResult}; use anyhow::anyhow; use tofn::{ ecdsa, ed25519, - multisig::{keygen::SecretRecoveryKey, sign::MessageDigest}, + sdk::api::{MessageDigest, SecretRecoveryKey}, }; pub enum KeyPair { @@ -18,14 +18,14 @@ impl KeyPair { ) -> TofndResult { Ok(match algorithm { Algorithm::Ecdsa => { - let key_pair = ecdsa::keygen(&secret_recovery_key, session_nonce) + let key_pair = ecdsa::keygen(secret_recovery_key, session_nonce) .map_err(|_| anyhow!("Cannot generate keypair"))?; Self::Ecdsa(key_pair) } Algorithm::Ed25519 => { - let key_pair = ed25519::keygen(&secret_recovery_key, session_nonce) + let key_pair = ed25519::keygen(secret_recovery_key, session_nonce) .map_err(|_| anyhow!("Cannot generate keypair"))?; Self::Ed25519(key_pair) diff --git a/src/multisig/sign.rs b/src/multisig/sign.rs index d56d39a8..46f486d5 100644 --- a/src/multisig/sign.rs +++ b/src/multisig/sign.rs @@ -5,7 +5,7 @@ use crate::{ }; use anyhow::anyhow; use std::convert::TryInto; -use tofn::multisig::keygen::SecretRecoveryKey; +use tofn::sdk::api::SecretRecoveryKey; impl 
MultisigService { pub(super) async fn handle_sign(&self, request: &SignRequest) -> TofndResult> { diff --git a/src/tests/honest_test_cases.rs b/src/tests/honest_test_cases.rs deleted file mode 100644 index cef119a6..00000000 --- a/src/tests/honest_test_cases.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::tests::TestCase; -use crate::tests::{ - run_keygen_fail_test_cases, run_restart_recover_test_cases, run_restart_test_cases, - run_sign_fail_test_cases, run_test_cases, -}; - -#[cfg(feature = "malicious")] -use super::malicious::MaliciousData; - -use crate::proto::message_out::CriminalList; - -use tracing_test::traced_test; // logs for tests - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn general_honest_test_cases() { - run_test_cases(&generate_honest_cases()).await; -} - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn honest_test_cases_with_restart() { - run_restart_test_cases(&generate_honest_cases()).await; -} - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn honest_test_cases_with_recover() { - run_restart_recover_test_cases(&generate_honest_cases()).await; -} - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn keygen_fail_cases() { - run_keygen_fail_test_cases(&generate_fail_cases()).await; -} - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn sign_fail_cases() { - run_sign_fail_test_cases(&generate_fail_cases()).await; -} - -impl TestCase { - pub(super) fn new( - uid_count: usize, - share_counts: Vec, - threshold: usize, - signer_indices: Vec, - ) -> TestCase { - TestCase { - uid_count, - share_counts, - threshold, - signer_indices, - expected_keygen_faults: CriminalList::default(), - expected_sign_faults: CriminalList::default(), - #[cfg(feature = "malicious")] - malicious_data: MaliciousData::empty(uid_count), - } - } -} - -#[rustfmt::skip] // skip formatting to make file more readable -pub(super) fn generate_honest_cases() -> Vec { - vec![ - TestCase::new(4, vec![], 
0, vec![0, 1, 2, 3]), // should initialize share_counts into [1,1,1,1,1] - TestCase::new(5, vec![1, 1, 1, 1, 1], 3, vec![1, 4, 2, 3]), // 1 share per uid - TestCase::new(5, vec![1, 2, 1, 3, 2], 6, vec![1, 4, 2, 3]), // multiple shares per uid - TestCase::new(1, vec![1], 0, vec![0]), // trivial case - // TestCase::new(5, vec![1,2,3,4,20], 27, vec![0, 1, 4, 3, 2]), // Create a malicious party - ] -} - -pub(super) fn generate_fail_cases() -> Vec { - vec![ - TestCase::new(1, vec![], 0, vec![0]), // trivial case - TestCase::new(5, vec![1, 2, 1, 3, 2], 6, vec![1, 4, 2, 3]), // multiple shares per uid - ] -} diff --git a/src/tests/malicious/keygen_test_cases.rs b/src/tests/malicious/keygen_test_cases.rs deleted file mode 100644 index da26c1ed..00000000 --- a/src/tests/malicious/keygen_test_cases.rs +++ /dev/null @@ -1,190 +0,0 @@ -use crate::proto::message_out::{ - criminal_list::{criminal::CrimeType, Criminal}, - CriminalList, -}; -use tofn::collections::TypedUsize; - -use tofn::gg20::keygen::malicious::Behaviour::{self, *}; - -use super::super::{run_test_cases, TestCase}; -use super::{Disrupt, MaliciousData, Timeout}; - -use tracing_test::traced_test; // log for tests - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn keygen_malicious_general_cases() { - run_test_cases(&generate_basic_cases()).await; -} - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn keygen_malicious_multiple_per_round() { - run_test_cases(&generate_multiple_malicious_per_round()).await; -} - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn malicious_timeout_cases() { - run_test_cases(&timeout_cases()).await; -} - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn malicious_disrupt_cases() { - run_test_cases(&disrupt_cases()).await; -} - -#[derive(Clone, Debug)] -pub(crate) struct KeygenData { - pub(crate) behaviours: Vec, - pub(crate) timeout: Option, - pub(crate) disrupt: Option, -} - -impl KeygenData { - pub(super) fn 
empty(party_count: usize) -> KeygenData { - KeygenData { - behaviours: vec![Honest; party_count], - timeout: None, - disrupt: None, - } - } -} - -impl TestCase { - fn new_malicious_keygen( - uid_count: usize, - share_counts: Vec, - threshold: usize, - behaviours: Vec, - ) -> TestCase { - // expected faults: Vec, crime_type: CrimeType::Malicious}> - let mut expected_faults = vec![]; - for (i, behaviour) in behaviours.iter().enumerate() { - if matches!(behaviour, &Behaviour::Honest) { - continue; - } - expected_faults.push(Criminal { - party_uid: ((b'A' + i as u8) as char).to_string(), - crime_type: CrimeType::Malicious as i32, - }); - } - let expected_faults = CriminalList { - criminals: expected_faults, - }; - - let mut malicious_data = MaliciousData::empty(uid_count); - malicious_data.set_keygen_data(KeygenData { - behaviours, - timeout: None, - disrupt: None, - }); - - TestCase { - uid_count, - share_counts, - threshold, - signer_indices: vec![], - expected_keygen_faults: expected_faults, - expected_sign_faults: CriminalList::default(), - malicious_data, - } - } - - fn with_keygen_timeout(mut self, index: usize, round: usize) -> Self { - self.malicious_data.keygen_data.timeout = Some(Timeout { index, round }); - self.expected_keygen_faults = CriminalList { - criminals: vec![Criminal { - party_uid: ((b'A' + index as u8) as char).to_string(), - crime_type: CrimeType::NonMalicious as i32, - }], - }; - self - } - - fn with_keygen_disrupt(mut self, index: usize, round: usize) -> Self { - self.malicious_data.keygen_data.disrupt = Some(Disrupt { index, round }); - self.expected_keygen_faults = CriminalList { - criminals: vec![Criminal { - party_uid: ((b'A' + index as u8) as char).to_string(), - crime_type: CrimeType::NonMalicious as i32, - }], - }; - self - } -} - -fn generate_basic_cases() -> Vec { - let behaviours = vec![ - R1BadCommit, - R1BadEncryptionKeyProof, - R1BadZkSetupProof, - R2BadShare { - victim: TypedUsize::from_usize(0), - }, - R2BadEncryption { - 
victim: TypedUsize::from_usize(0), - }, - R3FalseAccusation { - victim: TypedUsize::from_usize(0), - }, - R3BadXIWitness, - ]; - - behaviours - .into_iter() - .map(|b| { - TestCase::new_malicious_keygen(4, vec![1, 2, 1, 3], 3, vec![Honest, Honest, Honest, b]) - }) - .collect() -} - -fn generate_multiple_malicious_per_round() -> Vec { - let victim = TypedUsize::from_usize(0); - let all_rounds_faults = vec![ - // round 1 faults - vec![R1BadCommit], - // round 2 faults - vec![R2BadEncryption { victim }, R2BadShare { victim }], - // round 3 faults - vec![R3FalseAccusation { victim }], - ]; - // create test cases for all rounds - let mut cases = Vec::new(); - for round_faults in all_rounds_faults { - let mut participants = vec![Honest]; - for fault in round_faults.into_iter() { - participants.push(fault.clone()); // behaviour data initialized with Default:default() - } - cases.push(TestCase::new_malicious_keygen( - participants.len(), - vec![1; participants.len()], - participants.len() - 1, // threshold < #parties - participants, - )); - } - cases -} - -fn timeout_cases() -> Vec { - let timeout_rounds = vec![1, 2, 3]; - timeout_rounds - .into_iter() - .map(|r| { - TestCase::new_malicious_keygen(3, vec![1, 1, 1], 2, vec![Honest, Honest, Honest]) - .with_keygen_timeout(0, r) // add timeout party at index 0 - }) - .collect() -} - -fn disrupt_cases() -> Vec { - let disrupt_rounds = vec![1, 2, 3]; - disrupt_rounds - .into_iter() - .map(|r| { - TestCase::new_malicious_keygen(3, vec![1, 1, 1], 2, vec![Honest, Honest, Honest]) - .with_keygen_disrupt(0, r) // add disrupt party at index 0 - }) - .collect() -} diff --git a/src/tests/malicious/mod.rs b/src/tests/malicious/mod.rs deleted file mode 100644 index 34eb06ed..00000000 --- a/src/tests/malicious/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -pub mod keygen_test_cases; -use keygen_test_cases::KeygenData; -pub(super) type KeygenBehaviour = tofn::gg20::keygen::malicious::Behaviour; - -pub mod sign_test_cases; -use 
sign_test_cases::SignData; -pub(super) type SignBehaviour = tofn::gg20::sign::malicious::Behaviour; - -#[derive(Clone, Debug)] -pub(crate) struct Timeout { - pub(crate) index: usize, - pub(crate) round: usize, -} - -#[derive(Clone, Debug)] -pub(crate) struct Disrupt { - pub(crate) index: usize, - pub(crate) round: usize, -} - -#[derive(Clone, Debug)] -pub(super) struct MaliciousData { - pub(super) keygen_data: KeygenData, - pub(super) sign_data: SignData, -} - -impl MaliciousData { - pub(super) fn empty(party_count: usize) -> MaliciousData { - MaliciousData { - keygen_data: KeygenData::empty(party_count), - sign_data: SignData::empty(party_count), - } - } - pub(super) fn set_keygen_data(&mut self, keygen_data: KeygenData) { - self.keygen_data = keygen_data; - } - pub(super) fn set_sign_data(&mut self, sign_data: SignData) { - self.sign_data = sign_data; - } -} - -#[derive(Clone, Debug)] -pub(super) struct PartyMaliciousData { - pub(super) timeout_round: usize, - pub(super) disrupt_round: usize, - pub(super) keygen_behaviour: KeygenBehaviour, - pub(super) sign_behaviour: SignBehaviour, -} diff --git a/src/tests/malicious/sign_test_cases.rs b/src/tests/malicious/sign_test_cases.rs deleted file mode 100644 index 90b90f7c..00000000 --- a/src/tests/malicious/sign_test_cases.rs +++ /dev/null @@ -1,243 +0,0 @@ -use crate::proto::message_out::{ - criminal_list::{criminal::CrimeType, Criminal}, - CriminalList, -}; - -use tofn::{ - collections::TypedUsize, - gg20::sign::malicious::Behaviour::{self, *}, -}; - -use super::super::{run_test_cases, TestCase}; -use super::{Disrupt, MaliciousData, Timeout}; - -use tracing_test::traced_test; // log for tests - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn malicious_general_cases() { - run_test_cases(&generate_basic_cases()).await; -} - -#[traced_test] -#[tokio::test(flavor = "multi_thread")] -async fn malicious_timeout_cases() { - run_test_cases(&timeout_cases()).await; -} - -#[traced_test] -#[tokio::test(flavor 
= "multi_thread")] -async fn malicious_disrupt_cases() { - run_test_cases(&disrupt_cases()).await; -} - -pub(super) struct Signer { - pub(super) party_index: usize, - pub(super) behaviour: Behaviour, -} - -impl Signer { - pub(super) fn new(party_index: usize, behaviour: Behaviour) -> Self { - Signer { - party_index, - behaviour, - } - } -} - -#[derive(Clone, Debug)] -pub(crate) struct SignData { - pub(crate) behaviours: Vec, - pub(crate) timeout: Option, - pub(crate) disrupt: Option, -} - -impl SignData { - pub(crate) fn empty(party_count: usize) -> SignData { - SignData { - behaviours: vec![Honest; party_count], - timeout: None, - disrupt: None, - } - } -} - -impl TestCase { - fn new_malicious_sign( - uid_count: usize, - share_counts: Vec, - threshold: usize, - signers: Vec, - ) -> TestCase { - let mut expected_faults = vec![]; - // TODO: enable this when sign faults are available - for (i, signer) in signers.iter().enumerate() { - if matches!(signer.behaviour, Behaviour::Honest) { - continue; - } - expected_faults.push(Criminal { - party_uid: ((b'A' + i as u8) as char).to_string(), - crime_type: CrimeType::Malicious as i32, - }); - } - let expected_faults = CriminalList { - criminals: expected_faults, - }; - - // we use the Signer struct to allign the beaviour type with the index of each signer - // However, in the context of tofnd, behaviour is not only related with signers, but with - // init_party, as well. That is, because we need to initialize a Gg20 service for both - // signers and non-signers. We build these vectors from user's input `sign_participants`: - // 1. behaviours -> holds the behaviour of every party (not just signers) and is alligned with tofnd party uids - // 2. 
signer_indices -> holds the tofnd index of every signer - let mut signer_indices = Vec::new(); - let mut signer_behaviours = Vec::new(); - - for signer in signers.iter() { - signer_indices.push(signer.party_index); - signer_behaviours.push(signer.behaviour.clone()); - } - - let mut behaviours = Vec::new(); - for i in 0..uid_count { - if !signer_indices.contains(&i) { - behaviours.push(Honest); - } else { - let signer_index = signer_indices.iter().position(|&idx| idx == i).unwrap(); - let signer_type = signer_behaviours[signer_index].clone(); - behaviours.push(signer_type); - } - } - - let mut malicious_data = MaliciousData::empty(uid_count); - malicious_data.set_sign_data(SignData { - behaviours, - timeout: None, - disrupt: None, - }); - - TestCase { - uid_count, - share_counts, - threshold, - signer_indices, - expected_keygen_faults: CriminalList::default(), - expected_sign_faults: expected_faults, - malicious_data, - } - } - - fn with_sign_timeout(mut self, index: usize, round: usize) -> Self { - let keygen_rounds = 4; - self.malicious_data.sign_data.timeout = Some(Timeout { - index, - round: keygen_rounds + round, - }); - self.expected_sign_faults = CriminalList { - criminals: vec![Criminal { - party_uid: ((b'A' + index as u8) as char).to_string(), - crime_type: CrimeType::NonMalicious as i32, - }], - }; - self - } - - fn with_sign_disrupt(mut self, index: usize, round: usize) -> Self { - let keygen_rounds = 4; - self.malicious_data.sign_data.disrupt = Some(Disrupt { - index, - round: round + keygen_rounds, - }); - self.expected_sign_faults = CriminalList { - criminals: vec![Criminal { - party_uid: ((b'A' + index as u8) as char).to_string(), - crime_type: CrimeType::NonMalicious as i32, - }], - }; - self - } -} - -fn generate_basic_cases() -> Vec { - let victim = TypedUsize::from_usize(0); - let behaviours = vec![ - R1BadProof { victim }, - R1BadGammaI, - R2FalseAccusation { victim }, - R2BadMta { victim }, - R2BadMtaWc { victim }, - R3FalseAccusationMta { 
victim }, - R3FalseAccusationMtaWc { victim }, - R3BadProof, - R3BadDeltaI, - R3BadKI, - R3BadAlpha { victim }, - R3BadBeta { victim }, - R4BadReveal, - R5BadProof { victim }, - R6FalseAccusation { victim }, - R6BadProof, - R6FalseType5Claim, - R7BadSI, - R7FalseType7Claim, - R3BadSigmaI, - ]; - - behaviours - .into_iter() - .map(|b| { - TestCase::new_malicious_sign( - 4, - vec![1, 1, 1, 1], - 3, - vec![ - Signer::new(0, Honest), - Signer::new(1, Honest), - Signer::new(2, Honest), - Signer::new(3, b), - ], - ) - }) - .collect() -} - -fn timeout_cases() -> Vec { - // let timeout_rounds = vec![1]; - let timeout_rounds = vec![1, 2, 3, 4, 5, 6, 7]; - timeout_rounds - .into_iter() - .map(|r| { - TestCase::new_malicious_sign( - 3, - vec![1, 1, 1], - 2, - vec![ - Signer::new(0, Honest), - Signer::new(1, Honest), - Signer::new(2, Honest), - ], - ) - .with_sign_timeout(0, r) // add timeout party at _keygen_ index 0 - }) - .collect() -} - -fn disrupt_cases() -> Vec { - let disrupt_rounds = vec![1, 2, 3, 4, 5, 6, 7]; - disrupt_rounds - .into_iter() - .map(|r| { - TestCase::new_malicious_sign( - 3, - vec![1, 1, 1], - 2, - vec![ - Signer::new(0, Honest), - Signer::new(1, Honest), - Signer::new(2, Honest), - ], - ) - .with_sign_disrupt(0, r) // add disrupt party at _keygen_ index 0 - }) - .collect() -} diff --git a/src/tests/mnemonic.rs b/src/tests/mnemonic.rs index 74be269a..44d47626 100644 --- a/src/tests/mnemonic.rs +++ b/src/tests/mnemonic.rs @@ -1,19 +1,12 @@ //! 
mnemonic tests at the TofndParty level -use super::{InitParty, TofndParty}; - use crate::mnemonic::Cmd; use testdir::testdir; -#[cfg(feature = "malicious")] -use super::MaliciousData; +use super::{tofnd_party::TofndParty, InitParty}; fn dummy_init_party() -> InitParty { - InitParty::new( - 0, - #[cfg(feature = "malicious")] - &MaliciousData::empty(1), - ) + InitParty::new(0) } #[should_panic] diff --git a/src/tests/mock.rs b/src/tests/mock.rs deleted file mode 100644 index 226987d3..00000000 --- a/src/tests/mock.rs +++ /dev/null @@ -1,94 +0,0 @@ -use crate::proto; -use std::collections::HashMap; -use tokio::sync::mpsc; -use tracing::error; - -use super::{GrpcKeygenResult, GrpcSignResult}; - -#[tonic::async_trait] -pub(super) trait Party: Sync + Send { - async fn execute_keygen( - &mut self, - init: proto::KeygenInit, - channels: SenderReceiver, - delivery: Deliverer, - notify: std::sync::Arc, - ) -> GrpcKeygenResult; - async fn execute_recover( - &mut self, - keygen_init: proto::KeygenInit, - keygen_output: proto::KeygenOutput, - ); - async fn execute_key_presence(&mut self, key_uid: String) -> bool; - async fn execute_sign( - &mut self, - init: proto::SignInit, - channels: SenderReceiver, - delivery: Deliverer, - my_uid: &str, - notify: std::sync::Arc, - ) -> GrpcSignResult; - async fn shutdown(mut self); - fn get_root(&self) -> std::path::PathBuf; -} - -pub(super) type SenderReceiver = ( - mpsc::UnboundedSender, - mpsc::UnboundedReceiver, -); -#[derive(Clone)] -pub(super) struct Deliverer { - senders: HashMap>, // (party_uid, sender) -} -impl Deliverer { - pub(super) fn with_party_ids(party_ids: &[String]) -> (Self, Vec) { - let channels: Vec = (0..party_ids.len()) - .map(|_| mpsc::unbounded_channel()) - .collect(); - let senders = party_ids - .iter() - .cloned() - .zip(channels.iter().map(|(tx, _)| tx.clone())) - .collect(); - (Deliverer { senders }, channels) - } - pub fn deliver(&self, msg: &proto::MessageOut, from: &str) { - let msg = 
msg.data.as_ref().expect("missing data"); - let msg = match msg { - proto::message_out::Data::Traffic(t) => t, - _ => { - panic!("msg must be traffic out"); - } - }; - - // simulate wire transmission: translate proto::MessageOut to proto::MessageIn - let msg_in = proto::MessageIn { - data: Some(proto::message_in::Data::Traffic(proto::TrafficIn { - from_party_uid: from.to_string(), - is_broadcast: msg.is_broadcast, - payload: msg.payload.clone(), - })), - }; - - // deliver all msgs to all parties (even p2p msgs) - for (_, sender) in self.senders.iter() { - // we need to catch for errors in case the receiver's channel closes unexpectedly - if let Err(err) = sender.send(msg_in.clone()) { - error!("Error in deliverer while sending message: {:?}", err); - } - } - } - pub fn send_timeouts(&self, secs: u64) { - let abort = proto::message_in::Data::Abort(false); - let msg_in = proto::MessageIn { data: Some(abort) }; - - // allow honest parties to exchange messages for this round - let t = std::time::Duration::from_secs(secs); - std::thread::sleep(t); - - // deliver to all parties - for (_, sender) in self.senders.iter() { - sender.send(msg_in.clone()).unwrap(); - } - } -} diff --git a/src/tests/mod.rs b/src/tests/mod.rs index 1358fd42..5c954b86 100644 --- a/src/tests/mod.rs +++ b/src/tests/mod.rs @@ -1,878 +1,23 @@ -// Notes: -// # Helper functions: -// Since we are using tokio, we need to make use of async function. That comes -// with the unfortunate necessity to declare some extra functions in order to -// facilitate the tests. These functions are: -// 1. src/kv_manager::KV::get_db_paths -// 2. src/gg20/mod::get_db_paths -// 3. 
src/gg20/mod::with_db_name - -use std::convert::TryFrom; -use std::path::{Path, PathBuf}; -use testdir::testdir; -use tokio::time::{sleep, Duration}; -use tonic::Code::InvalidArgument; - -mod mock; -mod tofnd_party; - -mod honest_test_cases; -#[cfg(feature = "malicious")] -mod malicious; -#[cfg(feature = "malicious")] -use malicious::{MaliciousData, PartyMaliciousData}; - mod mnemonic; mod socket_address; - -use crate::mnemonic::Cmd::{self, Create}; -use proto::message_out::CriminalList; -use tracing::{info, warn}; - -use crate::proto::{ - self, - message_out::{ - keygen_result::KeygenResultData::{Criminals as KeygenCriminals, Data as KeygenData}, - sign_result::SignResultData::{Criminals as SignCriminals, Signature}, - KeygenResult, SignResult, - }, -}; -use mock::{Deliverer, Party}; -use tofnd_party::TofndParty; - -// use crate::gg20::proto_helpers::to_criminals; +mod tofnd_party; lazy_static::lazy_static! { static ref MSG_TO_SIGN: Vec = vec![42; 32]; - // TODO add test for messages smaller and larger than 32 bytes } + const SLEEP_TIME: u64 = 1; const MAX_TRIES: u32 = 3; pub const DEFAULT_TEST_IP: &str = "0.0.0.0"; pub const DEFAULT_TEST_PORT: u16 = 0; // use port 0 and let the OS decide -struct TestCase { - uid_count: usize, - share_counts: Vec, - threshold: usize, - signer_indices: Vec, - expected_keygen_faults: CriminalList, - expected_sign_faults: CriminalList, - #[cfg(feature = "malicious")] - malicious_data: MaliciousData, -} - -async fn run_test_cases(test_cases: &[TestCase]) { - let restart = false; - let recover = false; - let dir = testdir!(); - for test_case in test_cases { - basic_keygen_and_sign(test_case, &dir, restart, recover).await; - } -} - -async fn run_restart_test_cases(test_cases: &[TestCase]) { - let restart = true; - let recover = false; - let dir = testdir!(); - for test_case in test_cases { - basic_keygen_and_sign(test_case, &dir, restart, recover).await; - } -} - -async fn run_restart_recover_test_cases(test_cases: &[TestCase]) { - let 
restart = true; - let recover = true; - let dir = testdir!(); - for test_case in test_cases { - basic_keygen_and_sign(test_case, &dir, restart, recover).await; - } -} - -async fn run_keygen_fail_test_cases(test_cases: &[TestCase]) { - let dir = testdir!(); - for test_case in test_cases { - keygen_init_fail(test_case, &dir).await; - } -} - -async fn run_sign_fail_test_cases(test_cases: &[TestCase]) { - let dir = testdir!(); - for test_case in test_cases { - sign_init_fail(test_case, &dir).await; - } -} - -// Horrible code duplication indeed. Don't think we should spend time here though -// because this will be deleted when axelar-core accommodates crimes -fn successful_keygen_results(results: Vec, expected_faults: &CriminalList) -> bool { - // get the first non-empty result. We can't simply take results[0] because some behaviours - // don't return results and we pad them with `None`s - let first = results.iter().find(|r| r.keygen_result_data.is_some()); - - let mut pub_keys = vec![]; - for result in results.iter() { - let res = match result.keygen_result_data.clone().unwrap() { - KeygenData(data) => data.pub_key, - KeygenCriminals(_) => continue, - }; - pub_keys.push(res); - } - - // else we have at least one result - let first = first.unwrap().clone(); - match first.keygen_result_data { - Some(KeygenData(data)) => { - let first_pub_key = &data.pub_key; - assert_eq!( - expected_faults, - &CriminalList::default(), - "expected faults but none was found" - ); - for (i, pub_key) in pub_keys.iter().enumerate() { - assert_eq!( - first_pub_key, pub_key, - "party {} didn't produce the expected pub_key", - i - ); - } - } - Some(KeygenCriminals(ref actual_faults)) => { - assert_eq!(expected_faults, actual_faults); - info!("Fault list: {:?}", expected_faults); - return false; - } - None => { - panic!("Result was None"); - } - } - true -} - -// Horrible code duplication indeed. 
Don't think we should spend time here though -// because this will be deleted when axelar-core accommodates crimes -fn check_sign_results(results: Vec, expected_faults: &CriminalList) -> bool { - // get the first non-empty result. We can't simply take results[0] because some behaviours - // don't return results and we pad them with `None`s - let first = results.iter().find(|r| r.sign_result_data.is_some()); - - let mut pub_keys = vec![]; - for result in results.iter() { - let res = match result.sign_result_data.clone().unwrap() { - Signature(signature) => signature, - SignCriminals(_) => continue, - }; - pub_keys.push(res); - } - - // else we have at least one result - let first = first.unwrap().clone(); - match first.sign_result_data { - Some(Signature(signature)) => { - let first_signature = signature; - assert_eq!( - expected_faults, - &CriminalList::default(), - "expected faults but none was found" - ); - for (i, signature) in pub_keys.iter().enumerate() { - assert_eq!( - &first_signature, signature, - "party {} didn't produce the expected signature", - i - ); - } - } - Some(SignCriminals(ref actual_faults)) => { - assert_eq!(expected_faults, actual_faults); - info!("Fault list: {:?}", expected_faults); - return false; - } - None => { - panic!("Result was None"); - } - } - true -} - -fn gather_recover_info(results: &[KeygenResult]) -> Vec { - // gather recover info - let mut recover_infos = vec![]; - for result in results.iter() { - let result_data = result.keygen_result_data.clone().unwrap(); - match result_data { - KeygenData(output) => { - recover_infos.push(output); - } - KeygenCriminals(_) => {} - } - } - recover_infos -} - -// shutdown i-th party -// returns i-th party's db path and a vec of Option that contain all parties (including i-th) -async fn shutdown_party( - parties: Vec, - party_index: usize, -) -> (Vec>, PathBuf) { - info!("shutdown party {}", party_index); - let party_root = parties[party_index].get_root(); - // use Option to temporarily 
transfer ownership of individual parties to a spawn - let mut party_options: Vec> = parties.into_iter().map(Some).collect(); - let shutdown_party = party_options[party_index].take().unwrap(); - shutdown_party.shutdown().await; - (party_options, party_root) -} - -// deletes the share kv-store of a party's db path -fn delete_party_export(mut mnemonic_path: PathBuf) { - mnemonic_path.push("export"); - std::fs::remove_file(mnemonic_path).unwrap(); -} - -// deletes the share kv-store of a party's db path -async fn delete_party_shares(mut party_db_path: PathBuf, key: &str) { - party_db_path.push("kvstore/kv"); - info!("Deleting shares for {:?}", party_db_path); - - let mut tries = 0; - let db = loop { - match sled::open(&party_db_path) { - Ok(db) => break db, - Err(err) => { - sleep(Duration::from_secs(SLEEP_TIME)).await; - warn!("({}/{}) Cannot open db: {}", tries, err, MAX_TRIES); - } - } - tries += 1; - if tries == MAX_TRIES { - panic!("Cannot open db"); - } - }; - - match db.remove(key) { - Ok(_) => {} - Err(err) => { - panic!("Could not remove key {} from kvstore: {}", key, err) - } - }; -} - -// reinitializes i-th party -// pass malicious data if we are running in malicious mode -async fn reinit_party( - mut party_options: Vec>, - party_index: usize, - testdir: &Path, - #[cfg(feature = "malicious")] malicious_data: &MaliciousData, -) -> Vec { - // initialize restarted party with its previous behaviour if we are in malicious mode - let init_party = InitParty::new( - party_index, - #[cfg(feature = "malicious")] - malicious_data, - ); - - // here we assume that the party already has a mnemonic, so we pass Cmd::Existing - party_options[party_index] = Some(TofndParty::new(init_party, Cmd::Existing, testdir).await); - - party_options - .into_iter() - .map(|o| o.unwrap()) - .collect::>() -} - -// delete all kv-stores of all parties and kill servers -async fn clean_up(parties: Vec) { - delete_dbs(&parties); - shutdown_parties(parties).await; -} - -// create parties that 
will participate in keygen/sign from testcase args -async fn init_parties_from_test_case( - test_case: &TestCase, - dir: &Path, -) -> (Vec, Vec) { - let init_parties_t = InitParties::new( - test_case.uid_count, - #[cfg(feature = "malicious")] - &test_case.malicious_data, - ); - init_parties(&init_parties_t, dir).await -} - -// keygen wrapper -async fn basic_keygen( - test_case: &TestCase, - parties: Vec, - party_uids: Vec, - new_key_uid: &str, -) -> (Vec, proto::KeygenInit, Vec, bool) { - let party_share_counts = &test_case.share_counts; - let threshold = test_case.threshold; - let expected_keygen_faults = &test_case.expected_keygen_faults; - - info!( - "======= Expected keygen crimes: {:?}", - expected_keygen_faults - ); - - #[allow(unused_variables)] // allow unsused in non malicious - let expect_timeout = false; - #[cfg(feature = "malicious")] - let expect_timeout = test_case.malicious_data.keygen_data.timeout.is_some(); - - let (parties, results, keygen_init) = execute_keygen( - parties, - &party_uids, - party_share_counts, - new_key_uid, - threshold, - expect_timeout, - ) - .await; - - // a successful keygen does not have grpc errors - let results = results.into_iter().map(|r| r.unwrap()).collect::>(); - let success = successful_keygen_results(results.clone(), expected_keygen_faults); - (parties, keygen_init, results, success) -} - -// restart i-th and optionally delete its shares kv-store -async fn restart_party( - dir: &Path, - parties: Vec, - party_index: usize, - recover: bool, - key_uid: String, - #[cfg(feature = "malicious")] malicious_data: &MaliciousData, -) -> Vec { - // shutdown party with party_index - let (party_options, shutdown_db_path) = shutdown_party(parties, party_index).await; - - // if we are going to restart, delete exported mnemonic to allow using Cmd::Existing - delete_party_export(shutdown_db_path.clone()); - - if recover { - // if we are going to recover, delete party's shares - delete_party_shares(shutdown_db_path, &key_uid).await; - 
} - - // reinit party - let mut parties = reinit_party( - party_options, - party_index, - dir, - #[cfg(feature = "malicious")] - malicious_data, - ) - .await; - - if recover { - // Check that session for the party doing recovery is absent in kvstore - let is_key_present = parties[party_index].execute_key_presence(key_uid).await; - - assert!( - !is_key_present, - "Expected session to be absent after a restart" - ); - } - - parties -} - -// main testing function -async fn basic_keygen_and_sign(test_case: &TestCase, dir: &Path, restart: bool, recover: bool) { - // set up a key uid - let new_key_uid = "Gus-test-key"; - - // use test case params to create parties - let (parties, party_uids) = init_parties_from_test_case(test_case, dir).await; - - // Check that the session is not present in the kvstore - let parties = execute_key_presence(parties, new_key_uid.into(), false).await; - - // execute keygen and return everything that will be needed later on - let (parties, keygen_init, keygen_results, success) = - basic_keygen(test_case, parties, party_uids.clone(), new_key_uid).await; - - if !success { - clean_up(parties).await; - return; - } - - // Check that the session is present in the kvstore - let parties = execute_key_presence(parties, new_key_uid.into(), true).await; - - // restart party if restart is enabled and return new parties' set - let parties = match restart { - true => { - restart_party( - dir, - parties, - test_case.signer_indices[0], - recover, - keygen_init.new_key_uid.clone(), - #[cfg(feature = "malicious")] - &test_case.malicious_data, - ) - .await - } - false => parties, - }; - - // delete party's if recover is enabled and return new parties' set - let parties = match recover { - true => { - execute_recover( - parties, - test_case.signer_indices[0], - keygen_init, - gather_recover_info(&keygen_results), - ) - .await - } - false => parties, - }; - - let expected_sign_faults = &test_case.expected_sign_faults; - - #[allow(unused_variables)] // allow 
unsused in non malicious - let expect_timeout = false; - #[cfg(feature = "malicious")] - let expect_timeout = test_case.malicious_data.sign_data.timeout.is_some(); - - // Check that the session is present in the kvstore - let parties = execute_key_presence(parties, new_key_uid.into(), true).await; - - // execute sign - let new_sig_uid = "Gus-test-sig"; - let (parties, results) = execute_sign( - parties, - &party_uids, - &test_case.signer_indices, - new_key_uid, - new_sig_uid, - &MSG_TO_SIGN, - expect_timeout, - ) - .await; - let results = results.into_iter().map(|r| r.unwrap()).collect::>(); - check_sign_results(results, expected_sign_faults); - - clean_up(parties).await; -} - -async fn keygen_init_fail(test_case: &TestCase, dir: &Path) { - // set up a key uid - let new_key_uid = "test-key"; - - // use test case params to create parties - let (parties, party_uids) = init_parties_from_test_case(test_case, dir).await; - - // execute keygen and return everything that will be needed later on - let (parties, _, _, _) = - basic_keygen(test_case, parties, party_uids.clone(), new_key_uid).await; - - // attempt to execute keygen again with the same `new_key_id` - let (parties, results, _) = execute_keygen( - parties, - &party_uids, - &test_case.share_counts, - new_key_uid, - test_case.threshold, - false, - ) - .await; - - // all results must be Err(Status) with Code::InvalidArgument - for result in results { - assert_eq!(result.err().unwrap().code(), InvalidArgument); - } - - clean_up(parties).await; -} - -async fn sign_init_fail(test_case: &TestCase, dir: &Path) { - // set up a key uid - let new_key_uid = "test-key"; - let new_sign_uid = "sign-test-key"; - - // use test case params to create parties - let (parties, party_uids) = init_parties_from_test_case(test_case, dir).await; - - // execute keygen and return everything that will be needed later on - let (parties, _, _, success) = - basic_keygen(test_case, parties, party_uids.clone(), new_key_uid).await; - 
assert!(success); - - // attempt to execute sign with malformed `MSG_TO_SIGN` - let (parties, results) = execute_sign( - parties, - &party_uids, - &test_case.signer_indices, - new_key_uid, - new_sign_uid, - &MSG_TO_SIGN[0..MSG_TO_SIGN.len() - 1], - false, - ) - .await; - - // all results must be Err(Status) with Code::InvalidArgument - for result in results { - assert_eq!(result.err().unwrap().code(), InvalidArgument); - } - - clean_up(parties).await; -} - -// struct to pass in TofndParty constructor. -// needs to include malicious when we are running in malicious mode +// Struct to pass in TofndParty constructor. struct InitParty { party_index: usize, - #[cfg(feature = "malicious")] - malicious_data: PartyMaliciousData, } impl InitParty { - // as ugly as it gets - fn new( - my_index: usize, - #[cfg(feature = "malicious")] all_malicious_data: &MaliciousData, - ) -> InitParty { - #[cfg(feature = "malicious")] - let malicious_data = { - // register timeouts - let mut timeout_round = 0; - if let Some(timeout) = all_malicious_data.keygen_data.timeout.clone() { - if timeout.index == my_index { - timeout_round = timeout.round; - } - } - if let Some(timeout) = all_malicious_data.sign_data.timeout.clone() { - if timeout.index == my_index { - timeout_round = timeout.round; - } - } - - // register disrupts - let mut disrupt_round = 0; - if let Some(disrupt) = all_malicious_data.keygen_data.disrupt.clone() { - if disrupt.index == my_index { - disrupt_round = disrupt.round; - } - } - if let Some(disrupt) = all_malicious_data.sign_data.disrupt.clone() { - if disrupt.index == my_index { - disrupt_round = disrupt.round; - } - } - - // get keygen malicious behaviours - let my_keygen_behaviour = all_malicious_data - .keygen_data - .behaviours - .get(my_index) - .unwrap() - .clone(); - - // get sign malicious behaviours - let my_sign_behaviour = all_malicious_data - .sign_data - .behaviours - .get(my_index) - .unwrap() - .clone(); - - // construct struct of malicous data - 
PartyMaliciousData { - timeout_round, - disrupt_round, - keygen_behaviour: my_keygen_behaviour, - sign_behaviour: my_sign_behaviour, - } - }; - - InitParty { - party_index: my_index, - #[cfg(feature = "malicious")] - malicious_data, - } - } -} - -// struct to pass in init_parties function. -// needs to include malicious when we are running in malicious mode -struct InitParties { - party_count: usize, - #[cfg(feature = "malicious")] - malicious_data: MaliciousData, -} - -impl InitParties { - fn new( - party_count: usize, - #[cfg(feature = "malicious")] malicious_data: &MaliciousData, - ) -> InitParties { - InitParties { - party_count, - #[cfg(feature = "malicious")] - malicious_data: malicious_data.clone(), - } + fn new(party_index: usize) -> Self { + Self { party_index } } } - -async fn init_parties( - init_parties: &InitParties, - testdir: &Path, -) -> (Vec, Vec) { - let mut parties = Vec::with_capacity(init_parties.party_count); - - // use a for loop because async closures are unstable https://github.com/rust-lang/rust/issues/62290 - for i in 0..init_parties.party_count { - let init_party = InitParty::new( - i, - #[cfg(feature = "malicious")] - &init_parties.malicious_data, - ); - parties.push(TofndParty::new(init_party, Create, testdir).await); - } - - let party_uids: Vec = (0..init_parties.party_count) - .map(|i| format!("{}", (b'A' + i as u8) as char)) - .collect(); - - (parties, party_uids) -} - -async fn shutdown_parties(parties: Vec) { - for p in parties { - p.shutdown().await; - } -} - -fn delete_dbs(parties: &[impl Party]) { - for p in parties { - // Sled creates a directory for the database and its configuration - std::fs::remove_dir_all(p.get_root()).unwrap(); - } -} - -use tonic::Status; -type GrpcKeygenResult = Result; -type GrpcSignResult = Result; - -// need to take ownership of parties `parties` and return it on completion -async fn execute_keygen( - parties: Vec, - party_uids: &[String], - party_share_counts: &[u32], - new_key_uid: &str, - 
threshold: usize, - expect_timeout: bool, -) -> (Vec, Vec, proto::KeygenInit) { - info!("Expecting timeout: [{}]", expect_timeout); - let share_count = parties.len(); - let (keygen_delivery, keygen_channel_pairs) = Deliverer::with_party_ids(party_uids); - let mut keygen_join_handles = Vec::with_capacity(share_count); - let notify = std::sync::Arc::new(tokio::sync::Notify::new()); - for (i, (mut party, channel_pair)) in parties - .into_iter() - .zip(keygen_channel_pairs.into_iter()) - .enumerate() - { - let init = proto::KeygenInit { - new_key_uid: new_key_uid.to_string(), - party_uids: party_uids.to_owned(), - party_share_counts: party_share_counts.to_owned(), - my_party_index: u32::try_from(i).unwrap(), - threshold: u32::try_from(threshold).unwrap(), - }; - let delivery = keygen_delivery.clone(); - let n = notify.clone(); - let handle = tokio::spawn(async move { - let result = party.execute_keygen(init, channel_pair, delivery, n).await; - (party, result) - }); - keygen_join_handles.push(handle); - } - - // Sleep here to prevent data races between parties: - // some clients might start sending TrafficIn messages to other parties' - // servers before these parties manage to receive their own - // KeygenInit/SignInit from their clients. This leads to an - // `WrongMessage` error. 
- sleep(Duration::from_secs(SLEEP_TIME)).await; - // wake up one party - notify.notify_one(); - - // if we are expecting a timeout, abort parties after a reasonable amount of time - if expect_timeout { - let unblocker = keygen_delivery.clone(); - abort_parties(unblocker, 10); - } - - let mut parties = Vec::with_capacity(share_count); // async closures are unstable https://github.com/rust-lang/rust/issues/62290 - let mut results = vec![]; - for h in keygen_join_handles { - let handle = h.await.unwrap(); - parties.push(handle.0); - results.push(handle.1); - } - let init = proto::KeygenInit { - new_key_uid: new_key_uid.to_string(), - party_uids: party_uids.to_owned(), - party_share_counts: party_share_counts.to_owned(), - my_party_index: 0, // return keygen for first party. Might need to change index before using - threshold: u32::try_from(threshold).unwrap(), - }; - (parties, results, init) -} - -async fn execute_key_presence( - parties: Vec, - key_uid: String, - expected_key_present: bool, -) -> Vec { - let mut handles = Vec::new(); - - for mut party in parties { - let key_uid = key_uid.clone(); - - let handle = tokio::spawn(async move { - let res = party.execute_key_presence(key_uid).await; - (party, res) - }); - - handles.push(handle); - } - - let mut parties = Vec::new(); - - for handle in handles { - let (party, is_key_present) = handle.await.unwrap(); - assert_eq!( - is_key_present, expected_key_present, - "Key presence expected to be {} but observed {}", - expected_key_present, is_key_present - ); - - parties.push(party); - } - - parties -} - -async fn execute_recover( - mut parties: Vec, - recover_party_index: usize, - mut keygen_init: proto::KeygenInit, - keygen_outputs: Vec, -) -> Vec { - // create keygen init for recovered party - let key_uid = keygen_init.new_key_uid.clone(); - - keygen_init.my_party_index = recover_party_index as u32; - parties[recover_party_index] - .execute_recover(keygen_init, keygen_outputs[recover_party_index].clone()) - .await; - - 
// Check that session for the party doing recovery is absent in kvstore - let is_key_present = parties[recover_party_index] - .execute_key_presence(key_uid) - .await; - - assert!( - is_key_present, - "Expected session to be present after a recovery" - ); - - parties -} - -// need to take ownership of parties `parties` and return it on completion -async fn execute_sign( - parties: Vec, - party_uids: &[String], - sign_participant_indices: &[usize], - key_uid: &str, - new_sig_uid: &str, - msg_to_sign: &[u8], - expect_timeout: bool, -) -> (Vec, Vec) { - info!("Expecting timeout: [{}]", expect_timeout); - let participant_uids: Vec = sign_participant_indices - .iter() - .map(|&i| party_uids[i].clone()) - .collect(); - let (sign_delivery, sign_channel_pairs) = Deliverer::with_party_ids(&participant_uids); - - // use Option to temporarily transfer ownership of individual parties to a spawn - let mut party_options: Vec> = parties.into_iter().map(Some).collect(); - - let mut sign_join_handles = Vec::with_capacity(sign_participant_indices.len()); - let notify = std::sync::Arc::new(tokio::sync::Notify::new()); - for (i, channel_pair) in sign_channel_pairs.into_iter().enumerate() { - let participant_index = sign_participant_indices[i]; - - // clone everything needed in spawn - let init = proto::SignInit { - new_sig_uid: new_sig_uid.to_string(), - key_uid: key_uid.to_string(), - party_uids: participant_uids.clone(), - message_to_sign: msg_to_sign.to_vec(), - }; - let delivery = sign_delivery.clone(); - let participant_uid = participant_uids[i].clone(); - let mut party = party_options[participant_index].take().unwrap(); - - let n = notify.clone(); - // execute the protocol in a spawn - let handle = tokio::spawn(async move { - let result = party - .execute_sign(init, channel_pair, delivery, &participant_uid, n) - .await; - (party, result) - }); - sign_join_handles.push((i, handle)); - } - - // Sleep here to prevent data races between parties: - // some clients might start sending 
TrafficIn messages to other parties' - // servers before these parties manage to receive their own - // KeygenInit/SignInit from their clients. This leads to an - // `WrongMessage` error. - sleep(Duration::from_secs(SLEEP_TIME)).await; - notify.notify_one(); - - // if we are expecting a timeout, abort parties after a reasonable amount of time - if expect_timeout { - let unblocker = sign_delivery.clone(); - abort_parties(unblocker, 10); - } - - let mut results = Vec::with_capacity(sign_join_handles.len()); - for (i, h) in sign_join_handles { - info!("Running party {}", i); - let handle = h.await.unwrap(); - party_options[sign_participant_indices[i]] = Some(handle.0); - results.push(handle.1); - } - ( - party_options - .into_iter() - .map(|o| o.unwrap()) - .collect::>(), - results, - ) -} - -fn abort_parties(unblocker: Deliverer, time: u64) { - // send an abort message if protocol is taking too much time - info!("I will send an abort message in {} seconds", time); - std::thread::spawn(move || { - unblocker.send_timeouts(time); - }); - info!("Continuing for now"); -} diff --git a/src/tests/tofnd_party.rs b/src/tests/tofnd_party.rs index ee37ea6d..59ed9fe7 100644 --- a/src/tests/tofnd_party.rs +++ b/src/tests/tofnd_party.rs @@ -1,48 +1,31 @@ -// TODO: To facilitate timeout and disruption tests we need to count incoming messages. -// This brings a bunch of functions and counters that are needed only for malicious build -// For now, we use `#[allow(allow)]` instead of `#[cfg(feature = "malicious")]` because it -// produces less friction in the code. Should implement a beeter solution soon. 
- -use super::{ - mock::SenderReceiver, Deliverer, GrpcKeygenResult, GrpcSignResult, InitParty, Party, - DEFAULT_TEST_IP, DEFAULT_TEST_PORT, MAX_TRIES, -}; +use super::{InitParty, DEFAULT_TEST_IP, DEFAULT_TEST_PORT, MAX_TRIES}; use crate::{ addr, config::Config, encrypted_sled::{get_test_password, PasswordMethod}, - gg20, kv_manager::KvManager, mnemonic::Cmd, - proto::{self, Algorithm}, + multisig, proto, tests::SLEEP_TIME, }; -use proto::message_out::{KeygenResult, SignResult}; use std::path::Path; -use std::{convert::TryFrom, path::PathBuf}; +use std::path::PathBuf; use tokio::time::{sleep, Duration}; use tokio::{net::TcpListener, sync::oneshot, task::JoinHandle}; -use tokio_stream::wrappers::{TcpListenerStream, UnboundedReceiverStream}; -use tonic::Request; +use tokio_stream::wrappers::TcpListenerStream; use tracing::{info, warn}; -#[cfg(feature = "malicious")] -use super::malicious::PartyMaliciousData; -#[cfg(feature = "malicious")] -use gg20::service::malicious::Behaviours; - // I tried to keep this struct private and return `impl Party` from new() but ran into so many problems with the Rust compiler // I also tried using Box but ran into this: https://github.com/rust-lang/rust/issues/63033 +#[allow(dead_code)] pub(super) struct TofndParty { tofnd_path: PathBuf, - client: proto::gg20_client::Gg20Client, + client: proto::multisig_client::MultisigClient, server_handle: JoinHandle<()>, server_shutdown_sender: oneshot::Sender<()>, server_port: u16, - #[cfg(feature = "malicious")] - pub(super) malicious_data: PartyMaliciousData, } impl TofndParty { @@ -65,14 +48,8 @@ impl TofndParty { mnemonic_cmd, ip: server_ip.to_string(), port: server_port, - safe_keygen: false, tofnd_path, password_method: PasswordMethod::NoPassword, - #[cfg(feature = "malicious")] - behaviours: Behaviours { - keygen: init_party.malicious_data.keygen_behaviour.clone(), - sign: init_party.malicious_data.sign_behaviour.clone(), - }, }; // start service @@ -97,9 +74,9 @@ impl TofndParty { }; let 
kv_manager = kv_manager.handle_mnemonic(&cfg.mnemonic_cmd).await.unwrap(); - let my_service = gg20::service::new_service(cfg.clone(), kv_manager); + let my_service = multisig::service::new_service(kv_manager); - let proto_service = proto::gg20_server::Gg20Server::new(my_service); + let proto_service = proto::multisig_server::MultisigServer::new(my_service); // let (startup_sender, startup_receiver) = tokio::sync::oneshot::channel::<()>(); let server_handle = tokio::spawn(async move { tonic::transport::Server::builder() @@ -122,9 +99,10 @@ impl TofndParty { // println!("party [{}] server started!", init.party_uids[my_id_index]); info!("new party [{}] connect to server...", server_port); - let client = proto::gg20_client::Gg20Client::connect(format!("http://{}", server_addr)) - .await - .unwrap(); + let client = + proto::multisig_client::MultisigClient::connect(format!("http://{}", server_addr)) + .await + .unwrap(); TofndParty { tofnd_path: cfg.tofnd_path, @@ -132,388 +110,6 @@ impl TofndParty { server_handle, server_shutdown_sender, server_port, - #[cfg(feature = "malicious")] - malicious_data: init_party.malicious_data, - } - } -} - -// r1 -> bcast -// r2 -> bcast -// r3 -> bcast + p2ps -// r4 -> bcast -#[allow(unused)] // allow unsused traffin in non malicious -fn keygen_round(msg_count: usize, all_share_counts: usize, my_share_count: usize) -> usize { - let bcast = 1; - let p2ps = all_share_counts - 1; - - let r1_msgs = bcast; - let r2_msgs = r1_msgs + bcast; - let r3_msgs = r2_msgs + bcast + p2ps; - let r4_msgs = r3_msgs + bcast; - - // multiply by my share count - let r1_msgs = r1_msgs * my_share_count; - let r2_msgs = r2_msgs * my_share_count; - let r3_msgs = r3_msgs * my_share_count; - let r4_msgs = r4_msgs * my_share_count; - - let last = r4_msgs + my_share_count; // n bcasts and n(n-1) p2ps - - if 1 <= msg_count && msg_count <= r1_msgs { - return 1; - } else if r1_msgs < msg_count && msg_count <= r2_msgs { - return 2; - } else if r2_msgs < msg_count && 
msg_count <= r3_msgs { - return 3; - } else if r3_msgs < msg_count && msg_count <= r4_msgs { - return 4; - } - - // return something that won't trigger a timeout in non-timeout malicous cases with multiple shares - usize::MAX -} - -// r1 -> bcast + p2ps -// r2 -> p2ps -// r3 -> bcast -// r4 -> bcast -// r5 -> bcast + p2ps -// r6 -> bcast -// r7 -> bcast -#[allow(unused)] // allow unsused traffin in non malicious -fn sign_round(msg_count: usize, all_share_counts: usize, my_share_count: usize) -> usize { - let bcast = 1; - let p2ps = all_share_counts - 1; - - let r1_msgs = bcast + p2ps; - let r2_msgs = r1_msgs + p2ps; - let r3_msgs = r2_msgs + bcast; - let r4_msgs = r3_msgs + bcast; - let r5_msgs = r4_msgs + bcast + p2ps; - let r6_msgs = r5_msgs + bcast; - let r7_msgs = r6_msgs + bcast; - let r8_msgs = r7_msgs + bcast; - - // multiply by my share count - let r1_msgs = r1_msgs * my_share_count; - let r2_msgs = r2_msgs * my_share_count; - let r3_msgs = r3_msgs * my_share_count; - let r4_msgs = r4_msgs * my_share_count; - let r5_msgs = r5_msgs * my_share_count; - let r6_msgs = r6_msgs * my_share_count; - let r7_msgs = r7_msgs * my_share_count; - - // let last = r4_msgs + my_share_count; // n bcasts and n(n-1) p2ps - - let mut round = 0; - if 1 <= msg_count && msg_count <= r1_msgs { - round = 1; - } else if r1_msgs < msg_count && msg_count <= r2_msgs { - round = 2; - } else if r2_msgs < msg_count && msg_count <= r3_msgs { - round = 3; - } else if r3_msgs < msg_count && msg_count <= r4_msgs { - round = 4; - } else if r4_msgs < msg_count && msg_count <= r5_msgs { - round = 5; - } else if r5_msgs < msg_count && msg_count <= r6_msgs { - round = 6; - } else if r6_msgs < msg_count && msg_count <= r7_msgs { - round = 7; - } else if r7_msgs < msg_count && msg_count <= r8_msgs { - round = 8; - } - // if we got a round from message count successfully, then add keygen rounds to it - if round != 0 { - let keygen_rounds = 4; - return round + keygen_rounds; - } - - // TODO: support 
multiple shares for sign. For now, return something that is not 0. - // panic!("message counter overflow: {}. Max is {}", msg_count, last); // this info should be a panic - - // return something that won't trigger a timeout in non-timeout malicous cases with multiple shares - usize::MAX -} - -#[tonic::async_trait] -impl Party for TofndParty { - async fn execute_keygen( - &mut self, - init: proto::KeygenInit, - channels: SenderReceiver, - delivery: Deliverer, - notify: std::sync::Arc, - ) -> GrpcKeygenResult { - let my_uid = init.party_uids[usize::try_from(init.my_party_index).unwrap()].clone(); - let (keygen_server_incoming, rx) = channels; - let mut keygen_server_outgoing = self - .client - .keygen(Request::new(UnboundedReceiverStream::new(rx))) - .await - .unwrap() - .into_inner(); - - #[allow(unused_variables)] - let all_share_count = { - if init.party_share_counts.is_empty() { - init.party_uids.len() - } else { - init.party_share_counts.iter().sum::() as usize - } - }; - #[allow(unused_variables)] - let my_share_count = { - if init.party_share_counts.is_empty() { - 1 - } else { - init.party_share_counts[init.my_party_index as usize] as usize - } - }; - // the first outbound message is keygen init info - keygen_server_incoming - .send(proto::MessageIn { - data: Some(proto::message_in::Data::KeygenInit(init)), - }) - .unwrap(); - - // block until all parties send their KeygenInit - notify.notified().await; - notify.notify_one(); - - #[allow(unused_variables)] - let mut msg_count = 1; - - let result = loop { - let msg = match keygen_server_outgoing.message().await { - Ok(msg) => match msg { - Some(msg) => msg, - None => { - warn!( - "party [{}] keygen execution was not completed due to abort", - my_uid - ); - return Ok(KeygenResult::default()); - } - }, - Err(status) => { - warn!( - "party [{}] keygen execution was not completed due to connection error: {}", - my_uid, status - ); - return Err(status); - } - }; - - let msg_type = msg.data.as_ref().expect("missing 
data"); - - match msg_type { - #[allow(unused_variables)] // allow unsused traffin in non malicious - proto::message_out::Data::Traffic(traffic) => { - // in malicous case, if we are stallers we skip the message - #[cfg(feature = "malicious")] - { - let round = keygen_round(msg_count, all_share_count, my_share_count); - if self.malicious_data.timeout_round == round { - warn!("{} is stalling a message in round {}", my_uid, round); - continue; // tough is the life of the staller - } - if self.malicious_data.disrupt_round == round { - warn!("{} is disrupting a message in round {}", my_uid, round); - let mut t = traffic.clone(); - t.payload = traffic.payload[0..traffic.payload.len() / 2].to_vec(); - let mut m = msg.clone(); - m.data = Some(proto::message_out::Data::Traffic(t)); - delivery.deliver(&m, &my_uid); - } - } - delivery.deliver(&msg, &my_uid); - } - proto::message_out::Data::KeygenResult(res) => { - info!("party [{}] keygen finished!", my_uid); - break Ok(res.clone()); - } - _ => panic!("party [{}] keygen error: bad outgoing message type", my_uid), - }; - msg_count += 1; - }; - - info!("party [{}] keygen execution complete", my_uid); - result - } - - async fn execute_recover( - &mut self, - keygen_init: proto::KeygenInit, - keygen_output: proto::KeygenOutput, - ) { - let recover_request = proto::RecoverRequest { - keygen_init: Some(keygen_init), - keygen_output: Some(keygen_output), - }; - let response = self - .client - .recover(Request::new(recover_request)) - .await - .unwrap() - .into_inner(); - - // prost way to convert i32 to enums https://github.com/danburkert/prost#enumerations - match proto::recover_response::Response::from_i32(response.response) { - Some(proto::recover_response::Response::Success) => { - info!("Got success from recover") - } - Some(proto::recover_response::Response::Fail) => { - warn!("Got fail from recover") - } - Some(proto::recover_response::Response::Unspecified) => { - panic!("Unspecified recovery response. 
Expecting Success/Fail") - } - None => { - panic!("Invalid recovery response. Could not convert i32 to enum") - } } } - - async fn execute_key_presence(&mut self, key_uid: String) -> bool { - let key_presence_request = proto::KeyPresenceRequest { - key_uid, - pub_key: vec![], - algorithm: Algorithm::Ecdsa as i32, - }; - - let response = self - .client - .key_presence(Request::new(key_presence_request)) - .await - .unwrap() - .into_inner(); - - // prost way to convert i32 to enums https://github.com/danburkert/prost#enumerations - match proto::key_presence_response::Response::from_i32(response.response) { - Some(proto::key_presence_response::Response::Present) => true, - Some(proto::key_presence_response::Response::Absent) => false, - Some(proto::key_presence_response::Response::Fail) => { - panic!("key presence request failed") - } - Some(proto::key_presence_response::Response::Unspecified) => { - panic!("Unspecified key presence response") - } - None => { - panic!("Invalid key presence response. 
Could not convert i32 to enum") - } - } - } - - async fn execute_sign( - &mut self, - init: proto::SignInit, - channels: SenderReceiver, - delivery: Deliverer, - my_uid: &str, - notify: std::sync::Arc, - ) -> GrpcSignResult { - let (sign_server_incoming, rx) = channels; - let mut sign_server_outgoing = self - .client - .sign(Request::new(UnboundedReceiverStream::new(rx))) - .await - .unwrap() - .into_inner(); - - // TODO: support multiple shares for sign - #[allow(unused_variables)] // allow unsused traffin in non malicious - let all_share_count = init.party_uids.len(); - #[allow(unused_variables)] // allow unsused traffin in non malicious - let my_share_count = 1; - - // the first outbound message is sign init info - sign_server_incoming - .send(proto::MessageIn { - data: Some(proto::message_in::Data::SignInit(init)), - }) - .unwrap(); - - // block until all parties send their SignInit - notify.notified().await; - notify.notify_one(); - - #[allow(unused_variables)] // allow unsused traffin in non malicious - let mut msg_count = 1; - - let result = loop { - let msg = match sign_server_outgoing.message().await { - Ok(msg) => match msg { - Some(msg) => msg, - None => { - warn!( - "party [{}] sign execution was not completed due to abort", - my_uid - ); - return Ok(SignResult::default()); - } - }, - Err(status) => { - warn!( - "party [{}] sign execution was not completed due to connection error: {}", - my_uid, status - ); - return Err(status); - } - }; - - let msg_type = msg.data.as_ref().expect("missing data"); - - match msg_type { - #[allow(unused_variables)] // allow unsused traffin in non malicious - proto::message_out::Data::Traffic(traffic) => { - // in malicous case, if we are stallers we skip the message - #[cfg(feature = "malicious")] - { - let round = sign_round(msg_count, all_share_count, my_share_count); - if self.malicious_data.timeout_round == round { - warn!("{} is stalling a message in round {}", my_uid, round - 4); // subtract keygen rounds - 
continue; // tough is the life of the staller - } - if self.malicious_data.disrupt_round == round { - warn!("{} is disrupting a message in round {}", my_uid, round); - let mut t = traffic.clone(); - t.payload = traffic.payload[0..traffic.payload.len() / 2].to_vec(); - let mut m = msg.clone(); - m.data = Some(proto::message_out::Data::Traffic(t)); - delivery.deliver(&m, my_uid); - } - } - delivery.deliver(&msg, my_uid); - } - proto::message_out::Data::SignResult(res) => { - info!("party [{}] sign finished!", my_uid); - break Ok(res.clone()); - } - proto::message_out::Data::NeedRecover(_) => { - info!("party [{}] needs recover", my_uid); - // when recovery is needed, sign is canceled. We abort the protocol manualy instead of waiting parties to time out - // no worries that we don't wait for enough time, we will not be checking criminals in this case - delivery.send_timeouts(0); - break Ok(SignResult::default()); - } - _ => panic!("party [{}] sign error: bad outgoing message type", my_uid), - }; - msg_count += 1; - }; - - info!("party [{}] sign execution complete", my_uid); - result - } - - async fn shutdown(mut self) { - self.server_shutdown_sender.send(()).unwrap(); // tell the server to shut down - self.server_handle.await.unwrap(); // wait for server to shut down - info!("party [{}] shutdown success", self.server_port); - } - - fn get_root(&self) -> std::path::PathBuf { - self.tofnd_path.clone() - } }