From bc26c83012c4d72ed0c62eeb72988685f67f8659 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 11 Dec 2024 16:57:04 +0300 Subject: [PATCH 01/91] new atomic pkg --- plugin/evm/admin.go | 7 +- plugin/evm/{ => atomic}/codec.go | 15 +- plugin/evm/{ => atomic}/export_tx.go | 239 +++++++++--- plugin/evm/{ => atomic}/import_tx.go | 165 +++++---- plugin/evm/{ => atomic}/metadata.go | 2 +- plugin/evm/{ => atomic}/status.go | 6 +- plugin/evm/{ => atomic}/tx.go | 119 +++--- plugin/evm/atomic/utils.go | 32 ++ plugin/evm/atomic_backend.go | 51 ++- plugin/evm/atomic_state.go | 13 +- plugin/evm/atomic_trie.go | 11 +- plugin/evm/atomic_trie_test.go | 53 +-- plugin/evm/atomic_tx_repository.go | 41 +- plugin/evm/atomic_tx_repository_test.go | 25 +- plugin/evm/block.go | 11 +- plugin/evm/{ => client}/client.go | 91 ++++- .../evm/{ => client}/client_interface_test.go | 2 +- plugin/evm/export_tx_test.go | 343 +++++++++-------- plugin/evm/formatting.go | 21 -- plugin/evm/gossip.go | 9 +- plugin/evm/gossip_test.go | 11 +- plugin/evm/handler.go | 7 +- plugin/evm/import_tx_test.go | 249 ++++++------- plugin/evm/mempool.go | 57 +-- plugin/evm/mempool_atomic_gossiping_test.go | 3 +- plugin/evm/mempool_test.go | 5 +- plugin/evm/service.go | 104 +----- plugin/evm/syncervm_test.go | 17 +- plugin/evm/test_tx.go | 44 +-- plugin/evm/tx_gossip_test.go | 27 +- plugin/evm/tx_heap.go | 17 +- plugin/evm/tx_heap_test.go | 13 +- plugin/evm/tx_test.go | 57 +-- plugin/evm/user.go | 7 +- plugin/evm/vm.go | 349 +++++------------- plugin/evm/vm_test.go | 99 ++--- 36 files changed, 1236 insertions(+), 1086 deletions(-) rename plugin/evm/{ => atomic}/codec.go (91%) rename plugin/evm/{ => atomic}/export_tx.go (58%) rename plugin/evm/{ => atomic}/import_tx.go (70%) rename plugin/evm/{ => atomic}/metadata.go (98%) rename plugin/evm/{ => atomic}/status.go (95%) rename plugin/evm/{ => atomic}/tx.go (75%) create mode 100644 plugin/evm/atomic/utils.go rename plugin/evm/{ => client}/client.go (78%) rename 
plugin/evm/{ => client}/client_interface_test.go (97%) diff --git a/plugin/evm/admin.go b/plugin/evm/admin.go index fd8d7f8d6e..e90be473a7 100644 --- a/plugin/evm/admin.go +++ b/plugin/evm/admin.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/utils/profiler" + "github.com/ava-labs/coreth/plugin/evm/client" "github.com/ethereum/go-ethereum/log" ) @@ -65,11 +66,7 @@ func (p *Admin) LockProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) err return p.profiler.LockProfile() } -type SetLogLevelArgs struct { - Level string `json:"level"` -} - -func (p *Admin) SetLogLevel(_ *http.Request, args *SetLogLevelArgs, reply *api.EmptyReply) error { +func (p *Admin) SetLogLevel(_ *http.Request, args *client.SetLogLevelArgs, reply *api.EmptyReply) error { log.Info("EVM: SetLogLevel called", "logLevel", args.Level) p.vm.ctx.Lock.Lock() diff --git a/plugin/evm/codec.go b/plugin/evm/atomic/codec.go similarity index 91% rename from plugin/evm/codec.go rename to plugin/evm/atomic/codec.go index e4c38761e3..3376eeb049 100644 --- a/plugin/evm/codec.go +++ b/plugin/evm/atomic/codec.go @@ -1,9 +1,10 @@ // (c) 2019-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( + "errors" "fmt" "github.com/ava-labs/avalanchego/codec" @@ -12,8 +13,14 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -// Codec does serialization and deserialization -var Codec codec.Manager +const CodecVersion = uint16(0) + +var ( + // Codec does serialization and deserialization + Codec codec.Manager + + errMissingAtomicTxs = errors.New("cannot build a block with non-empty extra data and zero atomic transactions") +) func init() { Codec = codec.NewDefaultManager() @@ -35,7 +42,7 @@ func init() { lc.RegisterType(&secp256k1fx.Credential{}), lc.RegisterType(&secp256k1fx.Input{}), lc.RegisterType(&secp256k1fx.OutputOwners{}), - Codec.RegisterCodec(codecVersion, lc), + Codec.RegisterCodec(CodecVersion, lc), ) if errs.Errored() { panic(errs.Err) diff --git a/plugin/evm/export_tx.go b/plugin/evm/atomic/export_tx.go similarity index 58% rename from plugin/evm/export_tx.go rename to plugin/evm/atomic/export_tx.go index a187007046..26307cface 100644 --- a/plugin/evm/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "context" @@ -9,7 +9,6 @@ import ( "fmt" "math/big" - "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/params" "github.com/holiman/uint256" @@ -30,10 +29,15 @@ import ( ) var ( - _ UnsignedAtomicTx = &UnsignedExportTx{} - _ secp256k1fx.UnsignedTx = &UnsignedExportTx{} - errExportNonAVAXInputBanff = errors.New("export input cannot contain non-AVAX in Banff") - errExportNonAVAXOutputBanff = errors.New("export output cannot contain non-AVAX in Banff") + _ UnsignedAtomicTx = &UnsignedExportTx{} + _ secp256k1fx.UnsignedTx = &UnsignedExportTx{} + ErrExportNonAVAXInputBanff = errors.New("export input cannot contain non-AVAX in Banff") + ErrExportNonAVAXOutputBanff = errors.New("export output cannot contain non-AVAX in Banff") + ErrNoExportOutputs = errors.New("tx has no export outputs") + errPublicKeySignatureMismatch = errors.New("signature doesn't match public key") + errOverflowExport = errors.New("overflow when computing export amount + txFee") + errInsufficientFunds = errors.New("insufficient funds") + errInvalidNonce = errors.New("invalid nonce") ) // UnsignedExportTx is an unsigned ExportTx @@ -73,13 +77,13 @@ func (utx *UnsignedExportTx) Verify( ) error { switch { case utx == nil: - return errNilTx + return ErrNilTx case len(utx.ExportedOutputs) == 0: - return errNoExportOutputs + return ErrNoExportOutputs case utx.NetworkID != ctx.NetworkID: - return errWrongNetworkID + return ErrWrongNetworkID case ctx.ChainID != utx.BlockchainID: - return errWrongBlockchainID + return ErrWrongChainID } // Make sure that the tx has a valid peer chain ID @@ -87,11 +91,11 @@ func (utx *UnsignedExportTx) Verify( // Note that SameSubnet verifies that [tx.DestinationChain] isn't this // chain's ID if err := verify.SameSubnet(context.TODO(), ctx, utx.DestinationChain); err != nil { - return errWrongChainID + return ErrWrongChainID } } else { if utx.DestinationChain != ctx.XChainID { - return errWrongChainID + return 
ErrWrongChainID } } @@ -100,7 +104,7 @@ func (utx *UnsignedExportTx) Verify( return err } if rules.IsBanff && in.AssetID != ctx.AVAXAssetID { - return errExportNonAVAXInputBanff + return ErrExportNonAVAXInputBanff } } @@ -110,17 +114,17 @@ func (utx *UnsignedExportTx) Verify( } assetID := out.AssetID() if assetID != ctx.AVAXAssetID && utx.DestinationChain == constants.PlatformChainID { - return errWrongChainID + return ErrWrongChainID } if rules.IsBanff && assetID != ctx.AVAXAssetID { - return errExportNonAVAXOutputBanff + return ErrExportNonAVAXOutputBanff } } if !avax.IsSortedTransferableOutputs(utx.ExportedOutputs, Codec) { - return errOutputsNotSorted + return ErrOutputsNotSorted } if rules.IsApricotPhase1 && !utils.IsSortedAndUnique(utx.Ins) { - return errInputsNotSortedUnique + return ErrInputsNotSortedUnique } return nil @@ -176,13 +180,14 @@ func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { // SemanticVerify this transaction is valid. func (utx *UnsignedExportTx) SemanticVerify( - vm *VM, + backend *Backend, stx *Tx, - _ *Block, + parent AtomicBlockContext, baseFee *big.Int, - rules params.Rules, ) error { - if err := utx.Verify(vm.ctx, rules); err != nil { + ctx := backend.Ctx + rules := backend.Rules + if err := utx.Verify(ctx, rules); err != nil { return err } @@ -199,10 +204,10 @@ func (utx *UnsignedExportTx) SemanticVerify( if err != nil { return err } - fc.Produce(vm.ctx.AVAXAssetID, txFee) + fc.Produce(ctx.AVAXAssetID, txFee) // Apply fees to export transactions before Apricot Phase 3 default: - fc.Produce(vm.ctx.AVAXAssetID, params.AvalancheAtomicTxFee) + fc.Produce(ctx.AVAXAssetID, params.AvalancheAtomicTxFee) } for _, out := range utx.ExportedOutputs { fc.Produce(out.AssetID(), out.Output().Amount()) @@ -231,7 +236,7 @@ func (utx *UnsignedExportTx) SemanticVerify( if len(cred.Sigs) != 1 { return fmt.Errorf("expected one signature for EVM Input Credential, but found: %d", len(cred.Sigs)) } - pubKey, err := 
vm.secpCache.RecoverPublicKey(utx.Bytes(), cred.Sigs[0][:]) + pubKey, err := backend.SecpCache.RecoverPublicKey(utx.Bytes(), cred.Sigs[0][:]) if err != nil { return err } @@ -258,7 +263,7 @@ func (utx *UnsignedExportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { Out: out.Out, } - utxoBytes, err := Codec.Marshal(codecVersion, utxo) + utxoBytes, err := Codec.Marshal(CodecVersion, utxo) if err != nil { return ids.ID{}, nil, err } @@ -277,8 +282,11 @@ func (utx *UnsignedExportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { return utx.DestinationChain, &atomic.Requests{PutRequests: elems}, nil } -// newExportTx returns a new ExportTx -func (vm *VM) newExportTx( +// NewExportTx returns a new ExportTx +func NewExportTx( + ctx *snow.Context, + rules params.Rules, + state StateDB, assetID ids.ID, // AssetID of the tokens to export amount uint64, // Amount of tokens to export chainID ids.ID, // Chain to send the UTXOs to @@ -306,8 +314,8 @@ func (vm *VM) newExportTx( ) // consume non-AVAX - if assetID != vm.ctx.AVAXAssetID { - ins, signers, err = vm.GetSpendableFunds(keys, assetID, amount) + if assetID != ctx.AVAXAssetID { + ins, signers, err = GetSpendableFunds(ctx, state, keys, assetID, amount) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) } @@ -315,18 +323,17 @@ func (vm *VM) newExportTx( avaxNeeded = amount } - rules := vm.currentRules() switch { case rules.IsApricotPhase3: utx := &UnsignedExportTx{ - NetworkID: vm.ctx.NetworkID, - BlockchainID: vm.ctx.ChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, DestinationChain: chainID, Ins: ins, ExportedOutputs: outs, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, nil); err != nil { + if err := tx.Sign(Codec, nil); err != nil { return nil, err } @@ -336,14 +343,14 @@ func (vm *VM) newExportTx( return nil, err } - avaxIns, avaxSigners, err = vm.GetSpendableAVAXWithFee(keys, avaxNeeded, cost, baseFee) + avaxIns, avaxSigners, err = 
GetSpendableAVAXWithFee(ctx, state, keys, avaxNeeded, cost, baseFee) default: var newAvaxNeeded uint64 newAvaxNeeded, err = math.Add64(avaxNeeded, params.AvalancheAtomicTxFee) if err != nil { return nil, errOverflowExport } - avaxIns, avaxSigners, err = vm.GetSpendableFunds(keys, vm.ctx.AVAXAssetID, newAvaxNeeded) + avaxIns, avaxSigners, err = GetSpendableFunds(ctx, state, keys, ctx.AVAXAssetID, newAvaxNeeded) } if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) @@ -351,26 +358,26 @@ func (vm *VM) newExportTx( ins = append(ins, avaxIns...) signers = append(signers, avaxSigners...) - avax.SortTransferableOutputs(outs, vm.codec) + avax.SortTransferableOutputs(outs, Codec) SortEVMInputsAndSigners(ins, signers) // Create the transaction utx := &UnsignedExportTx{ - NetworkID: vm.ctx.NetworkID, - BlockchainID: vm.ctx.ChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, DestinationChain: chainID, Ins: ins, ExportedOutputs: outs, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, signers); err != nil { + if err := tx.Sign(Codec, signers); err != nil { return nil, err } - return tx, utx.Verify(vm.ctx, vm.currentRules()) + return tx, utx.Verify(ctx, rules) } // EVMStateTransfer executes the state update from the atomic export transaction -func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error { +func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state StateDB) error { addrs := map[[20]byte]uint64{} for _, from := range utx.Ins { if from.AssetID == ctx.AVAXAssetID { @@ -379,7 +386,7 @@ func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state *state.St // denomination before export. 
amount := new(uint256.Int).Mul( uint256.NewInt(from.Amount), - uint256.NewInt(x2cRate.Uint64()), + uint256.NewInt(X2CRate.Uint64()), ) if state.GetBalance(from.Address).Cmp(amount) < 0 { return errInsufficientFunds @@ -403,3 +410,151 @@ func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state *state.St } return nil } + +// GetSpendableFunds returns a list of EVMInputs and keys (in corresponding +// order) to total [amount] of [assetID] owned by [keys]. +// Note: we return [][]*secp256k1.PrivateKey even though each input +// corresponds to a single key, so that the signers can be passed in to +// [tx.Sign] which supports multiple keys on a single input. +func GetSpendableFunds( + ctx *snow.Context, + state StateDB, + keys []*secp256k1.PrivateKey, + assetID ids.ID, + amount uint64, +) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { + inputs := []EVMInput{} + signers := [][]*secp256k1.PrivateKey{} + // Note: we assume that each key in [keys] is unique, so that iterating over + // the keys will not produce duplicated nonces in the returned EVMInput slice. + for _, key := range keys { + if amount == 0 { + break + } + addr := GetEthAddress(key) + var balance uint64 + if assetID == ctx.AVAXAssetID { + // If the asset is AVAX, we divide by the x2cRate to convert back to the correct + // denomination of AVAX that can be exported. 
+ balance = new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() + } else { + balance = state.GetBalanceMultiCoin(addr, common.Hash(assetID)).Uint64() + } + if balance == 0 { + continue + } + if amount < balance { + balance = amount + } + nonce := state.GetNonce(addr) + + inputs = append(inputs, EVMInput{ + Address: addr, + Amount: balance, + AssetID: assetID, + Nonce: nonce, + }) + signers = append(signers, []*secp256k1.PrivateKey{key}) + amount -= balance + } + + if amount > 0 { + return nil, nil, errInsufficientFunds + } + + return inputs, signers, nil +} + +// GetSpendableAVAXWithFee returns a list of EVMInputs and keys (in corresponding +// order) to total [amount] + [fee] of [AVAX] owned by [keys]. +// This function accounts for the added cost of the additional inputs needed to +// create the transaction and makes sure to skip any keys with a balance that is +// insufficient to cover the additional fee. +// Note: we return [][]*secp256k1.PrivateKey even though each input +// corresponds to a single key, so that the signers can be passed in to +// [tx.Sign] which supports multiple keys on a single input. +func GetSpendableAVAXWithFee( + ctx *snow.Context, + state StateDB, + keys []*secp256k1.PrivateKey, + amount uint64, + cost uint64, + baseFee *big.Int, +) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { + initialFee, err := CalculateDynamicFee(cost, baseFee) + if err != nil { + return nil, nil, err + } + + newAmount, err := math.Add64(amount, initialFee) + if err != nil { + return nil, nil, err + } + amount = newAmount + + inputs := []EVMInput{} + signers := [][]*secp256k1.PrivateKey{} + // Note: we assume that each key in [keys] is unique, so that iterating over + // the keys will not produce duplicated nonces in the returned EVMInput slice. 
+ for _, key := range keys { + if amount == 0 { + break + } + + prevFee, err := CalculateDynamicFee(cost, baseFee) + if err != nil { + return nil, nil, err + } + + newCost := cost + EVMInputGas + newFee, err := CalculateDynamicFee(newCost, baseFee) + if err != nil { + return nil, nil, err + } + + additionalFee := newFee - prevFee + + addr := GetEthAddress(key) + // Since the asset is AVAX, we divide by the x2cRate to convert back to + // the correct denomination of AVAX that can be exported. + balance := new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() + // If the balance for [addr] is insufficient to cover the additional cost + // of adding an input to the transaction, skip adding the input altogether + if balance <= additionalFee { + continue + } + + // Update the cost for the next iteration + cost = newCost + + newAmount, err := math.Add64(amount, additionalFee) + if err != nil { + return nil, nil, err + } + amount = newAmount + + // Use the entire [balance] as an input, but if the required [amount] + // is less than the balance, update the [inputAmount] to spend the + // minimum amount to finish the transaction. + inputAmount := balance + if amount < balance { + inputAmount = amount + } + nonce := state.GetNonce(addr) + + inputs = append(inputs, EVMInput{ + Address: addr, + Amount: inputAmount, + AssetID: ctx.AVAXAssetID, + Nonce: nonce, + }) + signers = append(signers, []*secp256k1.PrivateKey{key}) + amount -= inputAmount + } + + if amount > 0 { + return nil, nil, errInsufficientFunds + } + + return inputs, signers, nil +} diff --git a/plugin/evm/import_tx.go b/plugin/evm/atomic/import_tx.go similarity index 70% rename from plugin/evm/import_tx.go rename to plugin/evm/atomic/import_tx.go index b447a717ee..0d4d367d4e 100644 --- a/plugin/evm/import_tx.go +++ b/plugin/evm/atomic/import_tx.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "context" @@ -10,7 +10,6 @@ import ( "math/big" "slices" - "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/params" "github.com/holiman/uint256" @@ -21,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -31,8 +31,19 @@ import ( var ( _ UnsignedAtomicTx = &UnsignedImportTx{} _ secp256k1fx.UnsignedTx = &UnsignedImportTx{} - errImportNonAVAXInputBanff = errors.New("import input cannot contain non-AVAX in Banff") - errImportNonAVAXOutputBanff = errors.New("import output cannot contain non-AVAX in Banff") + ErrImportNonAVAXInputBanff = errors.New("import input cannot contain non-AVAX in Banff") + ErrImportNonAVAXOutputBanff = errors.New("import output cannot contain non-AVAX in Banff") + ErrNoImportInputs = errors.New("tx has no imported inputs") + ErrConflictingAtomicInputs = errors.New("invalid block due to conflicting atomic inputs") + ErrWrongChainID = errors.New("tx has wrong chain ID") + ErrNoEVMOutputs = errors.New("tx has no EVM outputs") + ErrInputsNotSortedUnique = errors.New("inputs not sorted and unique") + ErrOutputsNotSortedUnique = errors.New("outputs not sorted and unique") + ErrOutputsNotSorted = errors.New("tx outputs not sorted") + ErrAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") + errInsufficientFundsForFee = errors.New("insufficient AVAX funds to pay transaction fee") + errRejectedParent = errors.New("rejected parent") ) // UnsignedImportTx is an unsigned ImportTx @@ -66,15 +77,15 @@ func (utx *UnsignedImportTx) Verify( ) error { switch { case utx 
== nil: - return errNilTx + return ErrNilTx case len(utx.ImportedInputs) == 0: - return errNoImportInputs + return ErrNoImportInputs case utx.NetworkID != ctx.NetworkID: - return errWrongNetworkID + return ErrWrongNetworkID case ctx.ChainID != utx.BlockchainID: - return errWrongBlockchainID + return ErrWrongChainID case rules.IsApricotPhase3 && len(utx.Outs) == 0: - return errNoEVMOutputs + return ErrNoEVMOutputs } // Make sure that the tx has a valid peer chain ID @@ -82,11 +93,11 @@ func (utx *UnsignedImportTx) Verify( // Note that SameSubnet verifies that [tx.SourceChain] isn't this // chain's ID if err := verify.SameSubnet(context.TODO(), ctx, utx.SourceChain); err != nil { - return errWrongChainID + return ErrWrongChainID } } else { if utx.SourceChain != ctx.XChainID { - return errWrongChainID + return ErrWrongChainID } } @@ -95,7 +106,7 @@ func (utx *UnsignedImportTx) Verify( return fmt.Errorf("EVM Output failed verification: %w", err) } if rules.IsBanff && out.AssetID != ctx.AVAXAssetID { - return errImportNonAVAXOutputBanff + return ErrImportNonAVAXOutputBanff } } @@ -104,20 +115,20 @@ func (utx *UnsignedImportTx) Verify( return fmt.Errorf("atomic input failed verification: %w", err) } if rules.IsBanff && in.AssetID() != ctx.AVAXAssetID { - return errImportNonAVAXInputBanff + return ErrImportNonAVAXInputBanff } } if !utils.IsSortedAndUnique(utx.ImportedInputs) { - return errInputsNotSortedUnique + return ErrInputsNotSortedUnique } if rules.IsApricotPhase2 { if !utils.IsSortedAndUnique(utx.Outs) { - return errOutputsNotSortedUnique + return ErrOutputsNotSortedUnique } } else if rules.IsApricotPhase1 { if !slices.IsSortedFunc(utx.Outs, EVMOutput.Compare) { - return errOutputsNotSorted + return ErrOutputsNotSorted } } @@ -177,13 +188,14 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { // SemanticVerify this transaction is valid. 
func (utx *UnsignedImportTx) SemanticVerify( - vm *VM, + backend *Backend, stx *Tx, - parent *Block, + parent AtomicBlockContext, baseFee *big.Int, - rules params.Rules, ) error { - if err := utx.Verify(vm.ctx, rules); err != nil { + ctx := backend.Ctx + rules := backend.Rules + if err := utx.Verify(ctx, rules); err != nil { return err } @@ -200,11 +212,11 @@ func (utx *UnsignedImportTx) SemanticVerify( if err != nil { return err } - fc.Produce(vm.ctx.AVAXAssetID, txFee) + fc.Produce(ctx.AVAXAssetID, txFee) // Apply fees to import transactions as of Apricot Phase 2 case rules.IsApricotPhase2: - fc.Produce(vm.ctx.AVAXAssetID, params.AvalancheAtomicTxFee) + fc.Produce(ctx.AVAXAssetID, params.AvalancheAtomicTxFee) } for _, out := range utx.Outs { fc.Produce(out.AssetID, out.Amount) @@ -221,7 +233,7 @@ func (utx *UnsignedImportTx) SemanticVerify( return fmt.Errorf("import tx contained mismatched number of inputs/credentials (%d vs. %d)", len(utx.ImportedInputs), len(stx.Creds)) } - if !vm.bootstrapped.Get() { + if !backend.Bootstrapped { // Allow for force committing during bootstrapping return nil } @@ -232,7 +244,7 @@ func (utx *UnsignedImportTx) SemanticVerify( utxoIDs[i] = inputID[:] } // allUTXOBytes is guaranteed to be the same length as utxoIDs - allUTXOBytes, err := vm.ctx.SharedMemory.Get(utx.SourceChain, utxoIDs) + allUTXOBytes, err := ctx.SharedMemory.Get(utx.SourceChain, utxoIDs) if err != nil { return fmt.Errorf("failed to fetch import UTXOs from %s due to: %w", utx.SourceChain, err) } @@ -241,7 +253,7 @@ func (utx *UnsignedImportTx) SemanticVerify( utxoBytes := allUTXOBytes[i] utxo := &avax.UTXO{} - if _, err := vm.codec.Unmarshal(utxoBytes, utxo); err != nil { + if _, err := Codec.Unmarshal(utxoBytes, utxo); err != nil { return fmt.Errorf("failed to unmarshal UTXO: %w", err) } @@ -250,15 +262,15 @@ func (utx *UnsignedImportTx) SemanticVerify( utxoAssetID := utxo.AssetID() inAssetID := in.AssetID() if utxoAssetID != inAssetID { - return errAssetIDMismatch 
+ return ErrAssetIDMismatch } - if err := vm.fx.VerifyTransfer(utx, in.In, cred, utxo.Out); err != nil { + if err := backend.Fx.VerifyTransfer(utx, in.In, cred, utxo.Out); err != nil { return fmt.Errorf("import tx transfer failed verification: %w", err) } } - return vm.conflicts(utx.InputUTXOs(), parent) + return conflicts(backend, utx.InputUTXOs(), parent) } // AtomicOps returns imported inputs spent on this transaction @@ -275,28 +287,11 @@ func (utx *UnsignedImportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { return utx.SourceChain, &atomic.Requests{RemoveRequests: utxoIDs}, nil } -// newImportTx returns a new ImportTx -func (vm *VM) newImportTx( - chainID ids.ID, // chain to import from - to common.Address, // Address of recipient - baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Keys to import the funds -) (*Tx, error) { - kc := secp256k1fx.NewKeychain() - for _, key := range keys { - kc.Add(key) - } - - atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1) - if err != nil { - return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) - } - - return vm.newImportTxWithUTXOs(chainID, to, baseFee, kc, atomicUTXOs) -} - -// newImportTx returns a new ImportTx -func (vm *VM) newImportTxWithUTXOs( +// NewImportTx returns a new ImportTx +func NewImportTx( + ctx *snow.Context, + rules params.Rules, + clk mockable.Clock, chainID ids.ID, // chain to import from to common.Address, // Address of recipient baseFee *big.Int, // fee to use post-AP3 @@ -307,7 +302,7 @@ func (vm *VM) newImportTxWithUTXOs( signers := [][]*secp256k1.PrivateKey{} importedAmount := make(map[ids.ID]uint64) - now := vm.clock.Unix() + now := clk.Unix() for _, utxo := range atomicUTXOs { inputIntf, utxoSigners, err := kc.Spend(utxo.Out, now) if err != nil { @@ -330,7 +325,7 @@ func (vm *VM) newImportTxWithUTXOs( signers = append(signers, utxoSigners) } avax.SortTransferableInputsWithSigners(importedInputs, signers) 
- importedAVAXAmount := importedAmount[vm.ctx.AVAXAssetID] + importedAVAXAmount := importedAmount[ctx.AVAXAssetID] outs := make([]EVMOutput, 0, len(importedAmount)) // This will create unique outputs (in the context of sorting) @@ -338,7 +333,7 @@ func (vm *VM) newImportTxWithUTXOs( for assetID, amount := range importedAmount { // Skip the AVAX amount since it is included separately to account for // the fee - if assetID == vm.ctx.AVAXAssetID || amount == 0 { + if assetID == ctx.AVAXAssetID || amount == 0 { continue } outs = append(outs, EVMOutput{ @@ -348,8 +343,6 @@ func (vm *VM) newImportTxWithUTXOs( }) } - rules := vm.currentRules() - var ( txFeeWithoutChange uint64 txFeeWithChange uint64 @@ -360,14 +353,14 @@ func (vm *VM) newImportTxWithUTXOs( return nil, errNilBaseFeeApricotPhase3 } utx := &UnsignedImportTx{ - NetworkID: vm.ctx.NetworkID, - BlockchainID: vm.ctx.ChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, Outs: outs, ImportedInputs: importedInputs, SourceChain: chainID, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, nil); err != nil { + if err := tx.Sign(Codec, nil); err != nil { return nil, err } @@ -399,7 +392,7 @@ func (vm *VM) newImportTxWithUTXOs( outs = append(outs, EVMOutput{ Address: to, Amount: importedAVAXAmount - txFeeWithChange, - AssetID: vm.ctx.AVAXAssetID, + AssetID: ctx.AVAXAssetID, }) } @@ -407,35 +400,35 @@ func (vm *VM) newImportTxWithUTXOs( // Note: this can happen if there is exactly enough AVAX to pay the // transaction fee, but no other funds to be imported. 
if len(outs) == 0 { - return nil, errNoEVMOutputs + return nil, ErrNoEVMOutputs } utils.Sort(outs) // Create the transaction utx := &UnsignedImportTx{ - NetworkID: vm.ctx.NetworkID, - BlockchainID: vm.ctx.ChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.ChainID, Outs: outs, ImportedInputs: importedInputs, SourceChain: chainID, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, signers); err != nil { + if err := tx.Sign(Codec, signers); err != nil { return nil, err } - return tx, utx.Verify(vm.ctx, vm.currentRules()) + return tx, utx.Verify(ctx, rules) } // EVMStateTransfer performs the state transfer to increase the balances of // accounts accordingly with the imported EVMOutputs -func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error { +func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state StateDB) error { for _, to := range utx.Outs { if to.AssetID == ctx.AVAXAssetID { log.Debug("import_tx", "src", utx.SourceChain, "addr", to.Address, "amount", to.Amount, "assetID", "AVAX") // If the asset is AVAX, convert the input amount in nAVAX to gWei by // multiplying by the x2c rate. - amount := new(uint256.Int).Mul(uint256.NewInt(to.Amount), x2cRate) + amount := new(uint256.Int).Mul(uint256.NewInt(to.Amount), X2CRate) state.AddBalance(to.Address, amount) } else { log.Debug("import_tx", "src", utx.SourceChain, "addr", to.Address, "amount", to.Amount, "assetID", to.AssetID) @@ -445,3 +438,43 @@ func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state *state.St } return nil } + +// conflicts returns an error if [inputs] conflicts with any of the atomic inputs contained in [ancestor] +// or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is +// accepted, then nil will be returned immediately. +// If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned. 
+func conflicts(backend *Backend, inputs set.Set[ids.ID], ancestor AtomicBlockContext) error { + fetcher := backend.BlockFetcher + lastAcceptedBlock := fetcher.LastAcceptedBlockInternal() + lastAcceptedHeight := lastAcceptedBlock.Height() + for ancestor.Height() > lastAcceptedHeight { + // If any of the atomic transactions in the ancestor conflict with [inputs] + // return an error. + for _, atomicTx := range ancestor.AtomicTxs() { + if inputs.Overlaps(atomicTx.InputUTXOs()) { + return ErrConflictingAtomicInputs + } + } + + // Move up the chain. + nextAncestorID := ancestor.Parent() + // If the ancestor is unknown, then the parent failed + // verification when it was called. + // If the ancestor is rejected, then this block shouldn't be + // inserted into the canonical chain because the parent is + // will be missing. + // If the ancestor is processing, then the block may have + // been verified. + nextAncestorIntf, err := fetcher.GetBlockInternal(context.TODO(), nextAncestorID) + if err != nil { + return errRejectedParent + } + nextAncestor, ok := nextAncestorIntf.(AtomicBlockContext) + if !ok { + return fmt.Errorf("ancestor block %s had unexpected type %T", nextAncestor.ID(), nextAncestorIntf) + } + ancestor = nextAncestor + } + + return nil +} diff --git a/plugin/evm/metadata.go b/plugin/evm/atomic/metadata.go similarity index 98% rename from plugin/evm/metadata.go rename to plugin/evm/atomic/metadata.go index 2665d329bc..7cd570f7ec 100644 --- a/plugin/evm/metadata.go +++ b/plugin/evm/atomic/metadata.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "github.com/ava-labs/avalanchego/ids" diff --git a/plugin/evm/status.go b/plugin/evm/atomic/status.go similarity index 95% rename from plugin/evm/status.go rename to plugin/evm/atomic/status.go index 14d1b009a7..c7c72d0987 100644 --- a/plugin/evm/status.go +++ b/plugin/evm/atomic/status.go @@ -1,16 +1,14 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "errors" "fmt" ) -var ( - errUnknownStatus = errors.New("unknown status") -) +var errUnknownStatus = errors.New("unknown status") // Status ... type Status uint32 diff --git a/plugin/evm/tx.go b/plugin/evm/atomic/tx.go similarity index 75% rename from plugin/evm/tx.go rename to plugin/evm/atomic/tx.go index 9361f71976..a911402dea 100644 --- a/plugin/evm/tx.go +++ b/plugin/evm/atomic/tx.go @@ -1,44 +1,50 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "bytes" + "context" "errors" "fmt" "math/big" "sort" "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" - "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/params" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +const ( + X2CRateUint64 uint64 = 1_000_000_000 + x2cRateMinus1Uint64 uint64 = X2CRateUint64 - 1 +) + var ( - errWrongBlockchainID = 
errors.New("wrong blockchain ID provided") - errWrongNetworkID = errors.New("tx was issued with a different network ID") - errNilTx = errors.New("tx is nil") - errNoValueOutput = errors.New("output has no value") - errNoValueInput = errors.New("input has no value") - errNilOutput = errors.New("nil output") - errNilInput = errors.New("nil input") - errEmptyAssetID = errors.New("empty asset ID is not valid") - errNilBaseFee = errors.New("cannot calculate dynamic fee with nil baseFee") - errFeeOverflow = errors.New("overflow occurred while calculating the fee") + ErrWrongNetworkID = errors.New("tx was issued with a different network ID") + ErrNilTx = errors.New("tx is nil") + errNoValueOutput = errors.New("output has no value") + ErrNoValueInput = errors.New("input has no value") + errNilOutput = errors.New("nil output") + errNilInput = errors.New("nil input") + errEmptyAssetID = errors.New("empty asset ID is not valid") + errNilBaseFee = errors.New("cannot calculate dynamic fee with nil baseFee") + errFeeOverflow = errors.New("overflow occurred while calculating the fee") ) // Constants for calculating the gas consumed by atomic transactions @@ -46,6 +52,12 @@ var ( TxBytesGas uint64 = 1 EVMOutputGas uint64 = (common.AddressLength + wrappers.LongLen + hashing.HashLen) * TxBytesGas EVMInputGas uint64 = (common.AddressLength+wrappers.LongLen+hashing.HashLen+wrappers.LongLen)*TxBytesGas + secp256k1fx.CostPerSignature + // X2CRate is the conversion rate between the smallest denomination on the X-Chain + // 1 nAVAX and the smallest denomination on the C-Chain 1 wei. Where 1 nAVAX = 1 gWei. + // This is only required for AVAX because the denomination of 1 AVAX is 9 decimal + // places on the X and P chains, but is 18 decimal places within the EVM. 
+ X2CRate = uint256.NewInt(X2CRateUint64) + x2cRateMinus1 = uint256.NewInt(x2cRateMinus1Uint64) ) // EVMOutput defines an output that is added to the EVM state created by import transactions @@ -98,7 +110,7 @@ func (in *EVMInput) Verify() error { case in == nil: return errNilInput case in.Amount == 0: - return errNoValueInput + return ErrNoValueInput case in.AssetID == ids.Empty: return errEmptyAssetID } @@ -115,6 +127,39 @@ type UnsignedTx interface { SignedBytes() []byte } +type Backend struct { + Ctx *snow.Context + Fx fx.Fx + Rules params.Rules + Bootstrapped bool + BlockFetcher BlockFetcher + SecpCache *secp256k1.RecoverCache +} + +type BlockFetcher interface { + LastAcceptedBlockInternal() snowman.Block + GetBlockInternal(context.Context, ids.ID) (snowman.Block, error) +} + +type AtomicBlockContext interface { + AtomicTxs() []*Tx + snowman.Block +} + +type StateDB interface { + AddBalance(common.Address, *uint256.Int) + AddBalanceMultiCoin(common.Address, common.Hash, *big.Int) + + SubBalance(common.Address, *uint256.Int) + SubBalanceMultiCoin(common.Address, common.Hash, *big.Int) + + GetBalance(common.Address) *uint256.Int + GetBalanceMultiCoin(common.Address, common.Hash) *big.Int + + GetNonce(common.Address) uint64 + SetNonce(common.Address, uint64) +} + // UnsignedAtomicTx is an unsigned operation that can be atomically accepted type UnsignedAtomicTx interface { UnsignedTx @@ -124,13 +169,14 @@ type UnsignedAtomicTx interface { // Verify attempts to verify that the transaction is well formed Verify(ctx *snow.Context, rules params.Rules) error // Attempts to verify this transaction with the provided state. - SemanticVerify(vm *VM, stx *Tx, parent *Block, baseFee *big.Int, rules params.Rules) error + // SemanticVerify this transaction is valid. 
+ SemanticVerify(backend *Backend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error // AtomicOps returns the blockchainID and set of atomic requests that // must be applied to shared memory for this transaction to be accepted. // The set of atomic requests must be returned in a consistent order. AtomicOps() (ids.ID, *atomic.Requests, error) - EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error + EVMStateTransfer(ctx *snow.Context, state StateDB) error } // Tx is a signed transaction @@ -157,7 +203,7 @@ func (tx *Tx) Compare(other *Tx) int { // Sign this transaction with the provided signers func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { - unsignedBytes, err := c.Marshal(codecVersion, &tx.UnsignedAtomicTx) + unsignedBytes, err := c.Marshal(CodecVersion, &tx.UnsignedAtomicTx) if err != nil { return fmt.Errorf("couldn't marshal UnsignedAtomicTx: %w", err) } @@ -178,7 +224,7 @@ func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { tx.Creds = append(tx.Creds, cred) // Attach credential } - signedBytes, err := c.Marshal(codecVersion, tx) + signedBytes, err := c.Marshal(CodecVersion, tx) if err != nil { return fmt.Errorf("couldn't marshal Tx: %w", err) } @@ -216,7 +262,7 @@ func (tx *Tx) BlockFeeContribution(fixedFee bool, avaxAssetID ids.ID, baseFee *b // Calculate the amount of AVAX that has been burned above the required fee denominated // in C-Chain native 18 decimal places - blockFeeContribution := new(big.Int).Mul(new(big.Int).SetUint64(excessBurned), x2cRate.ToBig()) + blockFeeContribution := new(big.Int).Mul(new(big.Int).SetUint64(excessBurned), X2CRate.ToBig()) return blockFeeContribution, new(big.Int).SetUint64(gasUsed), nil } @@ -255,7 +301,7 @@ func CalculateDynamicFee(cost uint64, baseFee *big.Int) (uint64, error) { bigCost := new(big.Int).SetUint64(cost) fee := new(big.Int).Mul(bigCost, baseFee) feeToRoundUp := new(big.Int).Add(fee, x2cRateMinus1.ToBig()) - feeInNAVAX := 
new(big.Int).Div(feeToRoundUp, x2cRate.ToBig()) + feeInNAVAX := new(big.Int).Div(feeToRoundUp, X2CRate.ToBig()) if !feeInNAVAX.IsUint64() { // the fee is more than can fit in a uint64 return 0, errFeeOverflow @@ -266,36 +312,3 @@ func CalculateDynamicFee(cost uint64, baseFee *big.Int) (uint64, error) { func calcBytesCost(len int) uint64 { return uint64(len) * TxBytesGas } - -// mergeAtomicOps merges atomic requests represented by [txs] -// to the [output] map, depending on whether [chainID] is present in the map. -func mergeAtomicOps(txs []*Tx) (map[ids.ID]*atomic.Requests, error) { - if len(txs) > 1 { - // txs should be stored in order of txID to ensure consistency - // with txs initialized from the txID index. - copyTxs := make([]*Tx, len(txs)) - copy(copyTxs, txs) - utils.Sort(copyTxs) - txs = copyTxs - } - output := make(map[ids.ID]*atomic.Requests) - for _, tx := range txs { - chainID, txRequests, err := tx.UnsignedAtomicTx.AtomicOps() - if err != nil { - return nil, err - } - mergeAtomicOpsToMap(output, chainID, txRequests) - } - return output, nil -} - -// mergeAtomicOps merges atomic ops for [chainID] represented by [requests] -// to the [output] map provided. -func mergeAtomicOpsToMap(output map[ids.ID]*atomic.Requests, chainID ids.ID, requests *atomic.Requests) { - if request, exists := output[chainID]; exists { - request.PutRequests = append(request.PutRequests, requests.PutRequests...) - request.RemoveRequests = append(request.RemoveRequests, requests.RemoveRequests...) - } else { - output[chainID] = requests - } -} diff --git a/plugin/evm/atomic/utils.go b/plugin/evm/atomic/utils.go new file mode 100644 index 0000000000..8872e09861 --- /dev/null +++ b/plugin/evm/atomic/utils.go @@ -0,0 +1,32 @@ +// (c) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "errors" + + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +var errInvalidAddr = errors.New("invalid hex address") + +// ParseEthAddress parses [addrStr] and returns an Ethereum address +func ParseEthAddress(addrStr string) (common.Address, error) { + if !common.IsHexAddress(addrStr) { + return common.Address{}, errInvalidAddr + } + return common.HexToAddress(addrStr), nil +} + +// GetEthAddress returns the ethereum address derived from [privKey] +func GetEthAddress(privKey *secp256k1.PrivateKey) common.Address { + return PublicKeyToEthAddress(privKey.PublicKey()) +} + +// PublicKeyToEthAddress returns the ethereum address derived from [pubKey] +func PublicKeyToEthAddress(pubKey *secp256k1.PublicKey) common.Address { + return crypto.PubkeyToAddress(*(pubKey.ToECDSA())) +} diff --git a/plugin/evm/atomic_backend.go b/plugin/evm/atomic_backend.go index 5a84ac3748..2420021d6f 100644 --- a/plugin/evm/atomic_backend.go +++ b/plugin/evm/atomic_backend.go @@ -8,13 +8,15 @@ import ( "fmt" "time" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/plugin/evm/atomic" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -32,7 +34,7 @@ type AtomicBackend interface { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. 
- InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) + InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) // Returns an AtomicState corresponding to a block hash that has been inserted // but not Accepted or Rejected yet. @@ -73,7 +75,7 @@ type atomicBackend struct { bonusBlocks map[uint64]ids.ID // Map of height to blockID for blocks to skip indexing db *versiondb.Database // Underlying database metadataDB database.Database // Underlying database containing the atomic trie metadata - sharedMemory atomic.SharedMemory + sharedMemory avalancheatomic.SharedMemory repo AtomicTxRepository atomicTrie AtomicTrie @@ -84,7 +86,7 @@ type atomicBackend struct { // NewAtomicBackend creates an AtomicBackend from the specified dependencies func NewAtomicBackend( - db *versiondb.Database, sharedMemory atomic.SharedMemory, + db *versiondb.Database, sharedMemory avalancheatomic.SharedMemory, bonusBlocks map[uint64]ids.ID, repo AtomicTxRepository, lastAcceptedHeight uint64, lastAcceptedHash common.Hash, commitInterval uint64, ) (AtomicBackend, error) { @@ -150,7 +152,7 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error { // iterate over the transactions, indexing them if the height is < commit height // otherwise, add the atomic operations from the transaction to the uncommittedOpsMap height = binary.BigEndian.Uint64(iter.Key()) - txs, err := ExtractAtomicTxs(iter.Value(), true, a.codec) + txs, err := atomic.ExtractAtomicTxs(iter.Value(), true, a.codec) if err != nil { return err } @@ -266,7 +268,7 @@ func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { it.Next() } - batchOps := make(map[ids.ID]*atomic.Requests) + batchOps := make(map[ids.ID]*avalancheatomic.Requests) for it.Next() { height := it.BlockNumber() if height > lastAcceptedBlock { @@ -318,7 +320,7 @@ func (a *atomicBackend) 
ApplyToSharedMemory(lastAcceptedBlock uint64) error { lastHeight = height lastBlockchainID = blockchainID putRequests, removeRequests = 0, 0 - batchOps = make(map[ids.ID]*atomic.Requests) + batchOps = make(map[ids.ID]*avalancheatomic.Requests) } } if err := it.Error(); err != nil { @@ -395,7 +397,7 @@ func (a *atomicBackend) SetLastAccepted(lastAcceptedHash common.Hash) { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. -func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) { +func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) { // access the atomic trie at the parent block parentRoot, err := a.getAtomicRootAt(parentHash) if err != nil { @@ -455,3 +457,36 @@ func (a *atomicBackend) IsBonus(blockHeight uint64, blockHash common.Hash) bool func (a *atomicBackend) AtomicTrie() AtomicTrie { return a.atomicTrie } + +// mergeAtomicOps merges atomic requests represented by [txs] +// to the [output] map, depending on whether [chainID] is present in the map. +func mergeAtomicOps(txs []*atomic.Tx) (map[ids.ID]*avalancheatomic.Requests, error) { + if len(txs) > 1 { + // txs should be stored in order of txID to ensure consistency + // with txs initialized from the txID index. + copyTxs := make([]*atomic.Tx, len(txs)) + copy(copyTxs, txs) + utils.Sort(copyTxs) + txs = copyTxs + } + output := make(map[ids.ID]*avalancheatomic.Requests) + for _, tx := range txs { + chainID, txRequests, err := tx.UnsignedAtomicTx.AtomicOps() + if err != nil { + return nil, err + } + mergeAtomicOpsToMap(output, chainID, txRequests) + } + return output, nil +} + +// mergeAtomicOps merges atomic ops for [chainID] represented by [requests] +// to the [output] map provided. 
+func mergeAtomicOpsToMap(output map[ids.ID]*avalancheatomic.Requests, chainID ids.ID, requests *avalancheatomic.Requests) { + if request, exists := output[chainID]; exists { + request.PutRequests = append(request.PutRequests, requests.PutRequests...) + request.RemoveRequests = append(request.RemoveRequests, requests.RemoveRequests...) + } else { + output[chainID] = requests + } +} diff --git a/plugin/evm/atomic_state.go b/plugin/evm/atomic_state.go index 667e4c2517..911f1afb3a 100644 --- a/plugin/evm/atomic_state.go +++ b/plugin/evm/atomic_state.go @@ -6,9 +6,10 @@ package evm import ( "fmt" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -25,7 +26,7 @@ type AtomicState interface { Root() common.Hash // Accept applies the state change to VM's persistent storage // Changes are persisted atomically along with the provided [commitBatch]. - Accept(commitBatch database.Batch, requests map[ids.ID]*atomic.Requests) error + Accept(commitBatch database.Batch, requests map[ids.ID]*avalancheatomic.Requests) error // Reject frees memory associated with the state change. Reject() error } @@ -36,8 +37,8 @@ type atomicState struct { backend *atomicBackend blockHash common.Hash blockHeight uint64 - txs []*Tx - atomicOps map[ids.ID]*atomic.Requests + txs []*atomic.Tx + atomicOps map[ids.ID]*avalancheatomic.Requests atomicRoot common.Hash } @@ -46,7 +47,7 @@ func (a *atomicState) Root() common.Hash { } // Accept applies the state change to VM's persistent storage. 
-func (a *atomicState) Accept(commitBatch database.Batch, requests map[ids.ID]*atomic.Requests) error { +func (a *atomicState) Accept(commitBatch database.Batch, requests map[ids.ID]*avalancheatomic.Requests) error { // Add the new requests to the batch to be accepted for chainID, requests := range requests { mergeAtomicOpsToMap(a.atomicOps, chainID, requests) @@ -83,7 +84,7 @@ func (a *atomicState) Accept(commitBatch database.Batch, requests map[ids.ID]*at // to shared memory. if a.backend.IsBonus(a.blockHeight, a.blockHash) { log.Info("skipping atomic tx acceptance on bonus block", "block", a.blockHash) - return atomic.WriteAll(commitBatch, atomicChangesBatch) + return avalancheatomic.WriteAll(commitBatch, atomicChangesBatch) } // Otherwise, atomically commit pending changes in the version db with diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic_trie.go index 2760850d18..d734268e23 100644 --- a/plugin/evm/atomic_trie.go +++ b/plugin/evm/atomic_trie.go @@ -7,7 +7,7 @@ import ( "fmt" "time" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/trie/trienode" "github.com/ava-labs/coreth/triedb" @@ -52,7 +53,7 @@ type AtomicTrie interface { OpenTrie(hash common.Hash) (*trie.Trie, error) // UpdateTrie updates [tr] to inlude atomicOps for height. 
- UpdateTrie(tr *trie.Trie, height uint64, atomicOps map[ids.ID]*atomic.Requests) error + UpdateTrie(tr *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error // Iterator returns an AtomicTrieIterator to iterate the trie at the given // root hash starting at [cursor]. @@ -108,7 +109,7 @@ type AtomicTrieIterator interface { // AtomicOps returns a map of blockchainIDs to the set of atomic requests // for that blockchainID at the current block number - AtomicOps() *atomic.Requests + AtomicOps() *avalancheatomic.Requests // Error returns error, if any encountered during this iteration Error() error @@ -221,9 +222,9 @@ func (a *atomicTrie) commit(height uint64, root common.Hash) error { return a.updateLastCommitted(root, height) } -func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[ids.ID]*atomic.Requests) error { +func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { for blockchainID, requests := range atomicOps { - valueBytes, err := a.codec.Marshal(codecVersion, requests) + valueBytes, err := a.codec.Marshal(atomic.CodecVersion, requests) if err != nil { // highly unlikely but possible if atomic.Element // has a change that is unsupported by the codec diff --git a/plugin/evm/atomic_trie_test.go b/plugin/evm/atomic_trie_test.go index 5334c87101..193226f588 100644 --- a/plugin/evm/atomic_trie_test.go +++ b/plugin/evm/atomic_trie_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" @@ -19,24 +19,25 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" + 
"github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" ) const testCommitInterval = 100 -func (tx *Tx) mustAtomicOps() map[ids.ID]*atomic.Requests { +func mustAtomicOps(tx *atomic.Tx) map[ids.ID]*avalancheatomic.Requests { id, reqs, err := tx.AtomicOps() if err != nil { panic(err) } - return map[ids.ID]*atomic.Requests{id: reqs} + return map[ids.ID]*avalancheatomic.Requests{id: reqs} } // indexAtomicTxs updates [tr] with entries in [atomicOps] at height by creating // a new snapshot, calculating a new root, and calling InsertTrie followed // by AcceptTrie on the new root. -func indexAtomicTxs(tr AtomicTrie, height uint64, atomicOps map[ids.ID]*atomic.Requests) error { +func indexAtomicTxs(tr AtomicTrie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { snapshot, err := tr.OpenTrie(tr.LastAcceptedRoot()) if err != nil { return err @@ -143,7 +144,7 @@ func TestAtomicTrieInitialize(t *testing.T) { if err != nil { t.Fatal(err) } - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, test.lastAcceptedHeight+1, test.numTxsPerBlock, nil, operationsMap) // Construct the atomic trie for the first time @@ -230,7 +231,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { codec := testTxCodec() repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) assert.NoError(t, err) - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository @@ -246,7 +247,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { // re-initialize the atomic trie since initialize is not supposed to run again the height // at the trie should still be the old height with the old commit hash without any changes. 
// This scenario is not realistic, but is used to test potential double initialization behavior. - err = repo.Write(15, []*Tx{testDataExportTx()}) + err = repo.Write(15, []*atomic.Tx{testDataExportTx()}) assert.NoError(t, err) // Re-initialize the atomic trie @@ -281,7 +282,7 @@ func TestIndexerWriteAndRead(t *testing.T) { // process 305 blocks so that we get three commits (100, 200, 300) for height := uint64(1); height <= testCommitInterval*3+5; /*=305*/ height++ { - atomicRequests := testDataImportTx().mustAtomicOps() + atomicRequests := mustAtomicOps(testDataImportTx()) err := indexAtomicTxs(atomicTrie, height, atomicRequests) assert.NoError(t, err) if height%testCommitInterval == 0 { @@ -314,9 +315,9 @@ func TestAtomicOpsAreNotTxOrderDependent(t *testing.T) { for height := uint64(0); height <= testCommitInterval; /*=205*/ height++ { tx1 := testDataImportTx() tx2 := testDataImportTx() - atomicRequests1, err := mergeAtomicOps([]*Tx{tx1, tx2}) + atomicRequests1, err := mergeAtomicOps([]*atomic.Tx{tx1, tx2}) assert.NoError(t, err) - atomicRequests2, err := mergeAtomicOps([]*Tx{tx2, tx1}) + atomicRequests2, err := mergeAtomicOps([]*atomic.Tx{tx2, tx1}) assert.NoError(t, err) err = indexAtomicTxs(atomicTrie1, height, atomicRequests1) @@ -343,7 +344,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { if err != nil { t.Fatal(err) } - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight, constTxsPerHeight(numTxsPerBlock), nil, operationsMap) bonusBlocks := map[uint64]ids.ID{ @@ -368,9 +369,9 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { func TestIndexingNilShouldNotImpactTrie(t *testing.T) { // operations to index - ops := make([]map[ids.ID]*atomic.Requests, 0) + ops := make([]map[ids.ID]*avalancheatomic.Requests, 0) for i := 0; i <= testCommitInterval; i++ { - ops = append(ops, testDataImportTx().mustAtomicOps()) + ops = 
append(ops, mustAtomicOps(testDataImportTx())) } // without nils @@ -411,19 +412,19 @@ func TestIndexingNilShouldNotImpactTrie(t *testing.T) { } type sharedMemories struct { - thisChain atomic.SharedMemory - peerChain atomic.SharedMemory + thisChain avalancheatomic.SharedMemory + peerChain avalancheatomic.SharedMemory thisChainID ids.ID peerChainID ids.ID } -func (s *sharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*atomic.Requests) error { +func (s *sharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*avalancheatomic.Requests) error { for _, reqs := range ops { - puts := make(map[ids.ID]*atomic.Requests) - puts[s.thisChainID] = &atomic.Requests{} + puts := make(map[ids.ID]*avalancheatomic.Requests) + puts[s.thisChainID] = &avalancheatomic.Requests{} for _, key := range reqs.RemoveRequests { val := []byte{0x1} - puts[s.thisChainID].PutRequests = append(puts[s.thisChainID].PutRequests, &atomic.Element{Key: key, Value: val}) + puts[s.thisChainID].PutRequests = append(puts[s.thisChainID].PutRequests, &avalancheatomic.Element{Key: key, Value: val}) } if err := s.peerChain.Apply(puts); err != nil { return err @@ -432,7 +433,7 @@ func (s *sharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*atomic.R return nil } -func (s *sharedMemories) assertOpsApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { +func (s *sharedMemories) assertOpsApplied(t *testing.T, ops map[ids.ID]*avalancheatomic.Requests) { t.Helper() for _, reqs := range ops { // should be able to get put requests @@ -452,7 +453,7 @@ func (s *sharedMemories) assertOpsApplied(t *testing.T, ops map[ids.ID]*atomic.R } } -func (s *sharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { +func (s *sharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*avalancheatomic.Requests) { t.Helper() for _, reqs := range ops { // should not be able to get put requests @@ -470,7 +471,7 @@ func (s *sharedMemories) assertOpsNotApplied(t *testing.T, ops 
map[ids.ID]*atomi } } -func newSharedMemories(atomicMemory *atomic.Memory, thisChainID, peerChainID ids.ID) *sharedMemories { +func newSharedMemories(atomicMemory *avalancheatomic.Memory, thisChainID, peerChainID ids.ID) *sharedMemories { return &sharedMemories{ thisChain: atomicMemory.NewSharedMemory(thisChainID), peerChain: atomicMemory.NewSharedMemory(peerChainID), @@ -529,11 +530,11 @@ func TestApplyToSharedMemory(t *testing.T) { codec := testTxCodec() repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) assert.NoError(t, err) - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, test.lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository - m := atomic.NewMemory(db) + m := avalancheatomic.NewMemory(db) sharedMemories := newSharedMemories(m, testCChainID, blockChainID) backend, err := NewAtomicBackend(db, sharedMemories.thisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) @@ -594,7 +595,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { db := versiondb.New(memdb.New()) codec := testTxCodec() - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) lastAcceptedHeight := uint64(25000) // add 25000 * 3 = 75000 transactions @@ -629,7 +630,7 @@ func BenchmarkAtomicTrieIterate(b *testing.B) { db := versiondb.New(memdb.New()) codec := testTxCodec() - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) lastAcceptedHeight := uint64(25_000) // add 25000 * 3 = 75000 transactions diff --git a/plugin/evm/atomic_tx_repository.go b/plugin/evm/atomic_tx_repository.go index 4ee44576fe..d1074f60f2 100644 --- a/plugin/evm/atomic_tx_repository.go +++ b/plugin/evm/atomic_tx_repository.go @@ 
-19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) const ( @@ -39,10 +40,10 @@ var ( // atomic transactions type AtomicTxRepository interface { GetIndexHeight() (uint64, error) - GetByTxID(txID ids.ID) (*Tx, uint64, error) - GetByHeight(height uint64) ([]*Tx, error) - Write(height uint64, txs []*Tx) error - WriteBonus(height uint64, txs []*Tx) error + GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) + GetByHeight(height uint64) ([]*atomic.Tx, error) + Write(height uint64, txs []*atomic.Tx) error + WriteBonus(height uint64, txs []*atomic.Tx) error IterateByHeight(start uint64) database.Iterator Codec() codec.Manager @@ -136,7 +137,7 @@ func (a *atomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) er // Get the tx iter is pointing to, len(txs) == 1 is expected here. txBytes := iterValue[wrappers.LongLen+wrappers.IntLen:] - tx, err := ExtractAtomicTx(txBytes, a.codec) + tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) if err != nil { return err } @@ -198,10 +199,10 @@ func (a *atomicTxRepository) GetIndexHeight() (uint64, error) { return indexHeight, nil } -// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*Tx] object +// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*atomic.Tx] object // if an entry is found, and returns it with the block height the atomic tx it // represents was accepted on, along with an optional error. 
-func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { +func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) { indexedTxBytes, err := a.acceptedAtomicTxDB.Get(txID[:]) if err != nil { return nil, 0, err @@ -215,7 +216,7 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { packer := wrappers.Packer{Bytes: indexedTxBytes} height := packer.UnpackLong() txBytes := packer.UnpackBytes() - tx, err := ExtractAtomicTx(txBytes, a.codec) + tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) if err != nil { return nil, 0, err } @@ -229,40 +230,40 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { // no atomic transactions in the block accepted at [height]. // If [height] is greater than the last accepted height, then this will always return // [database.ErrNotFound] -func (a *atomicTxRepository) GetByHeight(height uint64) ([]*Tx, error) { +func (a *atomicTxRepository) GetByHeight(height uint64) ([]*atomic.Tx, error) { heightBytes := make([]byte, wrappers.LongLen) binary.BigEndian.PutUint64(heightBytes, height) return a.getByHeightBytes(heightBytes) } -func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*Tx, error) { +func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, error) { txsBytes, err := a.acceptedAtomicTxByHeightDB.Get(heightBytes) if err != nil { return nil, err } - return ExtractAtomicTxsBatch(txsBytes, a.codec) + return atomic.ExtractAtomicTxsBatch(txsBytes, a.codec) } // Write updates indexes maintained on atomic txs, so they can be queried // by txID or height. This method must be called only once per height, // and [txs] must include all atomic txs for the block accepted at the // corresponding height. 
-func (a *atomicTxRepository) Write(height uint64, txs []*Tx) error { +func (a *atomicTxRepository) Write(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, false) } // WriteBonus is similar to Write, except the [txID] => [height] is not // overwritten if already exists. -func (a *atomicTxRepository) WriteBonus(height uint64, txs []*Tx) error { +func (a *atomicTxRepository) WriteBonus(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, true) } -func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error { +func (a *atomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) error { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. - copyTxs := make([]*Tx, len(txs)) + copyTxs := make([]*atomic.Tx, len(txs)) copy(copyTxs, txs) utils.Sort(copyTxs) txs = copyTxs @@ -300,8 +301,8 @@ func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error { // indexTxByID writes [tx] into the [acceptedAtomicTxDB] stored as // [height] + [tx bytes] -func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *Tx) error { - txBytes, err := a.codec.Marshal(codecVersion, tx) +func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) error { + txBytes, err := a.codec.Marshal(atomic.CodecVersion, tx) if err != nil { return err } @@ -320,8 +321,8 @@ func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *Tx) error { } // indexTxsAtHeight adds [height] -> [txs] to the [acceptedAtomicTxByHeightDB] -func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*Tx) error { - txsBytes, err := a.codec.Marshal(codecVersion, txs) +func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic.Tx) error { + txsBytes, err := a.codec.Marshal(atomic.CodecVersion, txs) if err != nil { return err } @@ -335,7 +336,7 @@ func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs 
[]*Tx) err // [tx] to the slice of transactions stored there. // This function is used while initializing the atomic repository to re-index the atomic transactions // by txID into the height -> txs index. -func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *Tx) error { +func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomic.Tx) error { txs, err := a.getByHeightBytes(heightBytes) if err != nil && err != database.ErrNotFound { return err diff --git a/plugin/evm/atomic_tx_repository_test.go b/plugin/evm/atomic_tx_repository_test.go index b52860d57d..091bcd8f56 100644 --- a/plugin/evm/atomic_tx_repository_test.go +++ b/plugin/evm/atomic_tx_repository_test.go @@ -7,11 +7,12 @@ import ( "encoding/binary" "testing" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/avalanchego/codec" @@ -27,13 +28,13 @@ import ( // addTxs writes [txsPerHeight] txs for heights ranging in [fromHeight, toHeight) directly to [acceptedAtomicTxDB], // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
-func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*atomic.Requests) { +func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { for height := fromHeight; height < toHeight; height++ { - txs := make([]*Tx, 0, txsPerHeight) + txs := make([]*atomic.Tx, 0, txsPerHeight) for i := 0; i < txsPerHeight; i++ { tx := newTestTx() txs = append(txs, tx) - txBytes, err := codec.Marshal(codecVersion, tx) + txBytes, err := codec.Marshal(atomic.CodecVersion, tx) assert.NoError(t, err) // Write atomic transactions to the [acceptedAtomicTxDB] @@ -70,7 +71,7 @@ func constTxsPerHeight(txCount int) func(uint64) int { // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight uint64, - txsPerHeight func(height uint64) int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*atomic.Requests, + txsPerHeight func(height uint64) int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, ) { for height := fromHeight; height < toHeight; height++ { txs := newTestTxs(txsPerHeight(height)) @@ -95,7 +96,7 @@ func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight } // verifyTxs asserts [repo] can find all txs in [txMap] by height and txID -func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) { +func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*atomic.Tx) { // We should be able to fetch indexed txs by height: for height, expectedTxs := range txMap { txs, err := repo.GetByHeight(height) @@ -115,7 +116,7 @@ func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) { // verifyOperations creates an iterator over the atomicTrie at [rootHash] and verifies that the all of the operations in the trie in the interval [from, to] are identical to // the atomic operations contained in [operationsMap] on the same interval. 
-func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*atomic.Requests) { +func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { t.Helper() // Start the iterator at [from] @@ -187,7 +188,7 @@ func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { if err != nil { t.Fatal(err) } - txMap := make(map[uint64][]*Tx) + txMap := make(map[uint64][]*atomic.Tx) writeTxs(t, repo, 1, 100, constTxsPerHeight(1), txMap, nil) verifyTxs(t, repo, txMap) @@ -200,7 +201,7 @@ func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { if err != nil { t.Fatal(err) } - txMap := make(map[uint64][]*Tx) + txMap := make(map[uint64][]*atomic.Tx) writeTxs(t, repo, 1, 100, constTxsPerHeight(10), txMap, nil) verifyTxs(t, repo, txMap) @@ -211,7 +212,7 @@ func TestAtomicRepositoryPreAP5Migration(t *testing.T) { codec := testTxCodec() acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*Tx) + txMap := make(map[uint64][]*atomic.Tx) addTxs(t, codec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) if err := db.Commit(); err != nil { t.Fatal(err) @@ -236,7 +237,7 @@ func TestAtomicRepositoryPostAP5Migration(t *testing.T) { codec := testTxCodec() acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*Tx) + txMap := make(map[uint64][]*atomic.Tx) addTxs(t, codec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) addTxs(t, codec, acceptedAtomicTxDB, 100, 200, 10, txMap, nil) if err := db.Commit(); err != nil { @@ -261,7 +262,7 @@ func benchAtomicRepositoryIndex10_000(b *testing.B, maxHeight uint64, txsPerHeig codec := testTxCodec() acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*Tx) + txMap := make(map[uint64][]*atomic.Tx) addTxs(b, codec, acceptedAtomicTxDB, 0, maxHeight, 
txsPerHeight, txMap, nil) if err := db.Commit(); err != nil { diff --git a/plugin/evm/block.go b/plugin/evm/block.go index a8d9084464..99451cb071 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/precompile/precompileconfig" "github.com/ava-labs/coreth/predicate" @@ -31,9 +32,7 @@ var ( _ block.WithVerifyContext = (*Block)(nil) ) -var ( - errMissingUTXOs = errors.New("missing UTXOs") -) +var errMissingUTXOs = errors.New("missing UTXOs") // readMainnetBonusBlocks returns maps of bonus block numbers to block IDs. // Note bonus blocks are indexed in the atomic trie. @@ -114,13 +113,13 @@ type Block struct { id ids.ID ethBlock *types.Block vm *VM - atomicTxs []*Tx + atomicTxs []*atomic.Tx } // newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { isApricotPhase5 := vm.chainConfig.IsApricotPhase5(ethBlock.Time()) - atomicTxs, err := ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, vm.codec) + atomicTxs, err := atomic.ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, atomic.Codec) if err != nil { return nil, err } @@ -136,6 +135,8 @@ func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { // ID implements the snowman.Block interface func (b *Block) ID() ids.ID { return b.id } +func (b *Block) AtomicTxs() []*atomic.Tx { return b.atomicTxs } + // Accept implements the snowman.Block interface func (b *Block) Accept(context.Context) error { vm := b.vm diff --git a/plugin/evm/client.go b/plugin/evm/client/client.go similarity index 78% rename from plugin/evm/client.go rename to plugin/evm/client/client.go index 4701c22b9c..f92e55e59f 100644 --- a/plugin/evm/client.go +++ b/plugin/evm/client/client.go @@ -1,13 +1,14 @@ // (c) 
2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package client import ( "context" "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "golang.org/x/exp/slog" "github.com/ava-labs/avalanchego/api" @@ -17,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/rpc" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) // Interface compliance @@ -25,7 +27,7 @@ var _ Client = (*client)(nil) // Client interface for interacting with EVM [chain] type Client interface { IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) - GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (Status, error) + GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (atomic.Status, error) GetAtomicTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) GetAtomicUTXOs(ctx context.Context, addrs []ids.ShortID, sourceChain string, limit uint32, startAddress ids.ShortID, startUTXOID ids.ID, options ...rpc.Option) ([][]byte, ids.ShortID, ids.ID, error) ExportKey(ctx context.Context, userPass api.UserPass, addr common.Address, options ...rpc.Option) (*secp256k1.PrivateKey, string, error) @@ -38,7 +40,7 @@ type Client interface { MemoryProfile(ctx context.Context, options ...rpc.Option) error LockProfile(ctx context.Context, options ...rpc.Option) error SetLogLevel(ctx context.Context, level slog.Level, options ...rpc.Option) error - GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) + // GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) } // Client implementation for interacting with EVM [chain] @@ -74,8 +76,14 @@ func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Opt return res.TxID, err } +// GetAtomicTxStatusReply defines the 
GetAtomicTxStatus replies returned from the API +type GetAtomicTxStatusReply struct { + Status atomic.Status `json:"status"` + BlockHeight *json.Uint64 `json:"blockHeight,omitempty"` +} + // GetAtomicTxStatus returns the status of [txID] -func (c *client) GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (Status, error) { +func (c *client) GetAtomicTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (atomic.Status, error) { res := &GetAtomicTxStatusReply{} err := c.requester.SendRequest(ctx, "avax.getAtomicTxStatus", &api.JSONTxID{ TxID: txID, @@ -131,6 +139,19 @@ func (c *client) GetAtomicUTXOs(ctx context.Context, addrs []ids.ShortID, source return utxos, endAddr, endUTXOID, err } +// ExportKeyArgs are arguments for ExportKey +type ExportKeyArgs struct { + api.UserPass + Address string `json:"address"` +} + +// ExportKeyReply is the response for ExportKey +type ExportKeyReply struct { + // The decrypted PrivateKey for the Address provided in the arguments + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` + PrivateKeyHex string `json:"privateKeyHex"` +} + // ExportKey returns the private key corresponding to [addr] controlled by [user] // in both Avalanche standard format and hex format func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr common.Address, options ...rpc.Option) (*secp256k1.PrivateKey, string, error) { @@ -142,6 +163,12 @@ func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr common.A return res.PrivateKey, res.PrivateKeyHex, err } +// ImportKeyArgs are arguments for ImportKey +type ImportKeyArgs struct { + api.UserPass + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` +} + // ImportKey imports [privateKey] to [user] func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (common.Address, error) { res := &api.JSONAddress{} @@ -152,7 +179,21 @@ func (c *client) ImportKey(ctx context.Context, user 
api.UserPass, privateKey *s if err != nil { return common.Address{}, err } - return ParseEthAddress(res.Address) + return atomic.ParseEthAddress(res.Address) +} + +// ImportArgs are arguments for passing into Import requests +type ImportArgs struct { + api.UserPass + + // Fee that should be used when creating the tx + BaseFee *hexutil.Big `json:"baseFee"` + + // Chain the funds are coming from + SourceChain string `json:"sourceChain"` + + // The address that will receive the imported funds + To common.Address `json:"to"` } // Import sends an import transaction to import funds from [sourceChain] and @@ -180,6 +221,32 @@ func (c *client) ExportAVAX( return c.Export(ctx, user, amount, to, targetChain, "AVAX", options...) } +// ExportAVAXArgs are the arguments to ExportAVAX +type ExportAVAXArgs struct { + api.UserPass + + // Fee that should be used when creating the tx + BaseFee *hexutil.Big `json:"baseFee"` + + // Amount of asset to send + Amount json.Uint64 `json:"amount"` + + // Chain the funds are going to. Optional. Used if To address does not + // include the chainID. + TargetChain string `json:"targetChain"` + + // ID of the address that will receive the AVAX. This address may include + // the chainID, which is used to determine what the destination chain is. + To string `json:"to"` +} + +// ExportArgs are the arguments to Export +type ExportArgs struct { + ExportAVAXArgs + // AssetID of the tokens + AssetID string `json:"assetID"` +} + // Export sends an asset from this chain to the P/C-Chain. // After this tx is accepted, the AVAX must be imported to the P/C-chain with an importTx. // Returns the ID of the newly created atomic transaction @@ -221,6 +288,10 @@ func (c *client) LockProfile(ctx context.Context, options ...rpc.Option) error { return c.adminRequester.SendRequest(ctx, "admin.lockProfile", struct{}{}, &api.EmptyReply{}, options...) 
} +type SetLogLevelArgs struct { + Level string `json:"level"` +} + // SetLogLevel dynamically sets the log level for the C Chain func (c *client) SetLogLevel(ctx context.Context, level slog.Level, options ...rpc.Option) error { return c.adminRequester.SendRequest(ctx, "admin.setLogLevel", &SetLogLevelArgs{ @@ -229,8 +300,8 @@ func (c *client) SetLogLevel(ctx context.Context, level slog.Level, options ...r } // GetVMConfig returns the current config of the VM -func (c *client) GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) { - res := &ConfigReply{} - err := c.adminRequester.SendRequest(ctx, "admin.getVMConfig", struct{}{}, res, options...) - return res.Config, err -} +// func (c *client) GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) { +// res := &ConfigReply{} +// err := c.adminRequester.SendRequest(ctx, "admin.getVMConfig", struct{}{}, res, options...) +// return res.Config, err +// } diff --git a/plugin/evm/client_interface_test.go b/plugin/evm/client/client_interface_test.go similarity index 97% rename from plugin/evm/client_interface_test.go rename to plugin/evm/client/client_interface_test.go index d88c4926b4..332bb8bcf4 100644 --- a/plugin/evm/client_interface_test.go +++ b/plugin/evm/client/client_interface_test.go @@ -1,4 +1,4 @@ -package evm +package client import ( "reflect" diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index d8a7fed80f..3e2b0d2160 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -9,7 +9,7 @@ import ( "math/big" "testing" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" engCommon "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/constants" @@ -18,13 +18,14 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" 
"github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) // createExportTxOptions adds funds to shared memory, imports them, and returns a list of export transactions // that attempt to send the funds to each of the test keys (list of length 3). -func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, sharedMemory *atomic.Memory) []*Tx { +func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { // Add a UTXO to shared memory utxo := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: ids.GenerateTestID()}, @@ -37,14 +38,14 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, }, }, } - utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ @@ -84,9 +85,13 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, } // Use the funds to create 3 conflicting export transactions sending the funds to each of the test addresses - exportTxs := make([]*Tx, 0, 3) + exportTxs := make([]*atomic.Tx, 0, 3) + state, err := vm.blockChain.State() + if err != nil { + t.Fatal(err) + } for _, addr := range testShortIDAddrs { - exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, vm.ctx.AVAXAssetID, 
uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -99,7 +104,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, func TestExportTxEVMStateTransfer(t *testing.T) { key := testKeys[0] addr := key.PublicKey().Address() - ethAddr := GetEthAddress(key) + ethAddr := atomic.GetEthAddress(key) avaxAmount := 50 * units.MilliAvax avaxUTXOID := avax.UTXOID{ @@ -128,7 +133,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { tests := []struct { name string - tx []EVMInput + tx []atomic.EVMInput avaxBalance *uint256.Int balances map[ids.ID]*big.Int expectedNonce uint64 @@ -137,7 +142,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { name: "no transfers", tx: nil, - avaxBalance: uint256.NewInt(avaxAmount * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(int64(customAmount)), }, @@ -146,7 +151,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend half AVAX", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxAmount / 2, @@ -154,7 +159,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { Nonce: 0, }, }, - avaxBalance: uint256.NewInt(avaxAmount / 2 * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount / 2 * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(int64(customAmount)), }, @@ -163,7 +168,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend all AVAX", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxAmount, @@ -180,7 +185,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend too much AVAX", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxAmount + 1, @@ -197,7 +202,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend half custom", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { 
Address: ethAddr, Amount: customAmount / 2, @@ -205,7 +210,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { Nonce: 0, }, }, - avaxBalance: uint256.NewInt(avaxAmount * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(int64(customAmount / 2)), }, @@ -214,7 +219,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend all custom", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount, @@ -222,7 +227,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { Nonce: 0, }, }, - avaxBalance: uint256.NewInt(avaxAmount * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(0), }, @@ -231,7 +236,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend too much custom", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount + 1, @@ -239,7 +244,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { Nonce: 0, }, }, - avaxBalance: uint256.NewInt(avaxAmount * x2cRateUint64), + avaxBalance: uint256.NewInt(avaxAmount * atomic.X2CRateUint64), balances: map[ids.ID]*big.Int{ customAssetID: big.NewInt(0), }, @@ -248,7 +253,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend everything", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount, @@ -271,7 +276,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend everything wrong nonce", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount, @@ -294,7 +299,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, { name: "spend everything changing nonces", - tx: []EVMInput{ + tx: []atomic.EVMInput{ { Address: ethAddr, Amount: customAmount, @@ -337,18 +342,18 @@ func TestExportTxEVMStateTransfer(t *testing.T) { }, } - avaxUTXOBytes, err := vm.codec.Marshal(codecVersion, avaxUTXO) 
+ avaxUTXOBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, avaxUTXO) if err != nil { t.Fatal(err) } - customUTXOBytes, err := vm.codec.Marshal(codecVersion, customUTXO) + customUTXOBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, customUTXO) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{ { Key: avaxInputID[:], Value: avaxUTXOBytes, @@ -395,7 +400,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { t.Fatal(err) } - newTx := UnsignedExportTx{ + newTx := atomic.UnsignedExportTx{ Ins: test.tx, } @@ -457,11 +462,11 @@ func TestExportTxSemanticVerify(t *testing.T) { custom1AssetID = ids.ID{1, 2, 3, 4, 5, 6} ) - validExportTx := &UnsignedExportTx{ + validExportTx := &atomic.UnsignedExportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, DestinationChain: vm.ctx.XChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxBalance, @@ -495,11 +500,11 @@ func TestExportTxSemanticVerify(t *testing.T) { }, } - validAVAXExportTx := &UnsignedExportTx{ + validAVAXExportTx := &atomic.UnsignedExportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, DestinationChain: vm.ctx.XChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxBalance, @@ -523,7 +528,7 @@ func TestExportTxSemanticVerify(t *testing.T) { tests := []struct { name string - tx *Tx + tx *atomic.Tx signers [][]*secp256k1.PrivateKey baseFee *big.Int rules params.Rules @@ -531,7 +536,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }{ { name: "valid", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {key}, {key}, @@ -543,10 +548,10 @@ func 
TestExportTxSemanticVerify(t *testing.T) { }, { name: "P-chain before AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validAVAXExportTx validExportTx.DestinationChain = constants.PlatformChainID - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -557,10 +562,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "P-chain after AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validAVAXExportTx validExportTx.DestinationChain = constants.PlatformChainID - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -571,10 +576,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "random chain after AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validAVAXExportTx validExportTx.DestinationChain = ids.GenerateTestID() - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -585,10 +590,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "P-chain multi-coin before AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.DestinationChain = constants.PlatformChainID - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -601,10 +606,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "P-chain multi-coin after AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.DestinationChain = constants.PlatformChainID - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -617,10 +622,10 @@ func TestExportTxSemanticVerify(t 
*testing.T) { }, { name: "random chain multi-coin after AP5", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.DestinationChain = ids.GenerateTestID() - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -633,10 +638,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "no outputs", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.ExportedOutputs = nil - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -649,10 +654,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "wrong networkID", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.NetworkID++ - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -665,10 +670,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "wrong chainID", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.BlockchainID = ids.GenerateTestID() - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -681,11 +686,11 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "invalid input", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx - validExportTx.Ins = append([]EVMInput{}, validExportTx.Ins...) + validExportTx.Ins = append([]atomic.EVMInput{}, validExportTx.Ins...) 
validExportTx.Ins[2].Amount = 0 - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -698,7 +703,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "invalid output", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.ExportedOutputs = []*avax.TransferableOutput{{ Asset: avax.Asset{ID: custom0AssetID}, @@ -710,7 +715,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, }, }} - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -723,7 +728,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "unsorted outputs", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx exportOutputs := []*avax.TransferableOutput{ { @@ -748,10 +753,10 @@ func TestExportTxSemanticVerify(t *testing.T) { }, } // Sort the outputs and then swap the ordering to ensure that they are ordered incorrectly - avax.SortTransferableOutputs(exportOutputs, Codec) + avax.SortTransferableOutputs(exportOutputs, atomic.Codec) exportOutputs[0], exportOutputs[1] = exportOutputs[1], exportOutputs[0] validExportTx.ExportedOutputs = exportOutputs - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -764,11 +769,11 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "not unique inputs", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx - validExportTx.Ins = append([]EVMInput{}, validExportTx.Ins...) + validExportTx.Ins = append([]atomic.EVMInput{}, validExportTx.Ins...) 
validExportTx.Ins[2] = validExportTx.Ins[1] - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -781,7 +786,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "custom asset insufficient funds", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.ExportedOutputs = []*avax.TransferableOutput{ { @@ -795,7 +800,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, }, } - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -808,7 +813,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "avax insufficient funds", - tx: func() *Tx { + tx: func() *atomic.Tx { validExportTx := *validExportTx validExportTx.ExportedOutputs = []*avax.TransferableOutput{ { @@ -822,7 +827,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, }, } - return &Tx{UnsignedAtomicTx: &validExportTx} + return &atomic.Tx{UnsignedAtomicTx: &validExportTx} }(), signers: [][]*secp256k1.PrivateKey{ {key}, @@ -835,7 +840,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "too many signatures", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {key}, {key}, @@ -848,7 +853,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "too few signatures", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {key}, {key}, @@ -859,7 +864,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "too many signatures on credential", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {key, testKeys[1]}, {key}, @@ -871,7 +876,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "too few 
signatures on credential", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {}, {key}, @@ -883,7 +888,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "wrong signature on credential", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ {testKeys[1]}, {key}, @@ -895,7 +900,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }, { name: "no signatures", - tx: &Tx{UnsignedAtomicTx: validExportTx}, + tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{}, baseFee: initialBaseFee, rules: apricotRulesPhase3, @@ -903,15 +908,24 @@ func TestExportTxSemanticVerify(t *testing.T) { }, } for _, test := range tests { - if err := test.tx.Sign(vm.codec, test.signers); err != nil { + if err := test.tx.Sign(atomic.Codec, test.signers); err != nil { t.Fatal(err) } + backend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: test.rules, + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } + t.Run(test.name, func(t *testing.T) { tx := test.tx exportTx := tx.UnsignedAtomicTx - err := exportTx.SemanticVerify(vm, tx, parent, test.baseFee, test.rules) + err := exportTx.SemanticVerify(backend, tx, parent, test.baseFee) if test.shouldErr && err == nil { t.Fatalf("should have errored but returned valid") } @@ -943,11 +957,11 @@ func TestExportTxAccept(t *testing.T) { custom0AssetID = ids.ID{1, 2, 3, 4, 5} ) - exportTx := &UnsignedExportTx{ + exportTx := &atomic.UnsignedExportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, DestinationChain: vm.ctx.XChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: ethAddr, Amount: avaxBalance, @@ -985,7 +999,7 @@ func TestExportTxAccept(t *testing.T) { }, } - tx := &Tx{UnsignedAtomicTx: exportTx} + tx := &atomic.Tx{UnsignedAtomicTx: exportTx} signers := [][]*secp256k1.PrivateKey{ 
{key}, @@ -993,7 +1007,7 @@ func TestExportTxAccept(t *testing.T) { {key}, } - if err := tx.Sign(vm.codec, signers); err != nil { + if err := tx.Sign(atomic.Codec, signers); err != nil { t.Fatal(err) } @@ -1006,7 +1020,7 @@ func TestExportTxAccept(t *testing.T) { t.Fatalf("Failed to accept export transaction due to: %s", err) } - if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*atomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { + if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { t.Fatal(err) } indexedValues, _, _, err := xChainSharedMemory.Indexed(vm.ctx.ChainID, [][]byte{addr.Bytes()}, nil, nil, 3) @@ -1045,7 +1059,7 @@ func TestExportTxAccept(t *testing.T) { t.Fatalf("inconsistent values returned fetched %x indexed %x", fetchedValues[1], indexedValues[1]) } - customUTXOBytes, err := Codec.Marshal(codecVersion, &avax.UTXO{ + customUTXOBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, &avax.UTXO{ UTXOID: customUTXOID, Asset: avax.Asset{ID: custom0AssetID}, Out: exportTx.ExportedOutputs[1].Out, @@ -1054,7 +1068,7 @@ func TestExportTxAccept(t *testing.T) { t.Fatal(err) } - avaxUTXOBytes, err := Codec.Marshal(codecVersion, &avax.UTXO{ + avaxUTXOBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, &avax.UTXO{ UTXOID: avaxUTXOID, Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, Out: exportTx.ExportedOutputs[0].Out, @@ -1073,11 +1087,11 @@ func TestExportTxAccept(t *testing.T) { func TestExportTxVerify(t *testing.T) { var exportAmount uint64 = 10000000 - exportTx := &UnsignedExportTx{ + exportTx := &atomic.UnsignedExportTx{ NetworkID: testNetworkID, BlockchainID: testCChainID, DestinationChain: testXChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1118,25 +1132,25 @@ func TestExportTxVerify(t *testing.T) { } // Sort the inputs and outputs to ensure the transaction 
is canonical - avax.SortTransferableOutputs(exportTx.ExportedOutputs, Codec) + avax.SortTransferableOutputs(exportTx.ExportedOutputs, atomic.Codec) // Pass in a list of signers here with the appropriate length // to avoid causing a nil-pointer error in the helper method emptySigners := make([][]*secp256k1.PrivateKey, 2) - SortEVMInputsAndSigners(exportTx.Ins, emptySigners) + atomic.SortEVMInputsAndSigners(exportTx.Ins, emptySigners) ctx := NewContext() tests := map[string]atomicTxVerifyTest{ "nil tx": { - generate: func(t *testing.T) UnsignedAtomicTx { - return (*UnsignedExportTx)(nil) + generate: func(t *testing.T) atomic.UnsignedAtomicTx { + return (*atomic.UnsignedExportTx)(nil) }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errNilTx.Error(), + expectedErr: atomic.ErrNilTx.Error(), }, "valid export tx": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { return exportTx }, ctx: ctx, @@ -1144,7 +1158,7 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "valid export tx banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { return exportTx }, ctx: ctx, @@ -1152,47 +1166,47 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "incorrect networkID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.NetworkID++ return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongNetworkID.Error(), + expectedErr: atomic.ErrWrongNetworkID.Error(), }, "incorrect blockchainID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.BlockchainID = nonExistentID return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongBlockchainID.Error(), + expectedErr: atomic.ErrWrongChainID.Error(), }, "incorrect destination chain": { - generate: func(t *testing.T) 
UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.DestinationChain = nonExistentID return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongChainID.Error(), // TODO make this error more specific to destination not just chainID + expectedErr: atomic.ErrWrongChainID.Error(), // TODO make this error more specific to destination not just chainID }, "no exported outputs": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.ExportedOutputs = nil return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errNoExportOutputs.Error(), + expectedErr: atomic.ErrNoExportOutputs.Error(), }, "unsorted outputs": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.ExportedOutputs = []*avax.TransferableOutput{ tx.ExportedOutputs[1], @@ -1202,10 +1216,10 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errOutputsNotSorted.Error(), + expectedErr: atomic.ErrOutputsNotSorted.Error(), }, "invalid exported output": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.ExportedOutputs = []*avax.TransferableOutput{tx.ExportedOutputs[0], nil} return &tx @@ -1215,9 +1229,9 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "nil transferable output is not valid", }, "unsorted EVM inputs before AP1": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{ + tx.Ins = []atomic.EVMInput{ tx.Ins[1], tx.Ins[0], } @@ -1228,9 +1242,9 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "unsorted EVM inputs after AP1": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = 
[]EVMInput{ + tx.Ins = []atomic.EVMInput{ tx.Ins[1], tx.Ins[0], } @@ -1238,12 +1252,12 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase1, - expectedErr: errInputsNotSortedUnique.Error(), + expectedErr: atomic.ErrInputsNotSortedUnique.Error(), }, "EVM input with amount 0": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{ + tx.Ins = []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: 0, @@ -1255,12 +1269,12 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errNoValueInput.Error(), + expectedErr: atomic.ErrNoValueInput.Error(), }, "non-unique EVM input before AP1": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{tx.Ins[0], tx.Ins[0]} + tx.Ins = []atomic.EVMInput{tx.Ins[0], tx.Ins[0]} return &tx }, ctx: ctx, @@ -1268,19 +1282,19 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "non-unique EVM input after AP1": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{tx.Ins[0], tx.Ins[0]} + tx.Ins = []atomic.EVMInput{tx.Ins[0], tx.Ins[0]} return &tx }, ctx: ctx, rules: apricotRulesPhase1, - expectedErr: errInputsNotSortedUnique.Error(), + expectedErr: atomic.ErrInputsNotSortedUnique.Error(), }, "non-AVAX input Apricot Phase 6": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{ + tx.Ins = []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: 1, @@ -1295,7 +1309,7 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "non-AVAX output Apricot Phase 6": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx 
tx.ExportedOutputs = []*avax.TransferableOutput{ { @@ -1317,9 +1331,9 @@ func TestExportTxVerify(t *testing.T) { expectedErr: "", }, "non-AVAX input Banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx - tx.Ins = []EVMInput{ + tx.Ins = []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: 1, @@ -1331,10 +1345,10 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: banffRules, - expectedErr: errExportNonAVAXInputBanff.Error(), + expectedErr: atomic.ErrExportNonAVAXInputBanff.Error(), }, "non-AVAX output Banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *exportTx tx.ExportedOutputs = []*avax.TransferableOutput{ { @@ -1353,7 +1367,7 @@ func TestExportTxVerify(t *testing.T) { }, ctx: ctx, rules: banffRules, - expectedErr: errExportNonAVAXOutputBanff.Error(), + expectedErr: atomic.ErrExportNonAVAXOutputBanff.Error(), }, } @@ -1374,7 +1388,7 @@ func TestExportTxGasCost(t *testing.T) { exportAmount := uint64(5000000) tests := map[string]struct { - UnsignedExportTx *UnsignedExportTx + UnsignedExportTx *atomic.UnsignedExportTx Keys [][]*secp256k1.PrivateKey BaseFee *big.Int @@ -1383,11 +1397,11 @@ func TestExportTxGasCost(t *testing.T) { FixedFee bool }{ "simple export 1wei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1415,11 +1429,11 @@ func TestExportTxGasCost(t *testing.T) { BaseFee: big.NewInt(1), }, "simple export 1wei BaseFee + fixed fee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: 
exportAmount, @@ -1448,11 +1462,11 @@ func TestExportTxGasCost(t *testing.T) { FixedFee: true, }, "simple export 25Gwei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1480,11 +1494,11 @@ func TestExportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "simple export 225Gwei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1512,11 +1526,11 @@ func TestExportTxGasCost(t *testing.T) { BaseFee: big.NewInt(225 * params.GWei), }, "complex export 25Gwei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1556,11 +1570,11 @@ func TestExportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "complex export 225Gwei BaseFee": { - UnsignedExportTx: &UnsignedExportTx{ + UnsignedExportTx: &atomic.UnsignedExportTx{ NetworkID: networkID, BlockchainID: chainID, DestinationChain: xChainID, - Ins: []EVMInput{ + Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, @@ -1603,10 +1617,10 @@ func TestExportTxGasCost(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - tx := &Tx{UnsignedAtomicTx: test.UnsignedExportTx} + tx := &atomic.Tx{UnsignedAtomicTx: test.UnsignedExportTx} // Sign with the correct key - if err := tx.Sign(Codec, test.Keys); err != nil { + if err := tx.Sign(atomic.Codec, test.Keys); err != nil { t.Fatal(err) } @@ -1618,7 +1632,7 @@ func 
TestExportTxGasCost(t *testing.T) { t.Fatalf("Expected gasUsed to be %d, but found %d", test.ExpectedGasUsed, gasUsed) } - fee, err := CalculateDynamicFee(gasUsed, test.BaseFee) + fee, err := atomic.CalculateDynamicFee(gasUsed, test.BaseFee) if err != nil { t.Fatal(err) } @@ -1705,14 +1719,14 @@ func TestNewExportTx(t *testing.T) { }, }, } - utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ @@ -1753,14 +1767,28 @@ func TestNewExportTx(t *testing.T) { parent = vm.LastAcceptedBlockInternal().(*Block) exportAmount := uint64(5000000) - tx, err = vm.newExportTx(vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + state, err := vm.blockChain.State() + if err != nil { + t.Fatal(err) + } + + tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } exportTx := tx.UnsignedAtomicTx - if err := exportTx.SemanticVerify(vm, tx, parent, parent.ethBlock.BaseFee(), test.rules); err != nil { + backend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: vm.currentRules(), + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } + + if err := exportTx.SemanticVerify(backend, tx, parent, parent.ethBlock.BaseFee()); err != nil { t.Fatal("newExportTx created an invalid transaction", err) } @@ -1781,7 +1809,7 @@ func 
TestNewExportTx(t *testing.T) { t.Fatalf("Failed to accept export transaction due to: %s", err) } - if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*atomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { + if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { t.Fatal(err) } @@ -1794,7 +1822,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - addr := GetEthAddress(testKeys[0]) + addr := atomic.GetEthAddress(testKeys[0]) if sdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), sdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } @@ -1864,7 +1892,7 @@ func TestNewExportTxMulticoin(t *testing.T) { }, }, } - utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } @@ -1885,14 +1913,14 @@ func TestNewExportTxMulticoin(t *testing.T) { }, }, } - utxoBytes2, err := vm.codec.Marshal(codecVersion, utxo2) + utxoBytes2, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo2) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID2 := utxo2.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{ { Key: inputID[:], Value: utxoBytes, @@ -1942,20 +1970,33 @@ func TestNewExportTxMulticoin(t *testing.T) { parent = vm.LastAcceptedBlockInternal().(*Block) exportAmount := uint64(5000000) - testKeys0Addr := GetEthAddress(testKeys[0]) + testKeys0Addr := atomic.GetEthAddress(testKeys[0]) exportId, err := ids.ToShortID(testKeys0Addr[:]) if err != nil { t.Fatal(err) } - tx, err = vm.newExportTx(tid, exportAmount, 
vm.ctx.XChainID, exportId, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + state, err := vm.blockChain.State() + if err != nil { + t.Fatal(err) + } + + tx, err = atomic.NewExportTx(vm.ctx, vm.currentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } exportTx := tx.UnsignedAtomicTx + backend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: vm.currentRules(), + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } - if err := exportTx.SemanticVerify(vm, tx, parent, parent.ethBlock.BaseFee(), test.rules); err != nil { + if err := exportTx.SemanticVerify(backend, tx, parent, parent.ethBlock.BaseFee()); err != nil { t.Fatal("newExportTx created an invalid transaction", err) } @@ -1968,7 +2009,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatalf("Failed to accept export transaction due to: %s", err) } - if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*atomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { + if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { t.Fatal(err) } @@ -1981,7 +2022,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - addr := GetEthAddress(testKeys[0]) + addr := atomic.GetEthAddress(testKeys[0]) if stdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), stdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } diff --git a/plugin/evm/formatting.go b/plugin/evm/formatting.go index ba9cea589f..feeab134b7 100644 --- a/plugin/evm/formatting.go +++ b/plugin/evm/formatting.go @@ -8,10 +8,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" 
"github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" ) // ParseServiceAddress get address ID from address string, being it either localized (using address manager, @@ -53,21 +50,3 @@ func (vm *VM) FormatAddress(chainID ids.ID, addr ids.ShortID) (string, error) { hrp := constants.GetHRP(vm.ctx.NetworkID) return address.Format(chainIDAlias, hrp, addr.Bytes()) } - -// ParseEthAddress parses [addrStr] and returns an Ethereum address -func ParseEthAddress(addrStr string) (common.Address, error) { - if !common.IsHexAddress(addrStr) { - return common.Address{}, errInvalidAddr - } - return common.HexToAddress(addrStr), nil -} - -// GetEthAddress returns the ethereum address derived from [privKey] -func GetEthAddress(privKey *secp256k1.PrivateKey) common.Address { - return PublicKeyToEthAddress(privKey.PublicKey()) -} - -// PublicKeyToEthAddress returns the ethereum address derived from [pubKey] -func PublicKeyToEthAddress(pubKey *secp256k1.PublicKey) common.Address { - return crypto.PubkeyToAddress(*(pubKey.ToECDSA())) -} diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go index a760936021..d6b377d13a 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/gossip.go @@ -7,7 +7,7 @@ import ( "context" "fmt" "sync" - "sync/atomic" + syncatomic "sync/atomic" "time" ethcommon "github.com/ethereum/go-ethereum/common" @@ -24,6 +24,7 @@ import ( "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) const pendingTxsBuffer = 10 @@ -97,14 +98,14 @@ func (g GossipAtomicTxMarshaller) MarshalGossip(tx *GossipAtomicTx) ([]byte, err } func (g GossipAtomicTxMarshaller) UnmarshalGossip(bytes []byte) (*GossipAtomicTx, error) { - tx, err := ExtractAtomicTx(bytes, Codec) + tx, err := atomic.ExtractAtomicTx(bytes, atomic.Codec) return &GossipAtomicTx{ Tx: tx, }, err } type GossipAtomicTx struct 
{ - Tx *Tx + Tx *atomic.Tx } func (tx *GossipAtomicTx) GossipID() ids.ID { @@ -133,7 +134,7 @@ type GossipEthTxPool struct { // subscribed is set to true when the gossip subscription is active // mostly used for testing - subscribed atomic.Bool + subscribed syncatomic.Bool } // IsSubscribed returns whether or not the gossip subscription is active. diff --git a/plugin/evm/gossip_test.go b/plugin/evm/gossip_test.go index 8ed7aee3cf..15ebd15871 100644 --- a/plugin/evm/gossip_test.go +++ b/plugin/evm/gossip_test.go @@ -22,6 +22,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/prometheus/client_golang/prometheus" @@ -33,15 +34,15 @@ func TestGossipAtomicTxMarshaller(t *testing.T) { require := require.New(t) want := &GossipAtomicTx{ - Tx: &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{}, + Tx: &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{}, Creds: []verify.Verifiable{}, }, } marshaller := GossipAtomicTxMarshaller{} key0 := testKeys[0] - require.NoError(want.Tx.Sign(Codec, [][]*secp256k1.PrivateKey{{key0}})) + require.NoError(want.Tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key0}})) bytes, err := marshaller.MarshalGossip(want) require.NoError(err) @@ -54,14 +55,14 @@ func TestGossipAtomicTxMarshaller(t *testing.T) { func TestAtomicMempoolIterate(t *testing.T) { txs := []*GossipAtomicTx{ { - Tx: &Tx{ + Tx: &atomic.Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), }, }, }, { - Tx: &Tx{ + Tx: &atomic.Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), }, diff --git a/plugin/evm/handler.go b/plugin/evm/handler.go index ce970c822f..c4b41a85e7 100644 --- a/plugin/evm/handler.go +++ b/plugin/evm/handler.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" + 
"github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/message" ) @@ -47,15 +48,15 @@ func (h *GossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg message.AtomicTxGo // In the case that the gossip message contains a transaction, // attempt to parse it and add it as a remote. - tx := Tx{} - if _, err := Codec.Unmarshal(msg.Tx, &tx); err != nil { + tx := atomic.Tx{} + if _, err := atomic.Codec.Unmarshal(msg.Tx, &tx); err != nil { log.Trace( "AppGossip provided invalid tx", "err", err, ) return nil } - unsignedBytes, err := Codec.Marshal(codecVersion, &tx.UnsignedAtomicTx) + unsignedBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, &tx.UnsignedAtomicTx) if err != nil { log.Trace( "AppGossip failed to marshal unsigned tx", diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index ec8b2b2fb4..d254153712 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -8,10 +8,11 @@ import ( "testing" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" @@ -23,7 +24,7 @@ import ( // createImportTxOptions adds a UTXO to shared memory and generates a list of import transactions sending this UTXO // to each of the three test keys (conflicting transactions) -func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *atomic.Memory) []*Tx { +func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: ids.GenerateTestID()}, Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, @@ -35,14 +36,14 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *atomic.Memory) [] }, }, } - 
utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ @@ -52,7 +53,7 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *atomic.Memory) [] t.Fatal(err) } - importTxs := make([]*Tx, 0, 3) + importTxs := make([]*atomic.Tx, 0, 3) for _, ethAddr := range testEthAddrs { importTx, err := vm.newImportTx(vm.ctx.XChainID, ethAddr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { @@ -69,7 +70,7 @@ func TestImportTxVerify(t *testing.T) { var importAmount uint64 = 10000000 txID := ids.GenerateTestID() - importTx := &UnsignedImportTx{ + importTx := &atomic.UnsignedImportTx{ NetworkID: ctx.NetworkID, BlockchainID: ctx.ChainID, SourceChain: ctx.XChainID, @@ -101,7 +102,7 @@ func TestImportTxVerify(t *testing.T) { }, }, }, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: importAmount - params.AvalancheAtomicTxFee, @@ -121,16 +122,16 @@ func TestImportTxVerify(t *testing.T) { tests := map[string]atomicTxVerifyTest{ "nil tx": { - generate: func(t *testing.T) UnsignedAtomicTx { - var importTx *UnsignedImportTx + generate: func(t *testing.T) atomic.UnsignedAtomicTx { + var importTx *atomic.UnsignedImportTx return importTx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errNilTx.Error(), + expectedErr: atomic.ErrNilTx.Error(), }, "valid import tx": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { return importTx }, ctx: ctx, @@ -138,7 +139,7 @@ func 
TestImportTxVerify(t *testing.T) { expectedErr: "", // Expect this transaction to be valid in Apricot Phase 0 }, "valid import tx banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { return importTx }, ctx: ctx, @@ -146,37 +147,37 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", // Expect this transaction to be valid in Banff }, "invalid network ID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.NetworkID++ return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongNetworkID.Error(), + expectedErr: atomic.ErrWrongNetworkID.Error(), }, "invalid blockchain ID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.BlockchainID = ids.GenerateTestID() return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongBlockchainID.Error(), + expectedErr: atomic.ErrWrongChainID.Error(), }, "P-chain source before AP5": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.SourceChain = constants.PlatformChainID return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errWrongChainID.Error(), + expectedErr: atomic.ErrWrongChainID.Error(), }, "P-chain source after AP5": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.SourceChain = constants.PlatformChainID return &tx @@ -185,27 +186,27 @@ func TestImportTxVerify(t *testing.T) { rules: apricotRulesPhase5, }, "invalid source chain ID": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.SourceChain = ids.GenerateTestID() return &tx }, ctx: ctx, rules: apricotRulesPhase5, - expectedErr: errWrongChainID.Error(), + expectedErr: 
atomic.ErrWrongChainID.Error(), }, "no inputs": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = nil return &tx }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errNoImportInputs.Error(), + expectedErr: atomic.ErrNoImportInputs.Error(), }, "inputs sorted incorrectly": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = []*avax.TransferableInput{ tx.ImportedInputs[1], @@ -215,10 +216,10 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase0, - expectedErr: errInputsNotSortedUnique.Error(), + expectedErr: atomic.ErrInputsNotSortedUnique.Error(), }, "invalid input": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = []*avax.TransferableInput{ tx.ImportedInputs[0], @@ -231,9 +232,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "atomic input failed verification", }, "unsorted outputs phase 0 passes verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[1], tx.Outs[0], } @@ -244,9 +245,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "non-unique outputs phase 0 passes verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[0], tx.Outs[0], } @@ -257,9 +258,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "unsorted outputs phase 1 fails verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = 
[]atomic.EVMOutput{ tx.Outs[1], tx.Outs[0], } @@ -267,12 +268,12 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase1, - expectedErr: errOutputsNotSorted.Error(), + expectedErr: atomic.ErrOutputsNotSorted.Error(), }, "non-unique outputs phase 1 passes verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[0], tx.Outs[0], } @@ -283,9 +284,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "outputs not sorted and unique phase 2 fails verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[0], tx.Outs[0], } @@ -293,12 +294,12 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase2, - expectedErr: errOutputsNotSortedUnique.Error(), + expectedErr: atomic.ErrOutputsNotSortedUnique.Error(), }, "outputs not sorted phase 2 fails verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ tx.Outs[1], tx.Outs[0], } @@ -306,12 +307,12 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: apricotRulesPhase2, - expectedErr: errOutputsNotSortedUnique.Error(), + expectedErr: atomic.ErrOutputsNotSortedUnique.Error(), }, "invalid EVMOutput fails verification": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: 0, @@ -325,17 +326,17 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "EVM Output failed verification", }, "no outputs apricot phase 3": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: 
func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.Outs = nil return &tx }, ctx: ctx, rules: apricotRulesPhase3, - expectedErr: errNoEVMOutputs.Error(), + expectedErr: atomic.ErrNoEVMOutputs.Error(), }, "non-AVAX input Apricot Phase 6": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = []*avax.TransferableInput{ { @@ -359,9 +360,9 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "non-AVAX output Apricot Phase 6": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ { Address: importTx.Outs[0].Address, Amount: importTx.Outs[0].Amount, @@ -375,7 +376,7 @@ func TestImportTxVerify(t *testing.T) { expectedErr: "", }, "non-AVAX input Banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx tx.ImportedInputs = []*avax.TransferableInput{ { @@ -396,12 +397,12 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: banffRules, - expectedErr: errImportNonAVAXInputBanff.Error(), + expectedErr: atomic.ErrImportNonAVAXInputBanff.Error(), }, "non-AVAX output Banff": { - generate: func(t *testing.T) UnsignedAtomicTx { + generate: func(t *testing.T) atomic.UnsignedAtomicTx { tx := *importTx - tx.Outs = []EVMOutput{ + tx.Outs = []atomic.EVMOutput{ { Address: importTx.Outs[0].Address, Amount: importTx.Outs[0].Amount, @@ -412,7 +413,7 @@ func TestImportTxVerify(t *testing.T) { }, ctx: ctx, rules: banffRules, - expectedErr: errImportNonAVAXOutputBanff.Error(), + expectedErr: atomic.ErrImportNonAVAXOutputBanff.Error(), }, } for name, test := range tests { @@ -426,7 +427,7 @@ func TestNewImportTx(t *testing.T) { importAmount := uint64(5000000) // createNewImportAVAXTx adds a UTXO to shared memory and then constructs a new import transaction // and checks that 
it has the correct fee for the base fee that has been used - createNewImportAVAXTx := func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + createNewImportAVAXTx := func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) if err != nil { @@ -450,7 +451,7 @@ func TestNewImportTx(t *testing.T) { if err != nil { t.Fatal(err) } - actualFee, err = CalculateDynamicFee(actualCost, initialBaseFee) + actualFee, err = atomic.CalculateDynamicFee(actualCost, initialBaseFee) if err != nil { t.Fatal(err) } @@ -496,8 +497,8 @@ func TestNewImportTx(t *testing.T) { } expectedRemainingBalance := new(uint256.Int).Mul( - uint256.NewInt(importAmount-actualAVAXBurned), x2cRate) - addr := GetEthAddress(testKeys[0]) + uint256.NewInt(importAmount-actualAVAXBurned), atomic.X2CRate) + addr := atomic.GetEthAddress(testKeys[0]) if actualBalance := sdb.GetBalance(addr); actualBalance.Cmp(expectedRemainingBalance) != 0 { t.Fatalf("address remaining balance %s equal %s not %s", addr.String(), actualBalance, expectedRemainingBalance) } @@ -543,7 +544,7 @@ func TestImportTxGasCost(t *testing.T) { importAmount := uint64(5000000) tests := map[string]struct { - UnsignedImportTx *UnsignedImportTx + UnsignedImportTx *atomic.UnsignedImportTx Keys [][]*secp256k1.PrivateKey ExpectedGasUsed uint64 @@ -552,7 +553,7 @@ func TestImportTxGasCost(t *testing.T) { FixedFee bool }{ "simple import": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -564,7 +565,7 @@ func TestImportTxGasCost(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, @@ -576,7 +577,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: 
big.NewInt(25 * params.GWei), }, "simple import 1wei": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -588,7 +589,7 @@ func TestImportTxGasCost(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, @@ -600,7 +601,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: big.NewInt(1), }, "simple import 1wei + fixed fee": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -612,7 +613,7 @@ func TestImportTxGasCost(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, @@ -625,7 +626,7 @@ func TestImportTxGasCost(t *testing.T) { FixedFee: true, }, "simple ANT import": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -647,7 +648,7 @@ func TestImportTxGasCost(t *testing.T) { }, }, }, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: importAmount, @@ -661,7 +662,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "complex ANT import": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -683,7 +684,7 @@ func TestImportTxGasCost(t *testing.T) { }, }, }, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: importAmount, @@ -702,7 +703,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "multisig import": { - UnsignedImportTx: 
&UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -714,7 +715,7 @@ func TestImportTxGasCost(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0, 1}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, @@ -726,7 +727,7 @@ func TestImportTxGasCost(t *testing.T) { BaseFee: big.NewInt(25 * params.GWei), }, "large import": { - UnsignedImportTx: &UnsignedImportTx{ + UnsignedImportTx: &atomic.UnsignedImportTx{ NetworkID: networkID, BlockchainID: chainID, SourceChain: xChainID, @@ -812,7 +813,7 @@ func TestImportTxGasCost(t *testing.T) { }, }, }, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: importAmount * 10, @@ -840,10 +841,10 @@ func TestImportTxGasCost(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - tx := &Tx{UnsignedAtomicTx: test.UnsignedImportTx} + tx := &atomic.Tx{UnsignedAtomicTx: test.UnsignedImportTx} // Sign with the correct key - if err := tx.Sign(Codec, test.Keys); err != nil { + if err := tx.Sign(atomic.Codec, test.Keys); err != nil { t.Fatal(err) } @@ -855,7 +856,7 @@ func TestImportTxGasCost(t *testing.T) { t.Fatalf("Expected gasUsed to be %d, but found %d", test.ExpectedGasUsed, gasUsed) } - fee, err := CalculateDynamicFee(gasUsed, test.BaseFee) + fee, err := atomic.CalculateDynamicFee(gasUsed, test.BaseFee) if err != nil { t.Fatal(err) } @@ -869,8 +870,8 @@ func TestImportTxGasCost(t *testing.T) { func TestImportTxSemanticVerify(t *testing.T) { tests := map[string]atomicTxTest{ "UTXO not present during bootstrapping": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 
vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -884,13 +885,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -898,8 +899,8 @@ func TestImportTxSemanticVerify(t *testing.T) { bootstrapping: true, }, "UTXO not present": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -913,13 +914,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -927,11 +928,11 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "failed to fetch import UTXOs from", }, "garbage UTXO": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxoID.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: 
{PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: []byte("hey there"), Traits: [][]byte{ @@ -941,7 +942,7 @@ func TestImportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -953,13 +954,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -967,7 +968,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "failed to unmarshal UTXO", }, "UTXO AssetID mismatch": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() expectedAssetID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, expectedAssetID, 1, testShortIDAddrs[0]) @@ -975,7 +976,7 @@ func TestImportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -987,28 +988,28 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, 
[][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx }, - semanticVerifyErr: errAssetIDMismatch.Error(), + semanticVerifyErr: atomic.ErrAssetIDMismatch.Error(), }, "insufficient AVAX funds": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1020,13 +1021,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 2, // Produce more output than is consumed by the transaction AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1034,7 +1035,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx flow check failed due to", }, "insufficient non-AVAX funds": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() assetID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) @@ -1042,7 +1043,7 @@ func TestImportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: 
&atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1054,13 +1055,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 2, // Produce more output than is consumed by the transaction AssetID: assetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1068,14 +1069,14 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx flow check failed due to", }, "no signatures": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1087,13 +1088,13 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, nil); err != nil { + if err := tx.Sign(atomic.Codec, nil); err != nil { t.Fatal(err) } return tx @@ -1101,14 +1102,14 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx contained mismatched number of inputs/credentials", }, "incorrect signature": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) 
*atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1120,14 +1121,14 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} // Sign the transaction with the incorrect key - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[1]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[1]}}); err != nil { t.Fatal(err) } return tx @@ -1135,14 +1136,14 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx transfer failed verification", }, "non-unique EVM Outputs": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 2, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1154,7 +1155,7 @@ func TestImportTxSemanticVerify(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{ + Outs: []atomic.EVMOutput{ { Address: testEthAddrs[0], Amount: 1, @@ -1167,13 +1168,13 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, 
[][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx }, genesisJSON: genesisJSONApricotPhase3, - semanticVerifyErr: errOutputsNotSortedUnique.Error(), + semanticVerifyErr: atomic.ErrOutputsNotSortedUnique.Error(), }, } @@ -1188,14 +1189,14 @@ func TestImportTxEVMStateTransfer(t *testing.T) { assetID := ids.GenerateTestID() tests := map[string]atomicTxTest{ "AVAX UTXO": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1207,13 +1208,13 @@ func TestImportTxEVMStateTransfer(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1227,20 +1228,20 @@ func TestImportTxEVMStateTransfer(t *testing.T) { } avaxBalance := sdb.GetBalance(testEthAddrs[0]) - if avaxBalance.Cmp(x2cRate) != 0 { - t.Fatalf("Expected AVAX balance to be %d, found balance: %d", x2cRate, avaxBalance) + if avaxBalance.Cmp(atomic.X2CRate) != 0 { + t.Fatalf("Expected AVAX balance to be %d, found balance: %d", *atomic.X2CRate, avaxBalance) } }, }, "non-AVAX UTXO": { - setup: func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := 
addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx := &Tx{UnsignedAtomicTx: &UnsignedImportTx{ + tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, SourceChain: vm.ctx.XChainID, @@ -1252,13 +1253,13 @@ func TestImportTxEVMStateTransfer(t *testing.T) { Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: testEthAddrs[0], Amount: 1, AssetID: assetID, }}, }} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx diff --git a/plugin/evm/mempool.go b/plugin/evm/mempool.go index 69a832cd2e..acb8db4e3f 100644 --- a/plugin/evm/mempool.go +++ b/plugin/evm/mempool.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/log" ) @@ -58,12 +59,12 @@ type Mempool struct { // maxSize is the maximum number of transactions allowed to be kept in mempool maxSize int // currentTxs is the set of transactions about to be added to a block. - currentTxs map[ids.ID]*Tx + currentTxs map[ids.ID]*atomic.Tx // issuedTxs is the set of transactions that have been issued into a new block - issuedTxs map[ids.ID]*Tx + issuedTxs map[ids.ID]*atomic.Tx // discardedTxs is an LRU Cache of transactions that have been discarded after failing // verification. 
- discardedTxs *cache.LRU[ids.ID, *Tx] + discardedTxs *cache.LRU[ids.ID, *atomic.Tx] // Pending is a channel of length one, which the mempool ensures has an item on // it as long as there is an unissued transaction remaining in [txs] Pending chan struct{} @@ -71,17 +72,17 @@ type Mempool struct { // NOTE: [txHeap] ONLY contains pending txs txHeap *txHeap // utxoSpenders maps utxoIDs to the transaction consuming them in the mempool - utxoSpenders map[ids.ID]*Tx + utxoSpenders map[ids.ID]*atomic.Tx // bloom is a bloom filter containing the txs in the mempool bloom *gossip.BloomFilter metrics *mempoolMetrics - verify func(tx *Tx) error + verify func(tx *atomic.Tx) error } // NewMempool returns a Mempool with [maxSize] -func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *Tx) error) (*Mempool, error) { +func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *atomic.Tx) error) (*Mempool, error) { bloom, err := gossip.NewBloomFilter(registerer, "atomic_mempool_bloom_filter", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) if err != nil { return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) @@ -89,13 +90,13 @@ func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int return &Mempool{ ctx: ctx, - issuedTxs: make(map[ids.ID]*Tx), - discardedTxs: &cache.LRU[ids.ID, *Tx]{Size: discardedTxsCacheSize}, - currentTxs: make(map[ids.ID]*Tx), + issuedTxs: make(map[ids.ID]*atomic.Tx), + discardedTxs: &cache.LRU[ids.ID, *atomic.Tx]{Size: discardedTxsCacheSize}, + currentTxs: make(map[ids.ID]*atomic.Tx), Pending: make(chan struct{}, 1), txHeap: newTxHeap(maxSize), maxSize: maxSize, - utxoSpenders: make(map[ids.ID]*Tx), + utxoSpenders: make(map[ids.ID]*atomic.Tx), bloom: bloom, metrics: newMempoolMetrics(), verify: verify, @@ -117,7 +118,7 @@ func (m *Mempool) length() int { // atomicTxGasPrice is the 
[gasPrice] paid by a transaction to burn a given // amount of [AVAXAssetID] given the value of [gasUsed]. -func (m *Mempool) atomicTxGasPrice(tx *Tx) (uint64, error) { +func (m *Mempool) atomicTxGasPrice(tx *atomic.Tx) (uint64, error) { gasUsed, err := tx.GasUsed(true) if err != nil { return 0, err @@ -158,7 +159,7 @@ func (m *Mempool) Add(tx *GossipAtomicTx) error { // AddTx attempts to add [tx] to the mempool and returns an error if // it could not be added to the mempool. -func (m *Mempool) AddTx(tx *Tx) error { +func (m *Mempool) AddTx(tx *atomic.Tx) error { m.lock.Lock() defer m.lock.Unlock() @@ -180,7 +181,7 @@ func (m *Mempool) AddTx(tx *Tx) error { return err } -func (m *Mempool) AddLocalTx(tx *Tx) error { +func (m *Mempool) AddLocalTx(tx *atomic.Tx) error { m.lock.Lock() defer m.lock.Unlock() @@ -192,8 +193,8 @@ func (m *Mempool) AddLocalTx(tx *Tx) error { return err } -// forceAddTx forcibly adds a *Tx to the mempool and bypasses all verification. -func (m *Mempool) ForceAddTx(tx *Tx) error { +// forceAddTx forcibly adds a *atomic.Tx to the mempool and bypasses all verification. +func (m *Mempool) ForceAddTx(tx *atomic.Tx) error { m.lock.Lock() defer m.lock.Unlock() @@ -208,13 +209,13 @@ func (m *Mempool) ForceAddTx(tx *Tx) error { // checkConflictTx checks for any transactions in the mempool that spend the same input UTXOs as [tx]. // If any conflicts are present, it returns the highest gas price of any conflicting transaction, the // txID of the corresponding tx and the full list of transactions that conflict with [tx]. 
-func (m *Mempool) checkConflictTx(tx *Tx) (uint64, ids.ID, []*Tx, error) { +func (m *Mempool) checkConflictTx(tx *atomic.Tx) (uint64, ids.ID, []*atomic.Tx, error) { utxoSet := tx.InputUTXOs() var ( - highestGasPrice uint64 = 0 - conflictingTxs []*Tx = make([]*Tx, 0) - highestGasPriceConflictTxID ids.ID = ids.ID{} + highestGasPrice uint64 = 0 + conflictingTxs []*atomic.Tx = make([]*atomic.Tx, 0) + highestGasPriceConflictTxID ids.ID = ids.ID{} ) for utxoID := range utxoSet { // Get current gas price of the existing tx in the mempool @@ -239,7 +240,7 @@ func (m *Mempool) checkConflictTx(tx *Tx) (uint64, ids.ID, []*Tx, error) { // addTx adds [tx] to the mempool. Assumes [m.lock] is held. // If [force], skips conflict checks within the mempool. -func (m *Mempool) addTx(tx *Tx, force bool) error { +func (m *Mempool) addTx(tx *atomic.Tx, force bool) error { txID := tx.ID() // If [txID] has already been issued or is in the currentTxs map // there's no need to add it. @@ -371,7 +372,7 @@ func (m *Mempool) GetFilter() ([]byte, []byte) { } // NextTx returns a transaction to be issued from the mempool. -func (m *Mempool) NextTx() (*Tx, bool) { +func (m *Mempool) NextTx() (*atomic.Tx, bool) { m.lock.Lock() defer m.lock.Unlock() @@ -391,7 +392,7 @@ func (m *Mempool) NextTx() (*Tx, bool) { // GetPendingTx returns the transaction [txID] and true if it is // currently in the [txHeap] waiting to be issued into a block. // Returns nil, false otherwise. -func (m *Mempool) GetPendingTx(txID ids.ID) (*Tx, bool) { +func (m *Mempool) GetPendingTx(txID ids.ID) (*atomic.Tx, bool) { m.lock.RLock() defer m.lock.RUnlock() @@ -401,7 +402,7 @@ func (m *Mempool) GetPendingTx(txID ids.ID) (*Tx, bool) { // GetTx returns the transaction [txID] if it was issued // by this node and returns whether it was dropped and whether // it exists. 
-func (m *Mempool) GetTx(txID ids.ID) (*Tx, bool, bool) { +func (m *Mempool) GetTx(txID ids.ID) (*atomic.Tx, bool, bool) { m.lock.RLock() defer m.lock.RUnlock() @@ -484,7 +485,7 @@ func (m *Mempool) CancelCurrentTxs() { // cancelTx removes [tx] from current transactions and moves it back into the // tx heap. // assumes the lock is held. -func (m *Mempool) cancelTx(tx *Tx) { +func (m *Mempool) cancelTx(tx *atomic.Tx) { // Add tx to heap sorted by gasPrice gasPrice, err := m.atomicTxGasPrice(tx) if err == nil { @@ -526,7 +527,7 @@ func (m *Mempool) DiscardCurrentTxs() { // discardCurrentTx discards [tx] from the set of current transactions. // Assumes the lock is held. -func (m *Mempool) discardCurrentTx(tx *Tx) { +func (m *Mempool) discardCurrentTx(tx *atomic.Tx) { m.removeSpenders(tx) m.discardedTxs.Put(tx.ID(), tx) delete(m.currentTxs, tx.ID()) @@ -540,7 +541,7 @@ func (m *Mempool) discardCurrentTx(tx *Tx) { // removeTx must be called for all conflicts before overwriting the utxoSpenders // map. // Assumes lock is held. -func (m *Mempool) removeTx(tx *Tx, discard bool) { +func (m *Mempool) removeTx(tx *atomic.Tx, discard bool) { txID := tx.ID() // Remove from [currentTxs], [txHeap], and [issuedTxs]. @@ -565,7 +566,7 @@ func (m *Mempool) removeTx(tx *Tx, discard bool) { // removeSpenders deletes the entries for all input UTXOs of [tx] from the // [utxoSpenders] map. // Assumes the lock is held. -func (m *Mempool) removeSpenders(tx *Tx) { +func (m *Mempool) removeSpenders(tx *atomic.Tx) { for utxoID := range tx.InputUTXOs() { delete(m.utxoSpenders, utxoID) } @@ -573,7 +574,7 @@ func (m *Mempool) removeSpenders(tx *Tx) { // RemoveTx removes [txID] from the mempool completely. // Evicts [tx] from the discarded cache if present. 
-func (m *Mempool) RemoveTx(tx *Tx) { +func (m *Mempool) RemoveTx(tx *atomic.Tx) { m.lock.Lock() defer m.lock.Unlock() diff --git a/plugin/evm/mempool_atomic_gossiping_test.go b/plugin/evm/mempool_atomic_gossiping_test.go index b44f2097b4..3e22fef486 100644 --- a/plugin/evm/mempool_atomic_gossiping_test.go +++ b/plugin/evm/mempool_atomic_gossiping_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/chain" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/stretchr/testify/assert" ) @@ -32,7 +33,7 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { // generate a valid and conflicting tx var ( - tx, conflictingTx *Tx + tx, conflictingTx *atomic.Tx ) if name == "import" { importTxs := createImportTxOptions(t, vm, sharedMemory) diff --git a/plugin/evm/mempool_test.go b/plugin/evm/mempool_test.go index a56c43bbee..8129edc577 100644 --- a/plugin/evm/mempool_test.go +++ b/plugin/evm/mempool_test.go @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) @@ -20,7 +21,7 @@ func TestMempoolAddTx(t *testing.T) { txs := make([]*GossipAtomicTx, 0) for i := 0; i < 3_000; i++ { tx := &GossipAtomicTx{ - Tx: &Tx{ + Tx: &atomic.Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), }, @@ -43,7 +44,7 @@ func TestMempoolAdd(t *testing.T) { require.NoError(err) tx := &GossipAtomicTx{ - Tx: &Tx{ + Tx: &atomic.Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), }, diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 7f57be5520..59fddb1ea4 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -12,11 +12,12 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" - 
"github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/client" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" @@ -91,24 +92,11 @@ func (service *AvaxAPI) Version(r *http.Request, _ *struct{}, reply *VersionRepl return nil } -// ExportKeyArgs are arguments for ExportKey -type ExportKeyArgs struct { - api.UserPass - Address string `json:"address"` -} - -// ExportKeyReply is the response for ExportKey -type ExportKeyReply struct { - // The decrypted PrivateKey for the Address provided in the arguments - PrivateKey *secp256k1.PrivateKey `json:"privateKey"` - PrivateKeyHex string `json:"privateKeyHex"` -} - // ExportKey returns a private key from the provided user -func (service *AvaxAPI) ExportKey(r *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { +func (service *AvaxAPI) ExportKey(r *http.Request, args *client.ExportKeyArgs, reply *client.ExportKeyReply) error { log.Info("EVM: ExportKey called") - address, err := ParseEthAddress(args.Address) + address, err := atomic.ParseEthAddress(args.Address) if err != nil { return fmt.Errorf("couldn't parse %s to address: %s", args.Address, err) } @@ -131,21 +119,15 @@ func (service *AvaxAPI) ExportKey(r *http.Request, args *ExportKeyArgs, reply *E return nil } -// ImportKeyArgs are arguments for ImportKey -type ImportKeyArgs struct { - api.UserPass - PrivateKey *secp256k1.PrivateKey `json:"privateKey"` -} - // ImportKey adds a private key to the provided user -func (service *AvaxAPI) ImportKey(r *http.Request, args *ImportKeyArgs, reply *api.JSONAddress) error { +func (service *AvaxAPI) ImportKey(r *http.Request, args *client.ImportKeyArgs, reply *api.JSONAddress) 
error { log.Info("EVM: ImportKey called", "username", args.Username) if args.PrivateKey == nil { return errMissingPrivateKey } - reply.Address = GetEthAddress(args.PrivateKey).Hex() + reply.Address = atomic.GetEthAddress(args.PrivateKey).Hex() service.vm.ctx.Lock.Lock() defer service.vm.ctx.Lock.Unlock() @@ -163,28 +145,14 @@ func (service *AvaxAPI) ImportKey(r *http.Request, args *ImportKeyArgs, reply *a return nil } -// ImportArgs are arguments for passing into Import requests -type ImportArgs struct { - api.UserPass - - // Fee that should be used when creating the tx - BaseFee *hexutil.Big `json:"baseFee"` - - // Chain the funds are coming from - SourceChain string `json:"sourceChain"` - - // The address that will receive the imported funds - To common.Address `json:"to"` -} - // ImportAVAX is a deprecated name for Import. -func (service *AvaxAPI) ImportAVAX(_ *http.Request, args *ImportArgs, response *api.JSONTxID) error { +func (service *AvaxAPI) ImportAVAX(_ *http.Request, args *client.ImportArgs, response *api.JSONTxID) error { return service.Import(nil, args, response) } // Import issues a transaction to import AVAX from the X-chain. The AVAX // must have already been exported from the X-Chain. -func (service *AvaxAPI) Import(_ *http.Request, args *ImportArgs, response *api.JSONTxID) error { +func (service *AvaxAPI) Import(_ *http.Request, args *client.ImportArgs, response *api.JSONTxID) error { log.Info("EVM: ImportAVAX called") chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) @@ -232,44 +200,18 @@ func (service *AvaxAPI) Import(_ *http.Request, args *ImportArgs, response *api. return nil } -// ExportAVAXArgs are the arguments to ExportAVAX -type ExportAVAXArgs struct { - api.UserPass - - // Fee that should be used when creating the tx - BaseFee *hexutil.Big `json:"baseFee"` - - // Amount of asset to send - Amount json.Uint64 `json:"amount"` - - // Chain the funds are going to. Optional. Used if To address does not - // include the chainID. 
- TargetChain string `json:"targetChain"` - - // ID of the address that will receive the AVAX. This address may include - // the chainID, which is used to determine what the destination chain is. - To string `json:"to"` -} - // ExportAVAX exports AVAX from the C-Chain to the X-Chain // It must be imported on the X-Chain to complete the transfer -func (service *AvaxAPI) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *api.JSONTxID) error { - return service.Export(nil, &ExportArgs{ +func (service *AvaxAPI) ExportAVAX(_ *http.Request, args *client.ExportAVAXArgs, response *api.JSONTxID) error { + return service.Export(nil, &client.ExportArgs{ ExportAVAXArgs: *args, AssetID: service.vm.ctx.AVAXAssetID.String(), }, response) } -// ExportArgs are the arguments to Export -type ExportArgs struct { - ExportAVAXArgs - // AssetID of the tokens - AssetID string `json:"assetID"` -} - // Export exports an asset from the C-Chain to the X-Chain // It must be imported on the X-Chain to complete the transfer -func (service *AvaxAPI) Export(_ *http.Request, args *ExportArgs, response *api.JSONTxID) error { +func (service *AvaxAPI) Export(_ *http.Request, args *client.ExportArgs, response *api.JSONTxID) error { log.Info("EVM: Export called") assetID, err := service.parseAssetID(args.AssetID) @@ -401,7 +343,7 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply reply.UTXOs = make([]string, len(utxos)) for i, utxo := range utxos { - b, err := service.vm.codec.Marshal(codecVersion, utxo) + b, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { return fmt.Errorf("problem marshalling UTXO: %w", err) } @@ -432,11 +374,11 @@ func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response return fmt.Errorf("problem decoding transaction: %w", err) } - tx := &Tx{} - if _, err := service.vm.codec.Unmarshal(txBytes, tx); err != nil { + tx := &atomic.Tx{} + if _, err := atomic.Codec.Unmarshal(txBytes, tx); err != nil { 
return fmt.Errorf("problem parsing transaction: %w", err) } - if err := tx.Sign(service.vm.codec, nil); err != nil { + if err := tx.Sign(atomic.Codec, nil); err != nil { return fmt.Errorf("problem initializing transaction: %w", err) } @@ -452,14 +394,8 @@ func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response return nil } -// GetAtomicTxStatusReply defines the GetAtomicTxStatus replies returned from the API -type GetAtomicTxStatusReply struct { - Status Status `json:"status"` - BlockHeight *json.Uint64 `json:"blockHeight,omitempty"` -} - // GetAtomicTxStatus returns the status of the specified transaction -func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, reply *GetAtomicTxStatusReply) error { +func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, reply *client.GetAtomicTxStatusReply) error { log.Info("EVM: GetAtomicTxStatus called", "txID", args.TxID) if args.TxID == ids.Empty { @@ -472,13 +408,13 @@ func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, r _, status, height, _ := service.vm.getAtomicTx(args.TxID) reply.Status = status - if status == Accepted { + if status == atomic.Accepted { // Since chain state updates run asynchronously with VM block acceptance, // avoid returning [Accepted] until the chain state reaches the block // containing the atomic tx. 
lastAccepted := service.vm.blockChain.LastAcceptedBlock() if height > lastAccepted.NumberU64() { - reply.Status = Processing + reply.Status = atomic.Processing return nil } @@ -509,7 +445,7 @@ func (service *AvaxAPI) GetAtomicTx(r *http.Request, args *api.GetTxArgs, reply return err } - if status == Unknown { + if status == atomic.Unknown { return fmt.Errorf("could not find tx %s", args.TxID) } @@ -519,7 +455,7 @@ func (service *AvaxAPI) GetAtomicTx(r *http.Request, args *api.GetTxArgs, reply } reply.Tx = txBytes reply.Encoding = args.Encoding - if status == Accepted { + if status == atomic.Accepted { // Since chain state updates run asynchronously with VM block acceptance, // avoid returning [Accepted] until the chain state reaches the block // containing the atomic tx. diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index bba4663153..bd7f993cb5 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" @@ -35,6 +35,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/predicate" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" @@ -291,7 +292,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require.NoError(serverVM.Shutdown(context.Background())) }) var ( - importTx, exportTx *Tx + importTx, exportTx *atomic.Tx err error ) generateAndAcceptBlocks(t, serverVM, numBlocks, func(i int, gen *core.BlockGen) { @@ -336,8 +337,8 @@ func 
createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require.NoError(serverVM.db.Commit()) serverSharedMemories := newSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) - serverSharedMemories.assertOpsApplied(t, importTx.mustAtomicOps()) - serverSharedMemories.assertOpsApplied(t, exportTx.mustAtomicOps()) + serverSharedMemories.assertOpsApplied(t, mustAtomicOps(importTx)) + serverSharedMemories.assertOpsApplied(t, mustAtomicOps(exportTx)) // make some accounts trieDB := triedb.NewDatabase(serverVM.chaindb, nil) @@ -406,7 +407,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s return &syncVMSetup{ serverVM: serverVM, serverAppSender: serverAppSender, - includedAtomicTxs: []*Tx{ + includedAtomicTxs: []*atomic.Tx{ importTx, exportTx, }, @@ -425,13 +426,13 @@ type syncVMSetup struct { serverVM *VM serverAppSender *enginetest.Sender - includedAtomicTxs []*Tx + includedAtomicTxs []*atomic.Tx fundedAccounts map[*keystore.Key]*types.StateAccount syncerVM *VM syncerDB database.Database syncerEngineChan <-chan commonEng.Message - syncerAtomicMemory *atomic.Memory + syncerAtomicMemory *avalancheatomic.Memory shutdownOnceSyncerVM *shutdownOnceVM } @@ -560,7 +561,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { syncerSharedMemories := newSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) for _, tx := range includedAtomicTxs { - syncerSharedMemories.assertOpsApplied(t, tx.mustAtomicOps()) + syncerSharedMemories.assertOpsApplied(t, mustAtomicOps(tx)) } // Generate blocks after we have entered normal consensus as well diff --git a/plugin/evm/test_tx.go b/plugin/evm/test_tx.go index c057c874ad..e001cb4dda 100644 --- a/plugin/evm/test_tx.go +++ b/plugin/evm/test_tx.go @@ -9,21 +9,21 @@ import ( "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic 
"github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) type TestUnsignedTx struct { - GasUsedV uint64 `serialize:"true"` - AcceptRequestsBlockchainIDV ids.ID `serialize:"true"` - AcceptRequestsV *atomic.Requests `serialize:"true"` + GasUsedV uint64 `serialize:"true"` + AcceptRequestsBlockchainIDV ids.ID `serialize:"true"` + AcceptRequestsV *avalancheatomic.Requests `serialize:"true"` VerifyV error IDV ids.ID `serialize:"true" json:"id"` BurnedV uint64 `serialize:"true"` @@ -34,7 +34,7 @@ type TestUnsignedTx struct { EVMStateTransferV error } -var _ UnsignedAtomicTx = &TestUnsignedTx{} +var _ atomic.UnsignedAtomicTx = &TestUnsignedTx{} // GasUsed implements the UnsignedAtomicTx interface func (t *TestUnsignedTx) GasUsed(fixedFee bool) (uint64, error) { return t.GasUsedV, nil } @@ -43,7 +43,7 @@ func (t *TestUnsignedTx) GasUsed(fixedFee bool) (uint64, error) { return t.GasUs func (t *TestUnsignedTx) Verify(ctx *snow.Context, rules params.Rules) error { return t.VerifyV } // AtomicOps implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) AtomicOps() (ids.ID, *atomic.Requests, error) { +func (t *TestUnsignedTx) AtomicOps() (ids.ID, *avalancheatomic.Requests, error) { return t.AcceptRequestsBlockchainIDV, t.AcceptRequestsV, nil } @@ -66,12 +66,12 @@ func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } // SemanticVerify implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) SemanticVerify(vm *VM, stx *Tx, parent *Block, baseFee *big.Int, rules params.Rules) error { +func 
(t *TestUnsignedTx) SemanticVerify(backend *atomic.Backend, stx *atomic.Tx, parent atomic.AtomicBlockContext, baseFee *big.Int) error { return t.SemanticVerifyV } // EVMStateTransfer implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state *state.StateDB) error { +func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state atomic.StateDB) error { return t.EVMStateTransferV } @@ -82,9 +82,9 @@ func testTxCodec() codec.Manager { errs := wrappers.Errs{} errs.Add( c.RegisterType(&TestUnsignedTx{}), - c.RegisterType(&atomic.Element{}), - c.RegisterType(&atomic.Requests{}), - codec.RegisterCodec(codecVersion, c), + c.RegisterType(&avalancheatomic.Element{}), + c.RegisterType(&avalancheatomic.Requests{}), + codec.RegisterCodec(atomic.CodecVersion, c), ) if errs.Errored() { @@ -95,12 +95,12 @@ func testTxCodec() codec.Manager { var blockChainID = ids.GenerateTestID() -func testDataImportTx() *Tx { - return &Tx{ +func testDataImportTx() *atomic.Tx { + return &atomic.Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), AcceptRequestsBlockchainIDV: blockChainID, - AcceptRequestsV: &atomic.Requests{ + AcceptRequestsV: &avalancheatomic.Requests{ RemoveRequests: [][]byte{ utils.RandomBytes(32), utils.RandomBytes(32), @@ -110,13 +110,13 @@ func testDataImportTx() *Tx { } } -func testDataExportTx() *Tx { - return &Tx{ +func testDataExportTx() *atomic.Tx { + return &atomic.Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), AcceptRequestsBlockchainIDV: blockChainID, - AcceptRequestsV: &atomic.Requests{ - PutRequests: []*atomic.Element{ + AcceptRequestsV: &avalancheatomic.Requests{ + PutRequests: []*avalancheatomic.Element{ { Key: utils.RandomBytes(16), Value: utils.RandomBytes(24), @@ -131,7 +131,7 @@ func testDataExportTx() *Tx { } } -func newTestTx() *Tx { +func newTestTx() *atomic.Tx { txType := rand.Intn(2) switch txType { case 0: @@ -143,8 +143,8 @@ func newTestTx() *Tx { } } -func 
newTestTxs(numTxs int) []*Tx { - txs := make([]*Tx, 0, numTxs) +func newTestTxs(numTxs int) []*atomic.Tx { + txs := make([]*atomic.Tx, 0, numTxs) for i := 0; i < numTxs; i++ { txs = append(txs, newTestTx()) } diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index af544e9415..fb1d7388d9 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" @@ -35,6 +35,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/utils" ) @@ -47,7 +48,7 @@ func TestEthTxGossip(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := atomic.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -170,12 +171,12 @@ func TestAtomicTxGossip(t *testing.T) { snowCtx.AVAXAssetID = ids.GenerateTestID() validatorState := utils.NewTestValidatorState() snowCtx.ValidatorState = validatorState - memory := atomic.NewMemory(memdb.New()) + memory := avalancheatomic.NewMemory(memdb.New()) snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := atomic.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -267,7 +268,7 @@ func TestAtomicTxGossip(t *testing.T) { pk.PublicKey().Address(), ) require.NoError(err) - tx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, address, initialBaseFee, 
secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) @@ -314,7 +315,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := atomic.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -374,7 +375,7 @@ func TestEthTxPushGossipInbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := atomic.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -428,12 +429,12 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { snowCtx.AVAXAssetID = ids.GenerateTestID() validatorState := utils.NewTestValidatorState() snowCtx.ValidatorState = validatorState - memory := atomic.NewMemory(memdb.New()) + memory := avalancheatomic.NewMemory(memdb.New()) snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := atomic.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -475,7 +476,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { pk.PublicKey().Address(), ) require.NoError(err) - tx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) 
require.NoError(vm.mempool.AddLocalTx(tx)) vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) @@ -501,12 +502,12 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { snowCtx.AVAXAssetID = ids.GenerateTestID() validatorState := utils.NewTestValidatorState() snowCtx.ValidatorState = validatorState - memory := atomic.NewMemory(memdb.New()) + memory := avalancheatomic.NewMemory(memdb.New()) snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := GetEthAddress(pk) + address := atomic.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -546,7 +547,7 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { pk.PublicKey().Address(), ) require.NoError(err) - tx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) diff --git a/plugin/evm/tx_heap.go b/plugin/evm/tx_heap.go index d44020039e..c6562fd9b0 100644 --- a/plugin/evm/tx_heap.go +++ b/plugin/evm/tx_heap.go @@ -7,6 +7,7 @@ import ( "container/heap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) // txEntry is used to track the [gasPrice] transactions pay to be included in @@ -14,7 +15,7 @@ import ( type txEntry struct { id ids.ID gasPrice uint64 - tx *Tx + tx *atomic.Tx index int } @@ -91,7 +92,7 @@ func newTxHeap(maxSize int) *txHeap { } } -func (th *txHeap) Push(tx *Tx, gasPrice uint64) { +func (th *txHeap) Push(tx *atomic.Tx, gasPrice uint64) { txID := tx.ID() oldLen := th.Len() heap.Push(th.maxHeap, &txEntry{ @@ -109,28 +110,28 @@ func (th *txHeap) Push(tx *Tx, gasPrice uint64) { } // Assumes there is non-zero items in 
[txHeap] -func (th *txHeap) PeekMax() (*Tx, uint64) { +func (th *txHeap) PeekMax() (*atomic.Tx, uint64) { txEntry := th.maxHeap.items[0] return txEntry.tx, txEntry.gasPrice } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PeekMin() (*Tx, uint64) { +func (th *txHeap) PeekMin() (*atomic.Tx, uint64) { txEntry := th.minHeap.items[0] return txEntry.tx, txEntry.gasPrice } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMax() *Tx { +func (th *txHeap) PopMax() *atomic.Tx { return th.Remove(th.maxHeap.items[0].id) } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMin() *Tx { +func (th *txHeap) PopMin() *atomic.Tx { return th.Remove(th.minHeap.items[0].id) } -func (th *txHeap) Remove(id ids.ID) *Tx { +func (th *txHeap) Remove(id ids.ID) *atomic.Tx { maxEntry, ok := th.maxHeap.Get(id) if !ok { return nil @@ -150,7 +151,7 @@ func (th *txHeap) Len() int { return th.maxHeap.Len() } -func (th *txHeap) Get(id ids.ID) (*Tx, bool) { +func (th *txHeap) Get(id ids.ID) (*atomic.Tx, bool) { txEntry, ok := th.maxHeap.Get(id) if !ok { return nil, false diff --git a/plugin/evm/tx_heap_test.go b/plugin/evm/tx_heap_test.go index 206b87bbdb..a054b7362e 100644 --- a/plugin/evm/tx_heap_test.go +++ b/plugin/evm/tx_heap_test.go @@ -6,27 +6,28 @@ package evm import ( "testing" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/stretchr/testify/assert" ) func TestTxHeap(t *testing.T) { var ( - tx0 = &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{ + tx0 = &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 0, }, } tx0Bytes = []byte{0} - tx1 = &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{ + tx1 = &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 1, }, } tx1Bytes = []byte{1} - tx2 = &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{ + tx2 = &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 2, }, } diff --git a/plugin/evm/tx_test.go b/plugin/evm/tx_test.go index d99ee70309..a710a3c9e1 100644 
--- a/plugin/evm/tx_test.go +++ b/plugin/evm/tx_test.go @@ -14,8 +14,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" ) @@ -30,7 +31,7 @@ func TestCalculateDynamicFee(t *testing.T) { var tests []test = []test{ { gas: 1, - baseFee: new(big.Int).Set(x2cRate.ToBig()), + baseFee: new(big.Int).Set(atomic.X2CRate.ToBig()), expectedValue: 1, }, { @@ -41,7 +42,7 @@ func TestCalculateDynamicFee(t *testing.T) { } for _, test := range tests { - cost, err := CalculateDynamicFee(test.gas, test.baseFee) + cost, err := atomic.CalculateDynamicFee(test.gas, test.baseFee) if test.expectedErr == nil { if err != nil { t.Fatalf("Unexpectedly failed to calculate dynamic fee: %s", err) @@ -59,7 +60,7 @@ func TestCalculateDynamicFee(t *testing.T) { type atomicTxVerifyTest struct { ctx *snow.Context - generate func(t *testing.T) UnsignedAtomicTx + generate func(t *testing.T) atomic.UnsignedAtomicTx rules params.Rules expectedErr string } @@ -78,7 +79,7 @@ func executeTxVerifyTest(t *testing.T, test atomicTxVerifyTest) { type atomicTxTest struct { // setup returns the atomic transaction for the test - setup func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) *Tx + setup func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx // define a string that should be contained in the error message if the tx fails verification // at some point. If the strings are empty, then the tx should pass verification at the // respective step. 
@@ -115,7 +116,15 @@ func executeTxTest(t *testing.T, test atomicTxTest) { } lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) - if err := tx.UnsignedAtomicTx.SemanticVerify(vm, tx, lastAcceptedBlock, baseFee, rules); len(test.semanticVerifyErr) == 0 && err != nil { + backend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: rules, + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } + if err := tx.UnsignedAtomicTx.SemanticVerify(backend, tx, lastAcceptedBlock, baseFee); len(test.semanticVerifyErr) == 0 && err != nil { t.Fatalf("SemanticVerify failed unexpectedly due to: %s", err) } else if len(test.semanticVerifyErr) != 0 { if err == nil { @@ -191,18 +200,18 @@ func executeTxTest(t *testing.T, test atomicTxTest) { func TestEVMOutputCompare(t *testing.T) { type test struct { name string - a, b EVMOutput + a, b atomic.EVMOutput expected int } tests := []test{ { name: "address less", - a: EVMOutput{ + a: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{1}, }, - b: EVMOutput{ + b: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x02}), AssetID: ids.ID{0}, }, @@ -210,11 +219,11 @@ func TestEVMOutputCompare(t *testing.T) { }, { name: "address greater; assetIDs equal", - a: EVMOutput{ + a: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x02}), AssetID: ids.ID{}, }, - b: EVMOutput{ + b: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{}, }, @@ -222,11 +231,11 @@ func TestEVMOutputCompare(t *testing.T) { }, { name: "addresses equal; assetID less", - a: EVMOutput{ + a: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{0}, }, - b: EVMOutput{ + b: atomic.EVMOutput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{1}, }, @@ -234,8 +243,8 @@ func TestEVMOutputCompare(t *testing.T) { }, { name: "equal", - a: EVMOutput{}, - b: EVMOutput{}, + a: atomic.EVMOutput{}, + b: atomic.EVMOutput{}, expected: 0, }, 
} @@ -253,18 +262,18 @@ func TestEVMOutputCompare(t *testing.T) { func TestEVMInputCompare(t *testing.T) { type test struct { name string - a, b EVMInput + a, b atomic.EVMInput expected int } tests := []test{ { name: "address less", - a: EVMInput{ + a: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{1}, }, - b: EVMInput{ + b: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x02}), AssetID: ids.ID{0}, }, @@ -272,11 +281,11 @@ func TestEVMInputCompare(t *testing.T) { }, { name: "address greater; assetIDs equal", - a: EVMInput{ + a: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x02}), AssetID: ids.ID{}, }, - b: EVMInput{ + b: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{}, }, @@ -284,11 +293,11 @@ func TestEVMInputCompare(t *testing.T) { }, { name: "addresses equal; assetID less", - a: EVMInput{ + a: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{0}, }, - b: EVMInput{ + b: atomic.EVMInput{ Address: common.BytesToAddress([]byte{0x01}), AssetID: ids.ID{1}, }, @@ -296,8 +305,8 @@ func TestEVMInputCompare(t *testing.T) { }, { name: "equal", - a: EVMInput{}, - b: EVMInput{}, + a: atomic.EVMInput{}, + b: atomic.EVMInput{}, expected: 0, }, } diff --git a/plugin/evm/user.go b/plugin/evm/user.go index 330d03b01d..627a7af1d1 100644 --- a/plugin/evm/user.go +++ b/plugin/evm/user.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/database/encdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" ) @@ -47,7 +48,7 @@ func (u *user) getAddresses() ([]common.Address, error) { return nil, err } addresses := []common.Address{} - if _, err := Codec.Unmarshal(bytes, &addresses); err != nil { + if _, err := atomic.Codec.Unmarshal(bytes, &addresses); err != nil { return nil, err } return addresses, nil @@ -69,7 +70,7 @@ func (u *user) 
putAddress(privKey *secp256k1.PrivateKey) error { return errKeyNil } - address := GetEthAddress(privKey) // address the privKey controls + address := atomic.GetEthAddress(privKey) // address the privKey controls controlsAddress, err := u.controlsAddress(address) if err != nil { return err @@ -93,7 +94,7 @@ func (u *user) putAddress(privKey *secp256k1.PrivateKey) error { } } addresses = append(addresses, address) - bytes, err := Codec.Marshal(codecVersion, addresses) + bytes, err := atomic.Codec.Marshal(atomic.CodecVersion, addresses) if err != nil { return err } diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 50606b0af7..4adab936bc 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -23,7 +23,6 @@ import ( "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/upgrade" avalanchegoConstants "github.com/ava-labs/avalanchego/utils/constants" - "github.com/holiman/uint256" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/consensus/dummy" @@ -41,6 +40,7 @@ import ( "github.com/ava-labs/coreth/node" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/peer" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/triedb" "github.com/ava-labs/coreth/triedb/hashdb" @@ -84,7 +84,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/set" @@ -108,27 +107,12 @@ var ( _ secp256k1fx.VM = &VM{} ) -const ( - x2cRateUint64 uint64 = 1_000_000_000 - x2cRateMinus1Uint64 uint64 = x2cRateUint64 - 1 -) - -var ( - // x2cRate is the conversion rate between the smallest denomination on the X-Chain - // 1 nAVAX and the smallest denomination on 
the C-Chain 1 wei. Where 1 nAVAX = 1 gWei. - // This is only required for AVAX because the denomination of 1 AVAX is 9 decimal - // places on the X and P chains, but is 18 decimal places within the EVM. - x2cRate = uint256.NewInt(x2cRateUint64) - x2cRateMinus1 = uint256.NewInt(x2cRateMinus1Uint64) -) - const ( // Max time from current time allowed for blocks, before they're considered future blocks // and fail verification maxFutureBlockTime = 10 * time.Second maxUTXOsToFetch = 1024 defaultMempoolSize = 4096 - codecVersion = uint16(0) secpCacheSize = 1024 decidedCacheSize = 10 * units.MiB @@ -184,30 +168,15 @@ var ( errEmptyBlock = errors.New("empty block") errUnsupportedFXs = errors.New("unsupported feature extensions") errInvalidBlock = errors.New("invalid block") - errInvalidAddr = errors.New("invalid hex address") errInsufficientAtomicTxFee = errors.New("atomic tx fee too low for atomic mempool") - errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") - errNoImportInputs = errors.New("tx has no imported inputs") - errInputsNotSortedUnique = errors.New("inputs not sorted and unique") - errPublicKeySignatureMismatch = errors.New("signature doesn't match public key") - errWrongChainID = errors.New("tx has wrong chain ID") - errInsufficientFunds = errors.New("insufficient funds") - errNoExportOutputs = errors.New("tx has no export outputs") - errOutputsNotSorted = errors.New("tx outputs not sorted") - errOutputsNotSortedUnique = errors.New("outputs not sorted and unique") - errOverflowExport = errors.New("overflow when computing export amount + txFee") errInvalidNonce = errors.New("invalid nonce") - errConflictingAtomicInputs = errors.New("invalid block due to conflicting atomic inputs") errUnclesUnsupported = errors.New("uncles unsupported") errRejectedParent = errors.New("rejected parent") - errInsufficientFundsForFee = errors.New("insufficient AVAX funds to pay transaction fee") - errNoEVMOutputs = errors.New("tx has no EVM outputs") 
errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") errNilExtDataGasUsedApricotPhase4 = errors.New("nil extDataGasUsed is invalid after apricotPhase4") errNilBlockGasCostApricotPhase4 = errors.New("nil blockGasCost is invalid after apricotPhase4") errConflictingAtomicTx = errors.New("conflicting atomic tx present") errTooManyAtomicTx = errors.New("too many atomic tx") - errMissingAtomicTxs = errors.New("cannot build a block with non-empty extra data and zero atomic transactions") errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") ) @@ -295,7 +264,6 @@ type VM struct { builder *blockBuilder baseCodec codec.Registry - codec codec.Manager clock mockable.Clock mempool *Mempool @@ -574,8 +542,6 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to verify chain config: %w", err) } - vm.codec = Codec - // TODO: read size from settings vm.mempool, err = NewMempool(chainCtx, vm.sdkMetrics, defaultMempoolSize, vm.verifyTxAtTip) if err != nil { @@ -641,7 +607,7 @@ func (vm *VM) Initialize( } // initialize atomic repository - vm.atomicTxRepository, err = NewAtomicTxRepository(vm.db, vm.codec, lastAcceptedHeight) + vm.atomicTxRepository, err = NewAtomicTxRepository(vm.db, atomic.Codec, lastAcceptedHeight) if err != nil { return fmt.Errorf("failed to create atomic repository: %w", err) } @@ -875,7 +841,7 @@ func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.S continue } - atomicTxBytes, err := vm.codec.Marshal(codecVersion, tx) + atomicTxBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, tx) if err != nil { // Discard the transaction from the mempool and error if the transaction // cannot be marshalled. This should never happen. @@ -904,7 +870,7 @@ func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.S // assumes that we are in at least Apricot Phase 5. 
func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { var ( - batchAtomicTxs []*Tx + batchAtomicTxs []*atomic.Tx batchAtomicUTXOs set.Set[ids.ID] batchContribution *big.Int = new(big.Int).Set(common.Big0) batchGasUsed *big.Int = new(big.Int).Set(common.Big0) @@ -979,7 +945,7 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. // If there is a non-zero number of transactions, marshal them and return the byte slice // for the block's extra data along with the contribution and gas used. if len(batchAtomicTxs) > 0 { - atomicTxBytes, err := vm.codec.Marshal(codecVersion, batchAtomicTxs) + atomicTxBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, batchAtomicTxs) if err != nil { // If we fail to marshal the batch of atomic transactions for any reason, // discard the entire set of current transactions. @@ -1017,7 +983,7 @@ func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big rules = vm.chainConfig.Rules(header.Number, header.Time) ) - txs, err := ExtractAtomicTxs(block.ExtData(), rules.IsApricotPhase5, vm.codec) + txs, err := atomic.ExtractAtomicTxs(block.ExtData(), rules.IsApricotPhase5, atomic.Codec) if err != nil { return nil, nil, err } @@ -1602,61 +1568,22 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er ****************************************************************************** */ -// conflicts returns an error if [inputs] conflicts with any of the atomic inputs contained in [ancestor] -// or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is -// accepted, then nil will be returned immediately. -// If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned. 
-func (vm *VM) conflicts(inputs set.Set[ids.ID], ancestor *Block) error { - lastAcceptedBlock := vm.LastAcceptedBlock() - lastAcceptedHeight := lastAcceptedBlock.Height() - for ancestor.Height() > lastAcceptedHeight { - // If any of the atomic transactions in the ancestor conflict with [inputs] - // return an error. - for _, atomicTx := range ancestor.atomicTxs { - if inputs.Overlaps(atomicTx.InputUTXOs()) { - return errConflictingAtomicInputs - } - } - - // Move up the chain. - nextAncestorID := ancestor.Parent() - // If the ancestor is unknown, then the parent failed - // verification when it was called. - // If the ancestor is rejected, then this block shouldn't be - // inserted into the canonical chain because the parent is - // will be missing. - // If the ancestor is processing, then the block may have - // been verified. - nextAncestorIntf, err := vm.GetBlockInternal(context.TODO(), nextAncestorID) - if err != nil { - return errRejectedParent - } - nextAncestor, ok := nextAncestorIntf.(*Block) - if !ok { - return fmt.Errorf("ancestor block %s had unexpected type %T", nextAncestor.ID(), nextAncestorIntf) - } - ancestor = nextAncestor - } - - return nil -} - // getAtomicTx returns the requested transaction, status, and height. // If the status is Unknown, then the returned transaction will be nil. 
-func (vm *VM) getAtomicTx(txID ids.ID) (*Tx, Status, uint64, error) { +func (vm *VM) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) { if tx, height, err := vm.atomicTxRepository.GetByTxID(txID); err == nil { - return tx, Accepted, height, nil + return tx, atomic.Accepted, height, nil } else if err != database.ErrNotFound { - return nil, Unknown, 0, err + return nil, atomic.Unknown, 0, err } tx, dropped, found := vm.mempool.GetTx(txID) switch { case found && dropped: - return tx, Dropped, 0, nil + return tx, atomic.Dropped, 0, nil case found: - return tx, Processing, 0, nil + return tx, atomic.Processing, 0, nil default: - return nil, Unknown, 0, nil + return nil, atomic.Unknown, 0, nil } } @@ -1687,7 +1614,7 @@ func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { } // verifyTxAtTip verifies that [tx] is valid to be issued on top of the currently preferred block -func (vm *VM) verifyTxAtTip(tx *Tx) error { +func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { if txByteLen := len(tx.SignedBytes()); txByteLen > targetAtomicTxsSize { return fmt.Errorf("tx size (%d) exceeds total atomic txs size target (%d)", txByteLen, targetAtomicTxsSize) } @@ -1728,7 +1655,7 @@ func (vm *VM) verifyTxAtTip(tx *Tx) error { // Note: verifyTx may modify [state]. If [state] needs to be properly maintained, the caller is responsible // for reverting to the correct snapshot after calling this function. If this function is called with a // throwaway state, then this is not necessary. 
-func (vm *VM) verifyTx(tx *Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { +func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { parentIntf, err := vm.GetBlockInternal(context.TODO(), ids.ID(parentHash)) if err != nil { return fmt.Errorf("failed to get parent block: %w", err) @@ -1737,7 +1664,15 @@ func (vm *VM) verifyTx(tx *Tx, parentHash common.Hash, baseFee *big.Int, state * if !ok { return fmt.Errorf("parent block %s had unexpected type %T", parentIntf.ID(), parentIntf) } - if err := tx.UnsignedAtomicTx.SemanticVerify(vm, tx, parent, baseFee, rules); err != nil { + atomicBackend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: rules, + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } + if err := tx.UnsignedAtomicTx.SemanticVerify(atomicBackend, tx, parent, baseFee); err != nil { return err } return tx.UnsignedAtomicTx.EVMStateTransfer(vm.ctx, state) @@ -1745,7 +1680,7 @@ func (vm *VM) verifyTx(tx *Tx, parentHash common.Hash, baseFee *big.Int, state * // verifyTxs verifies that [txs] are valid to be issued into a block with parent block [parentHash] // using [rules] as the current rule set. -func (vm *VM) verifyTxs(txs []*Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules params.Rules) error { +func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules params.Rules) error { // Ensure that the parent was verified and inserted correctly. if !vm.blockChain.HasBlock(parentHash, height-1) { return errRejectedParent @@ -1768,14 +1703,22 @@ func (vm *VM) verifyTxs(txs []*Tx, parentHash common.Hash, baseFee *big.Int, hei // Ensure each tx in [txs] doesn't conflict with any other atomic tx in // a processing ancestor block. 
inputs := set.Set[ids.ID]{} + atomicBackend := &atomic.Backend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: rules, + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } for _, atomicTx := range txs { utx := atomicTx.UnsignedAtomicTx - if err := utx.SemanticVerify(vm, atomicTx, ancestor, baseFee, rules); err != nil { + if err := utx.SemanticVerify(atomicBackend, atomicTx, ancestor, baseFee); err != nil { return fmt.Errorf("invalid block due to failed semanatic verify: %w at height %d", err, height) } txInputs := utx.InputUTXOs() if inputs.Overlaps(txInputs) { - return errConflictingAtomicInputs + return atomic.ErrConflictingAtomicInputs } inputs.Union(txInputs) } @@ -1797,7 +1740,7 @@ func (vm *VM) GetAtomicUTXOs( return avax.GetAtomicUTXOs( vm.ctx.SharedMemory, - vm.codec, + atomic.Codec, chainID, addrs, startAddr, @@ -1806,176 +1749,6 @@ func (vm *VM) GetAtomicUTXOs( ) } -// GetSpendableFunds returns a list of EVMInputs and keys (in corresponding -// order) to total [amount] of [assetID] owned by [keys]. -// Note: we return [][]*secp256k1.PrivateKey even though each input -// corresponds to a single key, so that the signers can be passed in to -// [tx.Sign] which supports multiple keys on a single input. -func (vm *VM) GetSpendableFunds( - keys []*secp256k1.PrivateKey, - assetID ids.ID, - amount uint64, -) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { - // Note: current state uses the state of the preferred block. - state, err := vm.blockChain.State() - if err != nil { - return nil, nil, err - } - inputs := []EVMInput{} - signers := [][]*secp256k1.PrivateKey{} - // Note: we assume that each key in [keys] is unique, so that iterating over - // the keys will not produce duplicated nonces in the returned EVMInput slice. 
- for _, key := range keys { - if amount == 0 { - break - } - addr := GetEthAddress(key) - var balance uint64 - if assetID == vm.ctx.AVAXAssetID { - // If the asset is AVAX, we divide by the x2cRate to convert back to the correct - // denomination of AVAX that can be exported. - balance = new(uint256.Int).Div(state.GetBalance(addr), x2cRate).Uint64() - } else { - balance = state.GetBalanceMultiCoin(addr, common.Hash(assetID)).Uint64() - } - if balance == 0 { - continue - } - if amount < balance { - balance = amount - } - nonce, err := vm.GetCurrentNonce(addr) - if err != nil { - return nil, nil, err - } - inputs = append(inputs, EVMInput{ - Address: addr, - Amount: balance, - AssetID: assetID, - Nonce: nonce, - }) - signers = append(signers, []*secp256k1.PrivateKey{key}) - amount -= balance - } - - if amount > 0 { - return nil, nil, errInsufficientFunds - } - - return inputs, signers, nil -} - -// GetSpendableAVAXWithFee returns a list of EVMInputs and keys (in corresponding -// order) to total [amount] + [fee] of [AVAX] owned by [keys]. -// This function accounts for the added cost of the additional inputs needed to -// create the transaction and makes sure to skip any keys with a balance that is -// insufficient to cover the additional fee. -// Note: we return [][]*secp256k1.PrivateKey even though each input -// corresponds to a single key, so that the signers can be passed in to -// [tx.Sign] which supports multiple keys on a single input. -func (vm *VM) GetSpendableAVAXWithFee( - keys []*secp256k1.PrivateKey, - amount uint64, - cost uint64, - baseFee *big.Int, -) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { - // Note: current state uses the state of the preferred block. 
- state, err := vm.blockChain.State() - if err != nil { - return nil, nil, err - } - - initialFee, err := CalculateDynamicFee(cost, baseFee) - if err != nil { - return nil, nil, err - } - - newAmount, err := math.Add64(amount, initialFee) - if err != nil { - return nil, nil, err - } - amount = newAmount - - inputs := []EVMInput{} - signers := [][]*secp256k1.PrivateKey{} - // Note: we assume that each key in [keys] is unique, so that iterating over - // the keys will not produce duplicated nonces in the returned EVMInput slice. - for _, key := range keys { - if amount == 0 { - break - } - - prevFee, err := CalculateDynamicFee(cost, baseFee) - if err != nil { - return nil, nil, err - } - - newCost := cost + EVMInputGas - newFee, err := CalculateDynamicFee(newCost, baseFee) - if err != nil { - return nil, nil, err - } - - additionalFee := newFee - prevFee - - addr := GetEthAddress(key) - // Since the asset is AVAX, we divide by the x2cRate to convert back to - // the correct denomination of AVAX that can be exported. - balance := new(uint256.Int).Div(state.GetBalance(addr), x2cRate).Uint64() - // If the balance for [addr] is insufficient to cover the additional cost - // of adding an input to the transaction, skip adding the input altogether - if balance <= additionalFee { - continue - } - - // Update the cost for the next iteration - cost = newCost - - newAmount, err := math.Add64(amount, additionalFee) - if err != nil { - return nil, nil, err - } - amount = newAmount - - // Use the entire [balance] as an input, but if the required [amount] - // is less than the balance, update the [inputAmount] to spend the - // minimum amount to finish the transaction. 
- inputAmount := balance - if amount < balance { - inputAmount = amount - } - nonce, err := vm.GetCurrentNonce(addr) - if err != nil { - return nil, nil, err - } - inputs = append(inputs, EVMInput{ - Address: addr, - Amount: inputAmount, - AssetID: vm.ctx.AVAXAssetID, - Nonce: nonce, - }) - signers = append(signers, []*secp256k1.PrivateKey{key}) - amount -= inputAmount - } - - if amount > 0 { - return nil, nil, errInsufficientFunds - } - - return inputs, signers, nil -} - -// GetCurrentNonce returns the nonce associated with the address at the -// preferred block -func (vm *VM) GetCurrentNonce(address common.Address) (uint64, error) { - // Note: current state uses the state of the preferred block. - state, err := vm.blockChain.State() - if err != nil { - return 0, err - } - return state.GetNonce(address), nil -} - // currentRules returns the chain rules for the current block. func (vm *VM) currentRules() params.Rules { header := vm.eth.APIBackend.CurrentHeader() @@ -2109,3 +1882,55 @@ func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool { // enable state sync by default if the chain is empty. 
return lastAcceptedHeight == 0 } + +func (vm *VM) newImportTx( + chainID ids.ID, // chain to import from + to common.Address, // Address of recipient + baseFee *big.Int, // fee to use post-AP3 + keys []*secp256k1.PrivateKey, // Keys to import the funds +) (*atomic.Tx, error) { + kc := secp256k1fx.NewKeychain() + for _, key := range keys { + kc.Add(key) + } + + atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1) + if err != nil { + return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) + } + + return atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, chainID, to, baseFee, kc, atomicUTXOs) +} + +// newExportTx returns a new ExportTx +func (vm *VM) newExportTx( + assetID ids.ID, // AssetID of the tokens to export + amount uint64, // Amount of tokens to export + chainID ids.ID, // Chain to send the UTXOs to + to ids.ShortID, // Address of chain recipient + baseFee *big.Int, // fee to use post-AP3 + keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens +) (*atomic.Tx, error) { + state, err := vm.blockChain.State() + if err != nil { + return nil, err + } + + // Create the transaction + tx, err := atomic.NewExportTx( + vm.ctx, // Context + vm.currentRules(), // VM rules + state, + assetID, // AssetID + amount, // Amount + chainID, // ID of the chain to send the funds to + to, // Address + baseFee, + keys, // Private keys + ) + if err != nil { + return nil, err + } + + return tx, nil +} diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 3d60b23356..83914a038e 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -24,6 +24,7 @@ import ( "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/eth/filters" "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" @@ -31,7 +32,7 @@ import ( "github.com/stretchr/testify/require" 
"github.com/ava-labs/avalanchego/api/keystore" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" @@ -160,7 +161,7 @@ func init() { b, _ = cb58.Decode(key) pk, _ := secp256k1.ToPrivateKey(b) testKeys = append(testKeys, pk) - testEthAddrs = append(testEthAddrs, GetEthAddress(pk)) + testEthAddrs = append(testEthAddrs, atomic.GetEthAddress(pk)) testShortIDAddrs = append(testShortIDAddrs, pk.PublicKey().Address()) } } @@ -246,7 +247,7 @@ func setupGenesis( database.Database, []byte, chan commonEng.Message, - *atomic.Memory, + *avalancheatomic.Memory, ) { if len(genesisJSON) == 0 { genesisJSON = genesisJSONLatest @@ -257,7 +258,7 @@ func setupGenesis( baseDB := memdb.New() // initialize the atomic memory - atomicMemory := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) + atomicMemory := avalancheatomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID) // NB: this lock is intentionally left locked when this function returns. 
@@ -288,7 +289,7 @@ func GenesisVM(t *testing.T, chan commonEng.Message, *VM, database.Database, - *atomic.Memory, + *avalancheatomic.Memory, *enginetest.Sender, ) { return GenesisVMWithClock(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON, mockable.Clock{}) @@ -307,7 +308,7 @@ func GenesisVMWithClock( chan commonEng.Message, *VM, database.Database, - *atomic.Memory, + *avalancheatomic.Memory, *enginetest.Sender, ) { vm := &VM{clock: clock} @@ -336,7 +337,7 @@ func GenesisVMWithClock( return issuer, vm, dbManager, m, appSender } -func addUTXO(sharedMemory *atomic.Memory, ctx *snow.Context, txID ids.ID, index uint32, assetID ids.ID, amount uint64, addr ids.ShortID) (*avax.UTXO, error) { +func addUTXO(sharedMemory *avalancheatomic.Memory, ctx *snow.Context, txID ids.ID, index uint32, assetID ids.ID, amount uint64, addr ids.ShortID) (*avax.UTXO, error) { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: txID, @@ -351,14 +352,14 @@ func addUTXO(sharedMemory *atomic.Memory, ctx *snow.Context, txID ids.ID, index }, }, } - utxoBytes, err := Codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { return nil, err } xChainSharedMemory := sharedMemory.NewSharedMemory(ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ @@ -374,7 +375,7 @@ func addUTXO(sharedMemory *atomic.Memory, ctx *snow.Context, txID ids.ID, index // GenesisVMWithUTXOs creates a GenesisVM and generates UTXOs in the X-Chain Shared Memory containing AVAX based on the [utxos] map // Generates UTXOIDs by using a hash of the address in the [utxos] map such that the UTXOs will be generated deterministically. 
// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *atomic.Memory, *enginetest.Sender) { +func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *avalancheatomic.Memory, *enginetest.Sender) { issuer, vm, db, sharedMemory, sender := GenesisVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON) for addr, avaxAmount := range utxos { txID, err := ids.ToID(hashing.ComputeHash256(addr.Bytes())) @@ -683,13 +684,13 @@ func TestIssueAtomicTxs(t *testing.T) { // Check that both atomic transactions were indexed as expected. indexedImportTx, status, height, err := vm.getAtomicTx(importTx.ID()) assert.NoError(t, err) - assert.Equal(t, Accepted, status) + assert.Equal(t, atomic.Accepted, status) assert.Equal(t, uint64(1), height, "expected height of indexed import tx to be 1") assert.Equal(t, indexedImportTx.ID(), importTx.ID(), "expected ID of indexed import tx to match original txID") indexedExportTx, status, height, err := vm.getAtomicTx(exportTx.ID()) assert.NoError(t, err) - assert.Equal(t, Accepted, status) + assert.Equal(t, atomic.Accepted, status) assert.Equal(t, uint64(2), height, "expected height of indexed export tx to be 2") assert.Equal(t, indexedExportTx.ID(), exportTx.ID(), "expected ID of indexed import tx to match original txID") } @@ -849,8 +850,8 @@ func testConflictingImportTxs(t *testing.T, genesis string) { } }() - importTxs := make([]*Tx, 0, 3) - conflictTxs := make([]*Tx, 0, 3) + importTxs := make([]*atomic.Tx, 0, 3) + conflictTxs := make([]*atomic.Tx, 0, 3) for i, key := range testKeys { importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[i], initialBaseFee, 
[]*secp256k1.PrivateKey{key}) if err != nil { @@ -944,9 +945,9 @@ func testConflictingImportTxs(t *testing.T, genesis string) { var extraData []byte switch { case rules.IsApricotPhase5: - extraData, err = vm.codec.Marshal(codecVersion, []*Tx{conflictTxs[1]}) + extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{conflictTxs[1]}) default: - extraData, err = vm.codec.Marshal(codecVersion, conflictTxs[1]) + extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, conflictTxs[1]) } if err != nil { t.Fatal(err) @@ -972,15 +973,15 @@ func testConflictingImportTxs(t *testing.T, genesis string) { t.Fatal(err) } - if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) { - t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err) + if err := parsedBlock.Verify(context.Background()); !errors.Is(err, atomic.ErrConflictingAtomicInputs) { + t.Fatalf("Expected to fail with err: %s, but found err: %s", atomic.ErrConflictingAtomicInputs, err) } if !rules.IsApricotPhase5 { return } - extraData, err = vm.codec.Marshal(codecVersion, []*Tx{importTxs[2], conflictTxs[2]}) + extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{importTxs[2], conflictTxs[2]}) if err != nil { t.Fatal(err) } @@ -1008,25 +1009,25 @@ func testConflictingImportTxs(t *testing.T, genesis string) { t.Fatal(err) } - if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) { - t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err) + if err := parsedBlock.Verify(context.Background()); !errors.Is(err, atomic.ErrConflictingAtomicInputs) { + t.Fatalf("Expected to fail with err: %s, but found err: %s", atomic.ErrConflictingAtomicInputs, err) } } func TestReissueAtomicTxHigherGasPrice(t *testing.T) { kc := secp256k1fx.NewKeychain(testKeys...) 
- for name, issueTxs := range map[string]func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) (issued []*Tx, discarded []*Tx){ - "single UTXO override": func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) (issued []*Tx, evicted []*Tx) { + for name, issueTxs := range map[string]func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, discarded []*atomic.Tx){ + "single UTXO override": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { utxo, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx1, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) + tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } - tx2, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo}) + tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } @@ -1038,9 +1039,9 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - return []*Tx{tx2}, []*Tx{tx1} + return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} }, - "one of two UTXOs overrides": func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) (issued []*Tx, evicted []*Tx) { + "one of two UTXOs overrides": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) if err != nil { t.Fatal(err) @@ -1049,11 +1050,11 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { if err != 
nil { t.Fatal(err) } - tx1, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1, utxo2}) + tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1, utxo2}) if err != nil { t.Fatal(err) } - tx2, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo1}) + tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo1}) if err != nil { t.Fatal(err) } @@ -1065,9 +1066,9 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - return []*Tx{tx2}, []*Tx{tx1} + return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} }, - "hola": func(t *testing.T, vm *VM, sharedMemory *atomic.Memory) (issued []*Tx, evicted []*Tx) { + "hola": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) if err != nil { t.Fatal(err) @@ -1077,17 +1078,17 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - importTx1, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1}) + importTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1}) if err != nil { t.Fatal(err) } - importTx2, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(3), initialBaseFee), kc, []*avax.UTXO{utxo2}) + importTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(3), initialBaseFee), kc, []*avax.UTXO{utxo2}) if err != nil { t.Fatal(err) } - reissuanceTx1, 
err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(2), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) + reissuanceTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(2), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) if err != nil { t.Fatal(err) } @@ -1107,7 +1108,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { assert.True(t, vm.mempool.Has(importTx2.ID())) assert.False(t, vm.mempool.Has(reissuanceTx1.ID())) - reissuanceTx2, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(4), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) + reissuanceTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(4), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) if err != nil { t.Fatal(err) } @@ -1115,7 +1116,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - return []*Tx{reissuanceTx2}, []*Tx{importTx1, importTx2} + return []*atomic.Tx{reissuanceTx2}, []*atomic.Tx{importTx1, importTx2} }, } { t.Run(name, func(t *testing.T) { @@ -1536,14 +1537,14 @@ func TestBonusBlocksTxs(t *testing.T) { }, }, } - utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ @@ -1573,7 +1574,7 @@ func TestBonusBlocksTxs(t *testing.T) { vm.atomicBackend.(*atomicBackend).bonusBlocks = map[uint64]ids.ID{blk.Height(): blk.ID()} // Remove the UTXOs 
from shared memory, so that non-bonus blocks will fail verification - if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil { + if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil { t.Fatal(err) } @@ -3073,10 +3074,10 @@ func TestBuildInvalidBlockHead(t *testing.T) { addr0 := key0.PublicKey().Address() // Create the transaction - utx := &UnsignedImportTx{ + utx := &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, - Outs: []EVMOutput{{ + Outs: []atomic.EVMOutput{{ Address: common.Address(addr0), Amount: 1 * units.Avax, AssetID: vm.ctx.AVAXAssetID, @@ -3094,8 +3095,8 @@ func TestBuildInvalidBlockHead(t *testing.T) { }, SourceChain: vm.ctx.XChainID, } - tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{key0}}); err != nil { + tx := &atomic.Tx{UnsignedAtomicTx: utx} + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key0}}); err != nil { t.Fatal(err) } @@ -3231,14 +3232,14 @@ func TestBuildApricotPhase4Block(t *testing.T) { }, }, } - utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ @@ -3401,14 +3402,14 @@ func TestBuildApricotPhase5Block(t *testing.T) { }, }, } - utxoBytes, err := vm.codec.Marshal(codecVersion, utxo) + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) if err != nil { t.Fatal(err) } 
xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ @@ -3678,7 +3679,7 @@ func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { utxo, err := addUTXO(sharedMemory, vm.ctx, txID, uint32(i), vm.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) assert.NoError(t, err) - importTx, err := vm.newImportTxWithUTXOs(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) + importTx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } @@ -3756,7 +3757,7 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { validEthBlock := blk1.(*chain.BlockWrapper).Block.(*Block).ethBlock - extraData, err := vm2.codec.Marshal(codecVersion, []*Tx{importTx}) + extraData, err := atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{importTx}) if err != nil { t.Fatal(err) } From 9c3e28448d56607c5a10ee3e81d2183b9d4b39d7 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 11 Dec 2024 18:16:11 +0300 Subject: [PATCH 02/91] bump avago --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 2a6106661b..ea7f6cc744 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa + github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1 github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 9ddf41790c..435d1113c8 
100644 --- a/go.sum +++ b/go.sum @@ -56,6 +56,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa h1:8eSy+tegp9Kq2zft54wk0FyWU87utdrVwsj9EBIb/NA= github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa/go.mod h1:256D2s2FIKo07uUeY25uDXFuqBo6TeWIJqeEA+Xchwk= +github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1 h1:3Zqc3TxHt6gsdSFD/diW2f2jT2oCx0rppN7yoXxviQg= +github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1/go.mod h1:Wxl57pLTlR/8pkaNtou8HiynG+xdgiF4YnzFuJyqSDg= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From ddbbab792ca1a938cb6e69ec190e3ce367761d31 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 11 Dec 2024 19:16:18 +0300 Subject: [PATCH 03/91] bump versions --- scripts/versions.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/versions.sh b/scripts/versions.sh index 33282d011a..ce7b1cdb05 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -6,4 +6,4 @@ set -euo pipefail # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'1dc4192013aa'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'f3ca1a0f8bb1'} From 0ecd789f5e552af0aa85e9e048a8652a4e8c9849 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sat, 14 Dec 2024 01:18:08 +0300 Subject: [PATCH 04/91] move attomic gossip --- peer/network.go | 22 +-- peer/network_test.go | 71 +++----- plugin/evm/admin.go | 6 +- plugin/evm/atomic/gossip.go | 35 ++++ plugin/evm/atomic/gossip_test.go | 120 ++++++++++++++ plugin/evm/{ 
=> atomic}/mempool.go | 121 +++++++------- plugin/evm/{ => atomic}/mempool_test.go | 7 +- plugin/evm/{ => atomic}/test_tx.go | 88 ++++++---- plugin/evm/atomic/tx_heap.go | 163 +++++++++++++++++++ plugin/evm/atomic/tx_heap_test.go | 142 ++++++++++++++++ plugin/evm/atomic_syncer_test.go | 5 +- plugin/evm/atomic_trie_iterator_test.go | 16 +- plugin/evm/atomic_trie_test.go | 36 ++-- plugin/evm/atomic_tx_repository_test.go | 14 +- plugin/evm/block.go | 3 +- plugin/evm/block_builder.go | 3 +- plugin/evm/client/client.go | 17 +- plugin/evm/{ => config}/config.go | 19 ++- plugin/evm/{ => config}/config_test.go | 2 +- plugin/evm/config/constants.go | 8 + plugin/evm/export_tx_test.go | 2 +- plugin/evm/gossip.go | 47 ++---- plugin/evm/gossip_stats.go | 67 -------- plugin/evm/gossip_test.go | 109 ------------- plugin/evm/gossiper_atomic_gossiping_test.go | 54 ++++-- plugin/evm/handler.go | 140 ---------------- plugin/evm/mempool_atomic_gossiping_test.go | 44 +++-- plugin/evm/message/codec.go | 10 +- plugin/evm/message/handler.go | 25 +-- plugin/evm/message/handler_test.go | 62 ------- plugin/evm/message/message.go | 78 --------- plugin/evm/message/message_test.go | 78 --------- plugin/evm/service.go | 6 +- plugin/evm/tx_gossip_test.go | 27 ++- plugin/evm/vm.go | 55 +++---- plugin/evm/vm_test.go | 19 ++- 36 files changed, 811 insertions(+), 910 deletions(-) create mode 100644 plugin/evm/atomic/gossip.go create mode 100644 plugin/evm/atomic/gossip_test.go rename plugin/evm/{ => atomic}/mempool.go (86%) rename plugin/evm/{ => atomic}/mempool_test.go (92%) rename plugin/evm/{ => atomic}/test_tx.go (74%) create mode 100644 plugin/evm/atomic/tx_heap.go create mode 100644 plugin/evm/atomic/tx_heap_test.go rename plugin/evm/{ => config}/config.go (95%) rename plugin/evm/{ => config}/config_test.go (99%) create mode 100644 plugin/evm/config/constants.go delete mode 100644 plugin/evm/gossip_stats.go delete mode 100644 plugin/evm/handler.go delete mode 100644 
plugin/evm/message/handler_test.go delete mode 100644 plugin/evm/message/message.go delete mode 100644 plugin/evm/message/message_test.go diff --git a/peer/network.go b/peer/network.go index 7739e279bc..ebe067fe0c 100644 --- a/peer/network.go +++ b/peer/network.go @@ -56,9 +56,6 @@ type Network interface { // by calling OnPeerConnected for each peer Shutdown() - // SetGossipHandler sets the provided gossip handler as the gossip handler - SetGossipHandler(handler message.GossipHandler) - // SetRequestHandler sets the provided request handler as the request handler SetRequestHandler(handler message.RequestHandler) @@ -87,7 +84,6 @@ type network struct { appSender common.AppSender // avalanchego AppSender for sending messages codec codec.Manager // Codec used for parsing messages appRequestHandler message.RequestHandler // maps request type => handler - gossipHandler message.GossipHandler // maps gossip type => handler peers *peerTracker // tracking of peers & bandwidth appStats stats.RequestHandlerStats // Provide request handler metrics @@ -110,7 +106,6 @@ func NewNetwork(p2pNetwork *p2p.Network, appSender common.AppSender, codec codec outstandingRequestHandlers: make(map[uint32]message.ResponseHandler), activeAppRequests: semaphore.NewWeighted(maxActiveAppRequests), p2pNetwork: p2pNetwork, - gossipHandler: message.NoopMempoolGossipHandler{}, appRequestHandler: message.NoopRequestHandler{}, peers: NewPeerTracker(), appStats: stats.NewRequestHandlerStats(), @@ -345,14 +340,8 @@ func (n *network) markRequestFulfilled(requestID uint32) (message.ResponseHandle // from a peer. An error returned by this function is treated as fatal by the // engine. 
func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { - var gossipMsg message.GossipMessage - if _, err := n.codec.Unmarshal(gossipBytes, &gossipMsg); err != nil { - log.Debug("forwarding AppGossip to SDK network", "nodeID", nodeID, "gossipLen", len(gossipBytes), "err", err) - return n.p2pNetwork.AppGossip(ctx, nodeID, gossipBytes) - } - - log.Debug("processing AppGossip from node", "nodeID", nodeID, "msg", gossipMsg) - return gossipMsg.Handle(n.gossipHandler, nodeID) + log.Debug("forwarding AppGossip to SDK network", "nodeID", nodeID, "gossipLen", len(gossipBytes)) + return n.p2pNetwork.AppGossip(ctx, nodeID, gossipBytes) } // Connected adds the given nodeID to the peer list so that it can receive messages @@ -407,13 +396,6 @@ func (n *network) Shutdown() { n.closed.Set(true) // mark network as closed } -func (n *network) SetGossipHandler(handler message.GossipHandler) { - n.lock.Lock() - defer n.lock.Unlock() - - n.gossipHandler = handler -} - func (n *network) SetRequestHandler(handler message.RequestHandler) { n.lock.Lock() defer n.lock.Unlock() diff --git a/peer/network_test.go b/peer/network_test.go index c792cf9064..3d13c6e679 100644 --- a/peer/network_test.go +++ b/peer/network_test.go @@ -8,7 +8,7 @@ import ( "errors" "fmt" "sync" - "sync/atomic" + syncatomic "sync/atomic" "testing" "time" @@ -37,6 +37,8 @@ var ( Patch: 0, } + codecVersion uint16 = 0 + _ message.Request = &HelloRequest{} _ = &HelloResponse{} _ = &GreetingRequest{} @@ -46,9 +48,7 @@ var ( _ message.RequestHandler = &HelloGreetingRequestHandler{} _ message.RequestHandler = &testRequestHandler{} - _ common.AppSender = testAppSender{} - _ message.GossipMessage = HelloGossip{} - _ message.GossipHandler = &testGossipHandler{} + _ common.AppSender = testAppSender{} _ p2p.Handler = &testSDKHandler{} ) @@ -85,7 +85,7 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { if err := net.AppResponse(context.Background(), nodeID, requestID, 
responseBytes); err != nil { panic(err) } - atomic.AddUint32(&callNum, 1) + syncatomic.AddUint32(&callNum, 1) }() return nil }, @@ -130,7 +130,7 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { requestWg.Wait() senderWg.Wait() - assert.Equal(t, totalCalls, int(atomic.LoadUint32(&callNum))) + assert.Equal(t, totalCalls, int(syncatomic.LoadUint32(&callNum))) } func TestAppRequestOnCtxCancellation(t *testing.T) { @@ -190,7 +190,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { if err := net.AppResponse(context.Background(), nodeID, requestID, responseBytes); err != nil { panic(err) } - atomic.AddUint32(&callNum, 1) + syncatomic.AddUint32(&callNum, 1) }() return nil }, @@ -245,7 +245,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { requestWg.Wait() senderWg.Wait() - assert.Equal(t, totalCalls, int(atomic.LoadUint32(&callNum))) + assert.Equal(t, totalCalls, int(syncatomic.LoadUint32(&callNum))) for _, nodeID := range nodes { if _, exists := contactedNodes[nodeID]; !exists { t.Fatalf("expected nodeID %s to be contacted but was not", nodeID) @@ -386,14 +386,14 @@ func TestRequestMinVersion(t *testing.T) { var net Network sender := testAppSender{ sendAppRequestFn: func(_ context.Context, nodes set.Set[ids.NodeID], reqID uint32, messageBytes []byte) error { - atomic.AddUint32(&callNum, 1) + syncatomic.AddUint32(&callNum, 1) assert.True(t, nodes.Contains(nodeID), "request nodes should contain expected nodeID") assert.Len(t, nodes, 1, "request nodes should contain exactly one node") go func() { time.Sleep(200 * time.Millisecond) - atomic.AddUint32(&callNum, 1) - responseBytes, err := codecManager.Marshal(message.Version, TestMessage{Message: "this is a response"}) + syncatomic.AddUint32(&callNum, 1) + responseBytes, err := codecManager.Marshal(codecVersion, TestMessage{Message: "this is a response"}) if err != nil { panic(err) } @@ -503,7 +503,6 @@ func TestHandleInvalidMessages(t *testing.T) { p2pNetwork, err := 
p2p.NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") require.NoError(t, err) clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, ids.EmptyNodeID, 1) - clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{}) assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) @@ -511,7 +510,8 @@ func TestHandleInvalidMessages(t *testing.T) { defer clientNetwork.Shutdown() // Ensure a valid gossip message sent as any App specific message type does not trigger a fatal error - gossipMsg, err := buildGossip(codecManager, HelloGossip{Msg: "hello there!"}) + marshaller := helloGossipMarshaller{codec: codecManager} + gossipMsg, err := marshaller.MarshalGossip(&HelloGossip{Msg: "hello there!"}) assert.NoError(t, err) // Ensure a valid request message sent as any App specific message type does not trigger a fatal error @@ -552,7 +552,6 @@ func TestNetworkPropagatesRequestHandlerError(t *testing.T) { p2pNetwork, err := p2p.NewNetwork(logging.NoLog{}, nil, prometheus.NewRegistry(), "") require.NoError(t, err) clientNetwork := NewNetwork(p2pNetwork, sender, codecManager, ids.EmptyNodeID, 1) - clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{err: errors.New("fail")}) // Return an error from the request handler assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) @@ -615,18 +614,14 @@ func buildCodec(t *testing.T, types ...interface{}) codec.Manager { for _, typ := range types { assert.NoError(t, c.RegisterType(typ)) } - assert.NoError(t, codecManager.RegisterCodec(message.Version, c)) + assert.NoError(t, codecManager.RegisterCodec(codecVersion, c)) return codecManager } // marshalStruct is a helper method used to marshal an object as `interface{}` // so that the codec is able to include the TypeID in the resulting bytes func marshalStruct(codec 
codec.Manager, obj interface{}) ([]byte, error) { - return codec.Marshal(message.Version, &obj) -} - -func buildGossip(codec codec.Manager, msg message.GossipMessage) ([]byte, error) { - return codec.Marshal(message.Version, &msg) + return codec.Marshal(codecVersion, &obj) } type testAppSender struct { @@ -696,11 +691,11 @@ type HelloGreetingRequestHandler struct { } func (h *HelloGreetingRequestHandler) HandleHelloRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, request *HelloRequest) ([]byte, error) { - return h.codec.Marshal(message.Version, HelloResponse{Response: "Hi"}) + return h.codec.Marshal(codecVersion, HelloResponse{Response: "Hi"}) } func (h *HelloGreetingRequestHandler) HandleGreetingRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, request *GreetingRequest) ([]byte, error) { - return h.codec.Marshal(message.Version, GreetingResponse{Greet: "Hey there"}) + return h.codec.Marshal(codecVersion, GreetingResponse{Greet: "Hey there"}) } type TestMessage struct { @@ -719,34 +714,22 @@ type HelloGossip struct { Msg string `serialize:"true"` } -func (h HelloGossip) Handle(handler message.GossipHandler, nodeID ids.NodeID) error { - return handler.HandleEthTxs(nodeID, message.EthTxsGossip{}) -} - -func (h HelloGossip) String() string { - return fmt.Sprintf("HelloGossip(%s)", h.Msg) -} - -func (h HelloGossip) Bytes() []byte { - // no op - return nil +func (tx *HelloGossip) GossipID() ids.ID { + return ids.FromStringOrPanic(tx.Msg) } -type testGossipHandler struct { - received bool - nodeID ids.NodeID +type helloGossipMarshaller struct { + codec codec.Manager } -func (t *testGossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg message.AtomicTxGossip) error { - t.received = true - t.nodeID = nodeID - return nil +func (g helloGossipMarshaller) MarshalGossip(tx *HelloGossip) ([]byte, error) { + return g.codec.Marshal(0, tx) } -func (t *testGossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip) error { - t.received = 
true - t.nodeID = nodeID - return nil +func (g helloGossipMarshaller) UnmarshalGossip(bytes []byte) (*HelloGossip, error) { + h := &HelloGossip{} + _, err := g.codec.Unmarshal(bytes, h) + return h, err } type testRequestHandler struct { diff --git a/plugin/evm/admin.go b/plugin/evm/admin.go index e90be473a7..34595a0b0e 100644 --- a/plugin/evm/admin.go +++ b/plugin/evm/admin.go @@ -78,11 +78,7 @@ func (p *Admin) SetLogLevel(_ *http.Request, args *client.SetLogLevelArgs, reply return nil } -type ConfigReply struct { - Config *Config `json:"config"` -} - -func (p *Admin) GetVMConfig(_ *http.Request, _ *struct{}, reply *ConfigReply) error { +func (p *Admin) GetVMConfig(_ *http.Request, _ *struct{}, reply *client.ConfigReply) error { reply.Config = &p.vm.config return nil } diff --git a/plugin/evm/atomic/gossip.go b/plugin/evm/atomic/gossip.go new file mode 100644 index 0000000000..2c6cb35da2 --- /dev/null +++ b/plugin/evm/atomic/gossip.go @@ -0,0 +1,35 @@ +// (c) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/gossip" +) + +var ( + _ gossip.Gossipable = (*GossipAtomicTx)(nil) + _ gossip.Marshaller[*GossipAtomicTx] = (*GossipAtomicTxMarshaller)(nil) +) + +type GossipAtomicTxMarshaller struct{} + +func (g GossipAtomicTxMarshaller) MarshalGossip(tx *GossipAtomicTx) ([]byte, error) { + return tx.Tx.SignedBytes(), nil +} + +func (g GossipAtomicTxMarshaller) UnmarshalGossip(bytes []byte) (*GossipAtomicTx, error) { + tx, err := ExtractAtomicTx(bytes, Codec) + return &GossipAtomicTx{ + Tx: tx, + }, err +} + +type GossipAtomicTx struct { + Tx *Tx +} + +func (tx *GossipAtomicTx) GossipID() ids.ID { + return tx.Tx.ID() +} diff --git a/plugin/evm/atomic/gossip_test.go b/plugin/evm/atomic/gossip_test.go new file mode 100644 index 0000000000..edd88bae18 --- /dev/null +++ b/plugin/evm/atomic/gossip_test.go @@ -0,0 +1,120 @@ +// (c) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" +) + +func TestGossipAtomicTxMarshaller(t *testing.T) { + require := require.New(t) + + want := &GossipAtomicTx{ + Tx: &Tx{ + UnsignedAtomicTx: &UnsignedImportTx{}, + Creds: []verify.Verifiable{}, + }, + } + marshaller := GossipAtomicTxMarshaller{} + + key0, err := secp256k1.NewPrivateKey() + require.NoError(err) + require.NoError(want.Tx.Sign(Codec, [][]*secp256k1.PrivateKey{{key0}})) + + bytes, err := marshaller.MarshalGossip(want) + require.NoError(err) + + got, err := marshaller.UnmarshalGossip(bytes) + require.NoError(err) + require.Equal(want.GossipID(), got.GossipID()) +} + +func TestAtomicMempoolIterate(t *testing.T) { + txs := []*GossipAtomicTx{ + { + Tx: &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + }, + { + Tx: &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + }, + } + + tests := []struct { + name string + add []*GossipAtomicTx + f func(tx *GossipAtomicTx) bool + possibleValues []*GossipAtomicTx + expectedLen int + }{ + { + name: "func matches nothing", + add: txs, + f: func(*GossipAtomicTx) bool { + return false + }, + possibleValues: nil, + }, + { + name: "func matches all", + add: txs, + f: func(*GossipAtomicTx) bool { + return true + }, + possibleValues: txs, + expectedLen: 2, + }, + { + name: "func matches subset", + add: txs, + f: func(tx *GossipAtomicTx) bool { + return tx.Tx == txs[0].Tx + }, + possibleValues: txs, + expectedLen: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 10, nil) + require.NoError(err) + + for _, add := 
range tt.add { + require.NoError(m.Add(add)) + } + + matches := make([]*GossipAtomicTx, 0) + f := func(tx *GossipAtomicTx) bool { + match := tt.f(tx) + + if match { + matches = append(matches, tx) + } + + return match + } + + m.Iterate(f) + + require.Len(matches, tt.expectedLen) + require.Subset(tt.possibleValues, matches) + }) + } +} diff --git a/plugin/evm/mempool.go b/plugin/evm/atomic/mempool.go similarity index 86% rename from plugin/evm/mempool.go rename to plugin/evm/atomic/mempool.go index acb8db4e3f..69e1e509b6 100644 --- a/plugin/evm/mempool.go +++ b/plugin/evm/atomic/mempool.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "errors" @@ -15,7 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ethereum/go-ethereum/log" ) @@ -24,8 +24,11 @@ const ( ) var ( - errTxAlreadyKnown = errors.New("tx already known") - errNoGasUsed = errors.New("no gas used") + errTxAlreadyKnown = errors.New("tx already known") + errNoGasUsed = errors.New("no gas used") + ErrConflictingAtomicTx = errors.New("conflicting atomic tx present") + ErrInsufficientAtomicTxFee = errors.New("atomic tx fee too low for atomic mempool") + ErrTooManyAtomicTx = errors.New("too many atomic tx") _ gossip.Set[*GossipAtomicTx] = (*Mempool)(nil) ) @@ -59,12 +62,12 @@ type Mempool struct { // maxSize is the maximum number of transactions allowed to be kept in mempool maxSize int // currentTxs is the set of transactions about to be added to a block. 
- currentTxs map[ids.ID]*atomic.Tx + currentTxs map[ids.ID]*Tx // issuedTxs is the set of transactions that have been issued into a new block - issuedTxs map[ids.ID]*atomic.Tx + issuedTxs map[ids.ID]*Tx // discardedTxs is an LRU Cache of transactions that have been discarded after failing // verification. - discardedTxs *cache.LRU[ids.ID, *atomic.Tx] + discardedTxs *cache.LRU[ids.ID, *Tx] // Pending is a channel of length one, which the mempool ensures has an item on // it as long as there is an unissued transaction remaining in [txs] Pending chan struct{} @@ -72,31 +75,35 @@ type Mempool struct { // NOTE: [txHeap] ONLY contains pending txs txHeap *txHeap // utxoSpenders maps utxoIDs to the transaction consuming them in the mempool - utxoSpenders map[ids.ID]*atomic.Tx + utxoSpenders map[ids.ID]*Tx // bloom is a bloom filter containing the txs in the mempool bloom *gossip.BloomFilter metrics *mempoolMetrics - verify func(tx *atomic.Tx) error + verify func(tx *Tx) error } // NewMempool returns a Mempool with [maxSize] -func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *atomic.Tx) error) (*Mempool, error) { - bloom, err := gossip.NewBloomFilter(registerer, "atomic_mempool_bloom_filter", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) +func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *Tx) error) (*Mempool, error) { + bloom, err := gossip.NewBloomFilter(registerer, "atomic_mempool_bloom_filter", + config.TxGossipBloomMinTargetElements, + config.TxGossipBloomTargetFalsePositiveRate, + config.TxGossipBloomResetFalsePositiveRate, + ) if err != nil { return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) } return &Mempool{ ctx: ctx, - issuedTxs: make(map[ids.ID]*atomic.Tx), - discardedTxs: &cache.LRU[ids.ID, *atomic.Tx]{Size: discardedTxsCacheSize}, - currentTxs: make(map[ids.ID]*atomic.Tx), + issuedTxs: 
make(map[ids.ID]*Tx), + discardedTxs: &cache.LRU[ids.ID, *Tx]{Size: discardedTxsCacheSize}, + currentTxs: make(map[ids.ID]*Tx), Pending: make(chan struct{}, 1), txHeap: newTxHeap(maxSize), maxSize: maxSize, - utxoSpenders: make(map[ids.ID]*atomic.Tx), + utxoSpenders: make(map[ids.ID]*Tx), bloom: bloom, metrics: newMempoolMetrics(), verify: verify, @@ -118,7 +125,7 @@ func (m *Mempool) length() int { // atomicTxGasPrice is the [gasPrice] paid by a transaction to burn a given // amount of [AVAXAssetID] given the value of [gasUsed]. -func (m *Mempool) atomicTxGasPrice(tx *atomic.Tx) (uint64, error) { +func (m *Mempool) atomicTxGasPrice(tx *Tx) (uint64, error) { gasUsed, err := tx.GasUsed(true) if err != nil { return 0, err @@ -137,35 +144,19 @@ func (m *Mempool) Add(tx *GossipAtomicTx) error { m.ctx.Lock.RLock() defer m.ctx.Lock.RUnlock() - m.lock.Lock() - defer m.lock.Unlock() - - err := m.addTx(tx.Tx, false) - if errors.Is(err, errTxAlreadyKnown) { - return err - } - - if err != nil { - txID := tx.Tx.ID() - m.discardedTxs.Put(txID, tx.Tx) - log.Debug("failed to issue remote tx to mempool", - "txID", txID, - "err", err, - ) - } - - return err + return m.AddRemoteTx(tx.Tx) } -// AddTx attempts to add [tx] to the mempool and returns an error if +// AddRemoteTx attempts to add [tx] to the mempool and returns an error if // it could not be added to the mempool. 
-func (m *Mempool) AddTx(tx *atomic.Tx) error { +func (m *Mempool) AddRemoteTx(tx *Tx) error { m.lock.Lock() defer m.lock.Unlock() - err := m.addTx(tx, false) + err := m.addTx(tx, false, false) + // Do not attempt to discard the tx if it was already known if errors.Is(err, errTxAlreadyKnown) { - return nil + return err } if err != nil { @@ -181,11 +172,11 @@ func (m *Mempool) AddTx(tx *atomic.Tx) error { return err } -func (m *Mempool) AddLocalTx(tx *atomic.Tx) error { +func (m *Mempool) AddLocalTx(tx *Tx) error { m.lock.Lock() defer m.lock.Unlock() - err := m.addTx(tx, false) + err := m.addTx(tx, true, false) if errors.Is(err, errTxAlreadyKnown) { return nil } @@ -193,29 +184,24 @@ func (m *Mempool) AddLocalTx(tx *atomic.Tx) error { return err } -// forceAddTx forcibly adds a *atomic.Tx to the mempool and bypasses all verification. -func (m *Mempool) ForceAddTx(tx *atomic.Tx) error { +// forceAddTx forcibly adds a *Tx to the mempool and bypasses all verification. +func (m *Mempool) ForceAddTx(tx *Tx) error { m.lock.Lock() defer m.lock.Unlock() - err := m.addTx(tx, true) - if errors.Is(err, errTxAlreadyKnown) { - return nil - } - - return nil + return m.addTx(tx, true, true) } // checkConflictTx checks for any transactions in the mempool that spend the same input UTXOs as [tx]. // If any conflicts are present, it returns the highest gas price of any conflicting transaction, the // txID of the corresponding tx and the full list of transactions that conflict with [tx]. 
-func (m *Mempool) checkConflictTx(tx *atomic.Tx) (uint64, ids.ID, []*atomic.Tx, error) { +func (m *Mempool) checkConflictTx(tx *Tx) (uint64, ids.ID, []*Tx, error) { utxoSet := tx.InputUTXOs() var ( - highestGasPrice uint64 = 0 - conflictingTxs []*atomic.Tx = make([]*atomic.Tx, 0) - highestGasPriceConflictTxID ids.ID = ids.ID{} + highestGasPrice uint64 = 0 + conflictingTxs []*Tx = make([]*Tx, 0) + highestGasPriceConflictTxID ids.ID = ids.ID{} ) for utxoID := range utxoSet { // Get current gas price of the existing tx in the mempool @@ -240,7 +226,7 @@ func (m *Mempool) checkConflictTx(tx *atomic.Tx) (uint64, ids.ID, []*atomic.Tx, // addTx adds [tx] to the mempool. Assumes [m.lock] is held. // If [force], skips conflict checks within the mempool. -func (m *Mempool) addTx(tx *atomic.Tx, force bool) error { +func (m *Mempool) addTx(tx *Tx, local bool, force bool) error { txID := tx.ID() // If [txID] has already been issued or is in the currentTxs map // there's no need to add it. @@ -253,6 +239,11 @@ func (m *Mempool) addTx(tx *atomic.Tx, force bool) error { if _, exists := m.txHeap.Get(txID); exists { return fmt.Errorf("%w: tx %s is pending", errTxAlreadyKnown, tx.ID()) } + if !local { + if _, exists := m.discardedTxs.Get(txID); exists { + return fmt.Errorf("%w: tx %s was discarded", errTxAlreadyKnown, tx.ID()) + } + } if !force && m.verify != nil { if err := m.verify(tx); err != nil { return err @@ -271,7 +262,7 @@ func (m *Mempool) addTx(tx *atomic.Tx, force bool) error { if highestGasPrice >= gasPrice { return fmt.Errorf( "%w: issued tx (%s) gas price %d <= conflict tx (%s) gas price %d (%d total conflicts in mempool)", - errConflictingAtomicTx, + ErrConflictingAtomicTx, txID, gasPrice, highestGasPriceConflictTxID, @@ -296,7 +287,7 @@ func (m *Mempool) addTx(tx *atomic.Tx, force bool) error { if minGasPrice >= gasPrice { return fmt.Errorf( "%w currentMin=%d provided=%d", - errInsufficientAtomicTxFee, + ErrInsufficientAtomicTxFee, minGasPrice, gasPrice, ) @@ -306,7 
+297,7 @@ func (m *Mempool) addTx(tx *atomic.Tx, force bool) error { } else { // This could occur if we have used our entire size allowance on // transactions that are currently processing. - return errTooManyAtomicTx + return ErrTooManyAtomicTx } } @@ -330,7 +321,7 @@ func (m *Mempool) addTx(tx *atomic.Tx, force bool) error { } m.bloom.Add(&GossipAtomicTx{Tx: tx}) - reset, err := gossip.ResetBloomFilterIfNeeded(m.bloom, m.length()*txGossipBloomChurnMultiplier) + reset, err := gossip.ResetBloomFilterIfNeeded(m.bloom, m.length()*config.TxGossipBloomChurnMultiplier) if err != nil { return err } @@ -372,7 +363,7 @@ func (m *Mempool) GetFilter() ([]byte, []byte) { } // NextTx returns a transaction to be issued from the mempool. -func (m *Mempool) NextTx() (*atomic.Tx, bool) { +func (m *Mempool) NextTx() (*Tx, bool) { m.lock.Lock() defer m.lock.Unlock() @@ -392,7 +383,7 @@ func (m *Mempool) NextTx() (*atomic.Tx, bool) { // GetPendingTx returns the transaction [txID] and true if it is // currently in the [txHeap] waiting to be issued into a block. // Returns nil, false otherwise. -func (m *Mempool) GetPendingTx(txID ids.ID) (*atomic.Tx, bool) { +func (m *Mempool) GetPendingTx(txID ids.ID) (*Tx, bool) { m.lock.RLock() defer m.lock.RUnlock() @@ -402,7 +393,7 @@ func (m *Mempool) GetPendingTx(txID ids.ID) (*atomic.Tx, bool) { // GetTx returns the transaction [txID] if it was issued // by this node and returns whether it was dropped and whether // it exists. -func (m *Mempool) GetTx(txID ids.ID) (*atomic.Tx, bool, bool) { +func (m *Mempool) GetTx(txID ids.ID) (*Tx, bool, bool) { m.lock.RLock() defer m.lock.RUnlock() @@ -485,7 +476,7 @@ func (m *Mempool) CancelCurrentTxs() { // cancelTx removes [tx] from current transactions and moves it back into the // tx heap. // assumes the lock is held. 
-func (m *Mempool) cancelTx(tx *atomic.Tx) { +func (m *Mempool) cancelTx(tx *Tx) { // Add tx to heap sorted by gasPrice gasPrice, err := m.atomicTxGasPrice(tx) if err == nil { @@ -527,7 +518,7 @@ func (m *Mempool) DiscardCurrentTxs() { // discardCurrentTx discards [tx] from the set of current transactions. // Assumes the lock is held. -func (m *Mempool) discardCurrentTx(tx *atomic.Tx) { +func (m *Mempool) discardCurrentTx(tx *Tx) { m.removeSpenders(tx) m.discardedTxs.Put(tx.ID(), tx) delete(m.currentTxs, tx.ID()) @@ -541,7 +532,7 @@ func (m *Mempool) discardCurrentTx(tx *atomic.Tx) { // removeTx must be called for all conflicts before overwriting the utxoSpenders // map. // Assumes lock is held. -func (m *Mempool) removeTx(tx *atomic.Tx, discard bool) { +func (m *Mempool) removeTx(tx *Tx, discard bool) { txID := tx.ID() // Remove from [currentTxs], [txHeap], and [issuedTxs]. @@ -566,7 +557,7 @@ func (m *Mempool) removeTx(tx *atomic.Tx, discard bool) { // removeSpenders deletes the entries for all input UTXOs of [tx] from the // [utxoSpenders] map. // Assumes the lock is held. -func (m *Mempool) removeSpenders(tx *atomic.Tx) { +func (m *Mempool) removeSpenders(tx *Tx) { for utxoID := range tx.InputUTXOs() { delete(m.utxoSpenders, utxoID) } @@ -574,7 +565,7 @@ func (m *Mempool) removeSpenders(tx *atomic.Tx) { // RemoveTx removes [txID] from the mempool completely. // Evicts [tx] from the discarded cache if present. -func (m *Mempool) RemoveTx(tx *atomic.Tx) { +func (m *Mempool) RemoveTx(tx *Tx) { m.lock.Lock() defer m.lock.Unlock() diff --git a/plugin/evm/mempool_test.go b/plugin/evm/atomic/mempool_test.go similarity index 92% rename from plugin/evm/mempool_test.go rename to plugin/evm/atomic/mempool_test.go index 8129edc577..9334853a5e 100644 --- a/plugin/evm/mempool_test.go +++ b/plugin/evm/atomic/mempool_test.go @@ -1,14 +1,13 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "testing" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) @@ -21,7 +20,7 @@ func TestMempoolAddTx(t *testing.T) { txs := make([]*GossipAtomicTx, 0) for i := 0; i < 3_000; i++ { tx := &GossipAtomicTx{ - Tx: &atomic.Tx{ + Tx: &Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), }, @@ -44,7 +43,7 @@ func TestMempoolAdd(t *testing.T) { require.NoError(err) tx := &GossipAtomicTx{ - Tx: &atomic.Tx{ + Tx: &Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), }, diff --git a/plugin/evm/test_tx.go b/plugin/evm/atomic/test_tx.go similarity index 74% rename from plugin/evm/test_tx.go rename to plugin/evm/atomic/test_tx.go index e001cb4dda..50af59e09f 100644 --- a/plugin/evm/test_tx.go +++ b/plugin/evm/atomic/test_tx.go @@ -1,25 +1,44 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "math/big" "math/rand" + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/chains/atomic" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic" ) +var TestTxCodec codec.Manager + +func init() { + TestTxCodec = codec.NewDefaultManager() + c := linearcodec.NewDefault() + + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&TestUnsignedTx{}), + c.RegisterType(&avalancheatomic.Element{}), + c.RegisterType(&avalancheatomic.Requests{}), + TestTxCodec.RegisterCodec(atomic.CodecVersion, c), + ) + + if errs.Errored() { + panic(errs.Err) + } +} + type TestUnsignedTx struct { GasUsedV uint64 `serialize:"true"` AcceptRequestsBlockchainIDV ids.ID `serialize:"true"` @@ -34,7 +53,7 @@ type TestUnsignedTx struct { EVMStateTransferV error } -var _ atomic.UnsignedAtomicTx = &TestUnsignedTx{} +var _ UnsignedAtomicTx = &TestUnsignedTx{} // GasUsed implements the UnsignedAtomicTx interface func (t *TestUnsignedTx) GasUsed(fixedFee bool) (uint64, error) { return t.GasUsedV, nil } @@ -66,40 +85,39 @@ func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } // SemanticVerify implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) SemanticVerify(backend *atomic.Backend, stx *atomic.Tx, parent atomic.AtomicBlockContext, baseFee *big.Int) error { +func (t *TestUnsignedTx) SemanticVerify(backend *Backend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error { return 
t.SemanticVerifyV } // EVMStateTransfer implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state atomic.StateDB) error { +func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state StateDB) error { return t.EVMStateTransferV } -func testTxCodec() codec.Manager { - codec := codec.NewDefaultManager() - c := linearcodec.NewDefault() +var TestBlockchainID = ids.GenerateTestID() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&TestUnsignedTx{}), - c.RegisterType(&avalancheatomic.Element{}), - c.RegisterType(&avalancheatomic.Requests{}), - codec.RegisterCodec(atomic.CodecVersion, c), - ) - - if errs.Errored() { - panic(errs.Err) +func GenerateTestImportTxWithGas(gasUsed uint64, burned uint64) *Tx { + return &Tx{ + UnsignedAtomicTx: &TestUnsignedTx{ + IDV: ids.GenerateTestID(), + GasUsedV: gasUsed, + BurnedV: burned, + AcceptRequestsBlockchainIDV: TestBlockchainID, + AcceptRequestsV: &avalancheatomic.Requests{ + RemoveRequests: [][]byte{ + utils.RandomBytes(32), + utils.RandomBytes(32), + }, + }, + }, } - return codec } -var blockChainID = ids.GenerateTestID() - -func testDataImportTx() *atomic.Tx { - return &atomic.Tx{ +func GenerateTestImportTx() *Tx { + return &Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), - AcceptRequestsBlockchainIDV: blockChainID, + AcceptRequestsBlockchainIDV: TestBlockchainID, AcceptRequestsV: &avalancheatomic.Requests{ RemoveRequests: [][]byte{ utils.RandomBytes(32), @@ -110,11 +128,11 @@ func testDataImportTx() *atomic.Tx { } } -func testDataExportTx() *atomic.Tx { - return &atomic.Tx{ +func GenerateTestExportTx() *Tx { + return &Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), - AcceptRequestsBlockchainIDV: blockChainID, + AcceptRequestsBlockchainIDV: TestBlockchainID, AcceptRequestsV: &avalancheatomic.Requests{ PutRequests: []*avalancheatomic.Element{ { @@ -131,22 +149,22 @@ func testDataExportTx() *atomic.Tx { } } -func newTestTx() 
*atomic.Tx { +func NewTestTx() *Tx { txType := rand.Intn(2) switch txType { case 0: - return testDataImportTx() + return GenerateTestImportTx() case 1: - return testDataExportTx() + return GenerateTestExportTx() default: panic("rng generated unexpected value for tx type") } } -func newTestTxs(numTxs int) []*atomic.Tx { - txs := make([]*atomic.Tx, 0, numTxs) +func NewTestTxs(numTxs int) []*Tx { + txs := make([]*Tx, 0, numTxs) for i := 0; i < numTxs; i++ { - txs = append(txs, newTestTx()) + txs = append(txs, NewTestTx()) } return txs diff --git a/plugin/evm/atomic/tx_heap.go b/plugin/evm/atomic/tx_heap.go new file mode 100644 index 0000000000..58cbcf0c0b --- /dev/null +++ b/plugin/evm/atomic/tx_heap.go @@ -0,0 +1,163 @@ +// (c) 2020-2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "container/heap" + + "github.com/ava-labs/avalanchego/ids" +) + +// txEntry is used to track the [gasPrice] transactions pay to be included in +// the mempool. 
+type txEntry struct { + id ids.ID + gasPrice uint64 + tx *Tx + index int +} + +// internalTxHeap is used to track pending atomic transactions by [gasPrice] +type internalTxHeap struct { + isMinHeap bool + items []*txEntry + lookup map[ids.ID]*txEntry +} + +func newInternalTxHeap(items int, isMinHeap bool) *internalTxHeap { + return &internalTxHeap{ + isMinHeap: isMinHeap, + items: make([]*txEntry, 0, items), + lookup: map[ids.ID]*txEntry{}, + } +} + +func (th internalTxHeap) Len() int { return len(th.items) } + +func (th internalTxHeap) Less(i, j int) bool { + if th.isMinHeap { + return th.items[i].gasPrice < th.items[j].gasPrice + } + return th.items[i].gasPrice > th.items[j].gasPrice +} + +func (th internalTxHeap) Swap(i, j int) { + th.items[i], th.items[j] = th.items[j], th.items[i] + th.items[i].index = i + th.items[j].index = j +} + +func (th *internalTxHeap) Push(x interface{}) { + entry := x.(*txEntry) + if th.Has(entry.id) { + return + } + th.items = append(th.items, entry) + th.lookup[entry.id] = entry +} + +func (th *internalTxHeap) Pop() interface{} { + n := len(th.items) + item := th.items[n-1] + th.items[n-1] = nil // avoid memory leak + th.items = th.items[0 : n-1] + delete(th.lookup, item.id) + return item +} + +func (th *internalTxHeap) Get(id ids.ID) (*txEntry, bool) { + entry, ok := th.lookup[id] + if !ok { + return nil, false + } + return entry, true +} + +func (th *internalTxHeap) Has(id ids.ID) bool { + _, has := th.Get(id) + return has +} + +type txHeap struct { + maxHeap *internalTxHeap + minHeap *internalTxHeap +} + +func newTxHeap(maxSize int) *txHeap { + return &txHeap{ + maxHeap: newInternalTxHeap(maxSize, false), + minHeap: newInternalTxHeap(maxSize, true), + } +} + +func (th *txHeap) Push(tx *Tx, gasPrice uint64) { + txID := tx.ID() + oldLen := th.Len() + heap.Push(th.maxHeap, &txEntry{ + id: txID, + gasPrice: gasPrice, + tx: tx, + index: oldLen, + }) + heap.Push(th.minHeap, &txEntry{ + id: txID, + gasPrice: gasPrice, + tx: tx, + 
index: oldLen, + }) +} + +// Assumes there is non-zero items in [txHeap] +func (th *txHeap) PeekMax() (*Tx, uint64) { + txEntry := th.maxHeap.items[0] + return txEntry.tx, txEntry.gasPrice +} + +// Assumes there is non-zero items in [txHeap] +func (th *txHeap) PeekMin() (*Tx, uint64) { + txEntry := th.minHeap.items[0] + return txEntry.tx, txEntry.gasPrice +} + +// Assumes there is non-zero items in [txHeap] +func (th *txHeap) PopMax() *Tx { + return th.Remove(th.maxHeap.items[0].id) +} + +// Assumes there is non-zero items in [txHeap] +func (th *txHeap) PopMin() *Tx { + return th.Remove(th.minHeap.items[0].id) +} + +func (th *txHeap) Remove(id ids.ID) *Tx { + maxEntry, ok := th.maxHeap.Get(id) + if !ok { + return nil + } + heap.Remove(th.maxHeap, maxEntry.index) + + minEntry, ok := th.minHeap.Get(id) + if !ok { + // This should never happen, as that would mean the heaps are out of + // sync. + return nil + } + return heap.Remove(th.minHeap, minEntry.index).(*txEntry).tx +} + +func (th *txHeap) Len() int { + return th.maxHeap.Len() +} + +func (th *txHeap) Get(id ids.ID) (*Tx, bool) { + txEntry, ok := th.maxHeap.Get(id) + if !ok { + return nil, false + } + return txEntry.tx, true +} + +func (th *txHeap) Has(id ids.ID) bool { + return th.maxHeap.Has(id) +} diff --git a/plugin/evm/atomic/tx_heap_test.go b/plugin/evm/atomic/tx_heap_test.go new file mode 100644 index 0000000000..c9f602ccea --- /dev/null +++ b/plugin/evm/atomic/tx_heap_test.go @@ -0,0 +1,142 @@ +// (c) 2019-2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTxHeap(t *testing.T) { + var ( + tx0 = &Tx{ + UnsignedAtomicTx: &UnsignedImportTx{ + NetworkID: 0, + }, + } + tx0Bytes = []byte{0} + + tx1 = &Tx{ + UnsignedAtomicTx: &UnsignedImportTx{ + NetworkID: 1, + }, + } + tx1Bytes = []byte{1} + + tx2 = &Tx{ + UnsignedAtomicTx: &UnsignedImportTx{ + NetworkID: 2, + }, + } + tx2Bytes = []byte{2} + ) + tx0.Initialize(tx0Bytes, tx0Bytes) + tx1.Initialize(tx1Bytes, tx1Bytes) + tx2.Initialize(tx2Bytes, tx2Bytes) + + id0 := tx0.ID() + id1 := tx1.ID() + id2 := tx2.ID() + + t.Run("add/remove single entry", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + assert := assert.New(t) + h.Push(tx0, 5) + assert.True(h.Has(id0)) + gTx0, gHas0 := h.Get(id0) + assert.Equal(tx0, gTx0) + assert.True(gHas0) + h.Remove(id0) + assert.False(h.Has(id0)) + assert.Zero(h.Len()) + h.Push(tx0, 5) + assert.True(h.Has(id0)) + assert.Equal(1, h.Len()) + }) + + t.Run("add other items", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + assert := assert.New(t) + h.Push(tx1, 10) + assert.True(h.Has(id1)) + gTx1, gHas1 := h.Get(id1) + assert.Equal(tx1, gTx1) + assert.True(gHas1) + + h.Push(tx2, 2) + assert.True(h.Has(id2)) + gTx2, gHas2 := h.Get(id2) + assert.Equal(tx2, gTx2) + assert.True(gHas2) + + assert.Equal(id1, h.PopMax().ID()) + assert.Equal(id2, h.PopMax().ID()) + + assert.False(h.Has(id0)) + gTx0, gHas0 := h.Get(id0) + assert.Nil(gTx0) + assert.False(gHas0) + + assert.False(h.Has(id1)) + gTx1, gHas1 = h.Get(id1) + assert.Nil(gTx1) + assert.False(gHas1) + + assert.False(h.Has(id2)) + gTx2, gHas2 = h.Get(id2) + assert.Nil(gTx2) + assert.False(gHas2) + }) + + verifyRemovalOrder := func(t *testing.T, h *txHeap) { + t.Helper() + + assert := assert.New(t) + assert.Equal(id2, h.PopMin().ID()) + assert.True(h.Has(id0)) + assert.True(h.Has(id1)) + assert.False(h.Has(id2)) + assert.Equal(id0, h.PopMin().ID()) + 
assert.False(h.Has(id0)) + assert.True(h.Has(id1)) + assert.False(h.Has(id2)) + assert.Equal(id1, h.PopMin().ID()) + assert.False(h.Has(id0)) + assert.False(h.Has(id1)) + assert.False(h.Has(id2)) + } + + t.Run("drop", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + h.Push(tx0, 5) + h.Push(tx1, 10) + h.Push(tx2, 2) + verifyRemovalOrder(t, h) + }) + t.Run("drop (alt order)", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + h.Push(tx0, 5) + h.Push(tx2, 2) + h.Push(tx1, 10) + verifyRemovalOrder(t, h) + }) + t.Run("drop (alt order 2)", func(t *testing.T) { + h := newTxHeap(3) + assert.Zero(t, h.Len()) + + h.Push(tx2, 2) + h.Push(tx0, 5) + h.Push(tx1, 10) + verifyRemovalOrder(t, h) + }) +} diff --git a/plugin/evm/atomic_syncer_test.go b/plugin/evm/atomic_syncer_test.go index 7540be1a32..86589cc4d8 100644 --- a/plugin/evm/atomic_syncer_test.go +++ b/plugin/evm/atomic_syncer_test.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/plugin/evm/message" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/handlers" @@ -64,7 +65,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight // next trie. for i, checkpoint := range checkpoints { // Create syncer targeting the current [syncTrie]. - syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, defaultStateSyncRequestSize) + syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, config.DefaultStateSyncRequestSize) if err != nil { t.Fatal(err) } @@ -91,7 +92,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight } // Create syncer targeting the current [targetRoot]. 
- syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, defaultStateSyncRequestSize) + syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, config.DefaultStateSyncRequestSize) if err != nil { t.Fatal(err) } diff --git a/plugin/evm/atomic_trie_iterator_test.go b/plugin/evm/atomic_trie_iterator_test.go index 922aed4cfc..50ba586ffd 100644 --- a/plugin/evm/atomic_trie_iterator_test.go +++ b/plugin/evm/atomic_trie_iterator_test.go @@ -6,7 +6,7 @@ package evm import ( "testing" - "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" @@ -14,24 +14,26 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/ava-labs/coreth/plugin/evm/atomic" ) -func testSharedMemory() atomic.SharedMemory { - m := atomic.NewMemory(memdb.New()) +func testSharedMemory() avalancheatomic.SharedMemory { + m := avalancheatomic.NewMemory(memdb.New()) return m.NewSharedMemory(testCChainID) } func TestIteratorCanIterate(t *testing.T) { lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) assert.NoError(t, err) // create state with multiple transactions // since each test transaction generates random ID for blockchainID we should get // multiple blockchain IDs per block in the overall combined atomic operation map - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) // create an atomic trie @@ -64,14 +66,14 @@ func TestIteratorHandlesInvalidData(t *testing.T) { require := 
require.New(t) lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) require.NoError(err) // create state with multiple transactions // since each test transaction generates random ID for blockchainID we should get // multiple blockchain IDs per block in the overall combined atomic operation map - operationsMap := make(map[uint64]map[ids.ID]*atomic.Requests) + operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) // create an atomic trie diff --git a/plugin/evm/atomic_trie_test.go b/plugin/evm/atomic_trie_test.go index 193226f588..2a82964e94 100644 --- a/plugin/evm/atomic_trie_test.go +++ b/plugin/evm/atomic_trie_test.go @@ -139,7 +139,7 @@ func TestAtomicTrieInitialize(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) if err != nil { t.Fatal(err) @@ -188,7 +188,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // during the initialization phase will cause an invalid root when indexing continues. 
nextCommitHeight := nearestCommitHeight(test.lastAcceptedHeight+test.commitInterval, test.commitInterval) for i := test.lastAcceptedHeight + 1; i <= nextCommitHeight; i++ { - txs := newTestTxs(test.numTxsPerBlock(i)) + txs := atomic.NewTestTxs(test.numTxsPerBlock(i)) if err := repo.Write(i, txs); err != nil { t.Fatal(err) } @@ -228,7 +228,7 @@ func TestAtomicTrieInitialize(t *testing.T) { func TestIndexerInitializesOnlyOnce(t *testing.T) { lastAcceptedHeight := uint64(25) db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -247,7 +247,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { // re-initialize the atomic trie since initialize is not supposed to run again the height // at the trie should still be the old height with the old commit hash without any changes. // This scenario is not realistic, but is used to test potential double initialization behavior. 
- err = repo.Write(15, []*atomic.Tx{testDataExportTx()}) + err = repo.Write(15, []*atomic.Tx{atomic.GenerateTestExportTx()}) assert.NoError(t, err) // Re-initialize the atomic trie @@ -262,7 +262,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { func newTestAtomicTrie(t *testing.T) AtomicTrie { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec(), 0) + repo, err := NewAtomicTxRepository(db, atomic.TestTxCodec, 0) if err != nil { t.Fatal(err) } @@ -282,7 +282,7 @@ func TestIndexerWriteAndRead(t *testing.T) { // process 305 blocks so that we get three commits (100, 200, 300) for height := uint64(1); height <= testCommitInterval*3+5; /*=305*/ height++ { - atomicRequests := mustAtomicOps(testDataImportTx()) + atomicRequests := mustAtomicOps(atomic.GenerateTestImportTx()) err := indexAtomicTxs(atomicTrie, height, atomicRequests) assert.NoError(t, err) if height%testCommitInterval == 0 { @@ -313,8 +313,8 @@ func TestAtomicOpsAreNotTxOrderDependent(t *testing.T) { atomicTrie2 := newTestAtomicTrie(t) for height := uint64(0); height <= testCommitInterval; /*=205*/ height++ { - tx1 := testDataImportTx() - tx2 := testDataImportTx() + tx1 := atomic.GenerateTestImportTx() + tx2 := atomic.GenerateTestImportTx() atomicRequests1, err := mergeAtomicOps([]*atomic.Tx{tx1, tx2}) assert.NoError(t, err) atomicRequests2, err := mergeAtomicOps([]*atomic.Tx{tx2, tx1}) @@ -339,7 +339,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { commitInterval := uint64(10) expectedCommitHeight := uint64(100) db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) if err != nil { t.Fatal(err) @@ -371,7 +371,7 @@ func TestIndexingNilShouldNotImpactTrie(t *testing.T) { // operations to index ops := make([]map[ids.ID]*avalancheatomic.Requests, 0) for i := 0; i <= testCommitInterval; i++ { - ops = append(ops, mustAtomicOps(testDataImportTx())) + ops = 
append(ops, mustAtomicOps(atomic.GenerateTestImportTx())) } // without nils @@ -511,9 +511,9 @@ func TestApplyToSharedMemory(t *testing.T) { commitInterval: 10, lastAcceptedHeight: 25, setMarker: func(a *atomicBackend) error { - cursor := make([]byte, wrappers.LongLen+len(blockChainID[:])) + cursor := make([]byte, wrappers.LongLen+len(atomic.TestBlockchainID[:])) binary.BigEndian.PutUint64(cursor, 10) - copy(cursor[wrappers.LongLen:], blockChainID[:]) + copy(cursor[wrappers.LongLen:], atomic.TestBlockchainID[:]) return a.metadataDB.Put(appliedSharedMemoryCursorKey, cursor) }, expectOpsApplied: func(height uint64) bool { return height > 10 && height <= 20 }, @@ -527,7 +527,7 @@ func TestApplyToSharedMemory(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -535,7 +535,7 @@ func TestApplyToSharedMemory(t *testing.T) { // Initialize atomic repository m := avalancheatomic.NewMemory(db) - sharedMemories := newSharedMemories(m, testCChainID, blockChainID) + sharedMemories := newSharedMemories(m, testCChainID, atomic.TestBlockchainID) backend, err := NewAtomicBackend(db, sharedMemories.thisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) atomicTrie := backend.AtomicTrie().(*atomicTrie) @@ -593,7 +593,7 @@ func TestApplyToSharedMemory(t *testing.T) { func BenchmarkAtomicTrieInit(b *testing.B) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -628,7 +628,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { func BenchmarkAtomicTrieIterate(b *testing.B) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec 
operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -707,7 +707,7 @@ func BenchmarkApplyToSharedMemory(b *testing.B) { func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks uint64) { db := versiondb.New(disk) - codec := testTxCodec() + codec := atomic.TestTxCodec sharedMemory := testSharedMemory() lastAcceptedHeight := blocks @@ -720,7 +720,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u } trie := backend.AtomicTrie() for height := uint64(1); height <= lastAcceptedHeight; height++ { - txs := newTestTxs(constTxsPerHeight(3)(height)) + txs := atomic.NewTestTxs(constTxsPerHeight(3)(height)) ops, err := mergeAtomicOps(txs) assert.NoError(b, err) assert.NoError(b, indexAtomicTxs(trie, height, ops)) diff --git a/plugin/evm/atomic_tx_repository_test.go b/plugin/evm/atomic_tx_repository_test.go index 091bcd8f56..224f8fa726 100644 --- a/plugin/evm/atomic_tx_repository_test.go +++ b/plugin/evm/atomic_tx_repository_test.go @@ -32,7 +32,7 @@ func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Datab for height := fromHeight; height < toHeight; height++ { txs := make([]*atomic.Tx, 0, txsPerHeight) for i := 0; i < txsPerHeight; i++ { - tx := newTestTx() + tx := atomic.NewTestTx() txs = append(txs, tx) txBytes, err := codec.Marshal(atomic.CodecVersion, tx) assert.NoError(t, err) @@ -74,7 +74,7 @@ func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight txsPerHeight func(height uint64) int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, ) { for height := fromHeight; height < toHeight; height++ { - txs := newTestTxs(txsPerHeight(height)) + txs := atomic.NewTestTxs(txsPerHeight(height)) if err := repo.Write(height, txs); err != nil { t.Fatal(err) } @@ -183,7 +183,7 @@ func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { db 
:= versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, 0) if err != nil { t.Fatal(err) @@ -196,7 +196,7 @@ func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec repo, err := NewAtomicTxRepository(db, codec, 0) if err != nil { t.Fatal(err) @@ -209,7 +209,7 @@ func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { func TestAtomicRepositoryPreAP5Migration(t *testing.T) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) txMap := make(map[uint64][]*atomic.Tx) @@ -234,7 +234,7 @@ func TestAtomicRepositoryPreAP5Migration(t *testing.T) { func TestAtomicRepositoryPostAP5Migration(t *testing.T) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) txMap := make(map[uint64][]*atomic.Tx) @@ -259,7 +259,7 @@ func TestAtomicRepositoryPostAP5Migration(t *testing.T) { func benchAtomicRepositoryIndex10_000(b *testing.B, maxHeight uint64, txsPerHeight int) { db := versiondb.New(memdb.New()) - codec := testTxCodec() + codec := atomic.TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) txMap := make(map[uint64][]*atomic.Tx) diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 99451cb071..9a2de32601 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -225,8 +225,9 @@ func (b *Block) handlePrecompileAccept(rules params.Rules) error { func (b *Block) Reject(context.Context) error { log.Debug(fmt.Sprintf("Rejecting block %s (%s) at height %d", b.ID().Hex(), b.ID(), b.Height())) for _, tx := range b.atomicTxs { + // Re-issue the transaction in the mempool, continue even if it fails b.vm.mempool.RemoveTx(tx) - if err := 
b.vm.mempool.AddTx(tx); err != nil { + if err := b.vm.mempool.AddRemoteTx(tx); err != nil { log.Debug("Failed to re-issue transaction in rejected block", "txID", tx.ID(), "err", err) } } diff --git a/plugin/evm/block_builder.go b/plugin/evm/block_builder.go index d8a1d07024..721561ff40 100644 --- a/plugin/evm/block_builder.go +++ b/plugin/evm/block_builder.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/holiman/uint256" "github.com/ava-labs/avalanchego/snow" @@ -29,7 +30,7 @@ type blockBuilder struct { chainConfig *params.ChainConfig txPool *txpool.TxPool - mempool *Mempool + mempool *atomic.Mempool shutdownChan <-chan struct{} shutdownWg *sync.WaitGroup diff --git a/plugin/evm/client/client.go b/plugin/evm/client/client.go index f92e55e59f..110036904e 100644 --- a/plugin/evm/client/client.go +++ b/plugin/evm/client/client.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/rpc" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" ) // Interface compliance @@ -40,7 +41,7 @@ type Client interface { MemoryProfile(ctx context.Context, options ...rpc.Option) error LockProfile(ctx context.Context, options ...rpc.Option) error SetLogLevel(ctx context.Context, level slog.Level, options ...rpc.Option) error - // GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) + GetVMConfig(ctx context.Context, options ...rpc.Option) (*config.Config, error) } // Client implementation for interacting with EVM [chain] @@ -299,9 +300,13 @@ func (c *client) SetLogLevel(ctx context.Context, level slog.Level, options ...r }, &api.EmptyReply{}, options...) 
} +type ConfigReply struct { + Config *config.Config `json:"config"` +} + // GetVMConfig returns the current config of the VM -// func (c *client) GetVMConfig(ctx context.Context, options ...rpc.Option) (*Config, error) { -// res := &ConfigReply{} -// err := c.adminRequester.SendRequest(ctx, "admin.getVMConfig", struct{}{}, res, options...) -// return res.Config, err -// } +func (c *client) GetVMConfig(ctx context.Context, options ...rpc.Option) (*config.Config, error) { + res := &ConfigReply{} + err := c.adminRequester.SendRequest(ctx, "admin.getVMConfig", struct{}{}, res, options...) + return res.Config, err +} diff --git a/plugin/evm/config.go b/plugin/evm/config/config.go similarity index 95% rename from plugin/evm/config.go rename to plugin/evm/config/config.go index c0fa3b0386..748f9e115a 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config/config.go @@ -1,13 +1,14 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package config import ( "encoding/json" "fmt" "time" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/coreth/core/txpool/legacypool" "github.com/ava-labs/coreth/eth" "github.com/ethereum/go-ethereum/common" @@ -59,7 +60,7 @@ const ( // - normal bootstrap processing time: ~14 blocks / second // - state sync time: ~6 hrs. 
defaultStateSyncMinBlocks = 300_000 - defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request + DefaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request ) var ( @@ -277,7 +278,7 @@ func (c *Config) SetDefaults() { c.StateSyncServerTrieCache = defaultStateSyncServerTrieCache c.StateSyncCommitInterval = defaultSyncableCommitInterval c.StateSyncMinBlocks = defaultStateSyncMinBlocks - c.StateSyncRequestSize = defaultStateSyncRequestSize + c.StateSyncRequestSize = DefaultStateSyncRequestSize c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize } @@ -302,7 +303,17 @@ func (d Duration) MarshalJSON() ([]byte, error) { } // Validate returns an error if this is an invalid config. -func (c *Config) Validate() error { +func (c *Config) Validate(networkID uint32) error { + // Ensure that non-standard commit interval is not allowed for production networks + if constants.ProductionNetworkIDs.Contains(networkID) { + if c.CommitInterval != defaultCommitInterval { + return fmt.Errorf("cannot start non-local network with commit interval %d", c.CommitInterval) + } + if c.StateSyncCommitInterval != defaultSyncableCommitInterval { + return fmt.Errorf("cannot start non-local network with syncable interval %d", c.StateSyncCommitInterval) + } + } + if c.PopulateMissingTries != nil && (c.OfflinePruning || c.Pruning) { return fmt.Errorf("cannot enable populate missing tries while offline pruning (enabled: %t)/pruning (enabled: %t) are enabled", c.OfflinePruning, c.Pruning) } diff --git a/plugin/evm/config_test.go b/plugin/evm/config/config_test.go similarity index 99% rename from plugin/evm/config_test.go rename to plugin/evm/config/config_test.go index 9a8384bf5d..ad13ebdfed 100644 --- a/plugin/evm/config_test.go +++ b/plugin/evm/config/config_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package config import ( "encoding/json" diff --git a/plugin/evm/config/constants.go b/plugin/evm/config/constants.go new file mode 100644 index 0000000000..2e47489f1c --- /dev/null +++ b/plugin/evm/config/constants.go @@ -0,0 +1,8 @@ +package config + +const ( + TxGossipBloomMinTargetElements = 8 * 1024 + TxGossipBloomTargetFalsePositiveRate = 0.01 + TxGossipBloomResetFalsePositiveRate = 0.05 + TxGossipBloomChurnMultiplier = 3 +) diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 3e2b0d2160..9bc1f498a9 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -1944,7 +1944,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - if err := vm.mempool.AddTx(tx); err != nil { + if err := vm.mempool.AddRemoteTx(tx); err != nil { t.Fatal(err) } diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go index d6b377d13a..16d632bd94 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/gossip.go @@ -1,13 +1,15 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
+// TODO: move to network + package evm import ( "context" "fmt" "sync" - syncatomic "sync/atomic" + "sync/atomic" "time" ethcommon "github.com/ethereum/go-ethereum/common" @@ -24,7 +26,7 @@ import ( "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" - "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" ) const pendingTxsBuffer = 10 @@ -32,11 +34,9 @@ const pendingTxsBuffer = 10 var ( _ p2p.Handler = (*txGossipHandler)(nil) - _ gossip.Gossipable = (*GossipEthTx)(nil) - _ gossip.Gossipable = (*GossipAtomicTx)(nil) - _ gossip.Marshaller[*GossipAtomicTx] = (*GossipAtomicTxMarshaller)(nil) - _ gossip.Marshaller[*GossipEthTx] = (*GossipEthTxMarshaller)(nil) - _ gossip.Set[*GossipEthTx] = (*GossipEthTxPool)(nil) + _ gossip.Gossipable = (*GossipEthTx)(nil) + _ gossip.Marshaller[*GossipEthTx] = (*GossipEthTxMarshaller)(nil) + _ gossip.Set[*GossipEthTx] = (*GossipEthTxPool)(nil) _ eth.PushGossiper = (*EthPushGossiper)(nil) ) @@ -91,29 +91,14 @@ func (t txGossipHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, dead return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) } -type GossipAtomicTxMarshaller struct{} - -func (g GossipAtomicTxMarshaller) MarshalGossip(tx *GossipAtomicTx) ([]byte, error) { - return tx.Tx.SignedBytes(), nil -} - -func (g GossipAtomicTxMarshaller) UnmarshalGossip(bytes []byte) (*GossipAtomicTx, error) { - tx, err := atomic.ExtractAtomicTx(bytes, atomic.Codec) - return &GossipAtomicTx{ - Tx: tx, - }, err -} - -type GossipAtomicTx struct { - Tx *atomic.Tx -} - -func (tx *GossipAtomicTx) GossipID() ids.ID { - return tx.Tx.ID() -} - func NewGossipEthTxPool(mempool *txpool.TxPool, registerer prometheus.Registerer) (*GossipEthTxPool, error) { - bloom, err := gossip.NewBloomFilter(registerer, "eth_tx_bloom_filter", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + 
bloom, err := gossip.NewBloomFilter( + registerer, + "eth_tx_bloom_filter", + config.TxGossipBloomMinTargetElements, + config.TxGossipBloomTargetFalsePositiveRate, + config.TxGossipBloomResetFalsePositiveRate, + ) if err != nil { return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) } @@ -134,7 +119,7 @@ type GossipEthTxPool struct { // subscribed is set to true when the gossip subscription is active // mostly used for testing - subscribed syncatomic.Bool + subscribed atomic.Bool } // IsSubscribed returns whether or not the gossip subscription is active. @@ -161,7 +146,7 @@ func (g *GossipEthTxPool) Subscribe(ctx context.Context) { return case pendingTxs := <-g.pendingTxs: g.lock.Lock() - optimalElements := (g.mempool.PendingSize(txpool.PendingFilter{}) + len(pendingTxs.Txs)) * txGossipBloomChurnMultiplier + optimalElements := (g.mempool.PendingSize(txpool.PendingFilter{}) + len(pendingTxs.Txs)) * config.TxGossipBloomChurnMultiplier for _, pendingTx := range pendingTxs.Txs { tx := &GossipEthTx{Tx: pendingTx} g.bloom.Add(tx) diff --git a/plugin/evm/gossip_stats.go b/plugin/evm/gossip_stats.go deleted file mode 100644 index 9805c7f1ff..0000000000 --- a/plugin/evm/gossip_stats.go +++ /dev/null @@ -1,67 +0,0 @@ -// (c) 2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import "github.com/ava-labs/coreth/metrics" - -var _ GossipStats = &gossipStats{} - -// GossipStats contains methods for updating incoming and outgoing gossip stats. -type GossipStats interface { - IncAtomicGossipReceived() - IncEthTxsGossipReceived() - - // new vs. known txs received - IncAtomicGossipReceivedDropped() - IncAtomicGossipReceivedError() - IncAtomicGossipReceivedKnown() - IncAtomicGossipReceivedNew() - IncEthTxsGossipReceivedError() - IncEthTxsGossipReceivedKnown() - IncEthTxsGossipReceivedNew() -} - -// gossipStats implements stats for incoming and outgoing gossip stats. 
-type gossipStats struct { - // messages - atomicGossipReceived metrics.Counter - ethTxsGossipReceived metrics.Counter - - // new vs. known txs received - atomicGossipReceivedDropped metrics.Counter - atomicGossipReceivedError metrics.Counter - atomicGossipReceivedKnown metrics.Counter - atomicGossipReceivedNew metrics.Counter - ethTxsGossipReceivedError metrics.Counter - ethTxsGossipReceivedKnown metrics.Counter - ethTxsGossipReceivedNew metrics.Counter -} - -func NewGossipStats() GossipStats { - return &gossipStats{ - atomicGossipReceived: metrics.GetOrRegisterCounter("gossip_atomic_received", nil), - ethTxsGossipReceived: metrics.GetOrRegisterCounter("gossip_eth_txs_received", nil), - - atomicGossipReceivedDropped: metrics.GetOrRegisterCounter("gossip_atomic_received_dropped", nil), - atomicGossipReceivedError: metrics.GetOrRegisterCounter("gossip_atomic_received_error", nil), - atomicGossipReceivedKnown: metrics.GetOrRegisterCounter("gossip_atomic_received_known", nil), - atomicGossipReceivedNew: metrics.GetOrRegisterCounter("gossip_atomic_received_new", nil), - ethTxsGossipReceivedError: metrics.GetOrRegisterCounter("gossip_eth_txs_received_error", nil), - ethTxsGossipReceivedKnown: metrics.GetOrRegisterCounter("gossip_eth_txs_received_known", nil), - ethTxsGossipReceivedNew: metrics.GetOrRegisterCounter("gossip_eth_txs_received_new", nil), - } -} - -// incoming messages -func (g *gossipStats) IncAtomicGossipReceived() { g.atomicGossipReceived.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceived() { g.ethTxsGossipReceived.Inc(1) } - -// new vs. 
known txs received -func (g *gossipStats) IncAtomicGossipReceivedDropped() { g.atomicGossipReceivedDropped.Inc(1) } -func (g *gossipStats) IncAtomicGossipReceivedError() { g.atomicGossipReceivedError.Inc(1) } -func (g *gossipStats) IncAtomicGossipReceivedKnown() { g.atomicGossipReceivedKnown.Inc(1) } -func (g *gossipStats) IncAtomicGossipReceivedNew() { g.atomicGossipReceivedNew.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceivedError() { g.ethTxsGossipReceivedError.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceivedKnown() { g.ethTxsGossipReceivedKnown.Inc(1) } -func (g *gossipStats) IncEthTxsGossipReceivedNew() { g.ethTxsGossipReceivedNew.Inc(1) } diff --git a/plugin/evm/gossip_test.go b/plugin/evm/gossip_test.go index 15ebd15871..84380001dc 100644 --- a/plugin/evm/gossip_test.go +++ b/plugin/evm/gossip_test.go @@ -9,11 +9,7 @@ import ( "testing" "time" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p/gossip" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" @@ -22,7 +18,6 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/prometheus/client_golang/prometheus" @@ -30,110 +25,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestGossipAtomicTxMarshaller(t *testing.T) { - require := require.New(t) - - want := &GossipAtomicTx{ - Tx: &atomic.Tx{ - UnsignedAtomicTx: &atomic.UnsignedImportTx{}, - Creds: []verify.Verifiable{}, - }, - } - marshaller := GossipAtomicTxMarshaller{} - - key0 := testKeys[0] - require.NoError(want.Tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key0}})) - 
- bytes, err := marshaller.MarshalGossip(want) - require.NoError(err) - - got, err := marshaller.UnmarshalGossip(bytes) - require.NoError(err) - require.Equal(want.GossipID(), got.GossipID()) -} - -func TestAtomicMempoolIterate(t *testing.T) { - txs := []*GossipAtomicTx{ - { - Tx: &atomic.Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ - IDV: ids.GenerateTestID(), - }, - }, - }, - { - Tx: &atomic.Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ - IDV: ids.GenerateTestID(), - }, - }, - }, - } - - tests := []struct { - name string - add []*GossipAtomicTx - f func(tx *GossipAtomicTx) bool - possibleValues []*GossipAtomicTx - expectedLen int - }{ - { - name: "func matches nothing", - add: txs, - f: func(*GossipAtomicTx) bool { - return false - }, - possibleValues: nil, - }, - { - name: "func matches all", - add: txs, - f: func(*GossipAtomicTx) bool { - return true - }, - possibleValues: txs, - expectedLen: 2, - }, - { - name: "func matches subset", - add: txs, - f: func(tx *GossipAtomicTx) bool { - return tx.Tx == txs[0].Tx - }, - possibleValues: txs, - expectedLen: 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 10, nil) - require.NoError(err) - - for _, add := range tt.add { - require.NoError(m.Add(add)) - } - - matches := make([]*GossipAtomicTx, 0) - f := func(tx *GossipAtomicTx) bool { - match := tt.f(tx) - - if match { - matches = append(matches, tx) - } - - return match - } - - m.Iterate(f) - - require.Len(matches, tt.expectedLen) - require.Subset(tt.possibleValues, matches) - }) - } -} - func TestGossipEthTxMarshaller(t *testing.T) { require := require.New(t) diff --git a/plugin/evm/gossiper_atomic_gossiping_test.go b/plugin/evm/gossiper_atomic_gossiping_test.go index 0974b50638..33405d2ace 100644 --- a/plugin/evm/gossiper_atomic_gossiping_test.go +++ b/plugin/evm/gossiper_atomic_gossiping_test.go @@ -5,17 +5,21 @@ package evm import ( "context" + 
"encoding/binary" "sync" "testing" "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/utils/set" "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) // show that a txID discovered from gossip is requested to the same node only if @@ -53,14 +57,17 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { tx, conflictingTx := importTxs[0], importTxs[1] // gossip tx and check it is accepted and gossiped - msg := message.AtomicTxGossip{ - Tx: tx.SignedBytes(), + msg := atomic.GossipAtomicTx{ + Tx: tx, } - msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) + marshaller := atomic.GossipAtomicTxMarshaller{} + txBytes, err := marshaller.MarshalGossip(&msg) assert.NoError(err) - vm.ctx.Lock.Unlock() + msgBytes, err := buildAtomicPushGossip(txBytes) + assert.NoError(err) + // show that no txID is requested assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) time.Sleep(500 * time.Millisecond) @@ -85,14 +92,17 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { txGossipedLock.Unlock() // show that conflicting tx is not added to mempool - msg = message.AtomicTxGossip{ - Tx: conflictingTx.SignedBytes(), + msg = atomic.GossipAtomicTx{ + Tx: conflictingTx, } - msgBytes, err = message.BuildGossipMessage(vm.networkCodec, msg) + marshaller = atomic.GossipAtomicTxMarshaller{} + txBytes, err = marshaller.MarshalGossip(&msg) assert.NoError(err) vm.ctx.Lock.Unlock() + msgBytes, err = buildAtomicPushGossip(txBytes) + assert.NoError(err) assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) vm.ctx.Lock.Lock() @@ -137,7 +147,7 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { tx, conflictingTx := importTxs[0], 
importTxs[1] txID := tx.ID() - mempool.AddTx(tx) + mempool.AddRemoteTx(tx) mempool.NextTx() mempool.DiscardCurrentTx(txID) @@ -147,14 +157,17 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { // Gossip the transaction to the VM and ensure that it is not added to the mempool // and is not re-gossipped. nodeID := ids.GenerateTestNodeID() - msg := message.AtomicTxGossip{ - Tx: tx.SignedBytes(), + msg := atomic.GossipAtomicTx{ + Tx: tx, } - msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) + marshaller := atomic.GossipAtomicTxMarshaller{} + txBytes, err := marshaller.MarshalGossip(&msg) assert.NoError(err) vm.ctx.Lock.Unlock() + msgBytes, err := buildAtomicPushGossip(txBytes) + assert.NoError(err) assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) vm.ctx.Lock.Lock() @@ -171,8 +184,8 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { // Conflicting tx must be submitted over the API to be included in push gossip. // (i.e., txs received via p2p are not included in push gossip) // This test adds it directly to the mempool + gossiper to simulate that. - vm.mempool.AddTx(conflictingTx) - vm.atomicTxPushGossiper.Add(&GossipAtomicTx{conflictingTx}) + vm.mempool.AddRemoteTx(conflictingTx) + vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{conflictingTx}) time.Sleep(500 * time.Millisecond) vm.ctx.Lock.Lock() @@ -185,3 +198,16 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { assert.False(mempool.Has(txID)) assert.True(mempool.Has(conflictingTx.ID())) } + +func buildAtomicPushGossip(txBytes []byte) ([]byte, error) { + inboundGossip := &sdk.PushGossip{ + Gossip: [][]byte{txBytes}, + } + inboundGossipBytes, err := proto.Marshal(inboundGossip) + if err != nil { + return nil, err + } + + inboundGossipMsg := append(binary.AppendUvarint(nil, p2p.AtomicTxGossipHandlerID), inboundGossipBytes...) 
+ return inboundGossipMsg, nil +} diff --git a/plugin/evm/handler.go b/plugin/evm/handler.go deleted file mode 100644 index c4b41a85e7..0000000000 --- a/plugin/evm/handler.go +++ /dev/null @@ -1,140 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "github.com/ava-labs/avalanchego/ids" - - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" - - "github.com/ava-labs/coreth/core/txpool" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/plugin/evm/message" -) - -// GossipHandler handles incoming gossip messages -type GossipHandler struct { - vm *VM - atomicMempool *Mempool - txPool *txpool.TxPool - stats GossipStats -} - -func NewGossipHandler(vm *VM, stats GossipStats) *GossipHandler { - return &GossipHandler{ - vm: vm, - atomicMempool: vm.mempool, - txPool: vm.txPool, - stats: stats, - } -} - -func (h *GossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg message.AtomicTxGossip) error { - log.Trace( - "AppGossip called with AtomicTxGossip", - "peerID", nodeID, - ) - - if len(msg.Tx) == 0 { - log.Trace( - "AppGossip received empty AtomicTxGossip Message", - "peerID", nodeID, - ) - return nil - } - - // In the case that the gossip message contains a transaction, - // attempt to parse it and add it as a remote. 
- tx := atomic.Tx{} - if _, err := atomic.Codec.Unmarshal(msg.Tx, &tx); err != nil { - log.Trace( - "AppGossip provided invalid tx", - "err", err, - ) - return nil - } - unsignedBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, &tx.UnsignedAtomicTx) - if err != nil { - log.Trace( - "AppGossip failed to marshal unsigned tx", - "err", err, - ) - return nil - } - tx.Initialize(unsignedBytes, msg.Tx) - - txID := tx.ID() - h.stats.IncAtomicGossipReceived() - if _, dropped, found := h.atomicMempool.GetTx(txID); found { - h.stats.IncAtomicGossipReceivedKnown() - return nil - } else if dropped { - h.stats.IncAtomicGossipReceivedDropped() - return nil - } - - h.stats.IncAtomicGossipReceivedNew() - - h.vm.ctx.Lock.RLock() - defer h.vm.ctx.Lock.RUnlock() - - if err := h.vm.mempool.AddTx(&tx); err != nil { - log.Trace( - "AppGossip provided invalid transaction", - "peerID", nodeID, - "err", err, - ) - h.stats.IncAtomicGossipReceivedError() - } - - return nil -} - -func (h *GossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip) error { - log.Trace( - "AppGossip called with EthTxsGossip", - "peerID", nodeID, - "size(txs)", len(msg.Txs), - ) - - if len(msg.Txs) == 0 { - log.Trace( - "AppGossip received empty EthTxsGossip Message", - "peerID", nodeID, - ) - return nil - } - - // The maximum size of this encoded object is enforced by the codec. 
- txs := make([]*types.Transaction, 0) - if err := rlp.DecodeBytes(msg.Txs, &txs); err != nil { - log.Trace( - "AppGossip provided invalid txs", - "peerID", nodeID, - "err", err, - ) - return nil - } - h.stats.IncEthTxsGossipReceived() - errs := h.txPool.Add(txs, false, false) - for i, err := range errs { - if err != nil { - log.Trace( - "AppGossip failed to add to mempool", - "err", err, - "tx", txs[i].Hash(), - ) - if err == txpool.ErrAlreadyKnown { - h.stats.IncEthTxsGossipReceivedKnown() - } else { - h.stats.IncEthTxsGossipReceivedError() - } - continue - } - h.stats.IncEthTxsGossipReceivedNew() - } - return nil -} diff --git a/plugin/evm/mempool_atomic_gossiping_test.go b/plugin/evm/mempool_atomic_gossiping_test.go index 3e22fef486..f35d2749f1 100644 --- a/plugin/evm/mempool_atomic_gossiping_test.go +++ b/plugin/evm/mempool_atomic_gossiping_test.go @@ -9,9 +9,11 @@ import ( "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" ) @@ -53,7 +55,7 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { // try to add a conflicting tx err = vm.mempool.AddLocalTx(conflictingTx) - assert.ErrorIs(err, errConflictingAtomicTx) + assert.ErrorIs(err, atomic.ErrConflictingAtomicTx) has = mempool.Has(conflictingTxID) assert.False(has, "conflicting tx in mempool") @@ -92,27 +94,22 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { func TestMempoolMaxMempoolSizeHandling(t *testing.T) { assert := assert.New(t) - _, vm, _, sharedMemory, _ := GenesisVM(t, true, "", "", "") - defer func() { - err := vm.Shutdown(context.Background()) - assert.NoError(err) - }() - mempool := vm.mempool - + mempool, err := atomic.NewMempool(&snow.Context{}, prometheus.NewRegistry(), 1, nil) + 
assert.NoError(err) // create candidate tx (we will drop before validation) - tx := createImportTxOptions(t, vm, sharedMemory)[0] + tx := atomic.GenerateTestImportTx() - // shortcut to simulated almost filled mempool - mempool.maxSize = 0 - - assert.ErrorIs(mempool.AddTx(tx), errTooManyAtomicTx) - assert.False(mempool.Has(tx.ID())) - - // shortcut to simulated empty mempool - mempool.maxSize = defaultMempoolSize - - assert.NoError(mempool.AddTx(tx)) + assert.NoError(mempool.AddRemoteTx(tx)) assert.True(mempool.Has(tx.ID())) + // promote tx to be issued + _, ok := mempool.NextTx() + assert.True(ok) + mempool.IssueCurrentTxs() + + // try to add one more tx + tx2 := atomic.GenerateTestImportTx() + assert.ErrorIs(mempool.AddRemoteTx(tx2), atomic.ErrTooManyAtomicTx) + assert.False(mempool.Has(tx2.ID())) } // mempool will drop transaction with the lowest fee @@ -129,21 +126,20 @@ func TestMempoolPriorityDrop(t *testing.T) { err := vm.Shutdown(context.Background()) assert.NoError(err) }() - mempool := vm.mempool - mempool.maxSize = 1 + mempool, err := atomic.NewMempool(vm.ctx, prometheus.NewRegistry(), 1, vm.verifyTxAtTip) tx1, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } - assert.NoError(mempool.AddTx(tx1)) + assert.NoError(mempool.AddRemoteTx(tx1)) assert.True(mempool.Has(tx1.ID())) tx2, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[1]}) if err != nil { t.Fatal(err) } - assert.ErrorIs(mempool.AddTx(tx2), errInsufficientAtomicTxFee) + assert.ErrorIs(mempool.AddRemoteTx(tx2), atomic.ErrInsufficientAtomicTxFee) assert.True(mempool.Has(tx1.ID())) assert.False(mempool.Has(tx2.ID())) @@ -151,7 +147,7 @@ func TestMempoolPriorityDrop(t *testing.T) { if err != nil { t.Fatal(err) } - assert.NoError(mempool.AddTx(tx3)) + assert.NoError(mempool.AddRemoteTx(tx3)) assert.False(mempool.Has(tx1.ID())) 
assert.False(mempool.Has(tx2.ID())) assert.True(mempool.Has(tx3.ID())) diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go index de3603b9c2..664c9252bb 100644 --- a/plugin/evm/message/codec.go +++ b/plugin/evm/message/codec.go @@ -15,20 +15,16 @@ const ( maxMessageSize = 2*units.MiB - 64*units.KiB // Subtract 64 KiB from p2p network cap to leave room for encoding overhead from AvalancheGo ) -var ( - Codec codec.Manager -) +var Codec codec.Manager func init() { Codec = codec.NewManager(maxMessageSize) c := linearcodec.NewDefault() errs := wrappers.Errs{} + // Gossip types removed from codec + c.SkipRegistrations(2) errs.Add( - // Gossip types - c.RegisterType(AtomicTxGossip{}), - c.RegisterType(EthTxsGossip{}), - // Types for state sync frontier consensus c.RegisterType(SyncSummary{}), diff --git a/plugin/evm/message/handler.go b/plugin/evm/message/handler.go index 9b94828509..1b910e3826 100644 --- a/plugin/evm/message/handler.go +++ b/plugin/evm/message/handler.go @@ -6,33 +6,10 @@ package message import ( "context" - "github.com/ethereum/go-ethereum/log" - "github.com/ava-labs/avalanchego/ids" ) -var ( - _ GossipHandler = NoopMempoolGossipHandler{} - _ RequestHandler = NoopRequestHandler{} -) - -// GossipHandler handles incoming gossip messages -type GossipHandler interface { - HandleAtomicTx(nodeID ids.NodeID, msg AtomicTxGossip) error - HandleEthTxs(nodeID ids.NodeID, msg EthTxsGossip) error -} - -type NoopMempoolGossipHandler struct{} - -func (NoopMempoolGossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg AtomicTxGossip) error { - log.Debug("dropping unexpected AtomicTxGossip message", "peerID", nodeID) - return nil -} - -func (NoopMempoolGossipHandler) HandleEthTxs(nodeID ids.NodeID, msg EthTxsGossip) error { - log.Debug("dropping unexpected EthTxsGossip message", "peerID", nodeID) - return nil -} +var _ RequestHandler = NoopRequestHandler{} // RequestHandler interface handles incoming requests from peers // Must have methods in format of 
handleType(context.Context, ids.NodeID, uint32, request Type) error diff --git a/plugin/evm/message/handler_test.go b/plugin/evm/message/handler_test.go deleted file mode 100644 index a27b1f9d4f..0000000000 --- a/plugin/evm/message/handler_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - - "github.com/stretchr/testify/assert" -) - -type CounterHandler struct { - AtomicTx, EthTxs int -} - -func (h *CounterHandler) HandleAtomicTx(ids.NodeID, AtomicTxGossip) error { - h.AtomicTx++ - return nil -} - -func (h *CounterHandler) HandleEthTxs(ids.NodeID, EthTxsGossip) error { - h.EthTxs++ - return nil -} - -func TestHandleAtomicTx(t *testing.T) { - assert := assert.New(t) - - handler := CounterHandler{} - msg := AtomicTxGossip{} - - err := msg.Handle(&handler, ids.EmptyNodeID) - assert.NoError(err) - assert.Equal(1, handler.AtomicTx) - assert.Zero(handler.EthTxs) -} - -func TestHandleEthTxs(t *testing.T) { - assert := assert.New(t) - - handler := CounterHandler{} - msg := EthTxsGossip{} - - err := msg.Handle(&handler, ids.EmptyNodeID) - assert.NoError(err) - assert.Zero(handler.AtomicTx) - assert.Equal(1, handler.EthTxs) -} - -func TestNoopHandler(t *testing.T) { - assert := assert.New(t) - - handler := NoopMempoolGossipHandler{} - - err := handler.HandleEthTxs(ids.EmptyNodeID, EthTxsGossip{}) - assert.NoError(err) - - err = handler.HandleAtomicTx(ids.EmptyNodeID, AtomicTxGossip{}) - assert.NoError(err) -} diff --git a/plugin/evm/message/message.go b/plugin/evm/message/message.go deleted file mode 100644 index c8c80a0343..0000000000 --- a/plugin/evm/message/message.go +++ /dev/null @@ -1,78 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "errors" - "fmt" - - "github.com/ava-labs/avalanchego/codec" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/units" -) - -const ( - // EthMsgSoftCapSize is the ideal size of encoded transaction bytes we send in - // any [EthTxsGossip] or [AtomicTxGossip] message. We do not limit inbound messages to - // this size, however. Max inbound message size is enforced by the codec - // (512KB). - EthMsgSoftCapSize = 64 * units.KiB -) - -var ( - _ GossipMessage = AtomicTxGossip{} - _ GossipMessage = EthTxsGossip{} - - errUnexpectedCodecVersion = errors.New("unexpected codec version") -) - -type GossipMessage interface { - // types implementing GossipMessage should also implement fmt.Stringer for logging purposes. - fmt.Stringer - - // Handle this gossip message with the gossip handler. - Handle(handler GossipHandler, nodeID ids.NodeID) error -} - -type AtomicTxGossip struct { - Tx []byte `serialize:"true"` -} - -func (msg AtomicTxGossip) Handle(handler GossipHandler, nodeID ids.NodeID) error { - return handler.HandleAtomicTx(nodeID, msg) -} - -func (msg AtomicTxGossip) String() string { - return fmt.Sprintf("AtomicTxGossip(Len=%d)", len(msg.Tx)) -} - -type EthTxsGossip struct { - Txs []byte `serialize:"true"` -} - -func (msg EthTxsGossip) Handle(handler GossipHandler, nodeID ids.NodeID) error { - return handler.HandleEthTxs(nodeID, msg) -} - -func (msg EthTxsGossip) String() string { - return fmt.Sprintf("EthTxsGossip(Len=%d)", len(msg.Txs)) -} - -func ParseGossipMessage(codec codec.Manager, bytes []byte) (GossipMessage, error) { - var msg GossipMessage - version, err := codec.Unmarshal(bytes, &msg) - if err != nil { - return nil, err - } - if version != Version { - return nil, errUnexpectedCodecVersion - } - return msg, nil -} - -func BuildGossipMessage(codec codec.Manager, msg GossipMessage) ([]byte, error) { - bytes, err := codec.Marshal(Version, &msg) - return bytes, err -} diff --git 
a/plugin/evm/message/message_test.go b/plugin/evm/message/message_test.go deleted file mode 100644 index dbcdea2d75..0000000000 --- a/plugin/evm/message/message_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "encoding/base64" - "testing" - - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/units" - - "github.com/stretchr/testify/assert" -) - -// TestMarshalAtomicTx asserts that the structure or serialization logic hasn't changed, primarily to -// ensure compatibility with the network. -func TestMarshalAtomicTx(t *testing.T) { - assert := assert.New(t) - - base64AtomicTxGossip := "AAAAAAAAAAAABGJsYWg=" - msg := []byte("blah") - builtMsg := AtomicTxGossip{ - Tx: msg, - } - builtMsgBytes, err := BuildGossipMessage(Codec, builtMsg) - assert.NoError(err) - assert.Equal(base64AtomicTxGossip, base64.StdEncoding.EncodeToString(builtMsgBytes)) - - parsedMsgIntf, err := ParseGossipMessage(Codec, builtMsgBytes) - assert.NoError(err) - - parsedMsg, ok := parsedMsgIntf.(AtomicTxGossip) - assert.True(ok) - - assert.Equal(msg, parsedMsg.Tx) -} - -// TestMarshalEthTxs asserts that the structure or serialization logic hasn't changed, primarily to -// ensure compatibility with the network. 
-func TestMarshalEthTxs(t *testing.T) { - assert := assert.New(t) - - base64EthTxGossip := "AAAAAAABAAAABGJsYWg=" - msg := []byte("blah") - builtMsg := EthTxsGossip{ - Txs: msg, - } - builtMsgBytes, err := BuildGossipMessage(Codec, builtMsg) - assert.NoError(err) - assert.Equal(base64EthTxGossip, base64.StdEncoding.EncodeToString(builtMsgBytes)) - - parsedMsgIntf, err := ParseGossipMessage(Codec, builtMsgBytes) - assert.NoError(err) - - parsedMsg, ok := parsedMsgIntf.(EthTxsGossip) - assert.True(ok) - - assert.Equal(msg, parsedMsg.Txs) -} - -func TestEthTxsTooLarge(t *testing.T) { - assert := assert.New(t) - - builtMsg := EthTxsGossip{ - Txs: utils.RandomBytes(maxMessageSize), - } - _, err := BuildGossipMessage(Codec, builtMsg) - assert.Error(err) -} - -func TestParseGibberish(t *testing.T) { - assert := assert.New(t) - - randomBytes := utils.RandomBytes(256 * units.KiB) - _, err := ParseGossipMessage(Codec, randomBytes) - assert.Error(err) -} diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 59fddb1ea4..c53a1da4c1 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -196,7 +196,7 @@ func (service *AvaxAPI) Import(_ *http.Request, args *client.ImportArgs, respons if err := service.vm.mempool.AddLocalTx(tx); err != nil { return err } - service.vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) + service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{tx}) return nil } @@ -280,7 +280,7 @@ func (service *AvaxAPI) Export(_ *http.Request, args *client.ExportArgs, respons if err := service.vm.mempool.AddLocalTx(tx); err != nil { return err } - service.vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) + service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{tx}) return nil } @@ -390,7 +390,7 @@ func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response if err := service.vm.mempool.AddLocalTx(tx); err != nil { return err } - service.vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) + 
service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{tx}) return nil } diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index fb1d7388d9..ad9f4667a8 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -36,6 +36,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/utils" ) @@ -104,7 +105,13 @@ func TestEthTxGossip(t *testing.T) { } // Ask the VM for any new transactions. We should get nothing at first. - emptyBloomFilter, err := gossip.NewBloomFilter(prometheus.NewRegistry(), "", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + emptyBloomFilter, err := gossip.NewBloomFilter( + prometheus.NewRegistry(), + "", + config.TxGossipBloomMinTargetElements, + config.TxGossipBloomTargetFalsePositiveRate, + config.TxGossipBloomResetFalsePositiveRate, + ) require.NoError(err) emptyBloomFilterBytes, _ := emptyBloomFilter.Marshal() request := &sdk.PullGossipRequest{ @@ -231,7 +238,13 @@ func TestAtomicTxGossip(t *testing.T) { } // Ask the VM for any new transactions. We should get nothing at first. - emptyBloomFilter, err := gossip.NewBloomFilter(prometheus.NewRegistry(), "", txGossipBloomMinTargetElements, txGossipBloomTargetFalsePositiveRate, txGossipBloomResetFalsePositiveRate) + emptyBloomFilter, err := gossip.NewBloomFilter( + prometheus.NewRegistry(), + "", + config.TxGossipBloomMinTargetElements, + config.TxGossipBloomTargetFalsePositiveRate, + config.TxGossipBloomResetFalsePositiveRate, + ) require.NoError(err) emptyBloomFilterBytes, _ := emptyBloomFilter.Marshal() request := &sdk.PullGossipRequest{ @@ -278,7 +291,7 @@ func TestAtomicTxGossip(t *testing.T) { // Ask the VM for new transactions. We should get the newly issued tx. 
wg.Add(1) - marshaller := GossipAtomicTxMarshaller{} + marshaller := atomic.GossipAtomicTxMarshaller{} onResponse = func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { require.NoError(err) @@ -479,7 +492,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) - vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) + vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{tx}) gossipedBytes := <-sender.SentAppGossip require.Equal(byte(p2p.AtomicTxGossipHandlerID), gossipedBytes[0]) @@ -488,7 +501,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { require.NoError(proto.Unmarshal(gossipedBytes[1:], outboundGossipMsg)) require.Len(outboundGossipMsg.Gossip, 1) - marshaller := GossipAtomicTxMarshaller{} + marshaller := atomic.GossipAtomicTxMarshaller{} gossipedTx, err := marshaller.UnmarshalGossip(outboundGossipMsg.Gossip[0]) require.NoError(err) require.Equal(tx.ID(), gossipedTx.Tx.ID()) @@ -551,8 +564,8 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) - marshaller := GossipAtomicTxMarshaller{} - gossipedTx := &GossipAtomicTx{ + marshaller := atomic.GossipAtomicTxMarshaller{} + gossipedTx := &atomic.GossipAtomicTx{ Tx: tx, } gossipBytes, err := marshaller.MarshalGossip(gossipedTx) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 4adab936bc..c1b9426bea 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -41,6 +41,7 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/peer" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/triedb" "github.com/ava-labs/coreth/triedb/hashdb" @@ -129,16 +130,12 @@ const ( 
targetAtomicTxsSize = 40 * units.KiB // gossip constants - pushGossipDiscardedElements = 16_384 - txGossipBloomMinTargetElements = 8 * 1024 - txGossipBloomTargetFalsePositiveRate = 0.01 - txGossipBloomResetFalsePositiveRate = 0.05 - txGossipBloomChurnMultiplier = 3 - txGossipTargetMessageSize = 20 * units.KiB - maxValidatorSetStaleness = time.Minute - txGossipThrottlingPeriod = 10 * time.Second - txGossipThrottlingLimit = 2 - txGossipPollSize = 1 + pushGossipDiscardedElements = 16_384 + txGossipTargetMessageSize = 20 * units.KiB + maxValidatorSetStaleness = time.Minute + txGossipThrottlingPeriod = 10 * time.Second + txGossipThrottlingLimit = 2 + txGossipPollSize = 1 ) // Define the API endpoints for the VM @@ -168,15 +165,12 @@ var ( errEmptyBlock = errors.New("empty block") errUnsupportedFXs = errors.New("unsupported feature extensions") errInvalidBlock = errors.New("invalid block") - errInsufficientAtomicTxFee = errors.New("atomic tx fee too low for atomic mempool") errInvalidNonce = errors.New("invalid nonce") errUnclesUnsupported = errors.New("uncles unsupported") errRejectedParent = errors.New("rejected parent") errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") errNilExtDataGasUsedApricotPhase4 = errors.New("nil extDataGasUsed is invalid after apricotPhase4") errNilBlockGasCostApricotPhase4 = errors.New("nil blockGasCost is invalid after apricotPhase4") - errConflictingAtomicTx = errors.New("conflicting atomic tx present") - errTooManyAtomicTx = errors.New("too many atomic tx") errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") ) @@ -217,7 +211,7 @@ type VM struct { // with an efficient caching layer. 
*chain.State - config Config + config config.Config chainID *big.Int networkID uint64 @@ -265,7 +259,7 @@ type VM struct { baseCodec codec.Registry clock mockable.Clock - mempool *Mempool + mempool *atomic.Mempool shutdownChan chan struct{} shutdownWg sync.WaitGroup @@ -303,7 +297,7 @@ type VM struct { ethTxPushGossiper avalancheUtils.Atomic[*gossip.PushGossiper[*GossipEthTx]] ethTxPullGossiper gossip.Gossiper atomicTxGossipHandler p2p.Handler - atomicTxPushGossiper *gossip.PushGossiper[*GossipAtomicTx] + atomicTxPushGossiper *gossip.PushGossiper[*atomic.GossipAtomicTx] atomicTxPullGossiper gossip.Gossiper chainAlias string @@ -349,7 +343,9 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to unmarshal config %s: %w", string(configBytes), err) } } - if err := vm.config.Validate(); err != nil { + vm.ctx = chainCtx + + if err := vm.config.Validate(vm.ctx.NetworkID); err != nil { return err } // We should deprecate config flags as the first thing, before we do anything else @@ -357,8 +353,6 @@ func (vm *VM) Initialize( // initialized the logger. deprecateMsg := vm.config.Deprecate() - vm.ctx = chainCtx - // Create logger alias, err := vm.ctx.BCLookup.PrimaryAlias(vm.ctx.ChainID) if err != nil { @@ -452,16 +446,6 @@ func (vm *VM) Initialize( } vm.syntacticBlockValidator = NewBlockValidator(extDataHashes) - // Ensure that non-standard commit interval is not allowed for production networks - if avalanchegoConstants.ProductionNetworkIDs.Contains(chainCtx.NetworkID) { - if vm.config.CommitInterval != defaultCommitInterval { - return fmt.Errorf("cannot start non-local network with commit interval %d", vm.config.CommitInterval) - } - if vm.config.StateSyncCommitInterval != defaultSyncableCommitInterval { - return fmt.Errorf("cannot start non-local network with syncable interval %d", vm.config.StateSyncCommitInterval) - } - } - // Free the memory of the extDataHash map that is not used (i.e. 
if mainnet // config, free fuji) fujiExtDataHashes = nil @@ -543,7 +527,7 @@ func (vm *VM) Initialize( } // TODO: read size from settings - vm.mempool, err = NewMempool(chainCtx, vm.sdkMetrics, defaultMempoolSize, vm.verifyTxAtTip) + vm.mempool, err = atomic.NewMempool(chainCtx, vm.sdkMetrics, defaultMempoolSize, vm.verifyTxAtTip) if err != nil { return fmt.Errorf("failed to initialize mempool: %w", err) } @@ -553,6 +537,7 @@ func (vm *VM) Initialize( vm.p2pSender = appSender } + // TODO: move all network stuff to peer.NewNetwork p2pNetwork, err := p2p.NewNetwork(vm.ctx.Log, vm.p2pSender, vm.sdkMetrics, "p2p") if err != nil { return fmt.Errorf("failed to initialize p2p network: %w", err) @@ -1106,7 +1091,7 @@ func (vm *VM) initBlockBuilding() error { vm.shutdownWg.Done() }() - atomicTxGossipMarshaller := GossipAtomicTxMarshaller{} + atomicTxGossipMarshaller := atomic.GossipAtomicTxMarshaller{} atomicTxGossipClient := vm.Network.NewClient(p2p.AtomicTxGossipHandlerID, p2p.WithValidatorSampling(vm.validators)) atomicTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, atomicTxGossipNamespace) if err != nil { @@ -1144,7 +1129,7 @@ func (vm *VM) initBlockBuilding() error { } if vm.atomicTxPushGossiper == nil { - vm.atomicTxPushGossiper, err = gossip.NewPushGossiper[*GossipAtomicTx]( + vm.atomicTxPushGossiper, err = gossip.NewPushGossiper[*atomic.GossipAtomicTx]( atomicTxGossipMarshaller, vm.mempool, vm.validators, @@ -1162,10 +1147,8 @@ func (vm *VM) initBlockBuilding() error { } // NOTE: gossip network must be initialized first otherwise ETH tx gossip will not work. 
- gossipStats := NewGossipStats() vm.builder = vm.NewBlockBuilder(vm.toEngine) vm.builder.awaitSubmittedTxs() - vm.Network.SetGossipHandler(NewGossipHandler(vm, gossipStats)) if vm.ethTxGossipHandler == nil { vm.ethTxGossipHandler = newTxGossipHandler[*GossipEthTx]( @@ -1185,7 +1168,7 @@ func (vm *VM) initBlockBuilding() error { } if vm.atomicTxGossipHandler == nil { - vm.atomicTxGossipHandler = newTxGossipHandler[*GossipAtomicTx]( + vm.atomicTxGossipHandler = newTxGossipHandler[*atomic.GossipAtomicTx]( vm.ctx.Log, atomicTxGossipMarshaller, vm.mempool, @@ -1229,7 +1212,7 @@ func (vm *VM) initBlockBuilding() error { }() if vm.atomicTxPullGossiper == nil { - atomicTxPullGossiper := gossip.NewPullGossiper[*GossipAtomicTx]( + atomicTxPullGossiper := gossip.NewPullGossiper[*atomic.GossipAtomicTx]( vm.ctx.Log, atomicTxGossipMarshaller, vm.mempool, diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index cc9c7d6f7b..4ec59de0bc 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -25,6 +25,7 @@ import ( "github.com/ava-labs/coreth/eth/filters" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" @@ -406,7 +407,7 @@ func TestVMConfigDefaults(t *testing.T) { configJSON := fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") - var vmConfig Config + var vmConfig config.Config vmConfig.SetDefaults() vmConfig.RPCTxFeeCap = txFeeCap vmConfig.EnabledEthAPIs = enabledEthAPIs @@ -418,7 +419,7 @@ func TestVMNilConfig(t *testing.T) { _, vm, _, _, _ := GenesisVM(t, false, "", "", "") // VM Config should match defaults if no config is passed in - var vmConfig Config + var vmConfig config.Config vmConfig.SetDefaults() require.Equal(t, vmConfig, vm.config, "VM Config should match default config") require.NoError(t, 
vm.Shutdown(context.Background())) @@ -1100,8 +1101,8 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, errConflictingAtomicTx) { - t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicTx, err) + if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, atomic.ErrConflictingAtomicTx) { + t.Fatalf("Expected to fail with err: %s, but found err: %s", atomic.ErrConflictingAtomicTx, err) } assert.True(t, vm.mempool.Has(importTx1.ID())) @@ -1124,12 +1125,12 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { issuedTxs, evictedTxs := issueTxs(t, vm, sharedMemory) for i, tx := range issuedTxs { - _, issued := vm.mempool.txHeap.Get(tx.ID()) + _, issued := vm.mempool.GetPendingTx(tx.ID()) assert.True(t, issued, "expected issued tx at index %d to be issued", i) } for i, tx := range evictedTxs { - _, discarded := vm.mempool.discardedTxs.Get(tx.ID()) + _, discarded, _ := vm.mempool.GetTx(tx.ID()) assert.True(t, discarded, "expected discarded tx at index %d to be discarded", i) } }) @@ -3045,7 +3046,7 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { t.Fatal("Should have failed to issue due to an invalid export tx") } - if err := vm.mempool.AddTx(exportTx2); err == nil { + if err := vm.mempool.AddRemoteTx(exportTx2); err == nil { t.Fatal("Should have failed to add because conflicting") } @@ -3582,8 +3583,8 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { // Add the two conflicting transactions directly to the mempool, so that two consecutive transactions // will fail verification when build block is called. 
- vm.mempool.AddTx(importTxs[1]) - vm.mempool.AddTx(importTxs[2]) + vm.mempool.AddRemoteTx(importTxs[1]) + vm.mempool.AddRemoteTx(importTxs[2]) if _, err := vm.BuildBlock(context.Background()); err == nil { t.Fatal("Expected build block to fail due to empty block") From 019bb048a552b8c212bef7f74ec0e9db2e825c0d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sat, 14 Dec 2024 23:33:25 +0300 Subject: [PATCH 05/91] lint --- plugin/evm/gossiper_atomic_gossiping_test.go | 2 +- plugin/evm/mempool_atomic_gossiping_test.go | 1 + plugin/evm/service.go | 6 +++--- plugin/evm/tx_gossip_test.go | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/plugin/evm/gossiper_atomic_gossiping_test.go b/plugin/evm/gossiper_atomic_gossiping_test.go index 33405d2ace..e2c97167f8 100644 --- a/plugin/evm/gossiper_atomic_gossiping_test.go +++ b/plugin/evm/gossiper_atomic_gossiping_test.go @@ -185,7 +185,7 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { // (i.e., txs received via p2p are not included in push gossip) // This test adds it directly to the mempool + gossiper to simulate that. 
vm.mempool.AddRemoteTx(conflictingTx) - vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{conflictingTx}) + vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: conflictingTx}) time.Sleep(500 * time.Millisecond) vm.ctx.Lock.Lock() diff --git a/plugin/evm/mempool_atomic_gossiping_test.go b/plugin/evm/mempool_atomic_gossiping_test.go index f35d2749f1..9f2cc89535 100644 --- a/plugin/evm/mempool_atomic_gossiping_test.go +++ b/plugin/evm/mempool_atomic_gossiping_test.go @@ -127,6 +127,7 @@ func TestMempoolPriorityDrop(t *testing.T) { assert.NoError(err) }() mempool, err := atomic.NewMempool(vm.ctx, prometheus.NewRegistry(), 1, vm.verifyTxAtTip) + assert.NoError(err) tx1, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { diff --git a/plugin/evm/service.go b/plugin/evm/service.go index c53a1da4c1..39baf3eed1 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -196,7 +196,7 @@ func (service *AvaxAPI) Import(_ *http.Request, args *client.ImportArgs, respons if err := service.vm.mempool.AddLocalTx(tx); err != nil { return err } - service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{tx}) + service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) return nil } @@ -280,7 +280,7 @@ func (service *AvaxAPI) Export(_ *http.Request, args *client.ExportArgs, respons if err := service.vm.mempool.AddLocalTx(tx); err != nil { return err } - service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{tx}) + service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) return nil } @@ -390,7 +390,7 @@ func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response if err := service.vm.mempool.AddLocalTx(tx); err != nil { return err } - service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{tx}) + service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) return nil } diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 
ad9f4667a8..bfe9ba4b84 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -492,7 +492,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) - vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{tx}) + vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) gossipedBytes := <-sender.SentAppGossip require.Equal(byte(p2p.AtomicTxGossipHandlerID), gossipedBytes[0]) From aa50ce6a1dcbb34e86c925e929723fd02ac9d88f Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sun, 15 Dec 2024 13:09:17 +0300 Subject: [PATCH 06/91] change newimport clk to time --- plugin/evm/atomic/import_tx.go | 6 ++---- plugin/evm/tx_gossip_test.go | 6 +++--- plugin/evm/vm.go | 2 +- plugin/evm/vm_test.go | 18 +++++++++--------- 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/plugin/evm/atomic/import_tx.go b/plugin/evm/atomic/import_tx.go index 0d4d367d4e..9213299d8c 100644 --- a/plugin/evm/atomic/import_tx.go +++ b/plugin/evm/atomic/import_tx.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -291,7 +290,7 @@ func (utx *UnsignedImportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { func NewImportTx( ctx *snow.Context, rules params.Rules, - clk mockable.Clock, + time uint64, chainID ids.ID, // chain to import from to common.Address, // Address of recipient baseFee *big.Int, // fee to use post-AP3 @@ -302,9 +301,8 @@ func NewImportTx( signers := [][]*secp256k1.PrivateKey{} importedAmount := 
make(map[ids.ID]uint64) - now := clk.Unix() for _, utxo := range atomicUTXOs { - inputIntf, utxoSigners, err := kc.Spend(utxo.Out, now) + inputIntf, utxoSigners, err := kc.Spend(utxo.Out, time) if err != nil { continue } diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index fb1d7388d9..0e46d61ea0 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -268,7 +268,7 @@ func TestAtomicTxGossip(t *testing.T) { pk.PublicKey().Address(), ) require.NoError(err) - tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) @@ -476,7 +476,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { pk.PublicKey().Address(), ) require.NoError(err) - tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) vm.atomicTxPushGossiper.Add(&GossipAtomicTx{tx}) @@ -547,7 +547,7 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { pk.PublicKey().Address(), ) require.NoError(err) - tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) diff --git 
a/plugin/evm/vm.go b/plugin/evm/vm.go index 4adab936bc..ad28908b6b 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1899,7 +1899,7 @@ func (vm *VM) newImportTx( return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) } - return atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, chainID, to, baseFee, kc, atomicUTXOs) + return atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) } // newExportTx returns a new ExportTx diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index cc9c7d6f7b..7c0a682324 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1023,11 +1023,11 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { if err != nil { t.Fatal(err) } - tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) + tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } - tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo}) + tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } @@ -1050,11 +1050,11 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { if err != nil { t.Fatal(err) } - tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1, utxo2}) + tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1, utxo2}) if err != nil { t.Fatal(err) } - tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, 
vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo1}) + tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo1}) if err != nil { t.Fatal(err) } @@ -1078,17 +1078,17 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - importTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1}) + importTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1}) if err != nil { t.Fatal(err) } - importTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(3), initialBaseFee), kc, []*avax.UTXO{utxo2}) + importTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(3), initialBaseFee), kc, []*avax.UTXO{utxo2}) if err != nil { t.Fatal(err) } - reissuanceTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(2), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) + reissuanceTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(2), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) if err != nil { t.Fatal(err) } @@ -1108,7 +1108,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { assert.True(t, vm.mempool.Has(importTx2.ID())) assert.False(t, vm.mempool.Has(reissuanceTx1.ID())) - reissuanceTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(4), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) + reissuanceTx2, err := 
atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(4), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) if err != nil { t.Fatal(err) } @@ -3679,7 +3679,7 @@ func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { utxo, err := addUTXO(sharedMemory, vm.ctx, txID, uint32(i), vm.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) assert.NoError(t, err) - importTx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) + importTx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } From 52081a9d5f45f5b49794696b0415e4c7de9e23d0 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sun, 15 Dec 2024 13:30:37 +0300 Subject: [PATCH 07/91] move utils --- plugin/evm/atomic/export_tx.go | 11 ++++++----- plugin/evm/client/client.go | 3 ++- plugin/evm/export_tx_test.go | 9 +++++---- plugin/evm/import_tx_test.go | 9 +++++---- plugin/evm/service.go | 5 +++-- plugin/evm/tx_gossip_test.go | 12 ++++++------ plugin/evm/user.go | 3 ++- plugin/evm/vm_test.go | 2 +- {plugin/evm/atomic => utils}/utils.go | 3 +-- 9 files changed, 31 insertions(+), 26 deletions(-) rename {plugin/evm/atomic => utils}/utils.go (91%) diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index 26307cface..906753265c 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -10,12 +10,13 @@ import ( "math/big" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/utils" "github.com/holiman/uint256" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" + avalancheutils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" 
"github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" @@ -123,7 +124,7 @@ func (utx *UnsignedExportTx) Verify( if !avax.IsSortedTransferableOutputs(utx.ExportedOutputs, Codec) { return ErrOutputsNotSorted } - if rules.IsApricotPhase1 && !utils.IsSortedAndUnique(utx.Ins) { + if rules.IsApricotPhase1 && !avalancheutils.IsSortedAndUnique(utx.Ins) { return ErrInputsNotSortedUnique } @@ -240,7 +241,7 @@ func (utx *UnsignedExportTx) SemanticVerify( if err != nil { return err } - if input.Address != PublicKeyToEthAddress(pubKey) { + if input.Address != utils.PublicKeyToEthAddress(pubKey) { return errPublicKeySignatureMismatch } } @@ -431,7 +432,7 @@ func GetSpendableFunds( if amount == 0 { break } - addr := GetEthAddress(key) + addr := utils.GetEthAddress(key) var balance uint64 if assetID == ctx.AVAXAssetID { // If the asset is AVAX, we divide by the x2cRate to convert back to the correct @@ -514,7 +515,7 @@ func GetSpendableAVAXWithFee( additionalFee := newFee - prevFee - addr := GetEthAddress(key) + addr := utils.GetEthAddress(key) // Since the asset is AVAX, we divide by the x2cRate to convert back to // the correct denomination of AVAX that can be exported. 
balance := new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() diff --git a/plugin/evm/client/client.go b/plugin/evm/client/client.go index f92e55e59f..93ac27ed5b 100644 --- a/plugin/evm/client/client.go +++ b/plugin/evm/client/client.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/rpc" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" ) // Interface compliance @@ -179,7 +180,7 @@ func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *s if err != nil { return common.Address{}, err } - return atomic.ParseEthAddress(res.Address) + return utils.ParseEthAddress(res.Address) } // ImportArgs are arguments for passing into Import requests diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 3e2b0d2160..61c80fb7dd 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -104,7 +105,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, func TestExportTxEVMStateTransfer(t *testing.T) { key := testKeys[0] addr := key.PublicKey().Address() - ethAddr := atomic.GetEthAddress(key) + ethAddr := utils.GetEthAddress(key) avaxAmount := 50 * units.MilliAvax avaxUTXOID := avax.UTXOID{ @@ -1822,7 +1823,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - addr := atomic.GetEthAddress(testKeys[0]) + addr := utils.GetEthAddress(testKeys[0]) if sdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), sdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } @@ -1970,7 +1971,7 @@ func TestNewExportTxMulticoin(t *testing.T) { 
parent = vm.LastAcceptedBlockInternal().(*Block) exportAmount := uint64(5000000) - testKeys0Addr := atomic.GetEthAddress(testKeys[0]) + testKeys0Addr := utils.GetEthAddress(testKeys[0]) exportId, err := ids.ToShortID(testKeys0Addr[:]) if err != nil { t.Fatal(err) @@ -2022,7 +2023,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - addr := atomic.GetEthAddress(testKeys[0]) + addr := utils.GetEthAddress(testKeys[0]) if stdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), stdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index d254153712..58f7baa6fc 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -9,12 +9,13 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" + avalancheutils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -117,8 +118,8 @@ func TestImportTxVerify(t *testing.T) { } // Sort the inputs and outputs to ensure the transaction is canonical - utils.Sort(importTx.ImportedInputs) - utils.Sort(importTx.Outs) + avalancheutils.Sort(importTx.ImportedInputs) + avalancheutils.Sort(importTx.Outs) tests := map[string]atomicTxVerifyTest{ "nil tx": { @@ -498,7 +499,7 @@ func TestNewImportTx(t *testing.T) { expectedRemainingBalance := new(uint256.Int).Mul( uint256.NewInt(importAmount-actualAVAXBurned), atomic.X2CRate) - addr := atomic.GetEthAddress(testKeys[0]) + addr := utils.GetEthAddress(testKeys[0]) if actualBalance := 
sdb.GetBalance(addr); actualBalance.Cmp(expectedRemainingBalance) != 0 { t.Fatalf("address remaining balance %s equal %s not %s", addr.String(), actualBalance, expectedRemainingBalance) } diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 59fddb1ea4..92529e32c2 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/client" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" @@ -96,7 +97,7 @@ func (service *AvaxAPI) Version(r *http.Request, _ *struct{}, reply *VersionRepl func (service *AvaxAPI) ExportKey(r *http.Request, args *client.ExportKeyArgs, reply *client.ExportKeyReply) error { log.Info("EVM: ExportKey called") - address, err := atomic.ParseEthAddress(args.Address) + address, err := utils.ParseEthAddress(args.Address) if err != nil { return fmt.Errorf("couldn't parse %s to address: %s", args.Address, err) } @@ -127,7 +128,7 @@ func (service *AvaxAPI) ImportKey(r *http.Request, args *client.ImportKeyArgs, r return errMissingPrivateKey } - reply.Address = atomic.GetEthAddress(args.PrivateKey).Hex() + reply.Address = utils.GetEthAddress(args.PrivateKey).Hex() service.vm.ctx.Lock.Lock() defer service.vm.ctx.Lock.Unlock() diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 0e46d61ea0..99cef8beb3 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -48,7 +48,7 @@ func TestEthTxGossip(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := atomic.GetEthAddress(pk) + address := utils.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -176,7 +176,7 @@ func TestAtomicTxGossip(t *testing.T) { pk, err := 
secp256k1.NewPrivateKey() require.NoError(err) - address := atomic.GetEthAddress(pk) + address := utils.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -315,7 +315,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := atomic.GetEthAddress(pk) + address := utils.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -375,7 +375,7 @@ func TestEthTxPushGossipInbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := atomic.GetEthAddress(pk) + address := utils.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -434,7 +434,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := atomic.GetEthAddress(pk) + address := utils.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -507,7 +507,7 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := atomic.GetEthAddress(pk) + address := utils.GetEthAddress(pk) genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) diff --git a/plugin/evm/user.go b/plugin/evm/user.go index 627a7af1d1..4a68eca2fb 100644 --- a/plugin/evm/user.go +++ b/plugin/evm/user.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" ) @@ -70,7 +71,7 @@ func (u *user) 
putAddress(privKey *secp256k1.PrivateKey) error { return errKeyNil } - address := atomic.GetEthAddress(privKey) // address the privKey controls + address := utils.GetEthAddress(privKey) // address the privKey controls controlsAddress, err := u.controlsAddress(address) if err != nil { return err diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 7c0a682324..668a1b31c2 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -161,7 +161,7 @@ func init() { b, _ = cb58.Decode(key) pk, _ := secp256k1.ToPrivateKey(b) testKeys = append(testKeys, pk) - testEthAddrs = append(testEthAddrs, atomic.GetEthAddress(pk)) + testEthAddrs = append(testEthAddrs, utils.GetEthAddress(pk)) testShortIDAddrs = append(testShortIDAddrs, pk.PublicKey().Address()) } } diff --git a/plugin/evm/atomic/utils.go b/utils/utils.go similarity index 91% rename from plugin/evm/atomic/utils.go rename to utils/utils.go index 8872e09861..af2c0f822d 100644 --- a/plugin/evm/atomic/utils.go +++ b/utils/utils.go @@ -1,7 +1,7 @@ // (c) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package atomic +package utils import ( "errors" @@ -13,7 +13,6 @@ import ( var errInvalidAddr = errors.New("invalid hex address") -// ParseEthAddress parses [addrStr] and returns an Ethereum address func ParseEthAddress(addrStr string) (common.Address, error) { if !common.IsHexAddress(addrStr) { return common.Address{}, errInvalidAddr From d93257b06d861723f34f5c2a28852112ca542f10 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sun, 15 Dec 2024 16:07:56 +0300 Subject: [PATCH 08/91] remove extra heaps files --- plugin/evm/tx_heap.go | 164 ------------------------------------- plugin/evm/tx_heap_test.go | 143 -------------------------------- 2 files changed, 307 deletions(-) delete mode 100644 plugin/evm/tx_heap.go delete mode 100644 plugin/evm/tx_heap_test.go diff --git a/plugin/evm/tx_heap.go b/plugin/evm/tx_heap.go deleted file mode 100644 index c6562fd9b0..0000000000 --- a/plugin/evm/tx_heap.go +++ /dev/null @@ -1,164 +0,0 @@ -// (c) 2020-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "container/heap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/plugin/evm/atomic" -) - -// txEntry is used to track the [gasPrice] transactions pay to be included in -// the mempool. 
-type txEntry struct { - id ids.ID - gasPrice uint64 - tx *atomic.Tx - index int -} - -// internalTxHeap is used to track pending atomic transactions by [gasPrice] -type internalTxHeap struct { - isMinHeap bool - items []*txEntry - lookup map[ids.ID]*txEntry -} - -func newInternalTxHeap(items int, isMinHeap bool) *internalTxHeap { - return &internalTxHeap{ - isMinHeap: isMinHeap, - items: make([]*txEntry, 0, items), - lookup: map[ids.ID]*txEntry{}, - } -} - -func (th internalTxHeap) Len() int { return len(th.items) } - -func (th internalTxHeap) Less(i, j int) bool { - if th.isMinHeap { - return th.items[i].gasPrice < th.items[j].gasPrice - } - return th.items[i].gasPrice > th.items[j].gasPrice -} - -func (th internalTxHeap) Swap(i, j int) { - th.items[i], th.items[j] = th.items[j], th.items[i] - th.items[i].index = i - th.items[j].index = j -} - -func (th *internalTxHeap) Push(x interface{}) { - entry := x.(*txEntry) - if th.Has(entry.id) { - return - } - th.items = append(th.items, entry) - th.lookup[entry.id] = entry -} - -func (th *internalTxHeap) Pop() interface{} { - n := len(th.items) - item := th.items[n-1] - th.items[n-1] = nil // avoid memory leak - th.items = th.items[0 : n-1] - delete(th.lookup, item.id) - return item -} - -func (th *internalTxHeap) Get(id ids.ID) (*txEntry, bool) { - entry, ok := th.lookup[id] - if !ok { - return nil, false - } - return entry, true -} - -func (th *internalTxHeap) Has(id ids.ID) bool { - _, has := th.Get(id) - return has -} - -type txHeap struct { - maxHeap *internalTxHeap - minHeap *internalTxHeap -} - -func newTxHeap(maxSize int) *txHeap { - return &txHeap{ - maxHeap: newInternalTxHeap(maxSize, false), - minHeap: newInternalTxHeap(maxSize, true), - } -} - -func (th *txHeap) Push(tx *atomic.Tx, gasPrice uint64) { - txID := tx.ID() - oldLen := th.Len() - heap.Push(th.maxHeap, &txEntry{ - id: txID, - gasPrice: gasPrice, - tx: tx, - index: oldLen, - }) - heap.Push(th.minHeap, &txEntry{ - id: txID, - gasPrice: gasPrice, - 
tx: tx, - index: oldLen, - }) -} - -// Assumes there is non-zero items in [txHeap] -func (th *txHeap) PeekMax() (*atomic.Tx, uint64) { - txEntry := th.maxHeap.items[0] - return txEntry.tx, txEntry.gasPrice -} - -// Assumes there is non-zero items in [txHeap] -func (th *txHeap) PeekMin() (*atomic.Tx, uint64) { - txEntry := th.minHeap.items[0] - return txEntry.tx, txEntry.gasPrice -} - -// Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMax() *atomic.Tx { - return th.Remove(th.maxHeap.items[0].id) -} - -// Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMin() *atomic.Tx { - return th.Remove(th.minHeap.items[0].id) -} - -func (th *txHeap) Remove(id ids.ID) *atomic.Tx { - maxEntry, ok := th.maxHeap.Get(id) - if !ok { - return nil - } - heap.Remove(th.maxHeap, maxEntry.index) - - minEntry, ok := th.minHeap.Get(id) - if !ok { - // This should never happen, as that would mean the heaps are out of - // sync. - return nil - } - return heap.Remove(th.minHeap, minEntry.index).(*txEntry).tx -} - -func (th *txHeap) Len() int { - return th.maxHeap.Len() -} - -func (th *txHeap) Get(id ids.ID) (*atomic.Tx, bool) { - txEntry, ok := th.maxHeap.Get(id) - if !ok { - return nil, false - } - return txEntry.tx, true -} - -func (th *txHeap) Has(id ids.ID) bool { - return th.maxHeap.Has(id) -} diff --git a/plugin/evm/tx_heap_test.go b/plugin/evm/tx_heap_test.go deleted file mode 100644 index a054b7362e..0000000000 --- a/plugin/evm/tx_heap_test.go +++ /dev/null @@ -1,143 +0,0 @@ -// (c) 2019-2021, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package evm - -import ( - "testing" - - "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/stretchr/testify/assert" -) - -func TestTxHeap(t *testing.T) { - var ( - tx0 = &atomic.Tx{ - UnsignedAtomicTx: &atomic.UnsignedImportTx{ - NetworkID: 0, - }, - } - tx0Bytes = []byte{0} - - tx1 = &atomic.Tx{ - UnsignedAtomicTx: &atomic.UnsignedImportTx{ - NetworkID: 1, - }, - } - tx1Bytes = []byte{1} - - tx2 = &atomic.Tx{ - UnsignedAtomicTx: &atomic.UnsignedImportTx{ - NetworkID: 2, - }, - } - tx2Bytes = []byte{2} - ) - tx0.Initialize(tx0Bytes, tx0Bytes) - tx1.Initialize(tx1Bytes, tx1Bytes) - tx2.Initialize(tx2Bytes, tx2Bytes) - - id0 := tx0.ID() - id1 := tx1.ID() - id2 := tx2.ID() - - t.Run("add/remove single entry", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - assert := assert.New(t) - h.Push(tx0, 5) - assert.True(h.Has(id0)) - gTx0, gHas0 := h.Get(id0) - assert.Equal(tx0, gTx0) - assert.True(gHas0) - h.Remove(id0) - assert.False(h.Has(id0)) - assert.Zero(h.Len()) - h.Push(tx0, 5) - assert.True(h.Has(id0)) - assert.Equal(1, h.Len()) - }) - - t.Run("add other items", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - assert := assert.New(t) - h.Push(tx1, 10) - assert.True(h.Has(id1)) - gTx1, gHas1 := h.Get(id1) - assert.Equal(tx1, gTx1) - assert.True(gHas1) - - h.Push(tx2, 2) - assert.True(h.Has(id2)) - gTx2, gHas2 := h.Get(id2) - assert.Equal(tx2, gTx2) - assert.True(gHas2) - - assert.Equal(id1, h.PopMax().ID()) - assert.Equal(id2, h.PopMax().ID()) - - assert.False(h.Has(id0)) - gTx0, gHas0 := h.Get(id0) - assert.Nil(gTx0) - assert.False(gHas0) - - assert.False(h.Has(id1)) - gTx1, gHas1 = h.Get(id1) - assert.Nil(gTx1) - assert.False(gHas1) - - assert.False(h.Has(id2)) - gTx2, gHas2 = h.Get(id2) - assert.Nil(gTx2) - assert.False(gHas2) - }) - - verifyRemovalOrder := func(t *testing.T, h *txHeap) { - t.Helper() - - assert := assert.New(t) - assert.Equal(id2, h.PopMin().ID()) - assert.True(h.Has(id0)) - 
assert.True(h.Has(id1)) - assert.False(h.Has(id2)) - assert.Equal(id0, h.PopMin().ID()) - assert.False(h.Has(id0)) - assert.True(h.Has(id1)) - assert.False(h.Has(id2)) - assert.Equal(id1, h.PopMin().ID()) - assert.False(h.Has(id0)) - assert.False(h.Has(id1)) - assert.False(h.Has(id2)) - } - - t.Run("drop", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - h.Push(tx0, 5) - h.Push(tx1, 10) - h.Push(tx2, 2) - verifyRemovalOrder(t, h) - }) - t.Run("drop (alt order)", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - h.Push(tx0, 5) - h.Push(tx2, 2) - h.Push(tx1, 10) - verifyRemovalOrder(t, h) - }) - t.Run("drop (alt order 2)", func(t *testing.T) { - h := newTxHeap(3) - assert.Zero(t, h.Len()) - - h.Push(tx2, 2) - h.Push(tx0, 5) - h.Push(tx1, 10) - verifyRemovalOrder(t, h) - }) -} From 0b10cc497eb3f5de971d0f0f3eca4f076887c294 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sun, 15 Dec 2024 16:08:18 +0300 Subject: [PATCH 09/91] move database to own pkg --- plugin/evm/atomic_trie.go | 33 ++++++++--------- .../wrapped_database.go} | 32 +++++++++-------- plugin/evm/syncervm_test.go | 7 ++-- plugin/evm/vm.go | 35 ++++++++++--------- 4 files changed, 57 insertions(+), 50 deletions(-) rename plugin/evm/{database.go => database/wrapped_database.go} (55%) diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic_trie.go index d734268e23..f6add46623 100644 --- a/plugin/evm/atomic_trie.go +++ b/plugin/evm/atomic_trie.go @@ -9,7 +9,7 @@ import ( avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/database" + avalanchedatabase "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/atomic" + 
"github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/trie/trienode" "github.com/ava-labs/coreth/triedb" @@ -117,12 +118,12 @@ type AtomicTrieIterator interface { // atomicTrie implements the AtomicTrie interface type atomicTrie struct { - commitInterval uint64 // commit interval, same as commitHeightInterval by default - metadataDB database.Database // Underlying database containing the atomic trie metadata - trieDB *triedb.Database // Trie database - lastCommittedRoot common.Hash // trie root of the most recent commit - lastCommittedHeight uint64 // index height of the most recent commit - lastAcceptedRoot common.Hash // most recent trie root passed to accept trie or the root of the atomic trie on intialization. + commitInterval uint64 // commit interval, same as commitHeightInterval by default + metadataDB avalanchedatabase.Database // Underlying database containing the atomic trie metadata + trieDB *triedb.Database // Trie database + lastCommittedRoot common.Hash // trie root of the most recent commit + lastCommittedHeight uint64 // index height of the most recent commit + lastAcceptedRoot common.Hash // most recent trie root passed to accept trie or the root of the atomic trie on intialization. codec codec.Manager memoryCap common.StorageSize tipBuffer *core.BoundedBuffer[common.Hash] @@ -131,7 +132,7 @@ type atomicTrie struct { // newAtomicTrie returns a new instance of a atomicTrie with a configurable commitHeightInterval, used in testing. // Initializes the trie before returning it. 
func newAtomicTrie( - atomicTrieDB database.Database, metadataDB database.Database, + atomicTrieDB avalanchedatabase.Database, metadataDB avalanchedatabase.Database, codec codec.Manager, lastAcceptedHeight uint64, commitHeightInterval uint64, ) (*atomicTrie, error) { root, height, err := lastCommittedRootIfExists(metadataDB) @@ -153,7 +154,7 @@ func newAtomicTrie( } trieDB := triedb.NewDatabase( - rawdb.NewDatabase(Database{atomicTrieDB}), + rawdb.NewDatabase(database.WrapDatabase(atomicTrieDB)), &triedb.Config{ HashDB: &hashdb.Config{ CleanCacheSize: 64 * units.MiB, // Allocate 64MB of memory for clean cache @@ -182,17 +183,17 @@ func newAtomicTrie( // else returns empty common.Hash{} and 0 // returns error only if there are issues with the underlying data store // or if values present in the database are not as expected -func lastCommittedRootIfExists(db database.Database) (common.Hash, uint64, error) { +func lastCommittedRootIfExists(db avalanchedatabase.Database) (common.Hash, uint64, error) { // read the last committed entry if it exists and set the root hash lastCommittedHeightBytes, err := db.Get(lastCommittedKey) switch { - case err == database.ErrNotFound: + case err == avalanchedatabase.ErrNotFound: return common.Hash{}, 0, nil case err != nil: return common.Hash{}, 0, err } - height, err := database.ParseUInt64(lastCommittedHeightBytes) + height, err := avalanchedatabase.ParseUInt64(lastCommittedHeightBytes) if err != nil { return common.Hash{}, 0, fmt.Errorf("expected value at lastCommittedKey to be a valid uint64: %w", err) } @@ -251,7 +252,7 @@ func (a *atomicTrie) LastCommitted() (common.Hash, uint64) { // updateLastCommitted adds [height] -> [root] to the index and marks it as the last committed // root/height pair. 
func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error { - heightBytes := database.PackUInt64(height) + heightBytes := avalanchedatabase.PackUInt64(height) // now save the trie hash against the height it was committed at if err := a.metadataDB.Put(heightBytes, root[:]); err != nil { @@ -297,7 +298,7 @@ func (a *atomicTrie) Root(height uint64) (common.Hash, error) { // getRoot is a helper function to return the committed atomic trie root hash at [height] // from [metadataDB]. -func getRoot(metadataDB database.Database, height uint64) (common.Hash, error) { +func getRoot(metadataDB avalanchedatabase.Database, height uint64) (common.Hash, error) { if height == 0 { // if root is queried at height == 0, return the empty root hash // this may occur if peers ask for the most recent state summary @@ -305,10 +306,10 @@ func getRoot(metadataDB database.Database, height uint64) (common.Hash, error) { return types.EmptyRootHash, nil } - heightBytes := database.PackUInt64(height) + heightBytes := avalanchedatabase.PackUInt64(height) hash, err := metadataDB.Get(heightBytes) switch { - case err == database.ErrNotFound: + case err == avalanchedatabase.ErrNotFound: return common.Hash{}, nil case err != nil: return common.Hash{}, err diff --git a/plugin/evm/database.go b/plugin/evm/database/wrapped_database.go similarity index 55% rename from plugin/evm/database.go rename to plugin/evm/database/wrapped_database.go index 479c995ba3..f8a36913bb 100644 --- a/plugin/evm/database.go +++ b/plugin/evm/database/wrapped_database.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package database import ( "errors" @@ -11,25 +11,29 @@ import ( ) var ( - _ ethdb.KeyValueStore = &Database{} + _ ethdb.KeyValueStore = ðDbWrapper{} ErrSnapshotNotSupported = errors.New("snapshot is not supported") ) -// Database implements ethdb.Database -type Database struct{ database.Database } +// ethDbWrapper implements ethdb.Database +type ethDbWrapper struct{ database.Database } + +func WrapDatabase(db database.Database) ethdb.KeyValueStore { return ethDbWrapper{db} } // Stat implements ethdb.Database -func (db Database) Stat(string) (string, error) { return "", database.ErrNotFound } +func (db ethDbWrapper) Stat(string) (string, error) { return "", database.ErrNotFound } // NewBatch implements ethdb.Database -func (db Database) NewBatch() ethdb.Batch { return Batch{db.Database.NewBatch()} } +func (db ethDbWrapper) NewBatch() ethdb.Batch { return wrappedBatch{db.Database.NewBatch()} } // NewBatchWithSize implements ethdb.Database // TODO: propagate size through avalanchego Database interface -func (db Database) NewBatchWithSize(size int) ethdb.Batch { return Batch{db.Database.NewBatch()} } +func (db ethDbWrapper) NewBatchWithSize(size int) ethdb.Batch { + return wrappedBatch{db.Database.NewBatch()} +} -func (db Database) NewSnapshot() (ethdb.Snapshot, error) { +func (db ethDbWrapper) NewSnapshot() (ethdb.Snapshot, error) { return nil, ErrSnapshotNotSupported } @@ -37,7 +41,7 @@ func (db Database) NewSnapshot() (ethdb.Snapshot, error) { // // Note: This method assumes that the prefix is NOT part of the start, so there's // no need for the caller to prepend the prefix to the start. -func (db Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { +func (db ethDbWrapper) NewIterator(prefix []byte, start []byte) ethdb.Iterator { // avalanchego's database implementation assumes that the prefix is part of the // start, so it is added here (if it is provided). 
if len(prefix) > 0 { @@ -50,15 +54,15 @@ func (db Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { } // NewIteratorWithStart implements ethdb.Database -func (db Database) NewIteratorWithStart(start []byte) ethdb.Iterator { +func (db ethDbWrapper) NewIteratorWithStart(start []byte) ethdb.Iterator { return db.Database.NewIteratorWithStart(start) } -// Batch implements ethdb.Batch -type Batch struct{ database.Batch } +// wrappedBatch implements ethdb.wrappedBatch +type wrappedBatch struct{ database.Batch } // ValueSize implements ethdb.Batch -func (batch Batch) ValueSize() int { return batch.Batch.Size() } +func (batch wrappedBatch) ValueSize() int { return batch.Batch.Size() } // Replay implements ethdb.Batch -func (batch Batch) Replay(w ethdb.KeyValueWriter) error { return batch.Batch.Replay(w) } +func (batch wrappedBatch) Replay(w ethdb.KeyValueWriter) error { return batch.Batch.Replay(w) } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index bd7f993cb5..24491d9fa5 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/require" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database" + avalanchedatabase "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -36,6 +36,7 @@ import ( "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/predicate" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" @@ -430,7 +431,7 @@ type syncVMSetup struct { fundedAccounts map[*keystore.Key]*types.StateAccount syncerVM *VM - syncerDB database.Database + syncerDB avalanchedatabase.Database syncerEngineChan 
<-chan commonEng.Message syncerAtomicMemory *avalancheatomic.Memory shutdownOnceSyncerVM *shutdownOnceVM @@ -491,7 +492,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { if test.expectedErr != nil { require.ErrorIs(err, test.expectedErr) // Note we re-open the database here to avoid a closed error when the test is for a shutdown VM. - chaindb := Database{prefixdb.NewNested(ethDBPrefix, syncerVM.db)} + chaindb := database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, syncerVM.db)) assertSyncPerformedHeights(t, chaindb, map[uint64]struct{}{}) return } diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index c1b9426bea..c83557d594 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -42,6 +42,7 @@ import ( "github.com/ava-labs/coreth/peer" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/triedb" "github.com/ava-labs/coreth/triedb/hashdb" @@ -75,7 +76,7 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/database" + avalanchedatabase "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" @@ -229,18 +230,18 @@ type VM struct { db *versiondb.Database // metadataDB is used to store one off keys. - metadataDB database.Database + metadataDB avalanchedatabase.Database // [chaindb] is the database supplied to the Ethereum backend chaindb ethdb.Database // [acceptedBlockDB] is the database to store the last accepted // block. 
- acceptedBlockDB database.Database + acceptedBlockDB avalanchedatabase.Database // [warpDB] is used to store warp message signatures // set to a prefixDB with the prefix [warpPrefix] - warpDB database.Database + warpDB avalanchedatabase.Database toEngine chan<- commonEng.Message @@ -329,7 +330,7 @@ func (vm *VM) GetActivationTime() time.Time { func (vm *VM) Initialize( _ context.Context, chainCtx *snow.Context, - db database.Database, + db avalanchedatabase.Database, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, @@ -560,7 +561,7 @@ func (vm *VM) Initialize( // clear warpdb on initialization if config enabled if vm.config.PruneWarpDB { - if err := database.Clear(vm.warpDB, ethdb.IdealBatchSize); err != nil { + if err := avalanchedatabase.Clear(vm.warpDB, ethdb.IdealBatchSize); err != nil { return fmt.Errorf("failed to prune warpDB: %w", err) } } @@ -1376,10 +1377,10 @@ func (vm *VM) ParseEthBlock(b []byte) (*types.Block, error) { // by ChainState. func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { ethBlock := vm.blockChain.GetBlockByHash(common.Hash(id)) - // If [ethBlock] is nil, return [database.ErrNotFound] here + // If [ethBlock] is nil, return [avalanchedatabase.ErrNotFound] here // so that the miss is considered cacheable. if ethBlock == nil { - return nil, database.ErrNotFound + return nil, avalanchedatabase.ErrNotFound } // Note: the status of block is set by ChainState return vm.newBlock(ethBlock) @@ -1401,7 +1402,7 @@ func (vm *VM) GetAcceptedBlock(ctx context.Context, blkID ids.ID) (snowman.Block if acceptedBlkID != blkID { // The provided block is not accepted. - return nil, database.ErrNotFound + return nil, avalanchedatabase.ErrNotFound } return blk, nil } @@ -1427,17 +1428,17 @@ func (vm *VM) VerifyHeightIndex(context.Context) error { // GetBlockIDAtHeight returns the canonical block at [height]. 
// Note: the engine assumes that if a block is not found at [height], then -// [database.ErrNotFound] will be returned. This indicates that the VM has state +// [avalanchedatabase.ErrNotFound] will be returned. This indicates that the VM has state // synced and does not have all historical blocks available. func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { lastAcceptedBlock := vm.LastAcceptedBlock() if lastAcceptedBlock.Height() < height { - return ids.ID{}, database.ErrNotFound + return ids.ID{}, avalanchedatabase.ErrNotFound } hash := vm.blockChain.GetCanonicalHash(height) if hash == (common.Hash{}) { - return ids.ID{}, database.ErrNotFound + return ids.ID{}, avalanchedatabase.ErrNotFound } return ids.ID(hash), nil } @@ -1514,11 +1515,11 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { } // initializeDBs initializes the databases used by the VM. -// coreth always uses the avalanchego provided database. -func (vm *VM) initializeDBs(db database.Database) error { +// coreth always uses the avalanchego provided avalanchedatabase. +func (vm *VM) initializeDBs(db avalanchedatabase.Database) error { // Use NewNested rather than New so that the structure of the database // remains the same regardless of the provided baseDB type. 
- vm.chaindb = rawdb.NewDatabase(Database{prefixdb.NewNested(ethDBPrefix, db)}) + vm.chaindb = rawdb.NewDatabase(database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, db))) vm.db = versiondb.New(db) vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.db) vm.metadataDB = prefixdb.New(metadataPrefix, vm.db) @@ -1556,7 +1557,7 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er func (vm *VM) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) { if tx, height, err := vm.atomicTxRepository.GetByTxID(txID); err == nil { return tx, atomic.Accepted, height, nil - } else if err != database.ErrNotFound { + } else if err != avalanchedatabase.ErrNotFound { return nil, atomic.Unknown, 0, err } tx, dropped, found := vm.mempool.GetTx(txID) @@ -1802,7 +1803,7 @@ func (vm *VM) readLastAccepted() (common.Hash, uint64, error) { // initialize state with the genesis block. lastAcceptedBytes, lastAcceptedErr := vm.acceptedBlockDB.Get(lastAcceptedKey) switch { - case lastAcceptedErr == database.ErrNotFound: + case lastAcceptedErr == avalanchedatabase.ErrNotFound: // If there is nothing in the database, return the genesis block hash and height return vm.genesisHash, 0, nil case lastAcceptedErr != nil: From 8eb3056b810ae28f531f580771f552e8f53f8783 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 18 Dec 2024 17:58:12 +0300 Subject: [PATCH 10/91] move atomic trie/state/backend to separaet pkg --- go.sum | 2 - plugin/evm/{ => atomic}/atomic_backend.go | 29 +++- plugin/evm/{ => atomic}/atomic_state.go | 5 +- plugin/evm/{ => atomic}/atomic_syncer.go | 2 +- plugin/evm/{ => atomic}/atomic_syncer_test.go | 5 +- plugin/evm/{ => atomic}/atomic_trie.go | 5 +- .../evm/{ => atomic}/atomic_trie_iterator.go | 2 +- .../{ => atomic}/atomic_trie_iterator_test.go | 24 +-- plugin/evm/{ => atomic}/atomic_trie_test.go | 154 +++++------------- .../evm/{ => atomic}/atomic_tx_repository.go | 43 +++-- .../{ => atomic}/atomic_tx_repository_test.go | 37 ++--- 
plugin/evm/atomic/export_tx.go | 2 +- plugin/evm/atomic/import_tx.go | 4 +- plugin/evm/atomic/test_shared_memories.go | 78 +++++++++ plugin/evm/atomic/test_tx.go | 2 +- plugin/evm/atomic/tx.go | 4 +- plugin/evm/atomic/utils.go | 10 ++ plugin/evm/block.go | 6 +- plugin/evm/export_tx_test.go | 38 ++--- plugin/evm/import_tx_test.go | 11 +- plugin/evm/syncervm_client.go | 124 ++++++++------ plugin/evm/syncervm_server.go | 11 +- plugin/evm/syncervm_test.go | 39 +++-- plugin/evm/tx_test.go | 2 +- plugin/evm/vm.go | 59 +++---- plugin/evm/vm_test.go | 65 +++----- utils/snow.go | 26 ++- 27 files changed, 409 insertions(+), 380 deletions(-) rename plugin/evm/{ => atomic}/atomic_backend.go (96%) rename plugin/evm/{ => atomic}/atomic_state.go (97%) rename plugin/evm/{ => atomic}/atomic_syncer.go (99%) rename plugin/evm/{ => atomic}/atomic_syncer_test.go (97%) rename plugin/evm/{ => atomic}/atomic_trie.go (99%) rename plugin/evm/{ => atomic}/atomic_trie_iterator.go (99%) rename plugin/evm/{ => atomic}/atomic_trie_iterator_test.go (83%) rename plugin/evm/{ => atomic}/atomic_trie_test.go (79%) rename plugin/evm/{ => atomic}/atomic_tx_repository.go (91%) rename plugin/evm/{ => atomic}/atomic_tx_repository_test.go (91%) create mode 100644 plugin/evm/atomic/test_shared_memories.go diff --git a/go.sum b/go.sum index 435d1113c8..7fe7f45671 100644 --- a/go.sum +++ b/go.sum @@ -54,8 +54,6 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa h1:8eSy+tegp9Kq2zft54wk0FyWU87utdrVwsj9EBIb/NA= -github.com/ava-labs/avalanchego 
v1.12.1-0.20241209214115-1dc4192013aa/go.mod h1:256D2s2FIKo07uUeY25uDXFuqBo6TeWIJqeEA+Xchwk= github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1 h1:3Zqc3TxHt6gsdSFD/diW2f2jT2oCx0rppN7yoXxviQg= github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1/go.mod h1:Wxl57pLTlR/8pkaNtou8HiynG+xdgiF4YnzFuJyqSDg= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/plugin/evm/atomic_backend.go b/plugin/evm/atomic/atomic_backend.go similarity index 96% rename from plugin/evm/atomic_backend.go rename to plugin/evm/atomic/atomic_backend.go index 2420021d6f..eefb254321 100644 --- a/plugin/evm/atomic_backend.go +++ b/plugin/evm/atomic/atomic_backend.go @@ -1,9 +1,10 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( + "context" "encoding/binary" "fmt" "time" @@ -16,14 +17,28 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/plugin/evm/atomic" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) +// Syncer represents a step in state sync, +// along with Start/Done methods to control +// and monitor progress. +// Error returns an error if any was encountered. 
+type Syncer interface { + Start(ctx context.Context) error + Done() <-chan error +} + var _ AtomicBackend = &atomicBackend{} +var ( + // Prefixes for atomic trie + atomicTrieDBPrefix = []byte("atomicTrieDB") + atomicTrieMetaDBPrefix = []byte("atomicTrieMetaDB") +) + // AtomicBackend abstracts the verification and processing // of atomic transactions type AtomicBackend interface { @@ -34,7 +49,7 @@ type AtomicBackend interface { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. - InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) + InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) // Returns an AtomicState corresponding to a block hash that has been inserted // but not Accepted or Rejected yet. @@ -152,7 +167,7 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error { // iterate over the transactions, indexing them if the height is < commit height // otherwise, add the atomic operations from the transaction to the uncommittedOpsMap height = binary.BigEndian.Uint64(iter.Key()) - txs, err := atomic.ExtractAtomicTxs(iter.Value(), true, a.codec) + txs, err := ExtractAtomicTxs(iter.Value(), true, a.codec) if err != nil { return err } @@ -397,7 +412,7 @@ func (a *atomicBackend) SetLastAccepted(lastAcceptedHash common.Hash) { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. 
-func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) { +func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) { // access the atomic trie at the parent block parentRoot, err := a.getAtomicRootAt(parentHash) if err != nil { @@ -460,11 +475,11 @@ func (a *atomicBackend) AtomicTrie() AtomicTrie { // mergeAtomicOps merges atomic requests represented by [txs] // to the [output] map, depending on whether [chainID] is present in the map. -func mergeAtomicOps(txs []*atomic.Tx) (map[ids.ID]*avalancheatomic.Requests, error) { +func mergeAtomicOps(txs []*Tx) (map[ids.ID]*avalancheatomic.Requests, error) { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. - copyTxs := make([]*atomic.Tx, len(txs)) + copyTxs := make([]*Tx, len(txs)) copy(copyTxs, txs) utils.Sort(copyTxs) txs = copyTxs diff --git a/plugin/evm/atomic_state.go b/plugin/evm/atomic/atomic_state.go similarity index 97% rename from plugin/evm/atomic_state.go rename to plugin/evm/atomic/atomic_state.go index 911f1afb3a..5b64145d62 100644 --- a/plugin/evm/atomic_state.go +++ b/plugin/evm/atomic/atomic_state.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "fmt" @@ -9,7 +9,6 @@ import ( avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -37,7 +36,7 @@ type atomicState struct { backend *atomicBackend blockHash common.Hash blockHeight uint64 - txs []*atomic.Tx + txs []*Tx atomicOps map[ids.ID]*avalancheatomic.Requests atomicRoot common.Hash } diff --git a/plugin/evm/atomic_syncer.go b/plugin/evm/atomic/atomic_syncer.go similarity index 99% rename from plugin/evm/atomic_syncer.go rename to plugin/evm/atomic/atomic_syncer.go index d68d61d597..daffb9d771 100644 --- a/plugin/evm/atomic_syncer.go +++ b/plugin/evm/atomic/atomic_syncer.go @@ -1,7 +1,7 @@ // (c) 2019-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "bytes" diff --git a/plugin/evm/atomic_syncer_test.go b/plugin/evm/atomic/atomic_syncer_test.go similarity index 97% rename from plugin/evm/atomic_syncer_test.go rename to plugin/evm/atomic/atomic_syncer_test.go index 86589cc4d8..140a627710 100644 --- a/plugin/evm/atomic_syncer_test.go +++ b/plugin/evm/atomic/atomic_syncer_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "bytes" @@ -25,6 +25,7 @@ import ( "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/triedb" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" ) @@ -56,7 +57,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight if err != nil { t.Fatal("could not initialize atomix tx repository", err) } - atomicBackend, err := NewAtomicBackend(clientDB, testSharedMemory(), nil, repo, 0, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(clientDB, utils.TestSharedMemory(), nil, repo, 0, common.Hash{}, commitInterval) if err != nil { t.Fatal("could not initialize atomic backend", err) } diff --git a/plugin/evm/atomic_trie.go b/plugin/evm/atomic/atomic_trie.go similarity index 99% rename from plugin/evm/atomic_trie.go rename to plugin/evm/atomic/atomic_trie.go index f6add46623..bbb299a391 100644 --- a/plugin/evm/atomic_trie.go +++ b/plugin/evm/atomic/atomic_trie.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "fmt" @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/trie/trienode" @@ -225,7 +224,7 @@ func (a *atomicTrie) commit(height uint64, root common.Hash) error { func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { for blockchainID, requests := range atomicOps { - valueBytes, err := a.codec.Marshal(atomic.CodecVersion, requests) + valueBytes, err := a.codec.Marshal(CodecVersion, requests) if err != nil { // highly unlikely but possible if atomic.Element // has a change that is unsupported by the codec diff --git a/plugin/evm/atomic_trie_iterator.go b/plugin/evm/atomic/atomic_trie_iterator.go similarity index 99% rename from plugin/evm/atomic_trie_iterator.go rename to plugin/evm/atomic/atomic_trie_iterator.go index 2bdf90b581..20be76416e 100644 --- a/plugin/evm/atomic_trie_iterator.go +++ b/plugin/evm/atomic/atomic_trie_iterator.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "encoding/binary" diff --git a/plugin/evm/atomic_trie_iterator_test.go b/plugin/evm/atomic/atomic_trie_iterator_test.go similarity index 83% rename from plugin/evm/atomic_trie_iterator_test.go rename to plugin/evm/atomic/atomic_trie_iterator_test.go index 50ba586ffd..8766752b28 100644 --- a/plugin/evm/atomic_trie_iterator_test.go +++ b/plugin/evm/atomic/atomic_trie_iterator_test.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "testing" @@ -10,23 +10,17 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" + avalancheutils "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/ava-labs/coreth/plugin/evm/atomic" ) -func testSharedMemory() avalancheatomic.SharedMemory { - m := avalancheatomic.NewMemory(memdb.New()) - return m.NewSharedMemory(testCChainID) -} - func TestIteratorCanIterate(t *testing.T) { lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) assert.NoError(t, err) @@ -38,7 +32,7 @@ func TestIteratorCanIterate(t *testing.T) { // create an atomic trie // on create it will initialize all the transactions from the above atomic repository - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 100) + atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 100) assert.NoError(t, err) atomicTrie1 := atomicBackend.AtomicTrie() @@ -51,7 +45,7 @@ func TestIteratorCanIterate(t *testing.T) { // iterate on a new atomic trie to make sure there is no resident state affecting the data and the // iterator - atomicBackend2, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 100) + atomicBackend2, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 100) assert.NoError(t, err) atomicTrie2 := atomicBackend2.AtomicTrie() lastCommittedHash2, lastCommittedHeight2 := atomicTrie2.LastCommitted() @@ -66,7 +60,7 @@ func 
TestIteratorHandlesInvalidData(t *testing.T) { require := require.New(t) lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) require.NoError(err) @@ -79,7 +73,7 @@ func TestIteratorHandlesInvalidData(t *testing.T) { // create an atomic trie // on create it will initialize all the transactions from the above atomic repository commitInterval := uint64(100) - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, commitInterval) require.NoError(err) atomicTrie := atomicBackend.AtomicTrie() @@ -94,7 +88,7 @@ func TestIteratorHandlesInvalidData(t *testing.T) { // handles an error when it runs into an unexpected key-value pair in the trie. atomicTrieSnapshot, err := atomicTrie.OpenTrie(lastCommittedHash) require.NoError(err) - require.NoError(atomicTrieSnapshot.Update(utils.RandomBytes(50), utils.RandomBytes(50))) + require.NoError(atomicTrieSnapshot.Update(avalancheutils.RandomBytes(50), avalancheutils.RandomBytes(50))) nextRoot, nodes, err := atomicTrieSnapshot.Commit(false) require.NoError(err) diff --git a/plugin/evm/atomic_trie_test.go b/plugin/evm/atomic/atomic_trie_test.go similarity index 79% rename from plugin/evm/atomic_trie_test.go rename to plugin/evm/atomic/atomic_trie_test.go index 2a82964e94..4c9c84468a 100644 --- a/plugin/evm/atomic_trie_test.go +++ b/plugin/evm/atomic/atomic_trie_test.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "encoding/binary" @@ -19,21 +19,13 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" ) const testCommitInterval = 100 -func mustAtomicOps(tx *atomic.Tx) map[ids.ID]*avalancheatomic.Requests { - id, reqs, err := tx.AtomicOps() - if err != nil { - panic(err) - } - return map[ids.ID]*avalancheatomic.Requests{id: reqs} -} - // indexAtomicTxs updates [tr] with entries in [atomicOps] at height by creating // a new snapshot, calculating a new root, and calling InsertTrie followed // by AcceptTrie on the new root. @@ -139,7 +131,7 @@ func TestAtomicTrieInitialize(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) if err != nil { t.Fatal(err) @@ -148,7 +140,7 @@ func TestAtomicTrieInitialize(t *testing.T) { writeTxs(t, repo, 1, test.lastAcceptedHeight+1, test.numTxsPerBlock, nil, operationsMap) // Construct the atomic trie for the first time - atomicBackend1, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + atomicBackend1, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -164,7 +156,7 @@ func TestAtomicTrieInitialize(t *testing.T) { verifyOperations(t, atomicTrie1, codec, rootHash1, 1, test.expectedCommitHeight, operationsMap) // Construct the atomic trie again (on the same database) and ensure the last accepted root is correct. 
- atomicBackend2, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + atomicBackend2, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -173,7 +165,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // Construct the atomic trie again (on an empty database) and ensure that it produces the same hash. atomicBackend3, err := NewAtomicBackend( - versiondb.New(memdb.New()), testSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, + versiondb.New(memdb.New()), utils.TestSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, ) if err != nil { t.Fatal(err) @@ -188,7 +180,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // during the initialization phase will cause an invalid root when indexing continues. nextCommitHeight := nearestCommitHeight(test.lastAcceptedHeight+test.commitInterval, test.commitInterval) for i := test.lastAcceptedHeight + 1; i <= nextCommitHeight; i++ { - txs := atomic.NewTestTxs(test.numTxsPerBlock(i)) + txs := NewTestTxs(test.numTxsPerBlock(i)) if err := repo.Write(i, txs); err != nil { t.Fatal(err) } @@ -211,7 +203,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // Generate a new atomic trie to compare the root against. 
atomicBackend4, err := NewAtomicBackend( - versiondb.New(memdb.New()), testSharedMemory(), nil, repo, nextCommitHeight, common.Hash{}, test.commitInterval, + versiondb.New(memdb.New()), utils.TestSharedMemory(), nil, repo, nextCommitHeight, common.Hash{}, test.commitInterval, ) if err != nil { t.Fatal(err) @@ -228,14 +220,14 @@ func TestAtomicTrieInitialize(t *testing.T) { func TestIndexerInitializesOnlyOnce(t *testing.T) { lastAcceptedHeight := uint64(25) db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval*/) + atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval*/) assert.NoError(t, err) atomicTrie := atomicBackend.AtomicTrie() @@ -247,11 +239,11 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { // re-initialize the atomic trie since initialize is not supposed to run again the height // at the trie should still be the old height with the old commit hash without any changes. // This scenario is not realistic, but is used to test potential double initialization behavior. 
- err = repo.Write(15, []*atomic.Tx{atomic.GenerateTestExportTx()}) + err = repo.Write(15, []*Tx{GenerateTestExportTx()}) assert.NoError(t, err) // Re-initialize the atomic trie - atomicBackend, err = NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval */) + atomicBackend, err = NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval */) assert.NoError(t, err) atomicTrie = atomicBackend.AtomicTrie() @@ -262,11 +254,11 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { func newTestAtomicTrie(t *testing.T) AtomicTrie { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, atomic.TestTxCodec, 0) + repo, err := NewAtomicTxRepository(db, TestTxCodec, 0) if err != nil { t.Fatal(err) } - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, 0, common.Hash{}, testCommitInterval) + atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, 0, common.Hash{}, testCommitInterval) if err != nil { t.Fatal(err) } @@ -282,8 +274,9 @@ func TestIndexerWriteAndRead(t *testing.T) { // process 305 blocks so that we get three commits (100, 200, 300) for height := uint64(1); height <= testCommitInterval*3+5; /*=305*/ height++ { - atomicRequests := mustAtomicOps(atomic.GenerateTestImportTx()) - err := indexAtomicTxs(atomicTrie, height, atomicRequests) + atomicRequests, err := ConvertToAtomicOps(GenerateTestImportTx()) + assert.NoError(t, err) + err = indexAtomicTxs(atomicTrie, height, atomicRequests) assert.NoError(t, err) if height%testCommitInterval == 0 { lastCommittedBlockHash, lastCommittedBlockHeight = atomicTrie.LastCommitted() @@ -313,11 +306,11 @@ func TestAtomicOpsAreNotTxOrderDependent(t *testing.T) { atomicTrie2 := newTestAtomicTrie(t) for height := uint64(0); height <= testCommitInterval; /*=205*/ height++ { - tx1 := atomic.GenerateTestImportTx() - tx2 := atomic.GenerateTestImportTx() - 
atomicRequests1, err := mergeAtomicOps([]*atomic.Tx{tx1, tx2}) + tx1 := GenerateTestImportTx() + tx2 := GenerateTestImportTx() + atomicRequests1, err := mergeAtomicOps([]*Tx{tx1, tx2}) assert.NoError(t, err) - atomicRequests2, err := mergeAtomicOps([]*atomic.Tx{tx2, tx1}) + atomicRequests2, err := mergeAtomicOps([]*Tx{tx2, tx1}) assert.NoError(t, err) err = indexAtomicTxs(atomicTrie1, height, atomicRequests1) @@ -339,7 +332,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { commitInterval := uint64(10) expectedCommitHeight := uint64(100) db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) if err != nil { t.Fatal(err) @@ -353,7 +346,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { 14: {}, } // Construct the atomic trie for the first time - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), bonusBlocks, repo, lastAcceptedHeight, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), bonusBlocks, repo, lastAcceptedHeight, common.Hash{}, commitInterval) if err != nil { t.Fatal(err) } @@ -371,7 +364,9 @@ func TestIndexingNilShouldNotImpactTrie(t *testing.T) { // operations to index ops := make([]map[ids.ID]*avalancheatomic.Requests, 0) for i := 0; i <= testCommitInterval; i++ { - ops = append(ops, mustAtomicOps(atomic.GenerateTestImportTx())) + atomicOps, err := ConvertToAtomicOps(GenerateTestImportTx()) + assert.NoError(t, err) + ops = append(ops, atomicOps) } // without nils @@ -411,75 +406,6 @@ func TestIndexingNilShouldNotImpactTrie(t *testing.T) { assert.Equal(t, root1, root2) } -type sharedMemories struct { - thisChain avalancheatomic.SharedMemory - peerChain avalancheatomic.SharedMemory - thisChainID ids.ID - peerChainID ids.ID -} - -func (s *sharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*avalancheatomic.Requests) error { - for _, reqs := range ops { - puts := 
make(map[ids.ID]*avalancheatomic.Requests) - puts[s.thisChainID] = &avalancheatomic.Requests{} - for _, key := range reqs.RemoveRequests { - val := []byte{0x1} - puts[s.thisChainID].PutRequests = append(puts[s.thisChainID].PutRequests, &avalancheatomic.Element{Key: key, Value: val}) - } - if err := s.peerChain.Apply(puts); err != nil { - return err - } - } - return nil -} - -func (s *sharedMemories) assertOpsApplied(t *testing.T, ops map[ids.ID]*avalancheatomic.Requests) { - t.Helper() - for _, reqs := range ops { - // should be able to get put requests - for _, elem := range reqs.PutRequests { - val, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) - if err != nil { - t.Fatalf("error finding puts in peerChainMemory: %s", err) - } - assert.Equal(t, elem.Value, val[0]) - } - - // should not be able to get remove requests - for _, key := range reqs.RemoveRequests { - _, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) - assert.EqualError(t, err, "not found") - } - } -} - -func (s *sharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*avalancheatomic.Requests) { - t.Helper() - for _, reqs := range ops { - // should not be able to get put requests - for _, elem := range reqs.PutRequests { - _, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) - assert.EqualError(t, err, "not found") - } - - // should be able to get remove requests (these were previously added as puts on peerChain) - for _, key := range reqs.RemoveRequests { - val, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) - assert.NoError(t, err) - assert.Equal(t, []byte{0x1}, val[0]) - } - } -} - -func newSharedMemories(atomicMemory *avalancheatomic.Memory, thisChainID, peerChainID ids.ID) *sharedMemories { - return &sharedMemories{ - thisChain: atomicMemory.NewSharedMemory(thisChainID), - peerChain: atomicMemory.NewSharedMemory(peerChainID), - thisChainID: thisChainID, - peerChainID: peerChainID, - } -} - func TestApplyToSharedMemory(t *testing.T) { type test struct { 
commitInterval, lastAcceptedHeight uint64 @@ -511,9 +437,9 @@ func TestApplyToSharedMemory(t *testing.T) { commitInterval: 10, lastAcceptedHeight: 25, setMarker: func(a *atomicBackend) error { - cursor := make([]byte, wrappers.LongLen+len(atomic.TestBlockchainID[:])) + cursor := make([]byte, wrappers.LongLen+len(TestBlockchainID[:])) binary.BigEndian.PutUint64(cursor, 10) - copy(cursor[wrappers.LongLen:], atomic.TestBlockchainID[:]) + copy(cursor[wrappers.LongLen:], TestBlockchainID[:]) return a.metadataDB.Put(appliedSharedMemoryCursorKey, cursor) }, expectOpsApplied: func(height uint64) bool { return height > 10 && height <= 20 }, @@ -527,7 +453,7 @@ func TestApplyToSharedMemory(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -535,7 +461,7 @@ func TestApplyToSharedMemory(t *testing.T) { // Initialize atomic repository m := avalancheatomic.NewMemory(db) - sharedMemories := newSharedMemories(m, testCChainID, atomic.TestBlockchainID) + sharedMemories := NewSharedMemories(m, ids.GenerateTestID(), TestBlockchainID) backend, err := NewAtomicBackend(db, sharedMemories.thisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) atomicTrie := backend.AtomicTrie().(*atomicTrie) @@ -558,7 +484,7 @@ func TestApplyToSharedMemory(t *testing.T) { // assert ops were applied as expected for height, ops := range operationsMap { if test.expectOpsApplied(height) { - sharedMemories.assertOpsApplied(t, ops) + sharedMemories.AssertOpsApplied(t, ops) } else { sharedMemories.assertOpsNotApplied(t, ops) } @@ -577,7 +503,7 @@ func TestApplyToSharedMemory(t *testing.T) { // assert they are as they were prior to reinitializing for height, ops := range operationsMap { if 
test.expectOpsApplied(height) { - sharedMemories.assertOpsApplied(t, ops) + sharedMemories.AssertOpsApplied(t, ops) } else { sharedMemories.assertOpsNotApplied(t, ops) } @@ -593,7 +519,7 @@ func TestApplyToSharedMemory(t *testing.T) { func BenchmarkAtomicTrieInit(b *testing.B) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -611,7 +537,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - sharedMemory := testSharedMemory() + sharedMemory := utils.TestSharedMemory() atomicBackend, err := NewAtomicBackend(db, sharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 5000) assert.NoError(b, err) atomicTrie = atomicBackend.AtomicTrie() @@ -628,7 +554,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { func BenchmarkAtomicTrieIterate(b *testing.B) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -638,7 +564,7 @@ func BenchmarkAtomicTrieIterate(b *testing.B) { assert.NoError(b, err) writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) - atomicBackend, err := NewAtomicBackend(db, testSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 5000) + atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 5000) assert.NoError(b, err) atomicTrie := atomicBackend.AtomicTrie() @@ -707,8 +633,8 @@ func BenchmarkApplyToSharedMemory(b *testing.B) { func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks uint64) { db := versiondb.New(disk) - codec := atomic.TestTxCodec - sharedMemory := testSharedMemory() + codec := TestTxCodec + sharedMemory := utils.TestSharedMemory() lastAcceptedHeight := blocks repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) @@ -720,7 +646,7 @@ func 
benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u } trie := backend.AtomicTrie() for height := uint64(1); height <= lastAcceptedHeight; height++ { - txs := atomic.NewTestTxs(constTxsPerHeight(3)(height)) + txs := NewTestTxs(constTxsPerHeight(3)(height)) ops, err := mergeAtomicOps(txs) assert.NoError(b, err) assert.NoError(b, indexAtomicTxs(trie, height, ops)) @@ -733,7 +659,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - backend.(*atomicBackend).sharedMemory = testSharedMemory() + backend.(*atomicBackend).sharedMemory = utils.TestSharedMemory() assert.NoError(b, backend.MarkApplyToSharedMemoryCursor(0)) assert.NoError(b, db.Commit()) assert.NoError(b, backend.ApplyToSharedMemory(lastAcceptedHeight)) diff --git a/plugin/evm/atomic_tx_repository.go b/plugin/evm/atomic/atomic_tx_repository.go similarity index 91% rename from plugin/evm/atomic_tx_repository.go rename to plugin/evm/atomic/atomic_tx_repository.go index d1074f60f2..d451ce9d86 100644 --- a/plugin/evm/atomic_tx_repository.go +++ b/plugin/evm/atomic/atomic_tx_repository.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package atomic import ( "encoding/binary" @@ -19,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/plugin/evm/atomic" ) const ( @@ -40,10 +39,10 @@ var ( // atomic transactions type AtomicTxRepository interface { GetIndexHeight() (uint64, error) - GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) - GetByHeight(height uint64) ([]*atomic.Tx, error) - Write(height uint64, txs []*atomic.Tx) error - WriteBonus(height uint64, txs []*atomic.Tx) error + GetByTxID(txID ids.ID) (*Tx, uint64, error) + GetByHeight(height uint64) ([]*Tx, error) + Write(height uint64, txs []*Tx) error + WriteBonus(height uint64, txs []*Tx) error IterateByHeight(start uint64) database.Iterator Codec() codec.Manager @@ -137,7 +136,7 @@ func (a *atomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) er // Get the tx iter is pointing to, len(txs) == 1 is expected here. txBytes := iterValue[wrappers.LongLen+wrappers.IntLen:] - tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) + tx, err := ExtractAtomicTx(txBytes, a.codec) if err != nil { return err } @@ -199,10 +198,10 @@ func (a *atomicTxRepository) GetIndexHeight() (uint64, error) { return indexHeight, nil } -// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*atomic.Tx] object +// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*Tx] object // if an entry is found, and returns it with the block height the atomic tx it // represents was accepted on, along with an optional error. 
-func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) { +func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { indexedTxBytes, err := a.acceptedAtomicTxDB.Get(txID[:]) if err != nil { return nil, 0, err @@ -216,7 +215,7 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) packer := wrappers.Packer{Bytes: indexedTxBytes} height := packer.UnpackLong() txBytes := packer.UnpackBytes() - tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) + tx, err := ExtractAtomicTx(txBytes, a.codec) if err != nil { return nil, 0, err } @@ -230,40 +229,40 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) // no atomic transactions in the block accepted at [height]. // If [height] is greater than the last accepted height, then this will always return // [database.ErrNotFound] -func (a *atomicTxRepository) GetByHeight(height uint64) ([]*atomic.Tx, error) { +func (a *atomicTxRepository) GetByHeight(height uint64) ([]*Tx, error) { heightBytes := make([]byte, wrappers.LongLen) binary.BigEndian.PutUint64(heightBytes, height) return a.getByHeightBytes(heightBytes) } -func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, error) { +func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*Tx, error) { txsBytes, err := a.acceptedAtomicTxByHeightDB.Get(heightBytes) if err != nil { return nil, err } - return atomic.ExtractAtomicTxsBatch(txsBytes, a.codec) + return ExtractAtomicTxsBatch(txsBytes, a.codec) } // Write updates indexes maintained on atomic txs, so they can be queried // by txID or height. This method must be called only once per height, // and [txs] must include all atomic txs for the block accepted at the // corresponding height. 
-func (a *atomicTxRepository) Write(height uint64, txs []*atomic.Tx) error { +func (a *atomicTxRepository) Write(height uint64, txs []*Tx) error { return a.write(height, txs, false) } // WriteBonus is similar to Write, except the [txID] => [height] is not // overwritten if already exists. -func (a *atomicTxRepository) WriteBonus(height uint64, txs []*atomic.Tx) error { +func (a *atomicTxRepository) WriteBonus(height uint64, txs []*Tx) error { return a.write(height, txs, true) } -func (a *atomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) error { +func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. - copyTxs := make([]*atomic.Tx, len(txs)) + copyTxs := make([]*Tx, len(txs)) copy(copyTxs, txs) utils.Sort(copyTxs) txs = copyTxs @@ -301,8 +300,8 @@ func (a *atomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) // indexTxByID writes [tx] into the [acceptedAtomicTxDB] stored as // [height] + [tx bytes] -func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) error { - txBytes, err := a.codec.Marshal(atomic.CodecVersion, tx) +func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *Tx) error { + txBytes, err := a.codec.Marshal(CodecVersion, tx) if err != nil { return err } @@ -321,8 +320,8 @@ func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) erro } // indexTxsAtHeight adds [height] -> [txs] to the [acceptedAtomicTxByHeightDB] -func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic.Tx) error { - txsBytes, err := a.codec.Marshal(atomic.CodecVersion, txs) +func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*Tx) error { + txsBytes, err := a.codec.Marshal(CodecVersion, txs) if err != nil { return err } @@ -336,7 +335,7 @@ func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs 
[]*atomic. // [tx] to the slice of transactions stored there. // This function is used while initializing the atomic repository to re-index the atomic transactions // by txID into the height -> txs index. -func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomic.Tx) error { +func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *Tx) error { txs, err := a.getByHeightBytes(heightBytes) if err != nil && err != database.ErrNotFound { return err diff --git a/plugin/evm/atomic_tx_repository_test.go b/plugin/evm/atomic/atomic_tx_repository_test.go similarity index 91% rename from plugin/evm/atomic_tx_repository_test.go rename to plugin/evm/atomic/atomic_tx_repository_test.go index 224f8fa726..4dda70ab10 100644 --- a/plugin/evm/atomic_tx_repository_test.go +++ b/plugin/evm/atomic/atomic_tx_repository_test.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package atomic import ( "encoding/binary" @@ -12,7 +12,6 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/avalanchego/codec" @@ -28,13 +27,13 @@ import ( // addTxs writes [txsPerHeight] txs for heights ranging in [fromHeight, toHeight) directly to [acceptedAtomicTxDB], // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
-func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { +func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { for height := fromHeight; height < toHeight; height++ { - txs := make([]*atomic.Tx, 0, txsPerHeight) + txs := make([]*Tx, 0, txsPerHeight) for i := 0; i < txsPerHeight; i++ { - tx := atomic.NewTestTx() + tx := NewTestTx() txs = append(txs, tx) - txBytes, err := codec.Marshal(atomic.CodecVersion, tx) + txBytes, err := codec.Marshal(CodecVersion, tx) assert.NoError(t, err) // Write atomic transactions to the [acceptedAtomicTxDB] @@ -71,10 +70,10 @@ func constTxsPerHeight(txCount int) func(uint64) int { // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight uint64, - txsPerHeight func(height uint64) int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, + txsPerHeight func(height uint64) int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, ) { for height := fromHeight; height < toHeight; height++ { - txs := atomic.NewTestTxs(txsPerHeight(height)) + txs := NewTestTxs(txsPerHeight(height)) if err := repo.Write(height, txs); err != nil { t.Fatal(err) } @@ -96,7 +95,7 @@ func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight } // verifyTxs asserts [repo] can find all txs in [txMap] by height and txID -func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*atomic.Tx) { +func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) { // We should be able to fetch indexed txs by height: for height, expectedTxs := range txMap { txs, err := repo.GetByHeight(height) @@ -183,12 +182,12 @@ func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec repo, err := NewAtomicTxRepository(db, codec, 0) if err != nil { t.Fatal(err) } - txMap := make(map[uint64][]*atomic.Tx) + txMap := make(map[uint64][]*Tx) writeTxs(t, repo, 1, 100, constTxsPerHeight(1), txMap, nil) verifyTxs(t, repo, txMap) @@ -196,12 +195,12 @@ func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec repo, err := NewAtomicTxRepository(db, codec, 0) if err != nil { t.Fatal(err) } - txMap := make(map[uint64][]*atomic.Tx) + txMap := make(map[uint64][]*Tx) writeTxs(t, repo, 1, 100, constTxsPerHeight(10), txMap, nil) verifyTxs(t, repo, txMap) 
@@ -209,10 +208,10 @@ func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { func TestAtomicRepositoryPreAP5Migration(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*atomic.Tx) + txMap := make(map[uint64][]*Tx) addTxs(t, codec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) if err := db.Commit(); err != nil { t.Fatal(err) @@ -234,10 +233,10 @@ func TestAtomicRepositoryPreAP5Migration(t *testing.T) { func TestAtomicRepositoryPostAP5Migration(t *testing.T) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*atomic.Tx) + txMap := make(map[uint64][]*Tx) addTxs(t, codec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) addTxs(t, codec, acceptedAtomicTxDB, 100, 200, 10, txMap, nil) if err := db.Commit(); err != nil { @@ -259,10 +258,10 @@ func TestAtomicRepositoryPostAP5Migration(t *testing.T) { func benchAtomicRepositoryIndex10_000(b *testing.B, maxHeight uint64, txsPerHeight int) { db := versiondb.New(memdb.New()) - codec := atomic.TestTxCodec + codec := TestTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*atomic.Tx) + txMap := make(map[uint64][]*Tx) addTxs(b, codec, acceptedAtomicTxDB, 0, maxHeight, txsPerHeight, txMap, nil) if err := db.Commit(); err != nil { diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index 26307cface..a07b4f4fee 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -180,7 +180,7 @@ func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { // SemanticVerify this transaction is valid. 
func (utx *UnsignedExportTx) SemanticVerify( - backend *Backend, + backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int, diff --git a/plugin/evm/atomic/import_tx.go b/plugin/evm/atomic/import_tx.go index 0d4d367d4e..76799dabaf 100644 --- a/plugin/evm/atomic/import_tx.go +++ b/plugin/evm/atomic/import_tx.go @@ -188,7 +188,7 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { // SemanticVerify this transaction is valid. func (utx *UnsignedImportTx) SemanticVerify( - backend *Backend, + backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int, @@ -443,7 +443,7 @@ func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state StateDB) // or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is // accepted, then nil will be returned immediately. // If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned. -func conflicts(backend *Backend, inputs set.Set[ids.ID], ancestor AtomicBlockContext) error { +func conflicts(backend *VerifierBackend, inputs set.Set[ids.ID], ancestor AtomicBlockContext) error { fetcher := backend.BlockFetcher lastAcceptedBlock := fetcher.LastAcceptedBlockInternal() lastAcceptedHeight := lastAcceptedBlock.Height() diff --git a/plugin/evm/atomic/test_shared_memories.go b/plugin/evm/atomic/test_shared_memories.go new file mode 100644 index 0000000000..f526748b03 --- /dev/null +++ b/plugin/evm/atomic/test_shared_memories.go @@ -0,0 +1,78 @@ +package atomic + +import ( + "testing" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/ids" + "github.com/stretchr/testify/assert" +) + +type SharedMemories struct { + thisChain atomic.SharedMemory + peerChain atomic.SharedMemory + thisChainID ids.ID + peerChainID ids.ID +} + +func (s *SharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*atomic.Requests) error { + for _, reqs := range ops { + puts := 
make(map[ids.ID]*atomic.Requests) + puts[s.thisChainID] = &atomic.Requests{} + for _, key := range reqs.RemoveRequests { + val := []byte{0x1} + puts[s.thisChainID].PutRequests = append(puts[s.thisChainID].PutRequests, &atomic.Element{Key: key, Value: val}) + } + if err := s.peerChain.Apply(puts); err != nil { + return err + } + } + return nil +} + +func (s *SharedMemories) AssertOpsApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { + t.Helper() + for _, reqs := range ops { + // should be able to get put requests + for _, elem := range reqs.PutRequests { + val, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) + if err != nil { + t.Fatalf("error finding puts in peerChainMemory: %s", err) + } + assert.Equal(t, elem.Value, val[0]) + } + + // should not be able to get remove requests + for _, key := range reqs.RemoveRequests { + _, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) + assert.EqualError(t, err, "not found") + } + } +} + +func (s *SharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { + t.Helper() + for _, reqs := range ops { + // should not be able to get put requests + for _, elem := range reqs.PutRequests { + _, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) + assert.EqualError(t, err, "not found") + } + + // should be able to get remove requests (these were previously added as puts on peerChain) + for _, key := range reqs.RemoveRequests { + val, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) + assert.NoError(t, err) + assert.Equal(t, []byte{0x1}, val[0]) + } + } +} + +func NewSharedMemories(atomicMemory *atomic.Memory, thisChainID, peerChainID ids.ID) *SharedMemories { + return &SharedMemories{ + thisChain: atomicMemory.NewSharedMemory(thisChainID), + peerChain: atomicMemory.NewSharedMemory(peerChainID), + thisChainID: thisChainID, + peerChainID: peerChainID, + } +} diff --git a/plugin/evm/atomic/test_tx.go b/plugin/evm/atomic/test_tx.go index 50af59e09f..21adecfd39 100644 --- 
a/plugin/evm/atomic/test_tx.go +++ b/plugin/evm/atomic/test_tx.go @@ -85,7 +85,7 @@ func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } // SemanticVerify implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) SemanticVerify(backend *Backend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error { +func (t *TestUnsignedTx) SemanticVerify(backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error { return t.SemanticVerifyV } diff --git a/plugin/evm/atomic/tx.go b/plugin/evm/atomic/tx.go index a911402dea..5c44b61937 100644 --- a/plugin/evm/atomic/tx.go +++ b/plugin/evm/atomic/tx.go @@ -127,7 +127,7 @@ type UnsignedTx interface { SignedBytes() []byte } -type Backend struct { +type VerifierBackend struct { Ctx *snow.Context Fx fx.Fx Rules params.Rules @@ -170,7 +170,7 @@ type UnsignedAtomicTx interface { Verify(ctx *snow.Context, rules params.Rules) error // Attempts to verify this transaction with the provided state. // SemanticVerify this transaction is valid. - SemanticVerify(backend *Backend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error + SemanticVerify(backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error // AtomicOps returns the blockchainID and set of atomic requests that // must be applied to shared memory for this transaction to be accepted. // The set of atomic requests must be returned in a consistent order. 
diff --git a/plugin/evm/atomic/utils.go b/plugin/evm/atomic/utils.go index 8872e09861..133b185930 100644 --- a/plugin/evm/atomic/utils.go +++ b/plugin/evm/atomic/utils.go @@ -6,6 +6,8 @@ package atomic import ( "errors" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -30,3 +32,11 @@ func GetEthAddress(privKey *secp256k1.PrivateKey) common.Address { func PublicKeyToEthAddress(pubKey *secp256k1.PublicKey) common.Address { return crypto.PubkeyToAddress(*(pubKey.ToECDSA())) } + +func ConvertToAtomicOps(tx *Tx) (map[ids.ID]*avalancheatomic.Requests, error) { + id, reqs, err := tx.AtomicOps() + if err != nil { + return nil, err + } + return map[ids.ID]*avalancheatomic.Requests{id: reqs}, nil +} diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 9a2de32601..ffadf284f3 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -158,7 +158,7 @@ func (b *Block) Accept(context.Context) error { return fmt.Errorf("chain could not accept %s: %w", b.ID(), err) } - if err := vm.acceptedBlockDB.Put(lastAcceptedKey, b.id[:]); err != nil { + if err := vm.PutLastAcceptedID(b.id[:]); err != nil { return fmt.Errorf("failed to put %s as the last accepted block: %w", b.ID(), err) } @@ -428,3 +428,7 @@ func (b *Block) Bytes() []byte { } func (b *Block) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID()) } + +func (b *Block) GetEthBlock() *types.Block { + return b.ethBlock +} diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 9bc1f498a9..4bc0999797 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" 
"github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -155,7 +156,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount / 2, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 0, }, }, @@ -172,7 +173,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 0, }, }, @@ -189,7 +190,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount + 1, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 0, }, }, @@ -263,7 +264,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 0, }, }, @@ -286,7 +287,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 1, }, }, @@ -309,7 +310,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { { Address: ethAddr, Amount: avaxAmount, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 1, }, }, @@ -912,7 +913,7 @@ func TestExportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - backend := &atomic.Backend{ + backend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: test.rules, @@ -1087,27 +1088,28 @@ func TestExportTxAccept(t *testing.T) { func TestExportTxVerify(t *testing.T) { var exportAmount uint64 = 10000000 + ctx := utils.TestSnowContext() exportTx := &atomic.UnsignedExportTx{ - NetworkID: testNetworkID, - BlockchainID: testCChainID, - DestinationChain: testXChainID, + NetworkID: ctx.NetworkID, + BlockchainID: ctx.CChainID, + DestinationChain: ctx.XChainID, Ins: []atomic.EVMInput{ { Address: testEthAddrs[0], Amount: exportAmount, - AssetID: testAvaxAssetID, + AssetID: ctx.AVAXAssetID, Nonce: 0, }, { Address: testEthAddrs[2], Amount: exportAmount, - 
AssetID: testAvaxAssetID, + AssetID: ctx.AVAXAssetID, Nonce: 0, }, }, ExportedOutputs: []*avax.TransferableOutput{ { - Asset: avax.Asset{ID: testAvaxAssetID}, + Asset: avax.Asset{ID: ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: exportAmount, OutputOwners: secp256k1fx.OutputOwners{ @@ -1118,7 +1120,7 @@ func TestExportTxVerify(t *testing.T) { }, }, { - Asset: avax.Asset{ID: testAvaxAssetID}, + Asset: avax.Asset{ID: ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: exportAmount, OutputOwners: secp256k1fx.OutputOwners{ @@ -1138,8 +1140,6 @@ func TestExportTxVerify(t *testing.T) { emptySigners := make([][]*secp256k1.PrivateKey, 2) atomic.SortEVMInputsAndSigners(exportTx.Ins, emptySigners) - ctx := NewContext() - tests := map[string]atomicTxVerifyTest{ "nil tx": { generate: func(t *testing.T) atomic.UnsignedAtomicTx { @@ -1261,7 +1261,7 @@ func TestExportTxVerify(t *testing.T) { { Address: testEthAddrs[0], Amount: 0, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, Nonce: 0, }, } @@ -1779,7 +1779,7 @@ func TestNewExportTx(t *testing.T) { exportTx := tx.UnsignedAtomicTx - backend := &atomic.Backend{ + backend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: vm.currentRules(), @@ -1987,7 +1987,7 @@ func TestNewExportTxMulticoin(t *testing.T) { } exportTx := tx.UnsignedAtomicTx - backend := &atomic.Backend{ + backend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: vm.currentRules(), diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index d254153712..4ce2f9284e 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -9,12 +9,13 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" + 
avalancheutils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -66,7 +67,7 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.M } func TestImportTxVerify(t *testing.T) { - ctx := NewContext() + ctx := utils.TestSnowContext() var importAmount uint64 = 10000000 txID := ids.GenerateTestID() @@ -117,8 +118,8 @@ func TestImportTxVerify(t *testing.T) { } // Sort the inputs and outputs to ensure the transaction is canonical - utils.Sort(importTx.ImportedInputs) - utils.Sort(importTx.Outs) + avalancheutils.Sort(importTx.ImportedInputs) + avalancheutils.Sort(importTx.Outs) tests := map[string]atomicTxVerifyTest{ "nil tx": { @@ -316,7 +317,7 @@ func TestImportTxVerify(t *testing.T) { { Address: testEthAddrs[0], Amount: 0, - AssetID: testAvaxAssetID, + AssetID: utils.TestAvaxAssetID, }, } return &tx diff --git a/plugin/evm/syncervm_client.go b/plugin/evm/syncervm_client.go index 0b704d6233..15a5b384f5 100644 --- a/plugin/evm/syncervm_client.go +++ b/plugin/evm/syncervm_client.go @@ -1,6 +1,6 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
- +// TODO: move to separate package package evm import ( @@ -8,7 +8,10 @@ import ( "fmt" "sync" + syncclient "github.com/ava-labs/coreth/sync/client" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" @@ -16,51 +19,60 @@ import ( "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/message" - syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) -const ( - // State sync fetches [parentsToGet] parents of the block it syncs to. - // The last 256 block hashes are necessary to support the BLOCKHASH opcode. - parentsToGet = 256 +// State sync fetches [StateSyncParentsToFetch] parents of the block it syncs to. +// The last 256 block hashes are necessary to support the BLOCKHASH opcode. 
+const StateSyncParentsToFetch = 256 + +var ( + metadataPrefix = []byte("metadata") + stateSyncSummaryKey = []byte("stateSyncSummary") ) -var stateSyncSummaryKey = []byte("stateSyncSummary") +type BlockAcceptor interface { + PutLastAcceptedID([]byte) error +} + +type EthBlockWrapper interface { + GetEthBlock() *types.Block +} -// stateSyncClientConfig defines the options and dependencies needed to construct a StateSyncerClient -type stateSyncClientConfig struct { - enabled bool - skipResume bool +// StateSyncClientConfig defines the options and dependencies needed to construct a StateSyncerClient +type StateSyncClientConfig struct { + Enabled bool + SkipResume bool // Specifies the number of blocks behind the latest state summary that the chain must be // in order to prefer performing state sync over falling back to the normal bootstrapping // algorithm. - stateSyncMinBlocks uint64 - stateSyncRequestSize uint16 // number of key/value pairs to ask peers for per request + StateSyncMinBlocks uint64 + StateSyncRequestSize uint16 // number of key/value pairs to ask peers for per request - lastAcceptedHeight uint64 + LastAcceptedHeight uint64 - chain *eth.Ethereum - state *chain.State - chaindb ethdb.Database - metadataDB database.Database - acceptedBlockDB database.Database - db *versiondb.Database - atomicBackend AtomicBackend + Chain *eth.Ethereum + State *chain.State + ChaindDB ethdb.Database + Acceptor BlockAcceptor + DB *versiondb.Database + AtomicBackend atomic.AtomicBackend - client syncclient.Client + Client syncclient.Client - toEngine chan<- commonEng.Message + ToEngine chan<- commonEng.Message } type stateSyncerClient struct { - *stateSyncClientConfig + *StateSyncClientConfig resumableSummary message.SyncSummary @@ -70,11 +82,13 @@ type stateSyncerClient struct { // State Sync results syncSummary message.SyncSummary stateSyncErr error + metadataDB database.Database } -func NewStateSyncClient(config *stateSyncClientConfig) StateSyncClient { +func 
NewStateSyncClient(config *StateSyncClientConfig) StateSyncClient { return &stateSyncerClient{ - stateSyncClientConfig: config, + StateSyncClientConfig: config, + metadataDB: prefixdb.New(metadataPrefix, config.DB), } } @@ -101,14 +115,14 @@ type Syncer interface { // StateSyncEnabled returns [client.enabled], which is set in the chain's config file. func (client *stateSyncerClient) StateSyncEnabled(context.Context) (bool, error) { - return client.enabled, nil + return client.Enabled, nil } // GetOngoingSyncStateSummary returns a state summary that was previously started // and not finished, and sets [resumableSummary] if one was found. // Returns [database.ErrNotFound] if no ongoing summary is found or if [client.skipResume] is true. func (client *stateSyncerClient) GetOngoingSyncStateSummary(context.Context) (block.StateSummary, error) { - if client.skipResume { + if client.SkipResume { return nil, database.ErrNotFound } @@ -130,7 +144,7 @@ func (client *stateSyncerClient) ClearOngoingSummary() error { if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { return fmt.Errorf("failed to clear ongoing summary: %w", err) } - if err := client.db.Commit(); err != nil { + if err := client.DB.Commit(); err != nil { return fmt.Errorf("failed to commit db while clearing ongoing summary: %w", err) } @@ -145,7 +159,7 @@ func (client *stateSyncerClient) ParseStateSummary(_ context.Context, summaryByt // stateSync blockingly performs the state sync for the EVM state and the atomic state // to [client.syncSummary]. returns an error if one occurred. 
func (client *stateSyncerClient) stateSync(ctx context.Context) error { - if err := client.syncBlocks(ctx, client.syncSummary.BlockHash, client.syncSummary.BlockNumber, parentsToGet); err != nil { + if err := client.syncBlocks(ctx, client.syncSummary.BlockHash, client.syncSummary.BlockNumber, StateSyncParentsToFetch); err != nil { return err } @@ -166,10 +180,10 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS // Skip syncing if the blockchain is not significantly ahead of local state, // since bootstrapping would be faster. // (Also ensures we don't sync to a height prior to local state.) - if client.lastAcceptedHeight+client.stateSyncMinBlocks > proposedSummary.Height() { + if client.LastAcceptedHeight+client.StateSyncMinBlocks > proposedSummary.Height() { log.Info( "last accepted too close to most recent syncable block, skipping state sync", - "lastAccepted", client.lastAcceptedHeight, + "lastAccepted", client.LastAcceptedHeight, "syncableHeight", proposedSummary.Height(), ) return block.StateSyncSkipped, nil @@ -181,11 +195,11 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS // sync marker will be wiped, so we do not accidentally resume progress from an incorrect version // of the snapshot. (if switching between versions that come before this change and back this could // lead to the snapshot not being cleaned up correctly) - <-snapshot.WipeSnapshot(client.chaindb, true) + <-snapshot.WipeSnapshot(client.ChaindDB, true) // Reset the snapshot generator here so that when state sync completes, snapshots will not attempt to read an // invalid generator. // Note: this must be called after WipeSnapshot is called so that we do not invalidate a partially generated snapshot. 
- snapshot.ResetSnapshotGeneration(client.chaindb) + snapshot.ResetSnapshotGeneration(client.ChaindDB) } client.syncSummary = proposedSummary @@ -195,7 +209,7 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS if err := client.metadataDB.Put(stateSyncSummaryKey, proposedSummary.Bytes()); err != nil { return block.StateSyncSkipped, fmt.Errorf("failed to write state sync summary key to disk: %w", err) } - if err := client.db.Commit(); err != nil { + if err := client.DB.Commit(); err != nil { return block.StateSyncSkipped, fmt.Errorf("failed to commit db: %w", err) } @@ -218,7 +232,7 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS // this error will be propagated to the engine when it calls // vm.SetState(snow.Bootstrapping) log.Info("stateSync completed, notifying engine", "err", client.stateSyncErr) - client.toEngine <- commonEng.StateSyncDone + client.ToEngine <- commonEng.StateSyncDone }() return block.StateSyncStatic, nil } @@ -235,7 +249,7 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common // first, check for blocks already available on disk so we don't // request them from peers. for parentsToGet >= 0 { - blk := rawdb.ReadBlock(client.chaindb, nextHash, nextHeight) + blk := rawdb.ReadBlock(client.ChaindDB, nextHash, nextHeight) if blk != nil { // block exists nextHash = blk.ParentHash() @@ -250,12 +264,12 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common // get any blocks we couldn't find on disk from peers and write // them to disk. 
- batch := client.chaindb.NewBatch() + batch := client.ChaindDB.NewBatch() for i := parentsToGet - 1; i >= 0 && (nextHash != common.Hash{}); { if err := ctx.Err(); err != nil { return err } - blocks, err := client.client.GetBlocks(ctx, nextHash, nextHeight, parentsPerRequest) + blocks, err := client.Client.GetBlocks(ctx, nextHash, nextHeight, parentsPerRequest) if err != nil { log.Error("could not get blocks from peer", "err", err, "nextHash", nextHash, "remaining", i+1) return err @@ -276,7 +290,7 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common func (client *stateSyncerClient) syncAtomicTrie(ctx context.Context) error { log.Info("atomic tx: sync starting", "root", client.syncSummary.AtomicRoot) - atomicSyncer, err := client.atomicBackend.Syncer(client.client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber, client.stateSyncRequestSize) + atomicSyncer, err := client.AtomicBackend.Syncer(client.Client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber, client.StateSyncRequestSize) if err != nil { return err } @@ -291,13 +305,13 @@ func (client *stateSyncerClient) syncAtomicTrie(ctx context.Context) error { func (client *stateSyncerClient) syncStateTrie(ctx context.Context) error { log.Info("state sync: sync starting", "root", client.syncSummary.BlockRoot) evmSyncer, err := statesync.NewStateSyncer(&statesync.StateSyncerConfig{ - Client: client.client, + Client: client.Client, Root: client.syncSummary.BlockRoot, BatchSize: ethdb.IdealBatchSize, - DB: client.chaindb, + DB: client.ChaindDB, MaxOutstandingCodeHashes: statesync.DefaultMaxOutstandingCodeHashes, NumCodeFetchingWorkers: statesync.DefaultNumCodeFetchingWorkers, - RequestSize: client.stateSyncRequestSize, + RequestSize: client.StateSyncRequestSize, }) if err != nil { return err @@ -321,7 +335,7 @@ func (client *stateSyncerClient) Shutdown() error { // finishSync is responsible for updating disk and memory pointers so the VM is prepared // for 
bootstrapping. Executes any shared memory operations from the atomic trie to shared memory. func (client *stateSyncerClient) finishSync() error { - stateBlock, err := client.state.GetBlock(context.TODO(), ids.ID(client.syncSummary.BlockHash)) + stateBlock, err := client.State.GetBlock(context.TODO(), ids.ID(client.syncSummary.BlockHash)) if err != nil { return fmt.Errorf("could not get block by hash from client state: %s", client.syncSummary.BlockHash) } @@ -330,12 +344,14 @@ func (client *stateSyncerClient) finishSync() error { if !ok { return fmt.Errorf("could not convert block(%T) to *chain.BlockWrapper", wrapper) } - evmBlock, ok := wrapper.Block.(*Block) + wrappedBlock := wrapper.Block + + evmBlockGetter, ok := wrappedBlock.(EthBlockWrapper) if !ok { return fmt.Errorf("could not convert block(%T) to evm.Block", stateBlock) } - block := evmBlock.ethBlock + block := evmBlockGetter.GetEthBlock() if block.Hash() != client.syncSummary.BlockHash { return fmt.Errorf("attempted to set last summary block to unexpected block hash: (%s != %s)", block.Hash(), client.syncSummary.BlockHash) @@ -354,9 +370,9 @@ func (client *stateSyncerClient) finishSync() error { // by [params.BloomBitsBlocks]. 
parentHeight := block.NumberU64() - 1 parentHash := block.ParentHash() - client.chain.BloomIndexer().AddCheckpoint(parentHeight/params.BloomBitsBlocks, parentHash) + client.Chain.BloomIndexer().AddCheckpoint(parentHeight/params.BloomBitsBlocks, parentHash) - if err := client.chain.BlockChain().ResetToStateSyncedBlock(block); err != nil { + if err := client.Chain.BlockChain().ResetToStateSyncedBlock(block); err != nil { return err } @@ -364,7 +380,7 @@ func (client *stateSyncerClient) finishSync() error { return fmt.Errorf("error updating vm markers, height=%d, hash=%s, err=%w", block.NumberU64(), block.Hash(), err) } - if err := client.state.SetLastAcceptedBlock(evmBlock); err != nil { + if err := client.State.SetLastAcceptedBlock(wrappedBlock); err != nil { return err } @@ -374,7 +390,7 @@ func (client *stateSyncerClient) finishSync() error { // ApplyToSharedMemory does this, and even if the VM is stopped // (gracefully or ungracefully), since MarkApplyToSharedMemoryCursor // is called, VM will resume ApplyToSharedMemory on Initialize. - return client.atomicBackend.ApplyToSharedMemory(block.NumberU64()) + return client.AtomicBackend.ApplyToSharedMemory(block.NumberU64()) } // updateVMMarkers updates the following markers in the VM's database @@ -387,17 +403,17 @@ func (client *stateSyncerClient) updateVMMarkers() error { // Mark the previously last accepted block for the shared memory cursor, so that we will execute shared // memory operations from the previously last accepted block to [vm.syncSummary] when ApplyToSharedMemory // is called. 
- if err := client.atomicBackend.MarkApplyToSharedMemoryCursor(client.lastAcceptedHeight); err != nil { + if err := client.AtomicBackend.MarkApplyToSharedMemoryCursor(client.LastAcceptedHeight); err != nil { return err } - client.atomicBackend.SetLastAccepted(client.syncSummary.BlockHash) - if err := client.acceptedBlockDB.Put(lastAcceptedKey, client.syncSummary.BlockHash[:]); err != nil { + client.AtomicBackend.SetLastAccepted(client.syncSummary.BlockHash) + if err := client.Acceptor.PutLastAcceptedID(client.syncSummary.BlockHash.Bytes()); err != nil { return err } if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { return err } - return client.db.Commit() + return client.DB.Commit() } // Error returns a non-nil error if one occurred during the sync. diff --git a/plugin/evm/syncervm_server.go b/plugin/evm/syncervm_server.go index 3bf051bf87..f434f4ae7d 100644 --- a/plugin/evm/syncervm_server.go +++ b/plugin/evm/syncervm_server.go @@ -1,6 +1,6 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. - +// TODO: move to separate package package evm import ( @@ -11,14 +11,15 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -type stateSyncServerConfig struct { +type StateSyncServerConfig struct { Chain *core.BlockChain - AtomicTrie AtomicTrie + AtomicTrie atomic.AtomicTrie // SyncableInterval is the interval at which blocks are eligible to provide syncable block summaries. 
SyncableInterval uint64 @@ -26,7 +27,7 @@ type stateSyncServerConfig struct { type stateSyncServer struct { chain *core.BlockChain - atomicTrie AtomicTrie + atomicTrie atomic.AtomicTrie syncableInterval uint64 } @@ -36,7 +37,7 @@ type StateSyncServer interface { GetStateSummary(context.Context, uint64) (block.StateSummary, error) } -func NewStateSyncServer(config *stateSyncServerConfig) StateSyncServer { +func NewStateSyncServer(config *StateSyncServerConfig) StateSyncServer { return &stateSyncServer{ chain: config.Chain, atomicTrie: config.AtomicTrie, diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 24491d9fa5..6d07b6f721 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -55,7 +55,7 @@ func TestSkipStateSync(t *testing.T) { stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync syncMode: block.StateSyncSkipped, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) testSyncerVM(t, vmSetup, test) } @@ -67,14 +67,14 @@ func TestStateSyncFromScratch(t *testing.T) { stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync syncMode: block.StateSyncStatic, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) testSyncerVM(t, vmSetup, test) } func TestStateSyncFromScratchExceedParent(t *testing.T) { rand.Seed(1) - numToGen := parentsToGet + uint64(32) + numToGen := StateSyncParentsToFetch + uint64(32) test := syncTest{ syncableInterval: numToGen, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync @@ -121,7 +121,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup := createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup := createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) // Perform sync 
resulting in early termination. testSyncerVM(t, vmSetup, test) @@ -272,7 +272,7 @@ func TestVMShutdownWhileSyncing(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup = createSyncServerAndClientVMs(t, test, parentsToGet) + vmSetup = createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) // Perform sync resulting in early termination. testSyncerVM(t, vmSetup, test) } @@ -285,8 +285,9 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s testShortIDAddrs[0]: importAmount, } ) + config := fmt.Sprintf(`{"commit-interval": %d}`, test.syncableInterval) _, serverVM, _, serverAtomicMemory, serverAppSender := GenesisVMWithUTXOs( - t, true, "", "", "", alloc, + t, true, "", config, "", alloc, ) t.Cleanup(func() { log.Info("Shutting down server VM") @@ -332,14 +333,17 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s // override serverAtomicTrie's commitInterval so the call to [serverAtomicTrie.Index] // creates a commit at the height [syncableInterval]. This is necessary to support // fetching a state summary. 
- serverAtomicTrie := serverVM.atomicTrie.(*atomicTrie) - serverAtomicTrie.commitInterval = test.syncableInterval - require.NoError(serverAtomicTrie.commit(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) + serverAtomicTrie := serverVM.atomicTrie + require.True(serverAtomicTrie.AcceptTrie(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) require.NoError(serverVM.db.Commit()) - serverSharedMemories := newSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) - serverSharedMemories.assertOpsApplied(t, mustAtomicOps(importTx)) - serverSharedMemories.assertOpsApplied(t, mustAtomicOps(exportTx)) + serverSharedMemories := atomic.NewSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) + importOps, err := atomic.ConvertToAtomicOps(importTx) + require.NoError(err) + exportOps, err := atomic.ConvertToAtomicOps(exportTx) + require.NoError(err) + serverSharedMemories.AssertOpsApplied(t, importOps) + serverSharedMemories.AssertOpsApplied(t, exportOps) // make some accounts trieDB := triedb.NewDatabase(serverVM.chaindb, nil) @@ -360,7 +364,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval // initialise [syncerVM] with blank genesis state - stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d}`, test.stateSyncMinBlocks, 4) + stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d, "commit-interval": %d}`, test.stateSyncMinBlocks, 4, test.syncableInterval) syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( t, false, "", stateSyncEnabledJSON, "", alloc, ) @@ -373,9 +377,6 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require.NoError(err) require.True(enabled) - // override [syncerVM]'s 
commit interval so the atomic trie works correctly. - syncerVM.atomicTrie.(*atomicTrie).commitInterval = test.syncableInterval - // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { if test.responseIntercept == nil { @@ -559,10 +560,12 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { require.True(syncerVM.bootstrapped.Get()) // check atomic memory was synced properly - syncerSharedMemories := newSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) + syncerSharedMemories := atomic.NewSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) for _, tx := range includedAtomicTxs { - syncerSharedMemories.assertOpsApplied(t, mustAtomicOps(tx)) + ops, err := atomic.ConvertToAtomicOps(tx) + require.NoError(err) + syncerSharedMemories.AssertOpsApplied(t, ops) } // Generate blocks after we have entered normal consensus as well diff --git a/plugin/evm/tx_test.go b/plugin/evm/tx_test.go index a710a3c9e1..9bef967e68 100644 --- a/plugin/evm/tx_test.go +++ b/plugin/evm/tx_test.go @@ -116,7 +116,7 @@ func executeTxTest(t *testing.T, test atomicTxTest) { } lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) - backend := &atomic.Backend{ + backend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: rules, diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index c83557d594..aebc94814c 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -107,6 +107,7 @@ var ( _ block.StateSyncableVM = &VM{} _ statesyncclient.EthBlockParser = &VM{} _ secp256k1fx.VM = &VM{} + _ BlockAcceptor = &VM{} ) const ( @@ -153,13 +154,8 @@ var ( // Set last accepted key to be longer than the keys used to store accepted block IDs. 
lastAcceptedKey = []byte("last_accepted_key") acceptedPrefix = []byte("snowman_accepted") - metadataPrefix = []byte("metadata") warpPrefix = []byte("warp") ethDBPrefix = []byte("ethdb") - - // Prefixes for atomic trie - atomicTrieDBPrefix = []byte("atomicTrieDB") - atomicTrieMetaDBPrefix = []byte("atomicTrieMetaDB") ) var ( @@ -229,9 +225,6 @@ type VM struct { // [db] is the VM's current database managed by ChainState db *versiondb.Database - // metadataDB is used to store one off keys. - metadataDB avalanchedatabase.Database - // [chaindb] is the database supplied to the Ethereum backend chaindb ethdb.Database @@ -250,11 +243,11 @@ type VM struct { // [atomicTxRepository] maintains two indexes on accepted atomic txs. // - txID to accepted atomic tx // - block height to list of atomic txs accepted on block at that height - atomicTxRepository AtomicTxRepository + atomicTxRepository atomic.AtomicTxRepository // [atomicTrie] maintains a merkle forest of [height]=>[atomic txs]. - atomicTrie AtomicTrie + atomicTrie atomic.AtomicTrie // [atomicBackend] abstracts verification and processing of atomic transactions - atomicBackend AtomicBackend + atomicBackend atomic.AtomicBackend builder *blockBuilder @@ -593,11 +586,11 @@ func (vm *VM) Initialize( } // initialize atomic repository - vm.atomicTxRepository, err = NewAtomicTxRepository(vm.db, atomic.Codec, lastAcceptedHeight) + vm.atomicTxRepository, err = atomic.NewAtomicTxRepository(vm.db, atomic.Codec, lastAcceptedHeight) if err != nil { return fmt.Errorf("failed to create atomic repository: %w", err) } - vm.atomicBackend, err = NewAtomicBackend( + vm.atomicBackend, err = atomic.NewAtomicBackend( vm.db, vm.ctx.SharedMemory, bonusBlockHeights, vm.atomicTxRepository, lastAcceptedHeight, lastAcceptedHash, vm.config.CommitInterval, @@ -624,7 +617,7 @@ func (vm *VM) Initialize( vm.setAppRequestHandlers() - vm.StateSyncServer = NewStateSyncServer(&stateSyncServerConfig{ + vm.StateSyncServer = 
NewStateSyncServer(&StateSyncServerConfig{ Chain: vm.blockChain, AtomicTrie: vm.atomicTrie, SyncableInterval: vm.config.StateSyncCommitInterval, @@ -733,10 +726,10 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } - vm.StateSyncClient = NewStateSyncClient(&stateSyncClientConfig{ - chain: vm.eth, - state: vm.State, - client: statesyncclient.NewClient( + vm.StateSyncClient = NewStateSyncClient(&StateSyncClientConfig{ + Chain: vm.eth, + State: vm.State, + Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.client, Codec: vm.networkCodec, @@ -745,17 +738,16 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { BlockParser: vm, }, ), - enabled: stateSyncEnabled, - skipResume: vm.config.StateSyncSkipResume, - stateSyncMinBlocks: vm.config.StateSyncMinBlocks, - stateSyncRequestSize: vm.config.StateSyncRequestSize, - lastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around - chaindb: vm.chaindb, - metadataDB: vm.metadataDB, - acceptedBlockDB: vm.acceptedBlockDB, - db: vm.db, - atomicBackend: vm.atomicBackend, - toEngine: vm.toEngine, + Enabled: stateSyncEnabled, + SkipResume: vm.config.StateSyncSkipResume, + StateSyncMinBlocks: vm.config.StateSyncMinBlocks, + StateSyncRequestSize: vm.config.StateSyncRequestSize, + LastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around + ChaindDB: vm.chaindb, + DB: vm.db, + AtomicBackend: vm.atomicBackend, + ToEngine: vm.toEngine, + Acceptor: vm, }) // If StateSync is disabled, clear any ongoing summary so that we will not attempt to resume @@ -1522,7 +1514,6 @@ func (vm *VM) initializeDBs(db avalanchedatabase.Database) error { vm.chaindb = rawdb.NewDatabase(database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, db))) vm.db = versiondb.New(db) vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.db) - vm.metadataDB = prefixdb.New(metadataPrefix, vm.db) // Note warpDB is not part of versiondb because it 
is not necessary // that warp signatures are committed to the database atomically with // the last accepted block. @@ -1648,7 +1639,7 @@ func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, if !ok { return fmt.Errorf("parent block %s had unexpected type %T", parentIntf.ID(), parentIntf) } - atomicBackend := &atomic.Backend{ + atomicBackend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: rules, @@ -1687,7 +1678,7 @@ func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.I // Ensure each tx in [txs] doesn't conflict with any other atomic tx in // a processing ancestor block. inputs := set.Set[ids.ID]{} - atomicBackend := &atomic.Backend{ + atomicBackend := &atomic.VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: rules, @@ -1918,3 +1909,7 @@ func (vm *VM) newExportTx( return tx, nil } + +func (vm *VM) PutLastAcceptedID(ID []byte) error { + return vm.acceptedBlockDB.Put(lastAcceptedKey, ID) +} diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 4ec59de0bc..0a4f9ebc20 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -39,10 +39,8 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/utils/cb58" - "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/hashing" @@ -56,7 +54,6 @@ import ( commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" - constantsEng "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" @@ -65,19 +62,15 @@ import ( "github.com/ava-labs/coreth/params" 
"github.com/ava-labs/coreth/rpc" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" accountKeystore "github.com/ava-labs/coreth/accounts/keystore" ) var ( testNetworkID uint32 = 10 - testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} - testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} nonExistentID = ids.ID{'F'} testKeys []*secp256k1.PrivateKey testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] testShortIDAddrs []ids.ShortID - testAvaxAssetID = ids.ID{1, 2, 3} username = "Johns" password = "CjasdjhiPeirbSenfeI13" // #nosec G101 @@ -204,41 +197,6 @@ func BuildGenesisTest(t *testing.T, genesisJSON string) []byte { return genesisBytes } -func NewContext() *snow.Context { - ctx := utils.TestSnowContext() - ctx.NodeID = ids.GenerateTestNodeID() - ctx.NetworkID = testNetworkID - ctx.ChainID = testCChainID - ctx.AVAXAssetID = testAvaxAssetID - ctx.XChainID = testXChainID - ctx.SharedMemory = testSharedMemory() - aliaser := ctx.BCLookup.(ids.Aliaser) - _ = aliaser.Alias(testCChainID, "C") - _ = aliaser.Alias(testCChainID, testCChainID.String()) - _ = aliaser.Alias(testXChainID, "X") - _ = aliaser.Alias(testXChainID, testXChainID.String()) - ctx.ValidatorState = &validatorstest.State{ - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constantsEng.PlatformChainID: constantsEng.PrimaryNetworkID, - testXChainID: constantsEng.PrimaryNetworkID, - testCChainID: constantsEng.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errors.New("unknown chain") - } - return subnetID, nil - }, - } - blsSecretKey, err := bls.NewSigner() - if err != nil { - panic(err) - } - ctx.WarpSigner = avalancheWarp.NewSigner(blsSecretKey, ctx.NetworkID, ctx.ChainID) - ctx.PublicKey = blsSecretKey.PublicKey() - return ctx -} - // setupGenesis sets up the genesis // If [genesisJSON] is empty, defaults to using [genesisJSONLatest] func setupGenesis( @@ -254,7 +212,7 @@ 
func setupGenesis( genesisJSON = genesisJSONLatest } genesisBytes := BuildGenesisTest(t, genesisJSON) - ctx := NewContext() + ctx := utils.TestSnowContext() baseDB := memdb.New() @@ -813,7 +771,7 @@ func TestBuildEthTxBlock(t *testing.T) { restartedVM := &VM{} if err := restartedVM.Initialize( context.Background(), - NewContext(), + utils.TestSnowContext(), dbManager, []byte(genesisJSONApricotPhase2), []byte(""), @@ -1515,6 +1473,19 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { } } +type wrappedBackend struct { + atomic.AtomicBackend + registeredBonusBlocks map[uint64]common.Hash +} + +func (w *wrappedBackend) IsBonus(blockHeight uint64, blockHash common.Hash) bool { + // Check if the block is a bonus block + if hash, ok := w.registeredBonusBlocks[blockHeight]; ok { + return blockHash.Cmp(hash) == 0 + } + return false +} + func TestBonusBlocksTxs(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") @@ -1572,7 +1543,11 @@ func TestBonusBlocksTxs(t *testing.T) { } // Make [blk] a bonus block. 
- vm.atomicBackend.(*atomicBackend).bonusBlocks = map[uint64]ids.ID{blk.Height(): blk.ID()} + wrappedBackend := &wrappedBackend{ + AtomicBackend: vm.atomicBackend, + registeredBonusBlocks: map[uint64]common.Hash{1: common.Hash(blk.ID())}, + } + vm.atomicBackend = wrappedBackend // Remove the UTXOs from shared memory, so that non-bonus blocks will fail verification if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil { diff --git a/utils/snow.go b/utils/snow.go index 36b9b7b7fb..e24c884b16 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -8,6 +8,8 @@ import ( "errors" "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" @@ -19,9 +21,10 @@ import ( ) var ( - testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} - testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} - testChainID = ids.ID{'t', 'e', 's', 't', 'c', 'h', 'a', 'i', 'n'} + testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} + testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} + testChainID = ids.ID{'t', 'e', 's', 't', 'c', 'h', 'a', 'i', 'n'} + TestAvaxAssetID = ids.ID{1, 2, 3} ) func TestSnowContext() *snow.Context { @@ -31,19 +34,27 @@ func TestSnowContext() *snow.Context { } pk := sk.PublicKey() networkID := constants.UnitTestID - chainID := testChainID + chainID := testCChainID + + aliaser := ids.NewAliaser() + _ = aliaser.Alias(testCChainID, "C") + _ = aliaser.Alias(testCChainID, testCChainID.String()) + _ = aliaser.Alias(testXChainID, "X") + _ = aliaser.Alias(testXChainID, testXChainID.String()) ctx := &snow.Context{ NetworkID: networkID, SubnetID: ids.Empty, ChainID: chainID, + AVAXAssetID: TestAvaxAssetID, NodeID: ids.GenerateTestNodeID(), + SharedMemory: 
TestSharedMemory(), XChainID: testXChainID, CChainID: testCChainID, PublicKey: pk, WarpSigner: warp.NewSigner(sk, networkID, chainID), Log: logging.NoLog{}, - BCLookup: ids.NewAliaser(), + BCLookup: aliaser, Metrics: metrics.NewPrefixGatherer(), ChainDataDir: "", ValidatorState: NewTestValidatorState(), @@ -76,3 +87,8 @@ func NewTestValidatorState() *validatorstest.State { }, } } + +func TestSharedMemory() atomic.SharedMemory { + m := atomic.NewMemory(memdb.New()) + return m.NewSharedMemory(testCChainID) +} From aad2f5f678c1a92f4cc7b563f429b32e63d83ee8 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Dec 2024 13:34:45 +0300 Subject: [PATCH 11/91] remove multicoin/deprecated apis --- core/genesis.go | 5 - core/state/dump.go | 4 - core/types/account.go | 3 - core/types/gen_genesis_account.go | 6 - internal/ethapi/api.go | 18 +-- plugin/evm/atomic/export_tx.go | 31 ++--- plugin/evm/client/client.go | 20 +-- plugin/evm/export_tx_test.go | 6 +- plugin/evm/service.go | 211 +----------------------------- plugin/evm/syncervm_test.go | 1 - plugin/evm/user.go | 137 ------------------- plugin/evm/vm.go | 50 ------- plugin/evm/vm_test.go | 33 ++++- 13 files changed, 54 insertions(+), 471 deletions(-) delete mode 100644 plugin/evm/user.go diff --git a/core/genesis.go b/core/genesis.go index 07d3ba072d..8163d514aa 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -258,11 +258,6 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *triedb.Database) *types.Blo for key, value := range account.Storage { statedb.SetState(addr, key, value) } - if account.MCBalance != nil { - for coinID, value := range account.MCBalance { - statedb.AddBalanceMultiCoin(addr, coinID, value) - } - } } root := statedb.IntermediateRoot(false) head.Root = root diff --git a/core/state/dump.go b/core/state/dump.go index a18184ca8c..0ab56f6b67 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -64,11 +64,9 @@ type DumpAccount struct { Root hexutil.Bytes `json:"root"` CodeHash 
hexutil.Bytes `json:"codeHash"` Code hexutil.Bytes `json:"code,omitempty"` - IsMultiCoin bool `json:"isMultiCoin"` Storage map[common.Hash]string `json:"storage,omitempty"` Address *common.Address `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode AddressHash hexutil.Bytes `json:"key,omitempty"` // If we don't have address, we can output the key - } // Dump represents the full dump in a collected format, as one large map. @@ -107,7 +105,6 @@ func (d iterativeDump) OnAccount(addr *common.Address, account DumpAccount) { Nonce: account.Nonce, Root: account.Root, CodeHash: account.CodeHash, - IsMultiCoin: account.IsMultiCoin, Code: account.Code, Storage: account.Storage, AddressHash: account.AddressHash, @@ -156,7 +153,6 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] Nonce: data.Nonce, Root: data.Root[:], CodeHash: data.CodeHash, - IsMultiCoin: data.IsMultiCoin, AddressHash: it.Key, } address *common.Address diff --git a/core/types/account.go b/core/types/account.go index 28a663009e..0d7f78d51a 100644 --- a/core/types/account.go +++ b/core/types/account.go @@ -87,14 +87,11 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error { return nil } -type GenesisMultiCoinBalance map[common.Hash]*big.Int - // GenesisAccount is an account in the state of the genesis block. 
type GenesisAccount struct { Code []byte `json:"code,omitempty"` Storage map[common.Hash]common.Hash `json:"storage,omitempty"` Balance *big.Int `json:"balance" gencodec:"required"` - MCBalance GenesisMultiCoinBalance `json:"mcbalance,omitempty"` Nonce uint64 `json:"nonce,omitempty"` PrivateKey []byte `json:"secretKey,omitempty"` // for tests } diff --git a/core/types/gen_genesis_account.go b/core/types/gen_genesis_account.go index f4c352ec7e..389cc159a7 100644 --- a/core/types/gen_genesis_account.go +++ b/core/types/gen_genesis_account.go @@ -20,7 +20,6 @@ func (g GenesisAccount) MarshalJSON() ([]byte, error) { Code hexutil.Bytes `json:"code,omitempty"` Storage map[storageJSON]storageJSON `json:"storage,omitempty"` Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"` - MCBalance GenesisMultiCoinBalance `json:"mcbalance,omitempty"` Nonce math.HexOrDecimal64 `json:"nonce,omitempty"` PrivateKey hexutil.Bytes `json:"secretKey,omitempty"` } @@ -33,7 +32,6 @@ func (g GenesisAccount) MarshalJSON() ([]byte, error) { } } enc.Balance = (*math.HexOrDecimal256)(g.Balance) - enc.MCBalance = g.MCBalance enc.Nonce = math.HexOrDecimal64(g.Nonce) enc.PrivateKey = g.PrivateKey return json.Marshal(&enc) @@ -45,7 +43,6 @@ func (g *GenesisAccount) UnmarshalJSON(input []byte) error { Code *hexutil.Bytes `json:"code,omitempty"` Storage map[storageJSON]storageJSON `json:"storage,omitempty"` Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"` - MCBalance *GenesisMultiCoinBalance `json:"mcbalance,omitempty"` Nonce *math.HexOrDecimal64 `json:"nonce,omitempty"` PrivateKey *hexutil.Bytes `json:"secretKey,omitempty"` } @@ -66,9 +63,6 @@ func (g *GenesisAccount) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'balance' for GenesisAccount") } g.Balance = (*big.Int)(dec.Balance) - if dec.MCBalance != nil { - g.MCBalance = *dec.MCBalance - } if dec.Nonce != nil { g.Nonce = uint64(*dec.Nonce) } diff --git a/internal/ethapi/api.go 
b/internal/ethapi/api.go index 3b72aeae33..bdf5140c26 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -35,7 +35,6 @@ import ( "strings" "time" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/accounts" "github.com/ava-labs/coreth/accounts/keystore" "github.com/ava-labs/coreth/accounts/scwallet" @@ -229,7 +228,7 @@ func (s *TxPoolAPI) Inspect() map[string]map[string]map[string]string { pending, queue := s.b.TxPoolContent() // Define a formatter to flatten a transaction into a string - var format = func(tx *types.Transaction) string { + format := func(tx *types.Transaction) string { if to := tx.To(); to != nil { return fmt.Sprintf("%s: %v wei + %v gas Ă— %v wei", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice()) } @@ -646,17 +645,6 @@ func (s *BlockChainAPI) GetBalance(ctx context.Context, address common.Address, return (*hexutil.Big)(b), state.Error() } -// GetAssetBalance returns the amount of [assetID] for the given address in the state of the -// given block number. The rpc.LatestBlockNumber, rpc.PendingBlockNumber, and -// rpc.AcceptedBlockNumber meta block numbers are also allowed. -func (s *BlockChainAPI) GetAssetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash, assetID ids.ID) (*hexutil.Big, error) { - state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) - if state == nil || err != nil { - return nil, err - } - return (*hexutil.Big)(state.GetBalanceMultiCoin(address, common.Hash(assetID))), state.Error() -} - // AccountResult structs for GetProof type AccountResult struct { Address common.Address `json:"address"` @@ -1966,11 +1954,11 @@ func (s *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, g matchTx := sendArgs.toTransaction() // Before replacing the old transaction, ensure the _new_ transaction fee is reasonable. 
- var price = matchTx.GasPrice() + price := matchTx.GasPrice() if gasPrice != nil { price = gasPrice.ToInt() } - var gas = matchTx.Gas() + gas := matchTx.Gas() if gasLimit != nil { gas = uint64(*gasLimit) } diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index a07b4f4fee..ed6b464678 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -287,7 +287,6 @@ func NewExportTx( ctx *snow.Context, rules params.Rules, state StateDB, - assetID ids.ID, // AssetID of the tokens to export amount uint64, // Amount of tokens to export chainID ids.ID, // Chain to send the UTXOs to to ids.ShortID, // Address of chain recipient @@ -295,7 +294,7 @@ func NewExportTx( keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens ) (*Tx, error) { outs := []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, + Asset: avax.Asset{ID: ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ Amt: amount, OutputOwners: secp256k1fx.OutputOwners{ @@ -307,22 +306,12 @@ func NewExportTx( }} var ( - avaxNeeded uint64 = 0 + avaxNeeded uint64 = amount ins, avaxIns []EVMInput signers, avaxSigners [][]*secp256k1.PrivateKey err error ) - // consume non-AVAX - if assetID != ctx.AVAXAssetID { - ins, signers, err = GetSpendableFunds(ctx, state, keys, assetID, amount) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) - } - } else { - avaxNeeded = amount - } - switch { case rules.IsApricotPhase3: utx := &UnsignedExportTx{ @@ -350,7 +339,7 @@ func NewExportTx( if err != nil { return nil, errOverflowExport } - avaxIns, avaxSigners, err = GetSpendableFunds(ctx, state, keys, ctx.AVAXAssetID, newAvaxNeeded) + avaxIns, avaxSigners, err = GetSpendableFunds(ctx, state, keys, newAvaxNeeded) } if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) @@ -420,7 +409,6 @@ func GetSpendableFunds( ctx *snow.Context, state StateDB, keys []*secp256k1.PrivateKey, - assetID ids.ID, amount 
uint64, ) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { inputs := []EVMInput{} @@ -433,13 +421,10 @@ func GetSpendableFunds( } addr := GetEthAddress(key) var balance uint64 - if assetID == ctx.AVAXAssetID { - // If the asset is AVAX, we divide by the x2cRate to convert back to the correct - // denomination of AVAX that can be exported. - balance = new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() - } else { - balance = state.GetBalanceMultiCoin(addr, common.Hash(assetID)).Uint64() - } + // we divide by the x2cRate to convert back to the correct + // denomination of AVAX that can be exported. + balance = new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() + if balance == 0 { continue } @@ -451,7 +436,7 @@ func GetSpendableFunds( inputs = append(inputs, EVMInput{ Address: addr, Amount: balance, - AssetID: assetID, + AssetID: ctx.AVAXAssetID, Nonce: nonce, }) signers = append(signers, []*secp256k1.PrivateKey{key}) diff --git a/plugin/evm/client/client.go b/plugin/evm/client/client.go index 110036904e..fbc690d607 100644 --- a/plugin/evm/client/client.go +++ b/plugin/evm/client/client.go @@ -241,13 +241,6 @@ type ExportAVAXArgs struct { To string `json:"to"` } -// ExportArgs are the arguments to Export -type ExportArgs struct { - ExportAVAXArgs - // AssetID of the tokens - AssetID string `json:"assetID"` -} - // Export sends an asset from this chain to the P/C-Chain. // After this tx is accepted, the AVAX must be imported to the P/C-chain with an importTx. 
// Returns the ID of the newly created atomic transaction @@ -261,14 +254,11 @@ func (c *client) Export( options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "avax.export", &ExportArgs{ - ExportAVAXArgs: ExportAVAXArgs{ - UserPass: user, - Amount: json.Uint64(amount), - TargetChain: targetChain, - To: to.String(), - }, - AssetID: assetID, + err := c.requester.SendRequest(ctx, "avax.export", &ExportAVAXArgs{ + UserPass: user, + Amount: json.Uint64(amount), + TargetChain: targetChain, + To: to.String(), }, res, options...) return res.TxID, err } diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 4bc0999797..44fff762f5 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -92,7 +92,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, t.Fatal(err) } for _, addr := range testShortIDAddrs { - exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1772,7 +1772,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1981,7 +1981,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, vm.currentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, initialBaseFee, 
[]*secp256k1.PrivateKey{testKeys[0]}) + tx, err = atomic.NewExportTx(vm.ctx, vm.currentRules(), state, exportAmount, vm.ctx.XChainID, exportId, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 39baf3eed1..12791d4e08 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -19,7 +19,6 @@ import ( "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/client" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" ) @@ -33,10 +32,9 @@ const ( ) var ( - errNoAddresses = errors.New("no addresses provided") - errNoSourceChain = errors.New("no source chain provided") - errNilTxID = errors.New("nil transaction ID") - errMissingPrivateKey = errors.New("argument 'privateKey' not given") + errNoAddresses = errors.New("no addresses provided") + errNoSourceChain = errors.New("no source chain provided") + errNilTxID = errors.New("nil transaction ID") initialBaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) ) @@ -71,17 +69,6 @@ func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { // AvaxAPI offers Avalanche network related API methods type AvaxAPI struct{ vm *VM } -// parseAssetID parses an assetID string into an ID -func (service *AvaxAPI) parseAssetID(assetID string) (ids.ID, error) { - if assetID == "" { - return ids.ID{}, fmt.Errorf("assetID is required") - } else if assetID == "AVAX" { - return service.vm.ctx.AVAXAssetID, nil - } else { - return ids.FromString(assetID) - } -} - type VersionReply struct { Version string `json:"version"` } @@ -92,198 +79,6 @@ func (service *AvaxAPI) Version(r *http.Request, _ *struct{}, reply *VersionRepl return nil } -// ExportKey returns a private key from the provided user -func (service *AvaxAPI) ExportKey(r *http.Request, args *client.ExportKeyArgs, reply *client.ExportKeyReply) error { - log.Info("EVM: 
ExportKey called") - - address, err := atomic.ParseEthAddress(args.Address) - if err != nil { - return fmt.Errorf("couldn't parse %s to address: %s", args.Address, err) - } - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) - if err != nil { - return fmt.Errorf("problem retrieving user '%s': %w", args.Username, err) - } - defer db.Close() - - user := user{db: db} - reply.PrivateKey, err = user.getKey(address) - if err != nil { - return fmt.Errorf("problem retrieving private key: %w", err) - } - reply.PrivateKeyHex = hexutil.Encode(reply.PrivateKey.Bytes()) - return nil -} - -// ImportKey adds a private key to the provided user -func (service *AvaxAPI) ImportKey(r *http.Request, args *client.ImportKeyArgs, reply *api.JSONAddress) error { - log.Info("EVM: ImportKey called", "username", args.Username) - - if args.PrivateKey == nil { - return errMissingPrivateKey - } - - reply.Address = atomic.GetEthAddress(args.PrivateKey).Hex() - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) - if err != nil { - return fmt.Errorf("problem retrieving data: %w", err) - } - defer db.Close() - - user := user{db: db} - if err := user.putAddress(args.PrivateKey); err != nil { - return fmt.Errorf("problem saving key %w", err) - } - return nil -} - -// ImportAVAX is a deprecated name for Import. -func (service *AvaxAPI) ImportAVAX(_ *http.Request, args *client.ImportArgs, response *api.JSONTxID) error { - return service.Import(nil, args, response) -} - -// Import issues a transaction to import AVAX from the X-chain. The AVAX -// must have already been exported from the X-Chain. 
-func (service *AvaxAPI) Import(_ *http.Request, args *client.ImportArgs, response *api.JSONTxID) error { - log.Info("EVM: ImportAVAX called") - - chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) - if err != nil { - return fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) - } - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - // Get the user's info - db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) - if err != nil { - return fmt.Errorf("couldn't get user '%s': %w", args.Username, err) - } - defer db.Close() - - user := user{db: db} - privKeys, err := user.getKeys() - if err != nil { // Get keys - return fmt.Errorf("couldn't get keys controlled by the user: %w", err) - } - - var baseFee *big.Int - if args.BaseFee == nil { - // Get the base fee to use - baseFee, err = service.vm.estimateBaseFee(context.Background()) - if err != nil { - return err - } - } else { - baseFee = args.BaseFee.ToInt() - } - - tx, err := service.vm.newImportTx(chainID, args.To, baseFee, privKeys) - if err != nil { - return err - } - - response.TxID = tx.ID() - if err := service.vm.mempool.AddLocalTx(tx); err != nil { - return err - } - service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) - return nil -} - -// ExportAVAX exports AVAX from the C-Chain to the X-Chain -// It must be imported on the X-Chain to complete the transfer -func (service *AvaxAPI) ExportAVAX(_ *http.Request, args *client.ExportAVAXArgs, response *api.JSONTxID) error { - return service.Export(nil, &client.ExportArgs{ - ExportAVAXArgs: *args, - AssetID: service.vm.ctx.AVAXAssetID.String(), - }, response) -} - -// Export exports an asset from the C-Chain to the X-Chain -// It must be imported on the X-Chain to complete the transfer -func (service *AvaxAPI) Export(_ *http.Request, args *client.ExportArgs, response *api.JSONTxID) error { - log.Info("EVM: Export called") - - assetID, err := service.parseAssetID(args.AssetID) - if 
err != nil { - return err - } - - if args.Amount == 0 { - return errors.New("argument 'amount' must be > 0") - } - - // Get the chainID and parse the to address - chainID, to, err := service.vm.ParseAddress(args.To) - if err != nil { - chainID, err = service.vm.ctx.BCLookup.Lookup(args.TargetChain) - if err != nil { - return err - } - to, err = ids.ShortFromString(args.To) - if err != nil { - return err - } - } - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - // Get this user's data - db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) - if err != nil { - return fmt.Errorf("problem retrieving user '%s': %w", args.Username, err) - } - defer db.Close() - - user := user{db: db} - privKeys, err := user.getKeys() - if err != nil { - return fmt.Errorf("couldn't get addresses controlled by the user: %w", err) - } - - var baseFee *big.Int - if args.BaseFee == nil { - // Get the base fee to use - baseFee, err = service.vm.estimateBaseFee(context.Background()) - if err != nil { - return err - } - } else { - baseFee = args.BaseFee.ToInt() - } - - // Create the transaction - tx, err := service.vm.newExportTx( - assetID, // AssetID - uint64(args.Amount), // Amount - chainID, // ID of the chain to send the funds to - to, // Address - baseFee, - privKeys, // Private keys - ) - if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - response.TxID = tx.ID() - if err := service.vm.mempool.AddLocalTx(tx); err != nil { - return err - } - service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) - return nil -} - // GetUTXOs gets all utxos for passed in addresses func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply *api.GetUTXOsReply) error { log.Info("EVM: GetUTXOs called", "Addresses", args.Addresses) diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 6d07b6f721..2f81fca2ea 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ 
-312,7 +312,6 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s case 1: // export some of the imported UTXOs to test exportTx is properly synced exportTx, err = serverVM.newExportTx( - serverVM.ctx.AVAXAssetID, importAmount/2, serverVM.ctx.XChainID, testShortIDAddrs[0], diff --git a/plugin/evm/user.go b/plugin/evm/user.go deleted file mode 100644 index 627a7af1d1..0000000000 --- a/plugin/evm/user.go +++ /dev/null @@ -1,137 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "errors" - - "github.com/ava-labs/avalanchego/database/encdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ethereum/go-ethereum/common" -) - -// Key in the database whose corresponding value is the list of -// addresses this user controls -var addressesKey = ids.Empty[:] - -var ( - errDBNil = errors.New("db uninitialized") - errKeyNil = errors.New("key uninitialized") -) - -type user struct { - // This user's database, acquired from the keystore - db *encdb.Database -} - -// Get the addresses controlled by this user -func (u *user) getAddresses() ([]common.Address, error) { - if u.db == nil { - return nil, errDBNil - } - - // If user has no addresses, return empty list - hasAddresses, err := u.db.Has(addressesKey) - if err != nil { - return nil, err - } - if !hasAddresses { - return nil, nil - } - - // User has addresses. Get them. 
- bytes, err := u.db.Get(addressesKey) - if err != nil { - return nil, err - } - addresses := []common.Address{} - if _, err := atomic.Codec.Unmarshal(bytes, &addresses); err != nil { - return nil, err - } - return addresses, nil -} - -// controlsAddress returns true iff this user controls the given address -func (u *user) controlsAddress(address common.Address) (bool, error) { - if u.db == nil { - return false, errDBNil - //} else if address.IsZero() { - // return false, errEmptyAddress - } - return u.db.Has(address.Bytes()) -} - -// putAddress persists that this user controls address controlled by [privKey] -func (u *user) putAddress(privKey *secp256k1.PrivateKey) error { - if privKey == nil { - return errKeyNil - } - - address := atomic.GetEthAddress(privKey) // address the privKey controls - controlsAddress, err := u.controlsAddress(address) - if err != nil { - return err - } - if controlsAddress { // user already controls this address. Do nothing. - return nil - } - - if err := u.db.Put(address.Bytes(), privKey.Bytes()); err != nil { // Address --> private key - return err - } - - addresses := make([]common.Address, 0) // Add address to list of addresses user controls - userHasAddresses, err := u.db.Has(addressesKey) - if err != nil { - return err - } - if userHasAddresses { // Get addresses this user already controls, if they exist - if addresses, err = u.getAddresses(); err != nil { - return err - } - } - addresses = append(addresses, address) - bytes, err := atomic.Codec.Marshal(atomic.CodecVersion, addresses) - if err != nil { - return err - } - if err := u.db.Put(addressesKey, bytes); err != nil { - return err - } - return nil -} - -// Key returns the private key that controls the given address -func (u *user) getKey(address common.Address) (*secp256k1.PrivateKey, error) { - if u.db == nil { - return nil, errDBNil - //} else if address.IsZero() { - // return nil, errEmptyAddress - } - - bytes, err := u.db.Get(address.Bytes()) - if err != nil { - return 
nil, err - } - return secp256k1.ToPrivateKey(bytes) -} - -// Return all private keys controlled by this user -func (u *user) getKeys() ([]*secp256k1.PrivateKey, error) { - addrs, err := u.getAddresses() - if err != nil { - return nil, err - } - keys := make([]*secp256k1.PrivateKey, len(addrs)) - for i, addr := range addrs { - key, err := u.getKey(addr) - if err != nil { - return nil, err - } - keys[i] = key - } - return keys, nil -} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index aebc94814c..63824ef183 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1768,23 +1768,6 @@ func (vm *VM) startContinuousProfiler() { <-vm.shutdownChan } -func (vm *VM) estimateBaseFee(ctx context.Context) (*big.Int, error) { - // Get the base fee to use - baseFee, err := vm.eth.APIBackend.EstimateBaseFee(ctx) - if err != nil { - return nil, err - } - if baseFee == nil { - baseFee = initialBaseFee - } else { - // give some breathing room - baseFee.Mul(baseFee, big.NewInt(11)) - baseFee.Div(baseFee, big.NewInt(10)) - } - - return baseFee, nil -} - // readLastAccepted reads the last accepted hash from [acceptedBlockDB] and returns the // last accepted block hash and height by reading directly from [vm.chaindb] instead of relying // on [chain]. 
@@ -1877,39 +1860,6 @@ func (vm *VM) newImportTx( return atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock, chainID, to, baseFee, kc, atomicUTXOs) } -// newExportTx returns a new ExportTx -func (vm *VM) newExportTx( - assetID ids.ID, // AssetID of the tokens to export - amount uint64, // Amount of tokens to export - chainID ids.ID, // Chain to send the UTXOs to - to ids.ShortID, // Address of chain recipient - baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens -) (*atomic.Tx, error) { - state, err := vm.blockChain.State() - if err != nil { - return nil, err - } - - // Create the transaction - tx, err := atomic.NewExportTx( - vm.ctx, // Context - vm.currentRules(), // VM rules - state, - assetID, // AssetID - amount, // Amount - chainID, // ID of the chain to send the funds to - to, // Address - baseFee, - keys, // Private keys - ) - if err != nil { - return nil, err - } - - return tx, nil -} - func (vm *VM) PutLastAcceptedID(ID []byte) error { return vm.acceptedBlockDB.Put(lastAcceptedKey, ID) } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 0a4f9ebc20..3e05606922 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -610,7 +610,7 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatal("Expected logs to be non-nil") } - exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + exportTx, err := vm.newExportTx(importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -973,6 +973,37 @@ func testConflictingImportTxs(t *testing.T, genesis string) { } } +// newExportTx returns a new ExportTx +func (vm *VM) newExportTx( + amount uint64, // Amount of tokens to export + chainID ids.ID, // Chain to send the UTXOs to + to ids.ShortID, // 
Address of chain recipient + baseFee *big.Int, // fee to use post-AP3 + keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens +) (*atomic.Tx, error) { + state, err := vm.blockChain.State() + if err != nil { + return nil, err + } + + // Create the transaction + tx, err := atomic.NewExportTx( + vm.ctx, // Context + vm.currentRules(), // VM rules + state, + amount, // Amount + chainID, // ID of the chain to send the funds to + to, // Address + baseFee, + keys, // Private keys + ) + if err != nil { + return nil, err + } + + return tx, nil +} + func TestReissueAtomicTxHigherGasPrice(t *testing.T) { kc := secp256k1fx.NewKeychain(testKeys...) From 1815741bfde48711939cb35294b4ba9167617e4a Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 23 Dec 2024 18:53:26 +0300 Subject: [PATCH 12/91] bump avago --- go.mod | 12 ++++++------ go.sum | 9 +++++++++ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 2a6106661b..0e194918c2 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa + github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 @@ -38,11 +38,11 @@ require ( github.com/urfave/cli/v2 v2.25.7 go.uber.org/goleak v1.3.0 go.uber.org/mock v0.4.0 - golang.org/x/crypto v0.26.0 + golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20231127185646-65229373498e - golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 - golang.org/x/text v0.17.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 + golang.org/x/text v0.21.0 golang.org/x/time v0.3.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -121,7 +121,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/net v0.28.0 // indirect - 
golang.org/x/term v0.23.0 // indirect + golang.org/x/term v0.27.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect diff --git a/go.sum b/go.sum index 9ddf41790c..222315377d 100644 --- a/go.sum +++ b/go.sum @@ -56,6 +56,10 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa h1:8eSy+tegp9Kq2zft54wk0FyWU87utdrVwsj9EBIb/NA= github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa/go.mod h1:256D2s2FIKo07uUeY25uDXFuqBo6TeWIJqeEA+Xchwk= +github.com/ava-labs/avalanchego v1.12.2-0.20241223153457-b91ce7d85936 h1:v42GPXrOEM6nkwvZbWHkoz24o3goztK8OBzBswjVUnE= +github.com/ava-labs/avalanchego v1.12.2-0.20241223153457-b91ce7d85936/go.mod h1:cDoT0Hq3P+/XfCyVvzrBj66yoid2I5LnMuj7LIkap+o= +github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe h1:fkdavyY3MNRF64ZlxCaDUy4/WhprH7PA8tbjb4IsvLc= +github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe/go.mod h1:cDoT0Hq3P+/XfCyVvzrBj66yoid2I5LnMuj7LIkap+o= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -603,6 +607,7 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod 
h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -710,6 +715,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -778,10 +784,12 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= golang.org/x/term v0.23.0/go.mod 
h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -794,6 +802,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 2b13c8ec2ff27948c6c0b9fe80b69ab96ecd329d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 23 Dec 2024 18:54:26 +0300 Subject: [PATCH 13/91] bump avago --- go.mod | 12 ++++++------ go.sum | 7 +++++++ scripts/versions.sh | 2 +- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index ea7f6cc744..0e194918c2 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1 + github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 @@ -38,11 +38,11 @@ require ( github.com/urfave/cli/v2 v2.25.7 go.uber.org/goleak v1.3.0 
go.uber.org/mock v0.4.0 - golang.org/x/crypto v0.26.0 + golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20231127185646-65229373498e - golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 - golang.org/x/text v0.17.0 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 + golang.org/x/text v0.21.0 golang.org/x/time v0.3.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -121,7 +121,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/net v0.28.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/term v0.27.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect diff --git a/go.sum b/go.sum index 435d1113c8..3648cd7306 100644 --- a/go.sum +++ b/go.sum @@ -58,6 +58,8 @@ github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa h1:8eSy+te github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa/go.mod h1:256D2s2FIKo07uUeY25uDXFuqBo6TeWIJqeEA+Xchwk= github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1 h1:3Zqc3TxHt6gsdSFD/diW2f2jT2oCx0rppN7yoXxviQg= github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1/go.mod h1:Wxl57pLTlR/8pkaNtou8HiynG+xdgiF4YnzFuJyqSDg= +github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe h1:fkdavyY3MNRF64ZlxCaDUy4/WhprH7PA8tbjb4IsvLc= +github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe/go.mod h1:cDoT0Hq3P+/XfCyVvzrBj66yoid2I5LnMuj7LIkap+o= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -605,6 +607,7 @@ golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -712,6 +715,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -780,10 +784,12 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -796,6 +802,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/scripts/versions.sh b/scripts/versions.sh index ce7b1cdb05..8ea089db0f 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -6,4 +6,4 @@ set -euo pipefail # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'f3ca1a0f8bb1'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'e8356d529cbe'} From 2798ad67da1f5d04f3ed0a0e3c542114fbb67e9d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 23 Dec 2024 19:02:01 +0300 Subject: [PATCH 14/91] go mod tidy --- go.sum | 19 +++++-------------- 1 file 
changed, 5 insertions(+), 14 deletions(-) diff --git a/go.sum b/go.sum index 3648cd7306..f4e79a03f9 100644 --- a/go.sum +++ b/go.sum @@ -54,10 +54,6 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa h1:8eSy+tegp9Kq2zft54wk0FyWU87utdrVwsj9EBIb/NA= -github.com/ava-labs/avalanchego v1.12.1-0.20241209214115-1dc4192013aa/go.mod h1:256D2s2FIKo07uUeY25uDXFuqBo6TeWIJqeEA+Xchwk= -github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1 h1:3Zqc3TxHt6gsdSFD/diW2f2jT2oCx0rppN7yoXxviQg= -github.com/ava-labs/avalanchego v1.12.1-0.20241211144846-f3ca1a0f8bb1/go.mod h1:Wxl57pLTlR/8pkaNtou8HiynG+xdgiF4YnzFuJyqSDg= github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe h1:fkdavyY3MNRF64ZlxCaDUy4/WhprH7PA8tbjb4IsvLc= github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe/go.mod h1:cDoT0Hq3P+/XfCyVvzrBj66yoid2I5LnMuj7LIkap+o= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= @@ -605,8 +601,7 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod 
h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -713,8 +708,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -782,13 +776,11 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -800,8 +792,7 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 278e055f835ff3da1f1050da418fc2b301f59d05 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 23 Dec 2024 19:25:52 +0300 Subject: [PATCH 15/91] update releases md --- RELEASES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/RELEASES.md b/RELEASES.md index b977d63812..7f8393b5de 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -2,6 +2,7 @@ ## 
[v0.14.1](https://github.com/ava-labs/coreth/releases/tag/v0.14.1) - Remove API eth_getAssetBalance that was used to query ANT balances (deprecated since v0.10.0) +- Remove legacy gossip handler and metrics (deprecated since v0.10.0) ## [v0.14.0](https://github.com/ava-labs/coreth/releases/tag/v0.14.0) - Minor version update to correspond to avalanchego v1.12.0 / Etna. From e5f2c278bf01fea39533759d34b098a0ca41965d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 24 Dec 2024 21:07:30 +0300 Subject: [PATCH 16/91] use address methods from avago --- go.mod | 2 +- go.sum | 4 ++-- plugin/evm/atomic/export_tx.go | 7 +++---- plugin/evm/client/client.go | 6 ++++-- plugin/evm/client/utils.go | 13 +++++++++++++ plugin/evm/export_tx_test.go | 31 +++++++++++++++---------------- plugin/evm/import_tx_test.go | 7 +++---- plugin/evm/service.go | 5 ++--- plugin/evm/tx_gossip_test.go | 18 +++++++++--------- plugin/evm/user.go | 3 +-- plugin/evm/vm_test.go | 22 +++++++++++----------- scripts/versions.sh | 2 +- utils/utils.go | 31 ------------------------------- 13 files changed, 65 insertions(+), 86 deletions(-) create mode 100644 plugin/evm/client/utils.go delete mode 100644 utils/utils.go diff --git a/go.mod b/go.mod index 0e194918c2..b750d0d897 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe + github.com/ava-labs/avalanchego v1.12.2-0.20241224161435-3998475d671d github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index f4e79a03f9..01fbb024fa 100644 --- a/go.sum +++ b/go.sum @@ -54,8 +54,8 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache 
v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe h1:fkdavyY3MNRF64ZlxCaDUy4/WhprH7PA8tbjb4IsvLc= -github.com/ava-labs/avalanchego v1.12.2-0.20241223154608-e8356d529cbe/go.mod h1:cDoT0Hq3P+/XfCyVvzrBj66yoid2I5LnMuj7LIkap+o= +github.com/ava-labs/avalanchego v1.12.2-0.20241224161435-3998475d671d h1:QCtjS4ANcNfCdL6Z2sKpanDVJNt1MU0bUyVdW0g5zuU= +github.com/ava-labs/avalanchego v1.12.2-0.20241224161435-3998475d671d/go.mod h1:cDoT0Hq3P+/XfCyVvzrBj66yoid2I5LnMuj7LIkap+o= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index 906753265c..4938ddc53e 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -10,7 +10,6 @@ import ( "math/big" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/utils" "github.com/holiman/uint256" "github.com/ava-labs/avalanchego/chains/atomic" @@ -241,7 +240,7 @@ func (utx *UnsignedExportTx) SemanticVerify( if err != nil { return err } - if input.Address != utils.PublicKeyToEthAddress(pubKey) { + if input.Address != pubKey.EthAddress() { return errPublicKeySignatureMismatch } } @@ -432,7 +431,7 @@ func GetSpendableFunds( if amount == 0 { break } - addr := utils.GetEthAddress(key) + addr := key.EthAddress() var balance uint64 if assetID == ctx.AVAXAssetID { // If the asset is AVAX, we divide by the x2cRate to convert back to the correct @@ -515,7 +514,7 @@ func GetSpendableAVAXWithFee( additionalFee := newFee - prevFee - addr := utils.GetEthAddress(key) + 
addr := key.EthAddress() // Since the asset is AVAX, we divide by the x2cRate to convert back to // the correct denomination of AVAX that can be exported. balance := new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() diff --git a/plugin/evm/client/client.go b/plugin/evm/client/client.go index 93ac27ed5b..ddf05184d7 100644 --- a/plugin/evm/client/client.go +++ b/plugin/evm/client/client.go @@ -5,6 +5,7 @@ package client import ( "context" + "errors" "fmt" "github.com/ethereum/go-ethereum/common" @@ -19,12 +20,13 @@ import ( "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/rpc" "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/utils" ) // Interface compliance var _ Client = (*client)(nil) +var errInvalidAddr = errors.New("invalid hex address") + // Client interface for interacting with EVM [chain] type Client interface { IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) @@ -180,7 +182,7 @@ func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *s if err != nil { return common.Address{}, err } - return utils.ParseEthAddress(res.Address) + return ParseEthAddress(res.Address) } // ImportArgs are arguments for passing into Import requests diff --git a/plugin/evm/client/utils.go b/plugin/evm/client/utils.go new file mode 100644 index 0000000000..5ea43f4a20 --- /dev/null +++ b/plugin/evm/client/utils.go @@ -0,0 +1,13 @@ +// (c) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package client + +import "github.com/ethereum/go-ethereum/common" + +func ParseEthAddress(addrStr string) (common.Address, error) { + if !common.IsHexAddress(addrStr) { + return common.Address{}, errInvalidAddr + } + return common.HexToAddress(addrStr), nil +} diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 61c80fb7dd..eabbb05ede 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -19,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -35,7 +34,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, Amt: uint64(50000000), OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } @@ -50,7 +49,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -104,8 +103,8 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, func TestExportTxEVMStateTransfer(t *testing.T) { key := testKeys[0] - addr := key.PublicKey().Address() - ethAddr := utils.GetEthAddress(key) + addr := key.Address() + ethAddr := key.EthAddress() avaxAmount := 50 * units.MilliAvax avaxUTXOID := avax.UTXOID{ @@ -452,7 +451,7 @@ func TestExportTxSemanticVerify(t *testing.T) { parent := vm.LastAcceptedBlockInternal().(*Block) key := testKeys[0] - addr := key.PublicKey().Address() + addr := key.Address() ethAddr := testEthAddrs[0] var ( @@ -949,7 +948,7 @@ func TestExportTxAccept(t *testing.T) { }() key := testKeys[0] - addr := key.PublicKey().Address() + addr := key.Address() 
ethAddr := testEthAddrs[0] var ( @@ -1716,7 +1715,7 @@ func TestNewExportTx(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } @@ -1731,7 +1730,7 @@ func TestNewExportTx(t *testing.T) { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -1823,7 +1822,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - addr := utils.GetEthAddress(testKeys[0]) + addr := testKeys[0].EthAddress() if sdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), sdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } @@ -1889,7 +1888,7 @@ func TestNewExportTxMulticoin(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } @@ -1910,7 +1909,7 @@ func TestNewExportTxMulticoin(t *testing.T) { Amt: importAmount2, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } @@ -1926,14 +1925,14 @@ func TestNewExportTxMulticoin(t *testing.T) { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }, { Key: inputID2[:], Value: utxoBytes2, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }, }}}); err != nil { @@ -1971,7 +1970,7 @@ func TestNewExportTxMulticoin(t *testing.T) { parent = vm.LastAcceptedBlockInternal().(*Block) exportAmount := uint64(5000000) - testKeys0Addr := utils.GetEthAddress(testKeys[0]) + testKeys0Addr := testKeys[0].EthAddress() exportId, err := 
ids.ToShortID(testKeys0Addr[:]) if err != nil { t.Fatal(err) @@ -2023,7 +2022,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - addr := utils.GetEthAddress(testKeys[0]) + addr := testKeys[0].EthAddress() if stdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), stdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index 58f7baa6fc..3195fd7dae 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -9,7 +9,6 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" @@ -33,7 +32,7 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.M Amt: uint64(50000000), OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } @@ -48,7 +47,7 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.M Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -499,7 +498,7 @@ func TestNewImportTx(t *testing.T) { expectedRemainingBalance := new(uint256.Int).Mul( uint256.NewInt(importAmount-actualAVAXBurned), atomic.X2CRate) - addr := utils.GetEthAddress(testKeys[0]) + addr := testKeys[0].EthAddress() if actualBalance := sdb.GetBalance(addr); actualBalance.Cmp(expectedRemainingBalance) != 0 { t.Fatalf("address remaining balance %s equal %s not %s", addr.String(), actualBalance, expectedRemainingBalance) } diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 92529e32c2..40f09314f4 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -18,7 +18,6 
@@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/client" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" @@ -97,7 +96,7 @@ func (service *AvaxAPI) Version(r *http.Request, _ *struct{}, reply *VersionRepl func (service *AvaxAPI) ExportKey(r *http.Request, args *client.ExportKeyArgs, reply *client.ExportKeyReply) error { log.Info("EVM: ExportKey called") - address, err := utils.ParseEthAddress(args.Address) + address, err := client.ParseEthAddress(args.Address) if err != nil { return fmt.Errorf("couldn't parse %s to address: %s", args.Address, err) } @@ -128,7 +127,7 @@ func (service *AvaxAPI) ImportKey(r *http.Request, args *client.ImportKeyArgs, r return errMissingPrivateKey } - reply.Address = utils.GetEthAddress(args.PrivateKey).Hex() + reply.Address = args.PrivateKey.EthAddress().Hex() service.vm.ctx.Lock.Lock() defer service.vm.ctx.Lock.Unlock() diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 99cef8beb3..514b7fadbd 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -48,7 +48,7 @@ func TestEthTxGossip(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := utils.GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -176,7 +176,7 @@ func TestAtomicTxGossip(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := utils.GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -265,7 +265,7 @@ func TestAtomicTxGossip(t *testing.T) { 0, snowCtx.AVAXAssetID, 100_000_000_000, - pk.PublicKey().Address(), + 
pk.Address(), ) require.NoError(err) tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) @@ -315,7 +315,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := utils.GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -375,7 +375,7 @@ func TestEthTxPushGossipInbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := utils.GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -434,7 +434,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := utils.GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -473,7 +473,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { 0, snowCtx.AVAXAssetID, 100_000_000_000, - pk.PublicKey().Address(), + pk.Address(), ) require.NoError(err) tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) @@ -507,7 +507,7 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) - address := utils.GetEthAddress(pk) + address := pk.EthAddress() genesis := newPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -544,7 +544,7 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { 0, snowCtx.AVAXAssetID, 100_000_000_000, - pk.PublicKey().Address(), + pk.Address(), ) 
require.NoError(err) tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) diff --git a/plugin/evm/user.go b/plugin/evm/user.go index 4a68eca2fb..8cb3f73f34 100644 --- a/plugin/evm/user.go +++ b/plugin/evm/user.go @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" ) @@ -71,7 +70,7 @@ func (u *user) putAddress(privKey *secp256k1.PrivateKey) error { return errKeyNil } - address := utils.GetEthAddress(privKey) // address the privKey controls + address := privKey.EthAddress() // address the privKey controls controlsAddress, err := u.controlsAddress(address) if err != nil { return err diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 668a1b31c2..38f4de9f3c 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -161,8 +161,8 @@ func init() { b, _ = cb58.Decode(key) pk, _ := secp256k1.ToPrivateKey(b) testKeys = append(testKeys, pk) - testEthAddrs = append(testEthAddrs, utils.GetEthAddress(pk)) - testShortIDAddrs = append(testShortIDAddrs, pk.PublicKey().Address()) + testEthAddrs = append(testEthAddrs, pk.EthAddress()) + testShortIDAddrs = append(testShortIDAddrs, pk.Address()) } } @@ -1390,10 +1390,10 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { } key0 := testKeys[0] - addr0 := key0.PublicKey().Address() + addr0 := key0.Address() key1 := testKeys[1] - addr1 := key1.PublicKey().Address() + addr1 := key1.Address() importAmount := uint64(1000000000) @@ -1533,7 +1533,7 @@ func TestBonusBlocksTxs(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } @@ -1548,7 +1548,7 @@ func 
TestBonusBlocksTxs(t *testing.T) { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -3071,7 +3071,7 @@ func TestBuildInvalidBlockHead(t *testing.T) { }() key0 := testKeys[0] - addr0 := key0.PublicKey().Address() + addr0 := key0.Address() // Create the transaction utx := &atomic.UnsignedImportTx{ @@ -3228,7 +3228,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } @@ -3243,7 +3243,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -3398,7 +3398,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].PublicKey().Address()}, + Addrs: []ids.ShortID{testKeys[0].Address()}, }, }, } @@ -3413,7 +3413,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].PublicKey().Address().Bytes(), + testKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) diff --git a/scripts/versions.sh b/scripts/versions.sh index 8ea089db0f..46f7f2376e 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -6,4 +6,4 @@ set -euo pipefail # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'e8356d529cbe'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'3998475d671d'} diff --git a/utils/utils.go b/utils/utils.go deleted file mode 100644 index af2c0f822d..0000000000 --- a/utils/utils.go +++ /dev/null @@ -1,31 +0,0 @@ -// (c) 2019-2024, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package utils - -import ( - "errors" - - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -var errInvalidAddr = errors.New("invalid hex address") - -func ParseEthAddress(addrStr string) (common.Address, error) { - if !common.IsHexAddress(addrStr) { - return common.Address{}, errInvalidAddr - } - return common.HexToAddress(addrStr), nil -} - -// GetEthAddress returns the ethereum address derived from [privKey] -func GetEthAddress(privKey *secp256k1.PrivateKey) common.Address { - return PublicKeyToEthAddress(privKey.PublicKey()) -} - -// PublicKeyToEthAddress returns the ethereum address derived from [pubKey] -func PublicKeyToEthAddress(pubKey *secp256k1.PublicKey) common.Address { - return crypto.PubkeyToAddress(*(pubKey.ToECDSA())) -} From 71babe6231298708d7ad3a2419086f11af438bcc Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 24 Dec 2024 21:26:34 +0300 Subject: [PATCH 17/91] bump avago --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index b750d0d897..5d184fd9e5 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.12.2-0.20241224161435-3998475d671d + github.com/ava-labs/avalanchego v1.12.2-0.20241224181600-fade5be3051d github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 01fbb024fa..40a7c0d3cf 100644 --- a/go.sum +++ b/go.sum @@ -56,6 +56,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/avalanchego v1.12.2-0.20241224161435-3998475d671d 
h1:QCtjS4ANcNfCdL6Z2sKpanDVJNt1MU0bUyVdW0g5zuU= github.com/ava-labs/avalanchego v1.12.2-0.20241224161435-3998475d671d/go.mod h1:cDoT0Hq3P+/XfCyVvzrBj66yoid2I5LnMuj7LIkap+o= +github.com/ava-labs/avalanchego v1.12.2-0.20241224181600-fade5be3051d h1:iPlsqC9pIy4emCo8wyI/VmVmfljpzmw58ZqahVdcehI= +github.com/ava-labs/avalanchego v1.12.2-0.20241224181600-fade5be3051d/go.mod h1:dKawab3nXqwI7ZcOFatTOv//l1V0t8MRBnhXoOqbN4E= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From aab47b6ca331abfe2b3947c2ff47b7ae66766786 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 24 Dec 2024 21:32:24 +0300 Subject: [PATCH 18/91] bump e2e avago version --- scripts/versions.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/versions.sh b/scripts/versions.sh index 46f7f2376e..2018ff1398 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -6,4 +6,4 @@ set -euo pipefail # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'3998475d671d'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'fade5be3051d'} From 6f1adc8852f69e6b346a24de5682f98cbe92e008 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 24 Dec 2024 22:01:31 +0300 Subject: [PATCH 19/91] Update plugin/evm/atomic/gossip_test.go Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- plugin/evm/atomic/gossip_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugin/evm/atomic/gossip_test.go b/plugin/evm/atomic/gossip_test.go index edd88bae18..c1db9435bb 100644 --- a/plugin/evm/atomic/gossip_test.go +++ b/plugin/evm/atomic/gossip_test.go @@ -27,7 +27,8 @@ func TestGossipAtomicTxMarshaller(t *testing.T) { key0, err := secp256k1.NewPrivateKey() require.NoError(err) - 
require.NoError(want.Tx.Sign(Codec, [][]*secp256k1.PrivateKey{{key0}})) + err = want.Tx.Sign(Codec, [][]*secp256k1.PrivateKey{{key0}}) + require.NoError(err) bytes, err := marshaller.MarshalGossip(want) require.NoError(err) From 876716ff3ec4568f16b0e8b58dc73e7448dce093 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 24 Dec 2024 22:14:31 +0300 Subject: [PATCH 20/91] Update plugin/evm/atomic/mempool.go Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- plugin/evm/atomic/mempool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/atomic/mempool.go b/plugin/evm/atomic/mempool.go index 69e1e509b6..f74eacda7e 100644 --- a/plugin/evm/atomic/mempool.go +++ b/plugin/evm/atomic/mempool.go @@ -184,7 +184,7 @@ func (m *Mempool) AddLocalTx(tx *Tx) error { return err } -// forceAddTx forcibly adds a *Tx to the mempool and bypasses all verification. +// ForceAddTx forcibly adds a *Tx to the mempool and bypasses all verification. func (m *Mempool) ForceAddTx(tx *Tx) error { m.lock.Lock() defer m.lock.Unlock() From 682e4bc0e360835feb7a9b0305e20b791eaff756 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 24 Dec 2024 22:18:55 +0300 Subject: [PATCH 21/91] Update plugin/evm/config/config.go Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- plugin/evm/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go index 748f9e115a..3b8dca3807 100644 --- a/plugin/evm/config/config.go +++ b/plugin/evm/config/config.go @@ -307,7 +307,7 @@ func (c *Config) Validate(networkID uint32) error { // Ensure that non-standard commit interval is not allowed for production networks if constants.ProductionNetworkIDs.Contains(networkID) { if c.CommitInterval != defaultCommitInterval { - return fmt.Errorf("cannot start non-local network with commit interval %d", c.CommitInterval) + return fmt.Errorf("cannot start non-local network with commit interval %d different than 
%d", c.CommitInterval, defaultCommitInterval) } if c.StateSyncCommitInterval != defaultSyncableCommitInterval { return fmt.Errorf("cannot start non-local network with syncable interval %d", c.StateSyncCommitInterval) From 38ad86848cf4bc0bea77ea3bbb52b91440ed8f4d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 24 Dec 2024 22:19:06 +0300 Subject: [PATCH 22/91] Update plugin/evm/atomic/tx_heap.go Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- plugin/evm/atomic/tx_heap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/atomic/tx_heap.go b/plugin/evm/atomic/tx_heap.go index 58cbcf0c0b..bcec314cd7 100644 --- a/plugin/evm/atomic/tx_heap.go +++ b/plugin/evm/atomic/tx_heap.go @@ -1,4 +1,4 @@ -// (c) 2020-2021, Ava Labs, Inc. All rights reserved. +// (c) 2020-2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic From de328e93e341cd56b1329369b4e473197ef061d3 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 24 Dec 2024 22:19:30 +0300 Subject: [PATCH 23/91] Update plugin/evm/config/config.go Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- plugin/evm/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go index 3b8dca3807..a92405bcb5 100644 --- a/plugin/evm/config/config.go +++ b/plugin/evm/config/config.go @@ -310,7 +310,7 @@ func (c *Config) Validate(networkID uint32) error { return fmt.Errorf("cannot start non-local network with commit interval %d different than %d", c.CommitInterval, defaultCommitInterval) } if c.StateSyncCommitInterval != defaultSyncableCommitInterval { - return fmt.Errorf("cannot start non-local network with syncable interval %d", c.StateSyncCommitInterval) + return fmt.Errorf("cannot start non-local network with syncable interval %d different than %d", c.StateSyncCommitInterval, defaultSyncableCommitInterval) } } From 55c0be98a71794d1b2ef3accbe79bc6ed0e5f48e 
Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 24 Dec 2024 22:20:20 +0300 Subject: [PATCH 24/91] fix reviews --- peer/network.go | 1 - peer/network_test.go | 20 +++++++++++--------- plugin/evm/atomic/gossip_test.go | 20 ++++++++------------ plugin/evm/atomic/test_tx.go | 2 -- plugin/evm/atomic_trie_iterator_test.go | 3 +-- 5 files changed, 20 insertions(+), 26 deletions(-) diff --git a/peer/network.go b/peer/network.go index ebe067fe0c..a4dfd015f6 100644 --- a/peer/network.go +++ b/peer/network.go @@ -340,7 +340,6 @@ func (n *network) markRequestFulfilled(requestID uint32) (message.ResponseHandle // from a peer. An error returned by this function is treated as fatal by the // engine. func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) error { - log.Debug("forwarding AppGossip to SDK network", "nodeID", nodeID, "gossipLen", len(gossipBytes)) return n.p2pNetwork.AppGossip(ctx, nodeID, gossipBytes) } diff --git a/peer/network_test.go b/peer/network_test.go index 3d13c6e679..6f357be874 100644 --- a/peer/network_test.go +++ b/peer/network_test.go @@ -8,7 +8,7 @@ import ( "errors" "fmt" "sync" - syncatomic "sync/atomic" + "sync/atomic" "testing" "time" @@ -30,6 +30,10 @@ import ( "github.com/ava-labs/avalanchego/version" ) +const ( + codecVersion uint16 = 0 +) + var ( defaultPeerVersion = &version.Application{ Major: 1, @@ -37,8 +41,6 @@ var ( Patch: 0, } - codecVersion uint16 = 0 - _ message.Request = &HelloRequest{} _ = &HelloResponse{} _ = &GreetingRequest{} @@ -85,7 +87,7 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { if err := net.AppResponse(context.Background(), nodeID, requestID, responseBytes); err != nil { panic(err) } - syncatomic.AddUint32(&callNum, 1) + atomic.AddUint32(&callNum, 1) }() return nil }, @@ -130,7 +132,7 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { requestWg.Wait() senderWg.Wait() - assert.Equal(t, totalCalls, int(syncatomic.LoadUint32(&callNum))) + assert.Equal(t, 
totalCalls, int(atomic.LoadUint32(&callNum))) } func TestAppRequestOnCtxCancellation(t *testing.T) { @@ -190,7 +192,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { if err := net.AppResponse(context.Background(), nodeID, requestID, responseBytes); err != nil { panic(err) } - syncatomic.AddUint32(&callNum, 1) + atomic.AddUint32(&callNum, 1) }() return nil }, @@ -245,7 +247,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { requestWg.Wait() senderWg.Wait() - assert.Equal(t, totalCalls, int(syncatomic.LoadUint32(&callNum))) + assert.Equal(t, totalCalls, int(atomic.LoadUint32(&callNum))) for _, nodeID := range nodes { if _, exists := contactedNodes[nodeID]; !exists { t.Fatalf("expected nodeID %s to be contacted but was not", nodeID) @@ -386,13 +388,13 @@ func TestRequestMinVersion(t *testing.T) { var net Network sender := testAppSender{ sendAppRequestFn: func(_ context.Context, nodes set.Set[ids.NodeID], reqID uint32, messageBytes []byte) error { - syncatomic.AddUint32(&callNum, 1) + atomic.AddUint32(&callNum, 1) assert.True(t, nodes.Contains(nodeID), "request nodes should contain expected nodeID") assert.Len(t, nodes, 1, "request nodes should contain exactly one node") go func() { time.Sleep(200 * time.Millisecond) - syncatomic.AddUint32(&callNum, 1) + atomic.AddUint32(&callNum, 1) responseBytes, err := codecManager.Marshal(codecVersion, TestMessage{Message: "this is a response"}) if err != nil { panic(err) diff --git a/plugin/evm/atomic/gossip_test.go b/plugin/evm/atomic/gossip_test.go index edd88bae18..920244432b 100644 --- a/plugin/evm/atomic/gossip_test.go +++ b/plugin/evm/atomic/gossip_test.go @@ -56,11 +56,10 @@ func TestAtomicMempoolIterate(t *testing.T) { } tests := []struct { - name string - add []*GossipAtomicTx - f func(tx *GossipAtomicTx) bool - possibleValues []*GossipAtomicTx - expectedLen int + name string + add []*GossipAtomicTx + f func(tx *GossipAtomicTx) bool + expectedTxs []*GossipAtomicTx }{ { name: "func matches 
nothing", @@ -68,7 +67,7 @@ func TestAtomicMempoolIterate(t *testing.T) { f: func(*GossipAtomicTx) bool { return false }, - possibleValues: nil, + expectedTxs: []*GossipAtomicTx{}, }, { name: "func matches all", @@ -76,8 +75,7 @@ func TestAtomicMempoolIterate(t *testing.T) { f: func(*GossipAtomicTx) bool { return true }, - possibleValues: txs, - expectedLen: 2, + expectedTxs: txs, }, { name: "func matches subset", @@ -85,8 +83,7 @@ func TestAtomicMempoolIterate(t *testing.T) { f: func(tx *GossipAtomicTx) bool { return tx.Tx == txs[0].Tx }, - possibleValues: txs, - expectedLen: 1, + expectedTxs: []*GossipAtomicTx{txs[0]}, }, } @@ -113,8 +110,7 @@ func TestAtomicMempoolIterate(t *testing.T) { m.Iterate(f) - require.Len(matches, tt.expectedLen) - require.Subset(tt.possibleValues, matches) + require.ElementsMatch(tt.expectedTxs, matches) }) } } diff --git a/plugin/evm/atomic/test_tx.go b/plugin/evm/atomic/test_tx.go index 50af59e09f..dcf62258b8 100644 --- a/plugin/evm/atomic/test_tx.go +++ b/plugin/evm/atomic/test_tx.go @@ -29,8 +29,6 @@ func init() { errs := wrappers.Errs{} errs.Add( c.RegisterType(&TestUnsignedTx{}), - c.RegisterType(&avalancheatomic.Element{}), - c.RegisterType(&avalancheatomic.Requests{}), TestTxCodec.RegisterCodec(atomic.CodecVersion, c), ) diff --git a/plugin/evm/atomic_trie_iterator_test.go b/plugin/evm/atomic_trie_iterator_test.go index 50ba586ffd..21ec9913f9 100644 --- a/plugin/evm/atomic_trie_iterator_test.go +++ b/plugin/evm/atomic_trie_iterator_test.go @@ -11,11 +11,10 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/ava-labs/coreth/plugin/evm/atomic" ) func testSharedMemory() avalancheatomic.SharedMemory { From f17d97cb06013e52cfd215cc924fc3c7a30034c0 Mon Sep 17 
00:00:00 2001 From: Ceyhun Onur Date: Sat, 28 Dec 2024 20:19:09 +0300 Subject: [PATCH 25/91] decouple atomic logic from statesyncers --- plugin/evm/atomic/atomic_sync_extender.go | 66 ++++++++++++ plugin/evm/atomic/atomic_sync_provider.go | 49 +++++++++ plugin/evm/atomic/syncable.go | 116 ++++++++++++++++++++++ plugin/evm/message/codec.go | 8 +- plugin/evm/message/syncable.go | 75 +++++++++----- plugin/evm/sync/block_provider.go | 38 +++++++ plugin/evm/{ => sync}/syncervm_client.go | 85 +++++++--------- plugin/evm/{ => sync}/syncervm_server.go | 58 ++--------- plugin/evm/syncervm_test.go | 23 ++--- plugin/evm/vm.go | 24 ++--- 10 files changed, 394 insertions(+), 148 deletions(-) create mode 100644 plugin/evm/atomic/atomic_sync_extender.go create mode 100644 plugin/evm/atomic/atomic_sync_provider.go create mode 100644 plugin/evm/atomic/syncable.go create mode 100644 plugin/evm/sync/block_provider.go rename plugin/evm/{ => sync}/syncervm_client.go (85%) rename plugin/evm/{ => sync}/syncervm_server.go (52%) diff --git a/plugin/evm/atomic/atomic_sync_extender.go b/plugin/evm/atomic/atomic_sync_extender.go new file mode 100644 index 0000000000..84e854dd23 --- /dev/null +++ b/plugin/evm/atomic/atomic_sync_extender.go @@ -0,0 +1,66 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+// TODO: move to separate package +package atomic + +import ( + "context" + "fmt" + + syncclient "github.com/ava-labs/coreth/sync/client" + + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ethereum/go-ethereum/log" +) + +type atomicSyncExtender struct { + backend AtomicBackend + stateSyncRequestSize uint16 +} + +func NewAtomicSyncExtender(backend AtomicBackend, stateSyncRequestSize uint16) sync.Extender { + return &atomicSyncExtender{ + backend: backend, + stateSyncRequestSize: stateSyncRequestSize, + } +} + +func (a *atomicSyncExtender) Sync(ctx context.Context, client syncclient.Client, syncSummary message.Syncable) error { + atomicSyncSummary, ok := syncSummary.(*AtomicBlockSyncSummary) + if !ok { + return fmt.Errorf("expected AtomicBlockSyncSummary, got %T", syncSummary) + } + log.Info("atomic tx: sync starting", "root", atomicSyncSummary) + atomicSyncer, err := a.backend.Syncer(client, atomicSyncSummary.AtomicRoot, atomicSyncSummary.BlockNumber, a.stateSyncRequestSize) + if err != nil { + return err + } + if err := atomicSyncer.Start(ctx); err != nil { + return err + } + err = <-atomicSyncer.Done() + log.Info("atomic tx: sync finished", "root", atomicSyncSummary.AtomicRoot, "err", err) + return err +} + +func (a *atomicSyncExtender) OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error { + // Mark the previously last accepted block for the shared memory cursor, so that we will execute shared + // memory operations from the previously last accepted block when ApplyToSharedMemory + // is called. + if err := a.backend.MarkApplyToSharedMemoryCursor(lastAcceptedHeight); err != nil { + return err + } + a.backend.SetLastAccepted(syncSummary.GetBlockHash()) + return nil +} + +func (a *atomicSyncExtender) OnFinishAfterCommit(summaryHeight uint64) error { + // the chain state is already restored, and from this point on + // the block synced to is the accepted block. 
the last operation + // is updating shared memory with the atomic trie. + // ApplyToSharedMemory does this, and even if the VM is stopped + // (gracefully or ungracefully), since MarkApplyToSharedMemoryCursor + // is called, VM will resume ApplyToSharedMemory on Initialize. + return a.backend.ApplyToSharedMemory(summaryHeight) +} diff --git a/plugin/evm/atomic/atomic_sync_provider.go b/plugin/evm/atomic/atomic_sync_provider.go new file mode 100644 index 0000000000..2ef3345c62 --- /dev/null +++ b/plugin/evm/atomic/atomic_sync_provider.go @@ -0,0 +1,49 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. +package atomic + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ethereum/go-ethereum/common" +) + +type atomicSyncProvider struct { + chain *core.BlockChain + atomicTrie AtomicTrie +} + +func NewAtomicProvider(chain *core.BlockChain, atomicTrie AtomicTrie) sync.SummaryProvider { + return &atomicSyncProvider{chain: chain, atomicTrie: atomicTrie} +} + +// StateSummaryAtHeight returns the SyncSummary at [height] if valid and available. 
+func (a *atomicSyncProvider) StateSummaryAtHeight(height uint64) (block.StateSummary, error) { + atomicRoot, err := a.atomicTrie.Root(height) + if err != nil { + return nil, fmt.Errorf("error getting atomic trie root for height (%d): %w", height, err) + } + + if (atomicRoot == common.Hash{}) { + return nil, fmt.Errorf("atomic trie root not found for height (%d)", height) + } + + blk := a.chain.GetBlockByNumber(height) + if blk == nil { + return nil, fmt.Errorf("block not found for height (%d)", height) + } + + if !a.chain.HasState(blk.Root()) { + return nil, fmt.Errorf("block root does not exist for height (%d), root (%s)", height, blk.Root()) + } + + summary, err := NewAtomicSyncSummary(blk.Hash(), height, blk.Root(), atomicRoot) + if err != nil { + return nil, fmt.Errorf("failed to construct syncable block at height %d: %w", height, err) + } + return summary, nil +} diff --git a/plugin/evm/atomic/syncable.go b/plugin/evm/atomic/syncable.go new file mode 100644 index 0000000000..a8e79d5a99 --- /dev/null +++ b/plugin/evm/atomic/syncable.go @@ -0,0 +1,116 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +var _ message.Syncable = &AtomicBlockSyncSummary{} + +// AtomicBlockSyncSummary provides the information necessary to sync a node starting +// at the given block. 
+type AtomicBlockSyncSummary struct { + BlockNumber uint64 `serialize:"true"` + BlockHash common.Hash `serialize:"true"` + BlockRoot common.Hash `serialize:"true"` + AtomicRoot common.Hash `serialize:"true"` + + summaryID ids.ID + bytes []byte + acceptImpl message.AcceptImplFn +} + +func init() { + message.SyncSummaryType = &AtomicBlockSyncSummary{} +} + +type atomicSyncSummaryParser struct{} + +func NewAtomicSyncSummaryParser() message.SyncableParser { + return &atomicSyncSummaryParser{} +} + +func (b *atomicSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl message.AcceptImplFn) (message.Syncable, error) { + summary := AtomicBlockSyncSummary{} + if codecVersion, err := Codec.Unmarshal(summaryBytes, &summary); err != nil { + return nil, err + } else if codecVersion != message.Version { + return nil, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (%d != %d)", codecVersion, message.Version) + } + + summary.bytes = summaryBytes + summaryID, err := ids.ToID(crypto.Keccak256(summaryBytes)) + if err != nil { + return nil, err + } + summary.summaryID = summaryID + summary.acceptImpl = acceptImpl + return &summary, nil +} + +func NewAtomicSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash, atomicRoot common.Hash) (message.Syncable, error) { + summary := AtomicBlockSyncSummary{ + BlockNumber: blockNumber, + BlockHash: blockHash, + BlockRoot: blockRoot, + AtomicRoot: atomicRoot, + } + bytes, err := Codec.Marshal(message.Version, &summary) + if err != nil { + return nil, err + } + + summary.bytes = bytes + summaryID, err := ids.ToID(crypto.Keccak256(bytes)) + if err != nil { + return nil, err + } + summary.summaryID = summaryID + + return &summary, nil +} + +func (a *AtomicBlockSyncSummary) GetBlockNumber() uint64 { + return a.BlockNumber +} + +func (a *AtomicBlockSyncSummary) GetBlockHash() common.Hash { + return a.BlockHash +} + +func (a *AtomicBlockSyncSummary) GetBlockRoot() common.Hash { + return 
a.BlockRoot +} + +func (a *AtomicBlockSyncSummary) Bytes() []byte { + return a.bytes +} + +func (a *AtomicBlockSyncSummary) Height() uint64 { + return a.BlockNumber +} + +func (a *AtomicBlockSyncSummary) ID() ids.ID { + return a.summaryID +} + +func (a *AtomicBlockSyncSummary) String() string { + return fmt.Sprintf("SyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s, AtomicRoot=%s)", a.BlockHash, a.BlockNumber, a.BlockRoot, a.AtomicRoot) +} + +func (a *AtomicBlockSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { + if a.acceptImpl == nil { + return block.StateSyncSkipped, fmt.Errorf("accept implementation not specified for summary: %s", a) + } + return a.acceptImpl(a) +} diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go index 664c9252bb..d7de1820c6 100644 --- a/plugin/evm/message/codec.go +++ b/plugin/evm/message/codec.go @@ -15,7 +15,11 @@ const ( maxMessageSize = 2*units.MiB - 64*units.KiB // Subtract 64 KiB from p2p network cap to leave room for encoding overhead from AvalancheGo ) -var Codec codec.Manager +var ( + Codec codec.Manager + // TODO: Remove this once we have a better way to register types (i.e use a different codec version or use build flags) + SyncSummaryType interface{} = BlockSyncSummary{} +) func init() { Codec = codec.NewManager(maxMessageSize) @@ -26,7 +30,7 @@ func init() { c.SkipRegistrations(2) errs.Add( // Types for state sync frontier consensus - c.RegisterType(SyncSummary{}), + c.RegisterType(SyncSummaryType), // state sync types c.RegisterType(BlockRequest{}), diff --git a/plugin/evm/message/syncable.go b/plugin/evm/message/syncable.go index c8631bbb5e..83a8d07325 100644 --- a/plugin/evm/message/syncable.go +++ b/plugin/evm/message/syncable.go @@ -14,78 +14,107 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) -var _ block.StateSummary = &SyncSummary{} +var _ Syncable = &BlockSyncSummary{} -// SyncSummary provides the information necessary to sync a node starting +type 
Syncable interface { + block.StateSummary + GetBlockNumber() uint64 + GetBlockHash() common.Hash + GetBlockRoot() common.Hash +} + +type SyncableParser interface { + ParseFromBytes(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) +} + +type AcceptImplFn func(Syncable) (block.StateSyncMode, error) + +// BlockSyncSummary provides the information necessary to sync a node starting // at the given block. -type SyncSummary struct { +type BlockSyncSummary struct { BlockNumber uint64 `serialize:"true"` BlockHash common.Hash `serialize:"true"` BlockRoot common.Hash `serialize:"true"` - AtomicRoot common.Hash `serialize:"true"` summaryID ids.ID bytes []byte - acceptImpl func(SyncSummary) (block.StateSyncMode, error) + acceptImpl AcceptImplFn +} + +type BlockSyncSummaryParser struct{} + +func NewBlockSyncSummaryParser() SyncableParser { + return &BlockSyncSummaryParser{} } -func NewSyncSummaryFromBytes(summaryBytes []byte, acceptImpl func(SyncSummary) (block.StateSyncMode, error)) (SyncSummary, error) { - summary := SyncSummary{} +func (b *BlockSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) { + summary := BlockSyncSummary{} if codecVersion, err := Codec.Unmarshal(summaryBytes, &summary); err != nil { - return SyncSummary{}, err + return nil, err } else if codecVersion != Version { - return SyncSummary{}, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (%d != %d)", codecVersion, Version) + return nil, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (%d != %d)", codecVersion, Version) } summary.bytes = summaryBytes summaryID, err := ids.ToID(crypto.Keccak256(summaryBytes)) if err != nil { - return SyncSummary{}, err + return nil, err } summary.summaryID = summaryID summary.acceptImpl = acceptImpl - return summary, nil + return &summary, nil } -func NewSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash, atomicRoot common.Hash) 
(SyncSummary, error) { - summary := SyncSummary{ +func NewBlockSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash) (Syncable, error) { + summary := BlockSyncSummary{ BlockNumber: blockNumber, BlockHash: blockHash, BlockRoot: blockRoot, - AtomicRoot: atomicRoot, } bytes, err := Codec.Marshal(Version, &summary) if err != nil { - return SyncSummary{}, err + return nil, err } summary.bytes = bytes summaryID, err := ids.ToID(crypto.Keccak256(bytes)) if err != nil { - return SyncSummary{}, err + return nil, err } summary.summaryID = summaryID - return summary, nil + return &summary, nil +} + +func (s *BlockSyncSummary) GetBlockNumber() uint64 { + return s.BlockNumber +} + +func (s *BlockSyncSummary) GetBlockHash() common.Hash { + return s.BlockHash +} + +func (s *BlockSyncSummary) GetBlockRoot() common.Hash { + return s.BlockRoot } -func (s SyncSummary) Bytes() []byte { +func (s *BlockSyncSummary) Bytes() []byte { return s.bytes } -func (s SyncSummary) Height() uint64 { +func (s *BlockSyncSummary) Height() uint64 { return s.BlockNumber } -func (s SyncSummary) ID() ids.ID { +func (s *BlockSyncSummary) ID() ids.ID { return s.summaryID } -func (s SyncSummary) String() string { - return fmt.Sprintf("SyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s, AtomicRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot, s.AtomicRoot) +func (s *BlockSyncSummary) String() string { + return fmt.Sprintf("SyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot) } -func (s SyncSummary) Accept(context.Context) (block.StateSyncMode, error) { +func (s *BlockSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { if s.acceptImpl == nil { return block.StateSyncSkipped, fmt.Errorf("accept implementation not specified for summary: %s", s) } diff --git a/plugin/evm/sync/block_provider.go b/plugin/evm/sync/block_provider.go new file mode 100644 index 0000000000..2ba38dc0f9 --- /dev/null +++ 
b/plugin/evm/sync/block_provider.go @@ -0,0 +1,38 @@ +// (c) 2021-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. +package sync + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/plugin/evm/message" +) + +type blockProvider struct { + chain *core.BlockChain +} + +// TODO: this should be moved to a different place +func NewBlockProvider(chain *core.BlockChain) SummaryProvider { + return &blockProvider{chain: chain} +} + +// stateSummaryAtHeight returns the SyncSummary at [height] if valid and available. +func (bp *blockProvider) StateSummaryAtHeight(height uint64) (block.StateSummary, error) { + blk := bp.chain.GetBlockByNumber(height) + if blk == nil { + return nil, fmt.Errorf("block not found for height (%d)", height) + } + + if !bp.chain.HasState(blk.Root()) { + return nil, fmt.Errorf("block root does not exist for height (%d), root (%s)", height, blk.Root()) + } + + summary, err := message.NewBlockSyncSummary(blk.Hash(), height, blk.Root()) + if err != nil { + return nil, fmt.Errorf("failed to construct syncable block at height %d: %w", height, err) + } + return summary, nil +} diff --git a/plugin/evm/syncervm_client.go b/plugin/evm/sync/syncervm_client.go similarity index 85% rename from plugin/evm/syncervm_client.go rename to plugin/evm/sync/syncervm_client.go index 15a5b384f5..8ecc48c2ef 100644 --- a/plugin/evm/syncervm_client.go +++ b/plugin/evm/sync/syncervm_client.go @@ -1,7 +1,6 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-// TODO: move to separate package -package evm +package sync import ( "context" @@ -22,7 +21,6 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/statesync" "github.com/ethereum/go-ethereum/common" @@ -47,6 +45,12 @@ type EthBlockWrapper interface { GetEthBlock() *types.Block } +type Extender interface { + Sync(ctx context.Context, client syncclient.Client, syncSummary message.Syncable) error + OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error + OnFinishAfterCommit(summaryHeight uint64) error +} + // StateSyncClientConfig defines the options and dependencies needed to construct a StateSyncerClient type StateSyncClientConfig struct { Enabled bool @@ -59,12 +63,13 @@ type StateSyncClientConfig struct { LastAcceptedHeight uint64 - Chain *eth.Ethereum - State *chain.State - ChaindDB ethdb.Database - Acceptor BlockAcceptor - DB *versiondb.Database - AtomicBackend atomic.AtomicBackend + Chain *eth.Ethereum + State *chain.State + ChaindDB ethdb.Database + Acceptor BlockAcceptor + DB *versiondb.Database + SyncableParser message.SyncableParser + ExtraSyncer Extender Client syncclient.Client @@ -74,13 +79,13 @@ type StateSyncClientConfig struct { type stateSyncerClient struct { *StateSyncClientConfig - resumableSummary message.SyncSummary + resumableSummary message.Syncable cancel context.CancelFunc wg sync.WaitGroup // State Sync results - syncSummary message.SyncSummary + syncSummary message.Syncable stateSyncErr error metadataDB database.Database } @@ -131,7 +136,7 @@ func (client *stateSyncerClient) GetOngoingSyncStateSummary(context.Context) (bl return nil, err // includes the [database.ErrNotFound] case } - summary, err := message.NewSyncSummaryFromBytes(summaryBytes, client.acceptSyncSummary) + summary, err := 
client.SyncableParser.ParseFromBytes(summaryBytes, client.acceptSyncSummary) if err != nil { return nil, fmt.Errorf("failed to parse saved state sync summary to SyncSummary: %w", err) } @@ -153,13 +158,13 @@ func (client *stateSyncerClient) ClearOngoingSummary() error { // ParseStateSummary parses [summaryBytes] to [commonEng.Summary] func (client *stateSyncerClient) ParseStateSummary(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { - return message.NewSyncSummaryFromBytes(summaryBytes, client.acceptSyncSummary) + return client.SyncableParser.ParseFromBytes(summaryBytes, client.acceptSyncSummary) } // stateSync blockingly performs the state sync for the EVM state and the atomic state // to [client.syncSummary]. returns an error if one occurred. func (client *stateSyncerClient) stateSync(ctx context.Context) error { - if err := client.syncBlocks(ctx, client.syncSummary.BlockHash, client.syncSummary.BlockNumber, StateSyncParentsToFetch); err != nil { + if err := client.syncBlocks(ctx, client.syncSummary.GetBlockHash(), client.syncSummary.GetBlockNumber(), StateSyncParentsToFetch); err != nil { return err } @@ -169,13 +174,14 @@ func (client *stateSyncerClient) stateSync(ctx context.Context) error { return err } - return client.syncAtomicTrie(ctx) + return client.StateSyncClientConfig.ExtraSyncer.Sync(ctx, client.Client, client.syncSummary) } // acceptSyncSummary returns true if sync will be performed and launches the state sync process // in a goroutine. 
-func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncSummary) (block.StateSyncMode, error) { - isResume := proposedSummary.BlockHash == client.resumableSummary.BlockHash +func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.Syncable) (block.StateSyncMode, error) { + isResume := client.resumableSummary != nil && + proposedSummary.GetBlockHash() == client.resumableSummary.GetBlockHash() if !isResume { // Skip syncing if the blockchain is not significantly ahead of local state, // since bootstrapping would be faster. @@ -288,25 +294,11 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common return batch.Write() } -func (client *stateSyncerClient) syncAtomicTrie(ctx context.Context) error { - log.Info("atomic tx: sync starting", "root", client.syncSummary.AtomicRoot) - atomicSyncer, err := client.AtomicBackend.Syncer(client.Client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber, client.StateSyncRequestSize) - if err != nil { - return err - } - if err := atomicSyncer.Start(ctx); err != nil { - return err - } - err = <-atomicSyncer.Done() - log.Info("atomic tx: sync finished", "root", client.syncSummary.AtomicRoot, "err", err) - return err -} - func (client *stateSyncerClient) syncStateTrie(ctx context.Context) error { - log.Info("state sync: sync starting", "root", client.syncSummary.BlockRoot) + log.Info("state sync: sync starting", "root", client.syncSummary.GetBlockRoot()) evmSyncer, err := statesync.NewStateSyncer(&statesync.StateSyncerConfig{ Client: client.Client, - Root: client.syncSummary.BlockRoot, + Root: client.syncSummary.GetBlockRoot(), BatchSize: ethdb.IdealBatchSize, DB: client.ChaindDB, MaxOutstandingCodeHashes: statesync.DefaultMaxOutstandingCodeHashes, @@ -320,7 +312,7 @@ func (client *stateSyncerClient) syncStateTrie(ctx context.Context) error { return err } err = <-evmSyncer.Done() - log.Info("state sync: sync finished", "root", client.syncSummary.BlockRoot, 
"err", err) + log.Info("state sync: sync finished", "root", client.syncSummary.GetBlockRoot(), "err", err) return err } @@ -335,9 +327,9 @@ func (client *stateSyncerClient) Shutdown() error { // finishSync is responsible for updating disk and memory pointers so the VM is prepared // for bootstrapping. Executes any shared memory operations from the atomic trie to shared memory. func (client *stateSyncerClient) finishSync() error { - stateBlock, err := client.State.GetBlock(context.TODO(), ids.ID(client.syncSummary.BlockHash)) + stateBlock, err := client.State.GetBlock(context.TODO(), ids.ID(client.syncSummary.GetBlockHash())) if err != nil { - return fmt.Errorf("could not get block by hash from client state: %s", client.syncSummary.BlockHash) + return fmt.Errorf("could not get block by hash from client state: %s", client.syncSummary.GetBlockHash()) } wrapper, ok := stateBlock.(*chain.BlockWrapper) @@ -353,11 +345,11 @@ func (client *stateSyncerClient) finishSync() error { block := evmBlockGetter.GetEthBlock() - if block.Hash() != client.syncSummary.BlockHash { - return fmt.Errorf("attempted to set last summary block to unexpected block hash: (%s != %s)", block.Hash(), client.syncSummary.BlockHash) + if block.Hash() != client.syncSummary.GetBlockHash() { + return fmt.Errorf("attempted to set last summary block to unexpected block hash: (%s != %s)", block.Hash(), client.syncSummary.GetBlockHash()) } - if block.NumberU64() != client.syncSummary.BlockNumber { - return fmt.Errorf("attempted to set last summary block to unexpected block number: (%d != %d)", block.NumberU64(), client.syncSummary.BlockNumber) + if block.NumberU64() != client.syncSummary.GetBlockNumber() { + return fmt.Errorf("attempted to set last summary block to unexpected block number: (%d != %d)", block.NumberU64(), client.syncSummary.GetBlockNumber()) } // BloomIndexer needs to know that some parts of the chain are not available @@ -384,13 +376,7 @@ func (client *stateSyncerClient) finishSync() error { 
return err } - // the chain state is already restored, and from this point on - // the block synced to is the accepted block. the last operation - // is updating shared memory with the atomic trie. - // ApplyToSharedMemory does this, and even if the VM is stopped - // (gracefully or ungracefully), since MarkApplyToSharedMemoryCursor - // is called, VM will resume ApplyToSharedMemory on Initialize. - return client.AtomicBackend.ApplyToSharedMemory(block.NumberU64()) + return client.ExtraSyncer.OnFinishAfterCommit(block.NumberU64()) } // updateVMMarkers updates the following markers in the VM's database @@ -403,11 +389,10 @@ func (client *stateSyncerClient) updateVMMarkers() error { // Mark the previously last accepted block for the shared memory cursor, so that we will execute shared // memory operations from the previously last accepted block to [vm.syncSummary] when ApplyToSharedMemory // is called. - if err := client.AtomicBackend.MarkApplyToSharedMemoryCursor(client.LastAcceptedHeight); err != nil { + if err := client.ExtraSyncer.OnFinishBeforeCommit(client.LastAcceptedHeight, client.syncSummary); err != nil { return err } - client.AtomicBackend.SetLastAccepted(client.syncSummary.BlockHash) - if err := client.Acceptor.PutLastAcceptedID(client.syncSummary.BlockHash.Bytes()); err != nil { + if err := client.Acceptor.PutLastAcceptedID(client.syncSummary.GetBlockHash().Bytes()); err != nil { return err } if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { diff --git a/plugin/evm/syncervm_server.go b/plugin/evm/sync/syncervm_server.go similarity index 52% rename from plugin/evm/syncervm_server.go rename to plugin/evm/sync/syncervm_server.go index f434f4ae7d..249719adc0 100644 --- a/plugin/evm/syncervm_server.go +++ b/plugin/evm/sync/syncervm_server.go @@ -1,33 +1,24 @@ // (c) 2021-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-// TODO: move to separate package -package evm +package sync import ( "context" - "fmt" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -type StateSyncServerConfig struct { - Chain *core.BlockChain - AtomicTrie atomic.AtomicTrie - - // SyncableInterval is the interval at which blocks are eligible to provide syncable block summaries. - SyncableInterval uint64 +type SummaryProvider interface { + StateSummaryAtHeight(height uint64) (block.StateSummary, error) } type stateSyncServer struct { - chain *core.BlockChain - atomicTrie atomic.AtomicTrie + chain *core.BlockChain + provider SummaryProvider syncableInterval uint64 } @@ -37,39 +28,12 @@ type StateSyncServer interface { GetStateSummary(context.Context, uint64) (block.StateSummary, error) } -func NewStateSyncServer(config *StateSyncServerConfig) StateSyncServer { +func NewStateSyncServer(chain *core.BlockChain, provider SummaryProvider, syncableInterval uint64) StateSyncServer { return &stateSyncServer{ - chain: config.Chain, - atomicTrie: config.AtomicTrie, - syncableInterval: config.SyncableInterval, - } -} - -// stateSummaryAtHeight returns the SyncSummary at [height] if valid and available. 
-func (server *stateSyncServer) stateSummaryAtHeight(height uint64) (message.SyncSummary, error) { - atomicRoot, err := server.atomicTrie.Root(height) - if err != nil { - return message.SyncSummary{}, fmt.Errorf("error getting atomic trie root for height (%d): %w", height, err) - } - - if (atomicRoot == common.Hash{}) { - return message.SyncSummary{}, fmt.Errorf("atomic trie root not found for height (%d)", height) + chain: chain, + provider: provider, + syncableInterval: syncableInterval, } - - blk := server.chain.GetBlockByNumber(height) - if blk == nil { - return message.SyncSummary{}, fmt.Errorf("block not found for height (%d)", height) - } - - if !server.chain.HasState(blk.Root()) { - return message.SyncSummary{}, fmt.Errorf("block root does not exist for height (%d), root (%s)", height, blk.Root()) - } - - summary, err := message.NewSyncSummary(blk.Hash(), height, blk.Root(), atomicRoot) - if err != nil { - return message.SyncSummary{}, fmt.Errorf("failed to construct syncable block at height %d: %w", height, err) - } - return summary, nil } // GetLastStateSummary returns the latest state summary. 
@@ -80,7 +44,7 @@ func (server *stateSyncServer) GetLastStateSummary(context.Context) (block.State lastHeight := server.chain.LastAcceptedBlock().NumberU64() lastSyncSummaryNumber := lastHeight - lastHeight%server.syncableInterval - summary, err := server.stateSummaryAtHeight(lastSyncSummaryNumber) + summary, err := server.provider.StateSummaryAtHeight(lastSyncSummaryNumber) if err != nil { log.Debug("could not get latest state summary", "err", err) return nil, database.ErrNotFound @@ -100,7 +64,7 @@ func (server *stateSyncServer) GetStateSummary(_ context.Context, height uint64) return nil, database.ErrNotFound } - summary, err := server.stateSummaryAtHeight(summaryBlock.NumberU64()) + summary, err := server.provider.StateSummaryAtHeight(summaryBlock.NumberU64()) if err != nil { log.Debug("could not get state summary", "height", height, "err", err) return nil, database.ErrNotFound diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 6d07b6f721..1c3a71d9db 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -37,6 +37,7 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/database" + vmsync "github.com/ava-labs/coreth/plugin/evm/sync" "github.com/ava-labs/coreth/predicate" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" @@ -55,7 +56,7 @@ func TestSkipStateSync(t *testing.T) { stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync syncMode: block.StateSyncSkipped, } - vmSetup := createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) + vmSetup := createSyncServerAndClientVMs(t, test, vmsync.StateSyncParentsToFetch) testSyncerVM(t, vmSetup, test) } @@ -67,14 +68,14 @@ func TestStateSyncFromScratch(t *testing.T) { stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync syncMode: block.StateSyncStatic, } - vmSetup := 
createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) + vmSetup := createSyncServerAndClientVMs(t, test, vmsync.StateSyncParentsToFetch) testSyncerVM(t, vmSetup, test) } func TestStateSyncFromScratchExceedParent(t *testing.T) { rand.Seed(1) - numToGen := StateSyncParentsToFetch + uint64(32) + numToGen := vmsync.StateSyncParentsToFetch + uint64(32) test := syncTest{ syncableInterval: numToGen, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync @@ -109,11 +110,8 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout); err != nil { panic(err) } - cancel := syncerVM.StateSyncClient.(*stateSyncerClient).cancel - if cancel != nil { - cancel() - } else { - t.Fatal("state sync client not populated correctly") + if err := syncerVM.StateSyncClient.Shutdown(); err != nil { + panic(err) } } else { syncerVM.AppResponse(context.Background(), nodeID, requestID, response) @@ -121,7 +119,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup := createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) + vmSetup := createSyncServerAndClientVMs(t, test, vmsync.StateSyncParentsToFetch) // Perform sync resulting in early termination. testSyncerVM(t, vmSetup, test) @@ -272,7 +270,7 @@ func TestVMShutdownWhileSyncing(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup = createSyncServerAndClientVMs(t, test, StateSyncParentsToFetch) + vmSetup = createSyncServerAndClientVMs(t, test, vmsync.StateSyncParentsToFetch) // Perform sync resulting in early termination. 
testSyncerVM(t, vmSetup, test) } @@ -285,7 +283,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s testShortIDAddrs[0]: importAmount, } ) - config := fmt.Sprintf(`{"commit-interval": %d}`, test.syncableInterval) + config := fmt.Sprintf(`{"commit-interval": %d, "state-sync-commit-interval": %d}`, test.syncableInterval, test.syncableInterval) _, serverVM, _, serverAtomicMemory, serverAppSender := GenesisVMWithUTXOs( t, true, "", config, "", alloc, ) @@ -360,9 +358,6 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require.NoError(err) require.NoError(serverVM.State.SetLastAcceptedBlock(internalBlock)) - // patch syncableInterval for test - serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval - // initialise [syncerVM] with blank genesis state stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d, "commit-interval": %d}`, test.stateSyncMinBlocks, 4, test.syncableInterval) syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index aebc94814c..c4a9e1b244 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -44,6 +44,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/plugin/evm/message" + vmsync "github.com/ava-labs/coreth/plugin/evm/sync" "github.com/ava-labs/coreth/triedb" "github.com/ava-labs/coreth/triedb/hashdb" "github.com/ava-labs/coreth/utils" @@ -107,7 +108,7 @@ var ( _ block.StateSyncableVM = &VM{} _ statesyncclient.EthBlockParser = &VM{} _ secp256k1fx.VM = &VM{} - _ BlockAcceptor = &VM{} + _ vmsync.BlockAcceptor = &VM{} ) const ( @@ -278,8 +279,8 @@ type VM struct { logger CorethLogger // State sync server and client - StateSyncServer - StateSyncClient + vmsync.StateSyncServer + vmsync.StateSyncClient // Avalanche Warp 
Messaging backend // Used to serve BLS signatures of warp messages over RPC @@ -617,11 +618,8 @@ func (vm *VM) Initialize( vm.setAppRequestHandlers() - vm.StateSyncServer = NewStateSyncServer(&StateSyncServerConfig{ - Chain: vm.blockChain, - AtomicTrie: vm.atomicTrie, - SyncableInterval: vm.config.StateSyncCommitInterval, - }) + // TODO: register a different provider for the atomic txs + vm.StateSyncServer = vmsync.NewStateSyncServer(vm.blockChain, atomic.NewAtomicProvider(vm.blockChain, vm.atomicTrie), vm.config.StateSyncCommitInterval) return vm.initializeStateSyncClient(lastAcceptedHeight) } @@ -726,9 +724,11 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } - vm.StateSyncClient = NewStateSyncClient(&StateSyncClientConfig{ - Chain: vm.eth, - State: vm.State, + // register different extrasyncer and parser for atomic txs + vm.StateSyncClient = vmsync.NewStateSyncClient(&vmsync.StateSyncClientConfig{ + Chain: vm.eth, + State: vm.State, + ExtraSyncer: atomic.NewAtomicSyncExtender(vm.atomicBackend, vm.config.StateSyncRequestSize), Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.client, @@ -745,9 +745,9 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { LastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around ChaindDB: vm.chaindb, DB: vm.db, - AtomicBackend: vm.atomicBackend, ToEngine: vm.toEngine, Acceptor: vm, + SyncableParser: atomic.NewAtomicSyncSummaryParser(), }) // If StateSync is disabled, clear any ongoing summary so that we will not attempt to resume From b5777f40e5bbba4a658e58245cc5ea519f911477 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sat, 28 Dec 2024 20:20:36 +0300 Subject: [PATCH 26/91] remove unused provider --- plugin/evm/sync/block_provider.go | 38 ------------------------------- 1 file changed, 38 deletions(-) delete mode 100644 plugin/evm/sync/block_provider.go diff --git a/plugin/evm/sync/block_provider.go 
b/plugin/evm/sync/block_provider.go deleted file mode 100644 index 2ba38dc0f9..0000000000 --- a/plugin/evm/sync/block_provider.go +++ /dev/null @@ -1,38 +0,0 @@ -// (c) 2021-2025, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. -package sync - -import ( - "fmt" - - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/plugin/evm/message" -) - -type blockProvider struct { - chain *core.BlockChain -} - -// TODO: this should be moved to a different place -func NewBlockProvider(chain *core.BlockChain) SummaryProvider { - return &blockProvider{chain: chain} -} - -// stateSummaryAtHeight returns the SyncSummary at [height] if valid and available. -func (bp *blockProvider) StateSummaryAtHeight(height uint64) (block.StateSummary, error) { - blk := bp.chain.GetBlockByNumber(height) - if blk == nil { - return nil, fmt.Errorf("block not found for height (%d)", height) - } - - if !bp.chain.HasState(blk.Root()) { - return nil, fmt.Errorf("block root does not exist for height (%d), root (%s)", height, blk.Root()) - } - - summary, err := message.NewBlockSyncSummary(blk.Hash(), height, blk.Root()) - if err != nil { - return nil, fmt.Errorf("failed to construct syncable block at height %d: %w", height, err) - } - return summary, nil -} From 803e50a1c3de57d614c8f92fe7a72a7d3c5640dd Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sun, 29 Dec 2024 11:34:45 +0300 Subject: [PATCH 27/91] fix linter --- utils/snow.go | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/snow.go b/utils/snow.go index e24c884b16..7f92db66a9 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -23,7 +23,6 @@ import ( var ( testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} - testChainID = ids.ID{'t', 'e', 's', 't', 'c', 'h', 'a', 'i', 'n'} TestAvaxAssetID = ids.ID{1, 2, 3} ) From 0b4018573527f506cba29af686fe2f3a31821b96 Mon Sep 17 
00:00:00 2001 From: Ceyhun Onur Date: Sun, 29 Dec 2024 13:30:52 +0300 Subject: [PATCH 28/91] nits --- plugin/evm/atomic/atomic_backend.go | 10 -------- plugin/evm/atomic/atomic_syncer.go | 9 ++++++++ plugin/evm/atomic/test_shared_memories.go | 1 + plugin/evm/database/wrapped_database.go | 28 +++++++++++++++++------ plugin/evm/syncervm_client.go | 2 +- 5 files changed, 32 insertions(+), 18 deletions(-) diff --git a/plugin/evm/atomic/atomic_backend.go b/plugin/evm/atomic/atomic_backend.go index eefb254321..2532261a10 100644 --- a/plugin/evm/atomic/atomic_backend.go +++ b/plugin/evm/atomic/atomic_backend.go @@ -4,7 +4,6 @@ package atomic import ( - "context" "encoding/binary" "fmt" "time" @@ -22,15 +21,6 @@ import ( "github.com/ethereum/go-ethereum/log" ) -// Syncer represents a step in state sync, -// along with Start/Done methods to control -// and monitor progress. -// Error returns an error if any was encountered. -type Syncer interface { - Start(ctx context.Context) error - Done() <-chan error -} - var _ AtomicBackend = &atomicBackend{} var ( diff --git a/plugin/evm/atomic/atomic_syncer.go b/plugin/evm/atomic/atomic_syncer.go index daffb9d771..52a8376319 100644 --- a/plugin/evm/atomic/atomic_syncer.go +++ b/plugin/evm/atomic/atomic_syncer.go @@ -24,6 +24,15 @@ var ( _ syncclient.LeafSyncTask = &atomicSyncerLeafTask{} ) +// Syncer represents a step in state sync, +// along with Start/Done methods to control +// and monitor progress. +// Error returns an error if any was encountered. +type Syncer interface { + Start(ctx context.Context) error + Done() <-chan error +} + // atomicSyncer is used to sync the atomic trie from the network. The CallbackLeafSyncer // is responsible for orchestrating the sync while atomicSyncer is responsible for maintaining // the state of progress and writing the actual atomic trie to the trieDB. 
diff --git a/plugin/evm/atomic/test_shared_memories.go b/plugin/evm/atomic/test_shared_memories.go index f526748b03..76aa1bcf87 100644 --- a/plugin/evm/atomic/test_shared_memories.go +++ b/plugin/evm/atomic/test_shared_memories.go @@ -68,6 +68,7 @@ func (s *SharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomi } } +// TODO: once tetsts are moved to atomic package, unexport this function func NewSharedMemories(atomicMemory *atomic.Memory, thisChainID, peerChainID ids.ID) *SharedMemories { return &SharedMemories{ thisChain: atomicMemory.NewSharedMemory(thisChainID), diff --git a/plugin/evm/database/wrapped_database.go b/plugin/evm/database/wrapped_database.go index f8a36913bb..9421e514a8 100644 --- a/plugin/evm/database/wrapped_database.go +++ b/plugin/evm/database/wrapped_database.go @@ -17,15 +17,23 @@ var ( ) // ethDbWrapper implements ethdb.Database -type ethDbWrapper struct{ database.Database } +type ethDbWrapper struct { + database.Database +} -func WrapDatabase(db database.Database) ethdb.KeyValueStore { return ethDbWrapper{db} } +func WrapDatabase(db database.Database) ethdb.KeyValueStore { + return ethDbWrapper{db} +} // Stat implements ethdb.Database -func (db ethDbWrapper) Stat(string) (string, error) { return "", database.ErrNotFound } +func (db ethDbWrapper) Stat(string) (string, error) { + return "", database.ErrNotFound +} // NewBatch implements ethdb.Database -func (db ethDbWrapper) NewBatch() ethdb.Batch { return wrappedBatch{db.Database.NewBatch()} } +func (db ethDbWrapper) NewBatch() ethdb.Batch { + return wrappedBatch{db.Database.NewBatch()} +} // NewBatchWithSize implements ethdb.Database // TODO: propagate size through avalanchego Database interface @@ -59,10 +67,16 @@ func (db ethDbWrapper) NewIteratorWithStart(start []byte) ethdb.Iterator { } // wrappedBatch implements ethdb.wrappedBatch -type wrappedBatch struct{ database.Batch } +type wrappedBatch struct { + database.Batch +} // ValueSize implements ethdb.Batch -func (batch 
wrappedBatch) ValueSize() int { return batch.Batch.Size() } +func (batch wrappedBatch) ValueSize() int { + return batch.Batch.Size() +} // Replay implements ethdb.Batch -func (batch wrappedBatch) Replay(w ethdb.KeyValueWriter) error { return batch.Batch.Replay(w) } +func (batch wrappedBatch) Replay(w ethdb.KeyValueWriter) error { + return batch.Batch.Replay(w) +} diff --git a/plugin/evm/syncervm_client.go b/plugin/evm/syncervm_client.go index 15a5b384f5..ba9922d33d 100644 --- a/plugin/evm/syncervm_client.go +++ b/plugin/evm/syncervm_client.go @@ -348,7 +348,7 @@ func (client *stateSyncerClient) finishSync() error { evmBlockGetter, ok := wrappedBlock.(EthBlockWrapper) if !ok { - return fmt.Errorf("could not convert block(%T) to evm.Block", stateBlock) + return fmt.Errorf("could not convert block(%T) to evm.EthBlockWrapper", stateBlock) } block := evmBlockGetter.GetEthBlock() From 4ef96e855a56a84a29df3737b237624f2dd65bd1 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 30 Dec 2024 11:29:06 +0300 Subject: [PATCH 29/91] Update plugin/evm/vm.go Signed-off-by: Ceyhun Onur --- plugin/evm/vm.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 0d01d69570..1989d34cc1 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -724,7 +724,6 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } - // register different extrasyncer and parser for atomic txs vm.StateSyncClient = vmsync.NewStateSyncClient(&vmsync.StateSyncClientConfig{ Chain: vm.eth, State: vm.State, From caa305dd4955c1c45bcc27f4553cf18069ee3adb Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 30 Dec 2024 11:29:12 +0300 Subject: [PATCH 30/91] Update plugin/evm/vm.go Signed-off-by: Ceyhun Onur --- plugin/evm/vm.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 1989d34cc1..0cf53150d9 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -618,7 +618,6 @@ func (vm *VM) Initialize( 
vm.setAppRequestHandlers() - // TODO: register a different provider for the atomic txs vm.StateSyncServer = vmsync.NewStateSyncServer(vm.blockChain, atomic.NewAtomicProvider(vm.blockChain, vm.atomicTrie), vm.config.StateSyncCommitInterval) return vm.initializeStateSyncClient(lastAcceptedHeight) } From 2f1dbc5fccff317cf0f2516216d04279ae04fd8a Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 3 Jan 2025 13:30:38 +0300 Subject: [PATCH 31/91] Update plugin/evm/vm_test.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/vm_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index bcb62d42af..2f6d6e08aa 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1545,7 +1545,7 @@ func TestBonusBlocksTxs(t *testing.T) { // Make [blk] a bonus block. wrappedBackend := &wrappedBackend{ AtomicBackend: vm.atomicBackend, - registeredBonusBlocks: map[uint64]common.Hash{1: common.Hash(blk.ID())}, + registeredBonusBlocks: map[uint64]common.Hash{blk.Height(): common.Hash(blk.ID())}, } vm.atomicBackend = wrappedBackend From 936bb5901b4fe53a50ba60f11cda71624e0543d6 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 3 Jan 2025 13:30:50 +0300 Subject: [PATCH 32/91] Update plugin/evm/syncervm_test.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/syncervm_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 6d07b6f721..90d38b521b 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -364,6 +364,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval // initialise [syncerVM] with blank genesis state + // we also override [syncerVM]'s commit interval so the atomic trie works correctly. 
stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d, "commit-interval": %d}`, test.stateSyncMinBlocks, 4, test.syncableInterval) syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( t, false, "", stateSyncEnabledJSON, "", alloc, From 4019a343f23fe8f8501a94dbf0c9e19bb4c07eda Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 3 Jan 2025 14:08:56 +0300 Subject: [PATCH 33/91] review fix --- plugin/evm/atomic/atomic_syncer_test.go | 2 +- .../evm/atomic/atomic_trie_iterator_test.go | 10 ++--- plugin/evm/atomic/atomic_trie_test.go | 40 +++++++++---------- .../evm/atomic/atomic_tx_repository_test.go | 10 ++--- plugin/evm/atomic/test_shared_memories.go | 1 + plugin/evm/atomic/test_tx.go | 6 +-- plugin/evm/block.go | 2 +- plugin/evm/export_tx_test.go | 2 +- plugin/evm/import_tx_test.go | 2 +- plugin/evm/syncervm_client.go | 12 ++++-- plugin/evm/syncervm_test.go | 8 ++-- plugin/evm/vm.go | 4 +- plugin/evm/vm_test.go | 7 +--- utils/snow.go | 10 ++--- 14 files changed, 59 insertions(+), 57 deletions(-) diff --git a/plugin/evm/atomic/atomic_syncer_test.go b/plugin/evm/atomic/atomic_syncer_test.go index 140a627710..6ff0da417a 100644 --- a/plugin/evm/atomic/atomic_syncer_test.go +++ b/plugin/evm/atomic/atomic_syncer_test.go @@ -57,7 +57,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight if err != nil { t.Fatal("could not initialize atomix tx repository", err) } - atomicBackend, err := NewAtomicBackend(clientDB, utils.TestSharedMemory(), nil, repo, 0, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(clientDB, utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, commitInterval) if err != nil { t.Fatal("could not initialize atomic backend", err) } diff --git a/plugin/evm/atomic/atomic_trie_iterator_test.go b/plugin/evm/atomic/atomic_trie_iterator_test.go index 8766752b28..03435d713d 100644 --- 
a/plugin/evm/atomic/atomic_trie_iterator_test.go +++ b/plugin/evm/atomic/atomic_trie_iterator_test.go @@ -20,7 +20,7 @@ import ( func TestIteratorCanIterate(t *testing.T) { lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) assert.NoError(t, err) @@ -32,7 +32,7 @@ func TestIteratorCanIterate(t *testing.T) { // create an atomic trie // on create it will initialize all the transactions from the above atomic repository - atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 100) + atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 100) assert.NoError(t, err) atomicTrie1 := atomicBackend.AtomicTrie() @@ -45,7 +45,7 @@ func TestIteratorCanIterate(t *testing.T) { // iterate on a new atomic trie to make sure there is no resident state affecting the data and the // iterator - atomicBackend2, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 100) + atomicBackend2, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 100) assert.NoError(t, err) atomicTrie2 := atomicBackend2.AtomicTrie() lastCommittedHash2, lastCommittedHeight2 := atomicTrie2.LastCommitted() @@ -60,7 +60,7 @@ func TestIteratorHandlesInvalidData(t *testing.T) { require := require.New(t) lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) require.NoError(err) @@ -73,7 +73,7 @@ func TestIteratorHandlesInvalidData(t *testing.T) { // create an atomic trie // on create it will initialize all the transactions from the above atomic repository commitInterval := uint64(100) - atomicBackend, err := NewAtomicBackend(db, 
utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, commitInterval) require.NoError(err) atomicTrie := atomicBackend.AtomicTrie() diff --git a/plugin/evm/atomic/atomic_trie_test.go b/plugin/evm/atomic/atomic_trie_test.go index 4c9c84468a..7a48c75745 100644 --- a/plugin/evm/atomic/atomic_trie_test.go +++ b/plugin/evm/atomic/atomic_trie_test.go @@ -131,7 +131,7 @@ func TestAtomicTrieInitialize(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) if err != nil { t.Fatal(err) @@ -140,7 +140,7 @@ func TestAtomicTrieInitialize(t *testing.T) { writeTxs(t, repo, 1, test.lastAcceptedHeight+1, test.numTxsPerBlock, nil, operationsMap) // Construct the atomic trie for the first time - atomicBackend1, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + atomicBackend1, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -156,7 +156,7 @@ func TestAtomicTrieInitialize(t *testing.T) { verifyOperations(t, atomicTrie1, codec, rootHash1, 1, test.expectedCommitHeight, operationsMap) // Construct the atomic trie again (on the same database) and ensure the last accepted root is correct. 
- atomicBackend2, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + atomicBackend2, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -165,7 +165,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // Construct the atomic trie again (on an empty database) and ensure that it produces the same hash. atomicBackend3, err := NewAtomicBackend( - versiondb.New(memdb.New()), utils.TestSharedMemory(), nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, + versiondb.New(memdb.New()), utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, ) if err != nil { t.Fatal(err) @@ -203,7 +203,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // Generate a new atomic trie to compare the root against. atomicBackend4, err := NewAtomicBackend( - versiondb.New(memdb.New()), utils.TestSharedMemory(), nil, repo, nextCommitHeight, common.Hash{}, test.commitInterval, + versiondb.New(memdb.New()), utils.TestSnowContext().SharedMemory, nil, repo, nextCommitHeight, common.Hash{}, test.commitInterval, ) if err != nil { t.Fatal(err) @@ -220,14 +220,14 @@ func TestAtomicTrieInitialize(t *testing.T) { func TestIndexerInitializesOnlyOnce(t *testing.T) { lastAcceptedHeight := uint64(25) db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository - atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval*/) + atomicBackend, err := NewAtomicBackend(db, 
utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval*/) assert.NoError(t, err) atomicTrie := atomicBackend.AtomicTrie() @@ -243,7 +243,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { assert.NoError(t, err) // Re-initialize the atomic trie - atomicBackend, err = NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval */) + atomicBackend, err = NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval */) assert.NoError(t, err) atomicTrie = atomicBackend.AtomicTrie() @@ -254,11 +254,11 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { func newTestAtomicTrie(t *testing.T) AtomicTrie { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, TestTxCodec, 0) + repo, err := NewAtomicTxRepository(db, testTxCodec, 0) if err != nil { t.Fatal(err) } - atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, 0, common.Hash{}, testCommitInterval) + atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, testCommitInterval) if err != nil { t.Fatal(err) } @@ -332,7 +332,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { commitInterval := uint64(10) expectedCommitHeight := uint64(100) db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) if err != nil { t.Fatal(err) @@ -346,7 +346,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { 14: {}, } // Construct the atomic trie for the first time - atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), bonusBlocks, repo, lastAcceptedHeight, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, bonusBlocks, repo, lastAcceptedHeight, common.Hash{}, commitInterval) if err != 
nil { t.Fatal(err) } @@ -453,7 +453,7 @@ func TestApplyToSharedMemory(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec repo, err := NewAtomicTxRepository(db, codec, test.lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -519,7 +519,7 @@ func TestApplyToSharedMemory(t *testing.T) { func BenchmarkAtomicTrieInit(b *testing.B) { db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -537,7 +537,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - sharedMemory := utils.TestSharedMemory() + sharedMemory := utils.TestSnowContext().SharedMemory atomicBackend, err := NewAtomicBackend(db, sharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 5000) assert.NoError(b, err) atomicTrie = atomicBackend.AtomicTrie() @@ -554,7 +554,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { func BenchmarkAtomicTrieIterate(b *testing.B) { db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) @@ -564,7 +564,7 @@ func BenchmarkAtomicTrieIterate(b *testing.B) { assert.NoError(b, err) writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) - atomicBackend, err := NewAtomicBackend(db, utils.TestSharedMemory(), nil, repo, lastAcceptedHeight, common.Hash{}, 5000) + atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 5000) assert.NoError(b, err) atomicTrie := atomicBackend.AtomicTrie() @@ -633,8 +633,8 @@ func BenchmarkApplyToSharedMemory(b *testing.B) { func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks uint64) { db := versiondb.New(disk) - codec := TestTxCodec - 
sharedMemory := utils.TestSharedMemory() + codec := testTxCodec + sharedMemory := utils.TestSnowContext().SharedMemory lastAcceptedHeight := blocks repo, err := NewAtomicTxRepository(db, codec, lastAcceptedHeight) @@ -659,7 +659,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - backend.(*atomicBackend).sharedMemory = utils.TestSharedMemory() + backend.(*atomicBackend).sharedMemory = utils.TestSnowContext().SharedMemory assert.NoError(b, backend.MarkApplyToSharedMemoryCursor(0)) assert.NoError(b, db.Commit()) assert.NoError(b, backend.ApplyToSharedMemory(lastAcceptedHeight)) diff --git a/plugin/evm/atomic/atomic_tx_repository_test.go b/plugin/evm/atomic/atomic_tx_repository_test.go index 4dda70ab10..209cf1d29c 100644 --- a/plugin/evm/atomic/atomic_tx_repository_test.go +++ b/plugin/evm/atomic/atomic_tx_repository_test.go @@ -182,7 +182,7 @@ func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec repo, err := NewAtomicTxRepository(db, codec, 0) if err != nil { t.Fatal(err) @@ -195,7 +195,7 @@ func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec repo, err := NewAtomicTxRepository(db, codec, 0) if err != nil { t.Fatal(err) @@ -208,7 +208,7 @@ func TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { func TestAtomicRepositoryPreAP5Migration(t *testing.T) { db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) txMap := make(map[uint64][]*Tx) @@ -233,7 +233,7 @@ func TestAtomicRepositoryPreAP5Migration(t *testing.T) { func TestAtomicRepositoryPostAP5Migration(t *testing.T) { db := 
versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) txMap := make(map[uint64][]*Tx) @@ -258,7 +258,7 @@ func TestAtomicRepositoryPostAP5Migration(t *testing.T) { func benchAtomicRepositoryIndex10_000(b *testing.B, maxHeight uint64, txsPerHeight int) { db := versiondb.New(memdb.New()) - codec := TestTxCodec + codec := testTxCodec acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) txMap := make(map[uint64][]*Tx) diff --git a/plugin/evm/atomic/test_shared_memories.go b/plugin/evm/atomic/test_shared_memories.go index f526748b03..2f92639a5c 100644 --- a/plugin/evm/atomic/test_shared_memories.go +++ b/plugin/evm/atomic/test_shared_memories.go @@ -68,6 +68,7 @@ func (s *SharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomi } } +// TODO: once tests are moved to atomic package, unexport this function func NewSharedMemories(atomicMemory *atomic.Memory, thisChainID, peerChainID ids.ID) *SharedMemories { return &SharedMemories{ thisChain: atomicMemory.NewSharedMemory(thisChainID), diff --git a/plugin/evm/atomic/test_tx.go b/plugin/evm/atomic/test_tx.go index 21adecfd39..637997b505 100644 --- a/plugin/evm/atomic/test_tx.go +++ b/plugin/evm/atomic/test_tx.go @@ -20,10 +20,10 @@ import ( "github.com/ava-labs/coreth/params" ) -var TestTxCodec codec.Manager +var testTxCodec codec.Manager func init() { - TestTxCodec = codec.NewDefaultManager() + testTxCodec = codec.NewDefaultManager() c := linearcodec.NewDefault() errs := wrappers.Errs{} @@ -31,7 +31,7 @@ func init() { c.RegisterType(&TestUnsignedTx{}), c.RegisterType(&avalancheatomic.Element{}), c.RegisterType(&avalancheatomic.Requests{}), - TestTxCodec.RegisterCodec(atomic.CodecVersion, c), + testTxCodec.RegisterCodec(atomic.CodecVersion, c), ) if errs.Errored() { diff --git a/plugin/evm/block.go b/plugin/evm/block.go index ffadf284f3..d139b640a0 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -158,7 +158,7 @@ 
func (b *Block) Accept(context.Context) error { return fmt.Errorf("chain could not accept %s: %w", b.ID(), err) } - if err := vm.PutLastAcceptedID(b.id[:]); err != nil { + if err := vm.PutLastAcceptedID(b.id); err != nil { return fmt.Errorf("failed to put %s as the last accepted block: %w", b.ID(), err) } diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 4bc0999797..417bba57f2 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -1261,7 +1261,7 @@ func TestExportTxVerify(t *testing.T) { { Address: testEthAddrs[0], Amount: 0, - AssetID: utils.TestAvaxAssetID, + AssetID: ctx.AVAXAssetID, Nonce: 0, }, } diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index 4ce2f9284e..e2f44b3974 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -317,7 +317,7 @@ func TestImportTxVerify(t *testing.T) { { Address: testEthAddrs[0], Amount: 0, - AssetID: utils.TestAvaxAssetID, + AssetID: ctx.AVAXAssetID, }, } return &tx diff --git a/plugin/evm/syncervm_client.go b/plugin/evm/syncervm_client.go index 15a5b384f5..35e2cf5e9b 100644 --- a/plugin/evm/syncervm_client.go +++ b/plugin/evm/syncervm_client.go @@ -30,7 +30,7 @@ import ( "github.com/ethereum/go-ethereum/log" ) -// State sync fetches [StateSyncParentsToFetch] parents of the block it syncs to. +// StateSyncParentsToFetch is the number of the block parents the state syncs to. // The last 256 block hashes are necessary to support the BLOCKHASH opcode. 
const StateSyncParentsToFetch = 256 @@ -40,14 +40,14 @@ var ( ) type BlockAcceptor interface { - PutLastAcceptedID([]byte) error + PutLastAcceptedID(ids.ID) error } type EthBlockWrapper interface { GetEthBlock() *types.Block } -// StateSyncClientConfig defines the options and dependencies needed to construct a StateSyncerClient +// StateSyncClientConfig defines the options and dependencies needed to construct a StateSyncClient type StateSyncClientConfig struct { Enabled bool SkipResume bool @@ -407,7 +407,11 @@ func (client *stateSyncerClient) updateVMMarkers() error { return err } client.AtomicBackend.SetLastAccepted(client.syncSummary.BlockHash) - if err := client.Acceptor.PutLastAcceptedID(client.syncSummary.BlockHash.Bytes()); err != nil { + id, err := ids.ToID(client.syncSummary.BlockHash.Bytes()) + if err != nil { + return err + } + if err := client.Acceptor.PutLastAcceptedID(id); err != nil { return err } if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 6d07b6f721..10f55031ba 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -285,6 +285,10 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s testShortIDAddrs[0]: importAmount, } ) + + // override serverAtomicTrie's commitInterval so the call to [serverAtomicTrie.Index] + // creates a commit at the height [syncableInterval]. This is necessary to support + // fetching a state summary. config := fmt.Sprintf(`{"commit-interval": %d}`, test.syncableInterval) _, serverVM, _, serverAtomicMemory, serverAppSender := GenesisVMWithUTXOs( t, true, "", config, "", alloc, @@ -330,9 +334,6 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s } }, nil) - // override serverAtomicTrie's commitInterval so the call to [serverAtomicTrie.Index] - // creates a commit at the height [syncableInterval]. 
This is necessary to support - // fetching a state summary. serverAtomicTrie := serverVM.atomicTrie require.True(serverAtomicTrie.AcceptTrie(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) require.NoError(serverVM.db.Commit()) @@ -364,6 +365,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval // initialise [syncerVM] with blank genesis state + // we also override [syncerVM]'s commit interval so the atomic trie works correctly. stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d, "commit-interval": %d}`, test.stateSyncMinBlocks, 4, test.syncableInterval) syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( t, false, "", stateSyncEnabledJSON, "", alloc, diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index aebc94814c..aaae53d721 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1910,6 +1910,6 @@ func (vm *VM) newExportTx( return tx, nil } -func (vm *VM) PutLastAcceptedID(ID []byte) error { - return vm.acceptedBlockDB.Put(lastAcceptedKey, ID) +func (vm *VM) PutLastAcceptedID(ID ids.ID) error { + return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:]) } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 0a4f9ebc20..bf3171d2ab 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1479,11 +1479,8 @@ type wrappedBackend struct { } func (w *wrappedBackend) IsBonus(blockHeight uint64, blockHash common.Hash) bool { - // Check if the block is a bonus block - if hash, ok := w.registeredBonusBlocks[blockHeight]; ok { - return blockHash.Cmp(hash) == 0 - } - return false + hash, ok := w.registeredBonusBlocks[blockHeight] + return ok && blockHash.Cmp(hash) == 0 } func TestBonusBlocksTxs(t *testing.T) { diff --git a/utils/snow.go b/utils/snow.go index e24c884b16..6338bc8dcf 100644 --- a/utils/snow.go +++ 
b/utils/snow.go @@ -42,13 +42,16 @@ func TestSnowContext() *snow.Context { _ = aliaser.Alias(testXChainID, "X") _ = aliaser.Alias(testXChainID, testXChainID.String()) + m := atomic.NewMemory(memdb.New()) + sm := m.NewSharedMemory(testCChainID) + ctx := &snow.Context{ NetworkID: networkID, SubnetID: ids.Empty, ChainID: chainID, AVAXAssetID: TestAvaxAssetID, NodeID: ids.GenerateTestNodeID(), - SharedMemory: TestSharedMemory(), + SharedMemory: sm, XChainID: testXChainID, CChainID: testCChainID, PublicKey: pk, @@ -87,8 +90,3 @@ func NewTestValidatorState() *validatorstest.State { }, } } - -func TestSharedMemory() atomic.SharedMemory { - m := atomic.NewMemory(memdb.New()) - return m.NewSharedMemory(testCChainID) -} From f651bad4a748a90c94f4e011ed2936efa7912869 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 6 Jan 2025 15:52:46 +0300 Subject: [PATCH 34/91] Reviews --- plugin/evm/atomic/atomic_sync_extender.go | 35 +++++++++++++---------- plugin/evm/atomic/atomic_sync_provider.go | 18 ++++++------ plugin/evm/atomic/syncable.go | 29 ++++++++++--------- plugin/evm/message/syncable.go | 16 +++++------ plugin/evm/vm.go | 3 +- 5 files changed, 56 insertions(+), 45 deletions(-) diff --git a/plugin/evm/atomic/atomic_sync_extender.go b/plugin/evm/atomic/atomic_sync_extender.go index 84e854dd23..c85328e313 100644 --- a/plugin/evm/atomic/atomic_sync_extender.go +++ b/plugin/evm/atomic/atomic_sync_extender.go @@ -1,4 +1,4 @@ -// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// (c) 2021-2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
// TODO: move to separate package package atomic @@ -14,53 +14,58 @@ import ( "github.com/ethereum/go-ethereum/log" ) -type atomicSyncExtender struct { +var _ sync.Extender = (*AtomicSyncExtender)(nil) + +type AtomicSyncExtender struct { backend AtomicBackend stateSyncRequestSize uint16 } -func NewAtomicSyncExtender(backend AtomicBackend, stateSyncRequestSize uint16) sync.Extender { - return &atomicSyncExtender{ +func NewAtomicSyncExtender(backend AtomicBackend, stateSyncRequestSize uint16) *AtomicSyncExtender { + return &AtomicSyncExtender{ backend: backend, stateSyncRequestSize: stateSyncRequestSize, } } -func (a *atomicSyncExtender) Sync(ctx context.Context, client syncclient.Client, syncSummary message.Syncable) error { +func (a *AtomicSyncExtender) Sync(ctx context.Context, client syncclient.Client, syncSummary message.Syncable) error { atomicSyncSummary, ok := syncSummary.(*AtomicBlockSyncSummary) if !ok { - return fmt.Errorf("expected AtomicBlockSyncSummary, got %T", syncSummary) + return fmt.Errorf("expected *AtomicBlockSyncSummary, got %T", syncSummary) } log.Info("atomic tx: sync starting", "root", atomicSyncSummary) atomicSyncer, err := a.backend.Syncer(client, atomicSyncSummary.AtomicRoot, atomicSyncSummary.BlockNumber, a.stateSyncRequestSize) if err != nil { - return err + return fmt.Errorf("failed to create atomic syncer: %w", err) } if err := atomicSyncer.Start(ctx); err != nil { - return err + return fmt.Errorf("failed to start atomic syncer: %w", err) } err = <-atomicSyncer.Done() log.Info("atomic tx: sync finished", "root", atomicSyncSummary.AtomicRoot, "err", err) return err } -func (a *atomicSyncExtender) OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error { +func (a *AtomicSyncExtender) OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error { // Mark the previously last accepted block for the shared memory cursor, so that we will execute shared // memory operations from the previously 
last accepted block when ApplyToSharedMemory // is called. if err := a.backend.MarkApplyToSharedMemoryCursor(lastAcceptedHeight); err != nil { - return err + return fmt.Errorf("failed to mark apply to shared memory cursor before commit: %w", err) } a.backend.SetLastAccepted(syncSummary.GetBlockHash()) return nil } -func (a *atomicSyncExtender) OnFinishAfterCommit(summaryHeight uint64) error { - // the chain state is already restored, and from this point on - // the block synced to is the accepted block. the last operation +func (a *AtomicSyncExtender) OnFinishAfterCommit(summaryHeight uint64) error { + // the chain state is already restored, and, from this point on, + // the block synced to is the accepted block. The last operation // is updating shared memory with the atomic trie. - // ApplyToSharedMemory does this, and even if the VM is stopped + // ApplyToSharedMemory does this, and, even if the VM is stopped // (gracefully or ungracefully), since MarkApplyToSharedMemoryCursor // is called, VM will resume ApplyToSharedMemory on Initialize. - return a.backend.ApplyToSharedMemory(summaryHeight) + if err := a.backend.ApplyToSharedMemory(summaryHeight); err != nil { + return fmt.Errorf("failed to apply atomic trie to shared memory after commit: %w", err) + } + return nil } diff --git a/plugin/evm/atomic/atomic_sync_provider.go b/plugin/evm/atomic/atomic_sync_provider.go index 2ef3345c62..118c241774 100644 --- a/plugin/evm/atomic/atomic_sync_provider.go +++ b/plugin/evm/atomic/atomic_sync_provider.go @@ -1,4 +1,4 @@ -// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// (c) 2021-2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package atomic @@ -12,23 +12,25 @@ import ( "github.com/ethereum/go-ethereum/common" ) -type atomicSyncProvider struct { +var _ sync.SummaryProvider = &AtomicSyncProvider{} + +type AtomicSyncProvider struct { chain *core.BlockChain atomicTrie AtomicTrie } -func NewAtomicProvider(chain *core.BlockChain, atomicTrie AtomicTrie) sync.SummaryProvider { - return &atomicSyncProvider{chain: chain, atomicTrie: atomicTrie} +func NewAtomicProvider(chain *core.BlockChain, atomicTrie AtomicTrie) *AtomicSyncProvider { + return &AtomicSyncProvider{chain: chain, atomicTrie: atomicTrie} } -// StateSummaryAtHeight returns the SyncSummary at [height] if valid and available. -func (a *atomicSyncProvider) StateSummaryAtHeight(height uint64) (block.StateSummary, error) { +// StateSummaryAtHeight returns the block state summary at [height] if valid and available. +func (a *AtomicSyncProvider) StateSummaryAtHeight(height uint64) (block.StateSummary, error) { atomicRoot, err := a.atomicTrie.Root(height) if err != nil { - return nil, fmt.Errorf("error getting atomic trie root for height (%d): %w", height, err) + return nil, fmt.Errorf("failed to retrieve atomic trie root for height (%d): %w", height, err) } - if (atomicRoot == common.Hash{}) { + if atomicRoot == (common.Hash{}) { return nil, fmt.Errorf("atomic trie root not found for height (%d)", height) } diff --git a/plugin/evm/atomic/syncable.go b/plugin/evm/atomic/syncable.go index a8e79d5a99..0e26cba48c 100644 --- a/plugin/evm/atomic/syncable.go +++ b/plugin/evm/atomic/syncable.go @@ -1,4 +1,4 @@ -// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// (c) 2021-2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package atomic @@ -15,7 +15,10 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) -var _ message.Syncable = &AtomicBlockSyncSummary{} +var ( + _ message.Syncable = (*AtomicBlockSyncSummary)(nil) + _ message.SyncableParser = (*AtomicSyncSummaryParser)(nil) +) // AtomicBlockSyncSummary provides the information necessary to sync a node starting // at the given block. @@ -34,31 +37,31 @@ func init() { message.SyncSummaryType = &AtomicBlockSyncSummary{} } -type atomicSyncSummaryParser struct{} +type AtomicSyncSummaryParser struct{} -func NewAtomicSyncSummaryParser() message.SyncableParser { - return &atomicSyncSummaryParser{} +func NewAtomicSyncSummaryParser() *AtomicSyncSummaryParser { + return &AtomicSyncSummaryParser{} } -func (b *atomicSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl message.AcceptImplFn) (message.Syncable, error) { +func (a *AtomicSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl message.AcceptImplFn) (message.Syncable, error) { summary := AtomicBlockSyncSummary{} if codecVersion, err := Codec.Unmarshal(summaryBytes, &summary); err != nil { - return nil, err + return nil, fmt.Errorf("failed to parse syncable summary: %w", err) } else if codecVersion != message.Version { - return nil, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (%d != %d)", codecVersion, message.Version) + return nil, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (got %d, expected %d)", codecVersion, message.Version) } summary.bytes = summaryBytes summaryID, err := ids.ToID(crypto.Keccak256(summaryBytes)) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to compute summary ID: %w", err) } summary.summaryID = summaryID summary.acceptImpl = acceptImpl return &summary, nil } -func NewAtomicSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash, atomicRoot common.Hash) (message.Syncable, error) { +func 
NewAtomicSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash, atomicRoot common.Hash) (*AtomicBlockSyncSummary, error) { summary := AtomicBlockSyncSummary{ BlockNumber: blockNumber, BlockHash: blockHash, @@ -67,13 +70,13 @@ func NewAtomicSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot c } bytes, err := Codec.Marshal(message.Version, &summary) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to marshal syncable summary: %w", err) } summary.bytes = bytes summaryID, err := ids.ToID(crypto.Keccak256(bytes)) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to compute summary ID: %w", err) } summary.summaryID = summaryID @@ -105,7 +108,7 @@ func (a *AtomicBlockSyncSummary) ID() ids.ID { } func (a *AtomicBlockSyncSummary) String() string { - return fmt.Sprintf("SyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s, AtomicRoot=%s)", a.BlockHash, a.BlockNumber, a.BlockRoot, a.AtomicRoot) + return fmt.Sprintf("AtomicBlockSyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s, AtomicRoot=%s)", a.BlockHash, a.BlockNumber, a.BlockRoot, a.AtomicRoot) } func (a *AtomicBlockSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { diff --git a/plugin/evm/message/syncable.go b/plugin/evm/message/syncable.go index 83a8d07325..8a8918b891 100644 --- a/plugin/evm/message/syncable.go +++ b/plugin/evm/message/syncable.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) -var _ Syncable = &BlockSyncSummary{} +var _ Syncable = (*BlockSyncSummary)(nil) type Syncable interface { block.StateSummary @@ -43,14 +43,14 @@ type BlockSyncSummary struct { type BlockSyncSummaryParser struct{} -func NewBlockSyncSummaryParser() SyncableParser { +func NewBlockSyncSummaryParser() *BlockSyncSummaryParser { return &BlockSyncSummaryParser{} } func (b *BlockSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) { summary := 
BlockSyncSummary{} if codecVersion, err := Codec.Unmarshal(summaryBytes, &summary); err != nil { - return nil, err + return nil, fmt.Errorf("failed to parse syncable summary: %w", err) } else if codecVersion != Version { return nil, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (%d != %d)", codecVersion, Version) } @@ -58,14 +58,14 @@ func (b *BlockSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl summary.bytes = summaryBytes summaryID, err := ids.ToID(crypto.Keccak256(summaryBytes)) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to compute summary ID: %w", err) } summary.summaryID = summaryID summary.acceptImpl = acceptImpl return &summary, nil } -func NewBlockSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash) (Syncable, error) { +func NewBlockSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash) (*BlockSyncSummary, error) { summary := BlockSyncSummary{ BlockNumber: blockNumber, BlockHash: blockHash, @@ -73,13 +73,13 @@ func NewBlockSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot co } bytes, err := Codec.Marshal(Version, &summary) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to marshal syncable summary: %w", err) } summary.bytes = bytes summaryID, err := ids.ToID(crypto.Keccak256(bytes)) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to compute summary ID: %w", err) } summary.summaryID = summaryID @@ -111,7 +111,7 @@ func (s *BlockSyncSummary) ID() ids.ID { } func (s *BlockSyncSummary) String() string { - return fmt.Sprintf("SyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot) + return fmt.Sprintf("BlockSyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot) } func (s *BlockSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 
0cf53150d9..309830dfc7 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -618,7 +618,8 @@ func (vm *VM) Initialize( vm.setAppRequestHandlers() - vm.StateSyncServer = vmsync.NewStateSyncServer(vm.blockChain, atomic.NewAtomicProvider(vm.blockChain, vm.atomicTrie), vm.config.StateSyncCommitInterval) + atomicProvider := atomic.NewAtomicProvider(vm.blockChain, vm.atomicTrie) + vm.StateSyncServer = vmsync.NewStateSyncServer(vm.blockChain, atomicProvider, vm.config.StateSyncCommitInterval) return vm.initializeStateSyncClient(lastAcceptedHeight) } From 460264c51d95b28469bb0aa25b77896092452e09 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 7 Jan 2025 12:28:33 +0300 Subject: [PATCH 35/91] fix tests --- core/state/state_test.go | 13 +-- plugin/evm/export_tx_test.go | 203 ----------------------------------- 2 files changed, 5 insertions(+), 211 deletions(-) diff --git a/core/state/state_test.go b/core/state/state_test.go index 3d6dadf209..f5d784e9d8 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -81,7 +81,6 @@ func TestDump(t *testing.T) { "nonce": 0, "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", - "isMultiCoin": false, "address": "0x0000000000000000000000000000000000000001", "key": "0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d" }, @@ -90,7 +89,6 @@ func TestDump(t *testing.T) { "nonce": 0, "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", - "isMultiCoin": false, "address": "0x0000000000000000000000000000000000000002", "key": "0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62" }, @@ -100,7 +98,6 @@ func TestDump(t *testing.T) { "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "codeHash": 
"0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3", "code": "0x03030303030303", - "isMultiCoin": false, "address": "0x0000000000000000000000000000000000000102", "key": "0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1" } @@ -138,10 +135,10 @@ func TestIterativeDump(t *testing.T) { // check that DumpToCollector contains the state objects that are in trie got := b.String() want := `{"root":"0x0ffca661efa3b7504ac015083994c94fd7d0d24db60354c717c936afcced762a"} -{"balance":"22","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000001","key":"0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"} -{"balance":"1337","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000000","key":"0x5380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"} -{"balance":"0","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3","code":"0x03030303030303","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000102","key":"0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"} -{"balance":"44","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","isMultiCoin":false,"address":"0x0000000000000000000000000000000000000002","key":"0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"} 
+{"balance":"22","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","address":"0x0000000000000000000000000000000000000001","key":"0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"} +{"balance":"1337","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","address":"0x0000000000000000000000000000000000000000","key":"0x5380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a"} +{"balance":"0","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3","code":"0x03030303030303","address":"0x0000000000000000000000000000000000000102","key":"0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"} +{"balance":"44","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","address":"0x0000000000000000000000000000000000000002","key":"0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"} ` if got != want { t.Errorf("DumpToCollector mismatch:\ngot: %s\nwant: %s\n", got, want) @@ -152,7 +149,7 @@ func TestNull(t *testing.T) { s := newStateEnv() address := common.HexToAddress("0x823140710bf13990e4500136726d8b55") s.state.CreateAccount(address) - //value := common.FromHex("0x823140710bf13990e4500136726d8b55") + // value := common.FromHex("0x823140710bf13990e4500136726d8b55") var value common.Hash s.state.SetState(address, common.Hash{}, value) diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index a6e9513150..bf2eb76723 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -1829,206 +1829,3 @@ func TestNewExportTx(t *testing.T) { }) } } - -func 
TestNewExportTxMulticoin(t *testing.T) { - tests := []struct { - name string - genesis string - rules params.Rules - bal uint64 - balmc uint64 - }{ - { - name: "apricot phase 0", - genesis: genesisJSONApricotPhase0, - rules: apricotRulesPhase0, - bal: 49000000, - balmc: 25000000, - }, - { - name: "apricot phase 1", - genesis: genesisJSONApricotPhase1, - rules: apricotRulesPhase1, - bal: 49000000, - balmc: 25000000, - }, - { - name: "apricot phase 2", - genesis: genesisJSONApricotPhase2, - rules: apricotRulesPhase2, - bal: 48000000, - balmc: 25000000, - }, - { - name: "apricot phase 3", - genesis: genesisJSONApricotPhase3, - rules: apricotRulesPhase3, - bal: 48947900, - balmc: 25000000, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, test.genesis, "", "") - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - parent := vm.LastAcceptedBlockInternal().(*Block) - importAmount := uint64(50000000) - utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} - - utxo := &avax.UTXO{ - UTXOID: utxoID, - Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: importAmount, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, - }, - }, - } - utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) - if err != nil { - t.Fatal(err) - } - - inputID := utxo.InputID() - - tid := ids.GenerateTestID() - importAmount2 := uint64(30000000) - utxoID2 := avax.UTXOID{TxID: ids.GenerateTestID()} - utxo2 := &avax.UTXO{ - UTXOID: utxoID2, - Asset: avax.Asset{ID: tid}, - Out: &secp256k1fx.TransferOutput{ - Amt: importAmount2, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, - }, - }, - } - utxoBytes2, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo2) - if err != nil { - t.Fatal(err) - } - - xChainSharedMemory := 
sharedMemory.NewSharedMemory(vm.ctx.XChainID) - inputID2 := utxo2.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{ - { - Key: inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - testKeys[0].Address().Bytes(), - }, - }, - { - Key: inputID2[:], - Value: utxoBytes2, - Traits: [][]byte{ - testKeys[0].Address().Bytes(), - }, - }, - }}}); err != nil { - t.Fatal(err) - } - - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddRemoteTx(tx); err != nil { - t.Fatal(err) - } - - <-issuer - - blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { - t.Fatal(err) - } - - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - parent = vm.LastAcceptedBlockInternal().(*Block) - exportAmount := uint64(5000000) - - testKeys0Addr := testKeys[0].EthAddress() - exportId, err := ids.ToShortID(testKeys0Addr[:]) - if err != nil { - t.Fatal(err) - } - - state, err := vm.blockChain.State() - if err != nil { - t.Fatal(err) - } - - tx, err = atomic.NewExportTx(vm.ctx, vm.currentRules(), state, exportAmount, vm.ctx.XChainID, exportId, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - exportTx := tx.UnsignedAtomicTx - backend := &atomic.VerifierBackend{ - Ctx: vm.ctx, - Fx: &vm.fx, - Rules: vm.currentRules(), - Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm, - SecpCache: &vm.secpCache, - } - - if err := exportTx.SemanticVerify(backend, tx, parent, parent.ethBlock.BaseFee()); err != nil { - t.Fatal("newExportTx created an invalid transaction", err) - } - - commitBatch, err := vm.db.CommitBatch() - if err != nil { - 
t.Fatalf("Failed to create commit batch for VM due to %s", err) - } - chainID, atomicRequests, err := exportTx.AtomicOps() - if err != nil { - t.Fatalf("Failed to accept export transaction due to: %s", err) - } - - if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{chainID: {PutRequests: atomicRequests.PutRequests}}, commitBatch); err != nil { - t.Fatal(err) - } - - stdb, err := vm.blockChain.State() - if err != nil { - t.Fatal(err) - } - err = exportTx.EVMStateTransfer(vm.ctx, stdb) - if err != nil { - t.Fatal(err) - } - - addr := testKeys[0].EthAddress() - if stdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { - t.Fatalf("address balance %s equal %s not %s", addr.String(), stdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) - } - if stdb.GetBalanceMultiCoin(addr, common.BytesToHash(tid[:])).Cmp(new(big.Int).SetUint64(test.balmc)) != 0 { - t.Fatalf("address balance multicoin %s equal %s not %s", addr.String(), stdb.GetBalanceMultiCoin(addr, common.BytesToHash(tid[:])), new(big.Int).SetUint64(test.balmc)) - } - }) - } -} From 9d1af78eacc33bd013d86d326b25f45ba91aa2cb Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 7 Jan 2025 12:39:45 +0300 Subject: [PATCH 36/91] update releases md --- RELEASES.md | 6 ++++++ internal/ethapi/api.go | 6 +++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 7f8393b5de..9f68d41721 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,11 @@ # Release Notes +## Pending Release + +- Removed the `IsMultiCoin` field from the Dump results and related functions +- Removed the GenesisMultiCoinBalance type and MCBalance field from the GenesisAccount +- Removed deprecated `ExportKey`, `ImportKey`, `ImportAVAX`, `Import`, `ExportAVAX`, `Export` APIs + ## [v0.14.1](https://github.com/ava-labs/coreth/releases/tag/v0.14.1) - Remove API eth_getAssetBalance that was used to query ANT balances (deprecated since v0.10.0) - Remove legacy gossip 
handler and metrics (deprecated since v0.10.0) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index bdf5140c26..93aadbe14c 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -228,7 +228,7 @@ func (s *TxPoolAPI) Inspect() map[string]map[string]map[string]string { pending, queue := s.b.TxPoolContent() // Define a formatter to flatten a transaction into a string - format := func(tx *types.Transaction) string { + var format = func(tx *types.Transaction) string { if to := tx.To(); to != nil { return fmt.Sprintf("%s: %v wei + %v gas Ă— %v wei", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice()) } @@ -1954,11 +1954,11 @@ func (s *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, g matchTx := sendArgs.toTransaction() // Before replacing the old transaction, ensure the _new_ transaction fee is reasonable. - price := matchTx.GasPrice() + var price = matchTx.GasPrice() if gasPrice != nil { price = gasPrice.ToInt() } - gas := matchTx.Gas() + var gas = matchTx.Gas() if gasLimit != nil { gas = uint64(*gasLimit) } From ab526d4664bf031d14c8f6b7b7ea844fb7bf36ae Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 7 Jan 2025 19:00:38 +0300 Subject: [PATCH 37/91] Update RELEASES.md Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- RELEASES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASES.md b/RELEASES.md index bc34eb7264..bf7d06db07 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -3,7 +3,7 @@ ## Pending Release - Removed the `IsMultiCoin` field from the Dump results and related functions -- Removed the GenesisMultiCoinBalance type and MCBalance field from the GenesisAccount +- Removed the `GenesisMultiCoinBalance` type and `MCBalance` field from the `GenesisAccount` - Removed deprecated `ExportKey`, `ImportKey`, `ImportAVAX`, `Import`, `ExportAVAX`, `Export` APIs ## [v0.14.1](https://github.com/ava-labs/coreth/releases/tag/v0.14.1) From 06e506404dececc3009fe180693c2fe79dec9f4d Mon Sep 17 
00:00:00 2001 From: Ceyhun Onur Date: Tue, 7 Jan 2025 19:01:00 +0300 Subject: [PATCH 38/91] Update plugin/evm/vm_test.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/vm_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index c9183a9288..a16997a5b9 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -981,7 +981,7 @@ func (vm *VM) newExportTx( } // Create the transaction - tx, err := atomic.NewExportTx( + return atomic.NewExportTx( vm.ctx, // Context vm.currentRules(), // VM rules state, From 7aad0ec05abe1f035e4d87f14f435da920e2290b Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 7 Jan 2025 19:19:08 +0300 Subject: [PATCH 39/91] replace deprecated Add64 --- plugin/evm/atomic/export_tx.go | 22 +++++++-------- plugin/evm/atomic/import_tx.go | 10 +++---- plugin/evm/block_verification.go | 2 +- plugin/evm/syncervm_test.go | 9 +++++- plugin/evm/vm_test.go | 47 ++++++++++---------------------- 5 files changed, 38 insertions(+), 52 deletions(-) diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index 37ed7b0b9d..04c5893e77 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -137,12 +137,12 @@ func (utx *UnsignedExportTx) GasUsed(fixedFee bool) (uint64, error) { if err != nil { return 0, err } - cost, err := math.Add64(byteCost, sigCost) + cost, err := math.Add(byteCost, sigCost) if err != nil { return 0, err } if fixedFee { - cost, err = math.Add64(cost, params.AtomicTxBaseCost) + cost, err = math.Add(cost, params.AtomicTxBaseCost) if err != nil { return 0, err } @@ -160,7 +160,7 @@ func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { ) for _, out := range utx.ExportedOutputs { if out.AssetID() == assetID { - spent, err = math.Add64(spent, out.Output().Amount()) + spent, err = math.Add(spent, out.Output().Amount()) if err != nil { return 0, err } @@ -168,7 +168,7 @@ func (utx 
*UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { } for _, in := range utx.Ins { if in.AssetID == assetID { - input, err = math.Add64(input, in.Amount) + input, err = math.Add(input, in.Amount) if err != nil { return 0, err } @@ -287,7 +287,7 @@ func NewExportTx( ctx *snow.Context, rules params.Rules, state StateDB, - amount uint64, // Amount of tokens to export + amount uint64, // Amount of AVAX to export chainID ids.ID, // Chain to send the UTXOs to to ids.ShortID, // Address of chain recipient baseFee *big.Int, // fee to use post-AP3 @@ -306,7 +306,6 @@ func NewExportTx( }} var ( - avaxNeeded uint64 = amount ins, avaxIns []EVMInput signers, avaxSigners [][]*secp256k1.PrivateKey err error @@ -332,14 +331,13 @@ func NewExportTx( return nil, err } - avaxIns, avaxSigners, err = GetSpendableAVAXWithFee(ctx, state, keys, avaxNeeded, cost, baseFee) + avaxIns, avaxSigners, err = GetSpendableAVAXWithFee(ctx, state, keys, amount, cost, baseFee) default: - var newAvaxNeeded uint64 - newAvaxNeeded, err = math.Add64(avaxNeeded, params.AvalancheAtomicTxFee) + avaxNeeded, err := math.Add(amount, params.AvalancheAtomicTxFee) if err != nil { return nil, errOverflowExport } - avaxIns, avaxSigners, err = GetSpendableFunds(ctx, state, keys, newAvaxNeeded) + avaxIns, avaxSigners, err = GetSpendableFunds(ctx, state, keys, avaxNeeded) } if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) @@ -471,7 +469,7 @@ func GetSpendableAVAXWithFee( return nil, nil, err } - newAmount, err := math.Add64(amount, initialFee) + newAmount, err := math.Add(amount, initialFee) if err != nil { return nil, nil, err } @@ -512,7 +510,7 @@ func GetSpendableAVAXWithFee( // Update the cost for the next iteration cost = newCost - newAmount, err := math.Add64(amount, additionalFee) + newAmount, err := math.Add(amount, additionalFee) if err != nil { return nil, nil, err } diff --git a/plugin/evm/atomic/import_tx.go b/plugin/evm/atomic/import_tx.go index 
5b608ca2f5..f1df990839 100644 --- a/plugin/evm/atomic/import_tx.go +++ b/plugin/evm/atomic/import_tx.go @@ -144,13 +144,13 @@ func (utx *UnsignedImportTx) GasUsed(fixedFee bool) (uint64, error) { if err != nil { return 0, err } - cost, err = math.Add64(cost, inCost) + cost, err = math.Add(cost, inCost) if err != nil { return 0, err } } if fixedFee { - cost, err = math.Add64(cost, params.AtomicTxBaseCost) + cost, err = math.Add(cost, params.AtomicTxBaseCost) if err != nil { return 0, err } @@ -167,7 +167,7 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { ) for _, out := range utx.Outs { if out.AssetID == assetID { - spent, err = math.Add64(spent, out.Amount) + spent, err = math.Add(spent, out.Amount) if err != nil { return 0, err } @@ -175,7 +175,7 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { } for _, in := range utx.ImportedInputs { if in.AssetID() == assetID { - input, err = math.Add64(input, in.Input().Amount()) + input, err = math.Add(input, in.Input().Amount()) if err != nil { return 0, err } @@ -311,7 +311,7 @@ func NewImportTx( continue } aid := utxo.AssetID() - importedAmount[aid], err = math.Add64(importedAmount[aid], input.Amount()) + importedAmount[aid], err = math.Add(importedAmount[aid], input.Amount()) if err != nil { return nil, err } diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_verification.go index f0f8db1658..7b927d0fa0 100644 --- a/plugin/evm/block_verification.go +++ b/plugin/evm/block_verification.go @@ -242,7 +242,7 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { if err != nil { return err } - totalGasUsed, err = safemath.Add64(totalGasUsed, gasUsed) + totalGasUsed, err = safemath.Add(totalGasUsed, gasUsed) if err != nil { return err } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 9ece0606d1..b8b6a78cd0 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -315,7 +315,14 @@ func 
createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require.NoError(serverVM.mempool.AddLocalTx(importTx)) case 1: // export some of the imported UTXOs to test exportTx is properly synced - exportTx, err = serverVM.newExportTx( + state, err := serverVM.blockChain.State() + if err != nil { + t.Fatal(err) + } + exportTx, err = atomic.NewExportTx( + serverVM.ctx, + serverVM.currentRules(), + state, importAmount/2, serverVM.ctx.XChainID, testShortIDAddrs[0], diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index a16997a5b9..58b7788f84 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -603,8 +603,20 @@ func TestIssueAtomicTxs(t *testing.T) { if logs == nil { t.Fatal("Expected logs to be non-nil") } - - exportTx, err := vm.newExportTx(importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + state, err := vm.blockChain.State() + if err != nil { + t.Fatal(err) + } + exportTx, err := atomic.NewExportTx( + vm.ctx, + vm.currentRules(), + state, + importAmount-(2*params.AvalancheAtomicTxFee), + vm.ctx.XChainID, + testShortIDAddrs[0], + initialBaseFee, + []*secp256k1.PrivateKey{testKeys[0]}, + ) if err != nil { t.Fatal(err) } @@ -967,37 +979,6 @@ func testConflictingImportTxs(t *testing.T, genesis string) { } } -// newExportTx returns a new ExportTx -func (vm *VM) newExportTx( - amount uint64, // Amount of tokens to export - chainID ids.ID, // Chain to send the UTXOs to - to ids.ShortID, // Address of chain recipient - baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens -) (*atomic.Tx, error) { - state, err := vm.blockChain.State() - if err != nil { - return nil, err - } - - // Create the transaction - return atomic.NewExportTx( - vm.ctx, // Context - vm.currentRules(), // VM rules - state, - amount, // Amount - chainID, // ID of the chain to send the funds to - to, // Address - baseFee, - 
keys, // Private keys - ) - if err != nil { - return nil, err - } - - return tx, nil -} - func TestReissueAtomicTxHigherGasPrice(t *testing.T) { kc := secp256k1fx.NewKeychain(testKeys...) From 40f9827a6b59b9ca358f190c13dd39d1c9cb4480 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 7 Jan 2025 20:31:09 +0300 Subject: [PATCH 40/91] fix ineff assign --- plugin/evm/atomic/export_tx.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index 04c5893e77..3b61ea71b8 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -333,7 +333,8 @@ func NewExportTx( avaxIns, avaxSigners, err = GetSpendableAVAXWithFee(ctx, state, keys, amount, cost, baseFee) default: - avaxNeeded, err := math.Add(amount, params.AvalancheAtomicTxFee) + var avaxNeeded uint64 + avaxNeeded, err = math.Add(amount, params.AvalancheAtomicTxFee) if err != nil { return nil, errOverflowExport } From 11bc6e829c8202aafd8df124cd8ed17b52fbf57a Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 14 Jan 2025 11:57:12 +0300 Subject: [PATCH 41/91] merge master to atomic base branch (#744) --- .github/CONTRIBUTING.md | 33 +++++ .github/workflows/ci.yml | 11 ++ consensus/dummy/README.md | 2 +- core/state/pruner/pruner.go | 2 +- core/txpool/legacypool/legacypool.go | 14 -- eth/tracers/js/tracer_test.go | 2 +- eth/tracers/logger/access_list_tracer.go | 2 +- go.mod | 2 + go.sum | 4 + plugin/evm/{service.go => api.go} | 0 .../shared_memories.go} | 24 ++-- .../atomic/{test_tx.go => atomictest/tx.go} | 33 ++--- plugin/evm/atomic/{ => atomictest}/utils.go | 5 +- plugin/evm/atomic/gossip_test.go | 81 ----------- plugin/evm/atomic/mempool_test.go | 56 -------- .../evm/atomic/{ => state}/atomic_backend.go | 40 +++--- plugin/evm/atomic/{ => state}/atomic_state.go | 5 +- plugin/evm/atomic/{ => state}/atomic_trie.go | 57 ++------ .../{ => state}/atomic_trie_iterator.go | 39 ++++- .../{ => 
state}/atomic_trie_iterator_test.go | 13 +- .../atomic/{ => state}/atomic_trie_test.go | 134 ++++++++++++----- .../{ => state}/atomic_tx_repository.go | 43 +++--- .../{ => state}/atomic_tx_repository_test.go | 115 ++++----------- plugin/evm/atomic/{ => sync}/atomic_syncer.go | 14 +- .../atomic/{ => sync}/atomic_syncer_test.go | 19 +-- plugin/evm/atomic/{ => txpool}/mempool.go | 71 ++++----- plugin/evm/atomic/txpool/mempool_test.go | 136 ++++++++++++++++++ plugin/evm/atomic/{ => txpool}/tx_heap.go | 19 +-- .../evm/atomic/{ => txpool}/tx_heap_test.go | 15 +- plugin/evm/block.go | 4 +- plugin/evm/block_builder.go | 4 +- plugin/evm/config.go | 21 +++ plugin/evm/config/config.go | 35 +++-- plugin/evm/database/wrapped_database.go | 28 +--- plugin/evm/export_tx_test.go | 6 +- plugin/evm/imports_test.go | 72 ++++++++++ plugin/evm/mempool_atomic_gossiping_test.go | 17 ++- plugin/evm/syncervm_client.go | 34 +++-- plugin/evm/syncervm_server.go | 6 +- plugin/evm/syncervm_test.go | 13 +- plugin/evm/vm.go | 85 +++++------ plugin/evm/vm_database.go | 82 +++++++++++ plugin/evm/vm_test.go | 14 +- precompile/contract/interfaces.go | 2 +- precompile/contract/mocks.go | 45 +++--- precompile/contract/mocks_generate_test.go | 6 + precompile/contracts/warp/README.md | 2 +- precompile/precompileconfig/mocks.go | 38 ++--- .../precompileconfig/mocks_generate_test.go | 6 + rpc/handler.go | 2 +- rpc/types.go | 2 +- scripts/build_docker_image.sh | 23 --- scripts/known_flakes.txt | 1 + scripts/mock.gen.sh | 44 ------ scripts/mocks.mockgen.txt | 2 - sync/statesync/trie_sync_stats.go | 2 +- tools.go | 8 ++ warp/aggregator/aggregator_test.go | 2 +- warp/aggregator/mock_signature_getter.go | 55 ++++++- warp/aggregator/mocks_generate_test.go | 6 + warp/backend.go | 2 +- warp/backend_test.go | 7 + warp/verifier_backend.go | 8 +- warp/verifier_backend_test.go | 10 +- warp/verifier_stats.go | 10 +- 65 files changed, 963 insertions(+), 732 deletions(-) rename plugin/evm/{service.go => api.go} (100%) 
rename plugin/evm/atomic/{test_shared_memories.go => atomictest/shared_memories.go} (73%) rename plugin/evm/atomic/{test_tx.go => atomictest/tx.go} (86%) rename plugin/evm/atomic/{ => atomictest}/utils.go (69%) delete mode 100644 plugin/evm/atomic/mempool_test.go rename plugin/evm/atomic/{ => state}/atomic_backend.go (93%) rename plugin/evm/atomic/{ => state}/atomic_state.go (97%) rename plugin/evm/atomic/{ => state}/atomic_trie.go (87%) rename plugin/evm/atomic/{ => state}/atomic_trie_iterator.go (76%) rename plugin/evm/atomic/{ => state}/atomic_trie_iterator_test.go (88%) rename plugin/evm/atomic/{ => state}/atomic_trie_test.go (80%) rename plugin/evm/atomic/{ => state}/atomic_tx_repository.go (91%) rename plugin/evm/atomic/{ => state}/atomic_tx_repository_test.go (63%) rename plugin/evm/atomic/{ => sync}/atomic_syncer.go (93%) rename plugin/evm/atomic/{ => sync}/atomic_syncer_test.go (90%) rename plugin/evm/atomic/{ => txpool}/mempool.go (89%) create mode 100644 plugin/evm/atomic/txpool/mempool_test.go rename plugin/evm/atomic/{ => txpool}/tx_heap.go (88%) rename plugin/evm/atomic/{ => txpool}/tx_heap_test.go (90%) create mode 100644 plugin/evm/config.go create mode 100644 plugin/evm/imports_test.go create mode 100644 plugin/evm/vm_database.go create mode 100644 precompile/contract/mocks_generate_test.go create mode 100644 precompile/precompileconfig/mocks_generate_test.go delete mode 100755 scripts/build_docker_image.sh delete mode 100755 scripts/mock.gen.sh delete mode 100644 scripts/mocks.mockgen.txt create mode 100644 tools.go create mode 100644 warp/aggregator/mocks_generate_test.go diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 87c79fcfe2..9e89d2d0da 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -33,3 +33,36 @@ Please make sure your contributions adhere to our coding guidelines: Before you submit a feature request, please check and make sure that it isn't possible through some other means. 
+## Mocks + +Mocks are auto-generated using [mockgen](https://pkg.go.dev/go.uber.org/mock/mockgen) and `//go:generate` commands in the code. + +* To **re-generate all mocks**, use the command below from the root of the project: + + ```sh + go generate -run "go.uber.org/mock/mockgen" ./... + ``` + +* To **add** an interface that needs a corresponding mock generated: + * if the file `mocks_generate_test.go` exists in the package where the interface is located, either: + * modify its `//go:generate go run go.uber.org/mock/mockgen` to generate a mock for your interface (preferred); or + * add another `//go:generate go run go.uber.org/mock/mockgen` to generate a mock for your interface according to specific mock generation settings + * if the file `mocks_generate_test.go` does not exist in the package where the interface is located, create it with content (adapt as needed): + + ```go + // Copyright (C) 2025-2025, Ava Labs, Inc. All rights reserved. + // See the file LICENSE for licensing terms. + + package mypackage + + //go:generate go run go.uber.org/mock/mockgen -package=${GOPACKAGE} -destination=mocks_test.go . YourInterface + ``` + + Notes: + 1. Ideally generate all mocks to `mocks_test.go` for the package you need to use the mocks for and do not export mocks to other packages. This reduces package dependencies, reduces production code pollution and forces to have locally defined narrow interfaces. + 1. Prefer using reflect mode to generate mocks than source mode, unless you need a mock for an unexported interface, which should be rare. +* To **remove** an interface from having a corresponding mock generated: + 1. Edit the `mocks_generate_test.go` file in the directory where the interface is defined + 1. If the `//go:generate` mockgen command line: + * generates a mock file for multiple interfaces, remove your interface from the line + * generates a mock file only for the interface, remove the entire line. 
If the file is empty, remove `mocks_generate_test.go` as well. diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 553c2761d7..9b180a501d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -84,6 +84,17 @@ jobs: run: echo "TIMEOUT=1200s" >> "$GITHUB_ENV" - run: go mod download shell: bash + - name: go mod tidy + run: | + go mod tidy + git diff --exit-code + - name: Mocks are up to date + shell: bash + run: | + grep -lr -E '^// Code generated by MockGen\. DO NOT EDIT\.$' . | xargs -r rm + go generate -run "go.uber.org/mock/mockgen" ./... + git add --intent-to-add --all + git diff --exit-code - run: ./scripts/build.sh evm shell: bash - run: ./scripts/build_test.sh diff --git a/consensus/dummy/README.md b/consensus/dummy/README.md index 23ff20072a..cca69cf239 100644 --- a/consensus/dummy/README.md +++ b/consensus/dummy/README.md @@ -18,7 +18,7 @@ The dynamic fee algorithm aims to adjust the base fee to handle network congesti - EIP-1559 is intended for Ethereum where a block is produced roughly every 10s - C-Chain typically produces blocks every 2 seconds, but the dynamic fee algorithm needs to handle the case that the network quiesces and there are no blocks for a long period of time -- Since C-Chain produces blocks at a different cadence, it adapts EIP-1559 to sum the amount of gas consumed within a 10 second interval instead of using only the amount of gas consumed in the parent block +- Since C-Chain produces blocks at a different cadence, it adapts EIP-1559 to sum the amount of gas consumed within a 10-second interval instead of using only the amount of gas consumed in the parent block ## Consensus Engine Callbacks diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 29431a0ecd..090b7ac6af 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -52,7 +52,7 @@ const ( // stateBloomFilePrefix is the filename prefix of state bloom filter. 
stateBloomFilePrefix = "statebloom" - // stateBloomFilePrefix is the filename suffix of state bloom filter. + // stateBloomFileSuffix is the filename suffix of state bloom filter. stateBloomFileSuffix = "bf.gz" // stateBloomFileTempSuffix is the filename suffix of state bloom filter diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index f34701da78..51825da9e9 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -227,9 +227,6 @@ type LegacyPool struct { signer types.Signer mu sync.RWMutex - // [currentStateLock] is required to allow concurrent access to address nonces - // and balances during reorgs and gossip handling. - currentStateLock sync.Mutex // closed when the transaction pool is stopped. Any goroutine can listen // to this to be notified if it should shut down. generalShutdownChan chan struct{} @@ -685,9 +682,6 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) erro // validateTx checks whether a transaction is valid according to the consensus // rules and adheres to some heuristic limits of the local node (price and size). func (pool *LegacyPool) validateTx(tx *types.Transaction, local bool) error { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() - opts := &txpool.ValidationOptionsWithState{ State: pool.currentState, Rules: pool.chainconfig.Rules( @@ -1500,9 +1494,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { return } pool.currentHead.Store(newHead) - pool.currentStateLock.Lock() pool.currentState = statedb - pool.currentStateLock.Unlock() pool.pendingNonces = newNoncer(statedb) // Inject any transactions discarded due to reorgs @@ -1515,9 +1507,6 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. 
func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() - // Track the promoted transactions to broadcast them at once var promoted []*types.Transaction @@ -1724,9 +1713,6 @@ func (pool *LegacyPool) truncateQueue() { // is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful // to trigger a re-heap is this function func (pool *LegacyPool) demoteUnexecutables() { - pool.currentStateLock.Lock() - defer pool.currentStateLock.Unlock() - // Iterate over all accounts and demote any non-executable transactions gasLimit := pool.currentHead.Load().GasLimit for addr, list := range pool.pending { diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go index 7e46c0aa20..746fc0eb0e 100644 --- a/eth/tracers/js/tracer_test.go +++ b/eth/tracers/js/tracer_test.go @@ -206,7 +206,7 @@ func TestHaltBetweenSteps(t *testing.T) { } } -// testNoStepExec tests a regular value transfer (no exec), and accessing the statedb +// TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb // in 'result' func TestNoStepExec(t *testing.T) { execTracer := func(code string) []byte { diff --git a/eth/tracers/logger/access_list_tracer.go b/eth/tracers/logger/access_list_tracer.go index e2bb12f2d7..d43198a2ff 100644 --- a/eth/tracers/logger/access_list_tracer.go +++ b/eth/tracers/logger/access_list_tracer.go @@ -87,7 +87,7 @@ func (al accessList) equal(other accessList) bool { return true } -// accesslist converts the accesslist to a types.AccessList. +// accessList converts the accesslist to a types.AccessList. 
func (al accessList) accessList() types.AccessList { acl := make(types.AccessList, 0, len(al)) for addr, slots := range al { diff --git a/go.mod b/go.mod index 2edcd99fb9..0b095d2f54 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,7 @@ require ( golang.org/x/sys v0.28.0 golang.org/x/text v0.21.0 golang.org/x/time v0.3.0 + golang.org/x/tools v0.22.0 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -120,6 +121,7 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect + golang.org/x/mod v0.18.0 // indirect golang.org/x/net v0.33.0 // indirect golang.org/x/term v0.27.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect diff --git a/go.sum b/go.sum index 90a90a8b9e..9c19c061be 100644 --- a/go.sum +++ b/go.sum @@ -641,6 +641,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -855,6 +857,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/plugin/evm/service.go b/plugin/evm/api.go similarity index 100% rename from plugin/evm/service.go rename to plugin/evm/api.go diff --git a/plugin/evm/atomic/test_shared_memories.go b/plugin/evm/atomic/atomictest/shared_memories.go similarity index 73% rename from plugin/evm/atomic/test_shared_memories.go rename to plugin/evm/atomic/atomictest/shared_memories.go index 2f92639a5c..177809a6f9 100644 --- a/plugin/evm/atomic/test_shared_memories.go +++ b/plugin/evm/atomic/atomictest/shared_memories.go @@ -1,4 +1,4 @@ -package atomic +package atomictest import ( "testing" @@ -9,13 +9,13 @@ import ( ) type SharedMemories struct { - thisChain atomic.SharedMemory - peerChain atomic.SharedMemory + ThisChain atomic.SharedMemory + PeerChain atomic.SharedMemory thisChainID ids.ID peerChainID ids.ID } -func (s *SharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*atomic.Requests) error { +func (s *SharedMemories) AddItemsToBeRemovedToPeerChain(ops map[ids.ID]*atomic.Requests) error { for _, reqs := range ops { puts := make(map[ids.ID]*atomic.Requests) puts[s.thisChainID] = &atomic.Requests{} @@ -23,7 +23,7 @@ func (s *SharedMemories) addItemsToBeRemovedToPeerChain(ops map[ids.ID]*atomic.R val := []byte{0x1} puts[s.thisChainID].PutRequests = append(puts[s.thisChainID].PutRequests, &atomic.Element{Key: key, Value: val}) } - if err := s.peerChain.Apply(puts); err != nil { + if err := s.PeerChain.Apply(puts); err != nil { return 
err } } @@ -35,7 +35,7 @@ func (s *SharedMemories) AssertOpsApplied(t *testing.T, ops map[ids.ID]*atomic.R for _, reqs := range ops { // should be able to get put requests for _, elem := range reqs.PutRequests { - val, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) + val, err := s.PeerChain.Get(s.thisChainID, [][]byte{elem.Key}) if err != nil { t.Fatalf("error finding puts in peerChainMemory: %s", err) } @@ -44,24 +44,24 @@ func (s *SharedMemories) AssertOpsApplied(t *testing.T, ops map[ids.ID]*atomic.R // should not be able to get remove requests for _, key := range reqs.RemoveRequests { - _, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) + _, err := s.ThisChain.Get(s.peerChainID, [][]byte{key}) assert.EqualError(t, err, "not found") } } } -func (s *SharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { +func (s *SharedMemories) AssertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomic.Requests) { t.Helper() for _, reqs := range ops { // should not be able to get put requests for _, elem := range reqs.PutRequests { - _, err := s.peerChain.Get(s.thisChainID, [][]byte{elem.Key}) + _, err := s.PeerChain.Get(s.thisChainID, [][]byte{elem.Key}) assert.EqualError(t, err, "not found") } // should be able to get remove requests (these were previously added as puts on peerChain) for _, key := range reqs.RemoveRequests { - val, err := s.thisChain.Get(s.peerChainID, [][]byte{key}) + val, err := s.ThisChain.Get(s.peerChainID, [][]byte{key}) assert.NoError(t, err) assert.Equal(t, []byte{0x1}, val[0]) } @@ -71,8 +71,8 @@ func (s *SharedMemories) assertOpsNotApplied(t *testing.T, ops map[ids.ID]*atomi // TODO: once tests are moved to atomic package, unexport this function func NewSharedMemories(atomicMemory *atomic.Memory, thisChainID, peerChainID ids.ID) *SharedMemories { return &SharedMemories{ - thisChain: atomicMemory.NewSharedMemory(thisChainID), - peerChain: atomicMemory.NewSharedMemory(peerChainID), + ThisChain: 
atomicMemory.NewSharedMemory(thisChainID), + PeerChain: atomicMemory.NewSharedMemory(peerChainID), thisChainID: thisChainID, peerChainID: peerChainID, } diff --git a/plugin/evm/atomic/test_tx.go b/plugin/evm/atomic/atomictest/tx.go similarity index 86% rename from plugin/evm/atomic/test_tx.go rename to plugin/evm/atomic/atomictest/tx.go index 0c2121a6b8..cba4303b43 100644 --- a/plugin/evm/atomic/test_tx.go +++ b/plugin/evm/atomic/atomictest/tx.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package atomictest import ( "math/big" @@ -17,20 +17,21 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) const testCodecVersion = 0 -var testTxCodec codec.Manager +var TestTxCodec codec.Manager func init() { - testTxCodec = codec.NewDefaultManager() + TestTxCodec = codec.NewDefaultManager() c := linearcodec.NewDefault() errs := wrappers.Errs{} errs.Add( c.RegisterType(&TestUnsignedTx{}), - testTxCodec.RegisterCodec(testCodecVersion, c), + TestTxCodec.RegisterCodec(testCodecVersion, c), ) if errs.Errored() { @@ -52,7 +53,7 @@ type TestUnsignedTx struct { EVMStateTransferV error } -var _ UnsignedAtomicTx = &TestUnsignedTx{} +var _ atomic.UnsignedAtomicTx = &TestUnsignedTx{} // GasUsed implements the UnsignedAtomicTx interface func (t *TestUnsignedTx) GasUsed(fixedFee bool) (uint64, error) { return t.GasUsedV, nil } @@ -84,19 +85,19 @@ func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } // SemanticVerify implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) SemanticVerify(backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error { +func (t *TestUnsignedTx) SemanticVerify(backend *atomic.VerifierBackend, stx 
*atomic.Tx, parent atomic.AtomicBlockContext, baseFee *big.Int) error { return t.SemanticVerifyV } // EVMStateTransfer implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state StateDB) error { +func (t *TestUnsignedTx) EVMStateTransfer(ctx *snow.Context, state atomic.StateDB) error { return t.EVMStateTransferV } var TestBlockchainID = ids.GenerateTestID() -func GenerateTestImportTxWithGas(gasUsed uint64, burned uint64) *Tx { - return &Tx{ +func GenerateTestImportTxWithGas(gasUsed uint64, burned uint64) *atomic.Tx { + return &atomic.Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), GasUsedV: gasUsed, @@ -112,8 +113,8 @@ func GenerateTestImportTxWithGas(gasUsed uint64, burned uint64) *Tx { } } -func GenerateTestImportTx() *Tx { - return &Tx{ +func GenerateTestImportTx() *atomic.Tx { + return &atomic.Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), AcceptRequestsBlockchainIDV: TestBlockchainID, @@ -127,8 +128,8 @@ func GenerateTestImportTx() *Tx { } } -func GenerateTestExportTx() *Tx { - return &Tx{ +func GenerateTestExportTx() *atomic.Tx { + return &atomic.Tx{ UnsignedAtomicTx: &TestUnsignedTx{ IDV: ids.GenerateTestID(), AcceptRequestsBlockchainIDV: TestBlockchainID, @@ -148,7 +149,7 @@ func GenerateTestExportTx() *Tx { } } -func NewTestTx() *Tx { +func NewTestTx() *atomic.Tx { txType := rand.Intn(2) switch txType { case 0: @@ -160,8 +161,8 @@ func NewTestTx() *Tx { } } -func NewTestTxs(numTxs int) []*Tx { - txs := make([]*Tx, 0, numTxs) +func NewTestTxs(numTxs int) []*atomic.Tx { + txs := make([]*atomic.Tx, 0, numTxs) for i := 0; i < numTxs; i++ { txs = append(txs, NewTestTx()) } diff --git a/plugin/evm/atomic/utils.go b/plugin/evm/atomic/atomictest/utils.go similarity index 69% rename from plugin/evm/atomic/utils.go rename to plugin/evm/atomic/atomictest/utils.go index e599b75201..a7432ea2b3 100644 --- a/plugin/evm/atomic/utils.go +++ b/plugin/evm/atomic/atomictest/utils.go @@ 
-1,14 +1,15 @@ // (c) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package atomictest import ( avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) -func ConvertToAtomicOps(tx *Tx) (map[ids.ID]*avalancheatomic.Requests, error) { +func ConvertToAtomicOps(tx *atomic.Tx) (map[ids.ID]*avalancheatomic.Requests, error) { id, reqs, err := tx.AtomicOps() if err != nil { return nil, err diff --git a/plugin/evm/atomic/gossip_test.go b/plugin/evm/atomic/gossip_test.go index 5140f6bc7e..90f33165f2 100644 --- a/plugin/evm/atomic/gossip_test.go +++ b/plugin/evm/atomic/gossip_test.go @@ -6,11 +6,8 @@ package atomic import ( "testing" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) @@ -37,81 +34,3 @@ func TestGossipAtomicTxMarshaller(t *testing.T) { require.NoError(err) require.Equal(want.GossipID(), got.GossipID()) } - -func TestAtomicMempoolIterate(t *testing.T) { - txs := []*GossipAtomicTx{ - { - Tx: &Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ - IDV: ids.GenerateTestID(), - }, - }, - }, - { - Tx: &Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ - IDV: ids.GenerateTestID(), - }, - }, - }, - } - - tests := []struct { - name string - add []*GossipAtomicTx - f func(tx *GossipAtomicTx) bool - expectedTxs []*GossipAtomicTx - }{ - { - name: "func matches nothing", - add: txs, - f: func(*GossipAtomicTx) bool { - return false - }, - expectedTxs: []*GossipAtomicTx{}, - }, - { - name: "func matches all", - add: txs, - f: func(*GossipAtomicTx) bool { - return true - }, - expectedTxs: txs, - }, - { - name: "func matches subset", - add: txs, - f: func(tx *GossipAtomicTx) bool { - return tx.Tx == txs[0].Tx - 
}, - expectedTxs: []*GossipAtomicTx{txs[0]}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 10, nil) - require.NoError(err) - - for _, add := range tt.add { - require.NoError(m.Add(add)) - } - - matches := make([]*GossipAtomicTx, 0) - f := func(tx *GossipAtomicTx) bool { - match := tt.f(tx) - - if match { - matches = append(matches, tx) - } - - return match - } - - m.Iterate(f) - - require.ElementsMatch(tt.expectedTxs, matches) - }) - } -} diff --git a/plugin/evm/atomic/mempool_test.go b/plugin/evm/atomic/mempool_test.go deleted file mode 100644 index 9334853a5e..0000000000 --- a/plugin/evm/atomic/mempool_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package atomic - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" -) - -func TestMempoolAddTx(t *testing.T) { - require := require.New(t) - m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) - require.NoError(err) - - txs := make([]*GossipAtomicTx, 0) - for i := 0; i < 3_000; i++ { - tx := &GossipAtomicTx{ - Tx: &Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ - IDV: ids.GenerateTestID(), - }, - }, - } - - txs = append(txs, tx) - require.NoError(m.Add(tx)) - } - - for _, tx := range txs { - require.True(m.bloom.Has(tx)) - } -} - -// Add should return an error if a tx is already known -func TestMempoolAdd(t *testing.T) { - require := require.New(t) - m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) - require.NoError(err) - - tx := &GossipAtomicTx{ - Tx: &Tx{ - UnsignedAtomicTx: &TestUnsignedTx{ - IDV: ids.GenerateTestID(), - }, - }, - } - - require.NoError(m.Add(tx)) - err = m.Add(tx) - 
require.ErrorIs(err, errTxAlreadyKnown) -} diff --git a/plugin/evm/atomic/atomic_backend.go b/plugin/evm/atomic/state/atomic_backend.go similarity index 93% rename from plugin/evm/atomic/atomic_backend.go rename to plugin/evm/atomic/state/atomic_backend.go index 5156a8a6e8..14e49a8d73 100644 --- a/plugin/evm/atomic/atomic_backend.go +++ b/plugin/evm/atomic/state/atomic_backend.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package state import ( "encoding/binary" @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" - syncclient "github.com/ava-labs/coreth/sync/client" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -24,8 +24,14 @@ import ( var _ AtomicBackend = &atomicBackend{} var ( - atomicTrieDBPrefix = []byte("atomicTrieDB") - atomicTrieMetaDBPrefix = []byte("atomicTrieMetaDB") + atomicTrieDBPrefix = []byte("atomicTrieDB") + atomicTrieMetaDBPrefix = []byte("atomicTrieMetaDB") + appliedSharedMemoryCursorKey = []byte("atomicTrieLastAppliedToSharedMemory") + sharedMemoryApplyBatchSize = 10_000 // specifies the number of atomic operations to batch progress updates +) + +const ( + progressLogFrequency = 30 * time.Second ) // AtomicBackend abstracts the verification and processing @@ -38,7 +44,7 @@ type AtomicBackend interface { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. 
- InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) + InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) // Returns an AtomicState corresponding to a block hash that has been inserted // but not Accepted or Rejected yet. @@ -61,10 +67,6 @@ type AtomicBackend interface { // will not have been executed on shared memory. MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error - // Syncer creates and returns a new Syncer object that can be used to sync the - // state of the atomic trie from peers - Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (Syncer, error) - // SetLastAccepted is used after state-sync to reset the last accepted block. SetLastAccepted(lastAcceptedHash common.Hash) @@ -82,7 +84,7 @@ type atomicBackend struct { sharedMemory avalancheatomic.SharedMemory repo AtomicTxRepository - atomicTrie AtomicTrie + atomicTrie *atomicTrie lastAcceptedHash common.Hash verifiedRoots map[common.Hash]AtomicState @@ -93,12 +95,12 @@ func NewAtomicBackend( db *versiondb.Database, sharedMemory avalancheatomic.SharedMemory, bonusBlocks map[uint64]ids.ID, repo AtomicTxRepository, lastAcceptedHeight uint64, lastAcceptedHash common.Hash, commitInterval uint64, -) (AtomicBackend, error) { +) (*atomicBackend, error) { atomicTrieDB := prefixdb.New(atomicTrieDBPrefix, db) metadataDB := prefixdb.New(atomicTrieMetaDBPrefix, db) codec := repo.Codec() - atomicTrie, err := newAtomicTrie(atomicTrieDB, metadataDB, codec, lastAcceptedHeight, commitInterval) + atomicTrie, err := NewAtomicTrie(atomicTrieDB, metadataDB, codec, lastAcceptedHeight, commitInterval) if err != nil { return nil, err } @@ -156,7 +158,7 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error { // iterate over the transactions, indexing them if the height is < commit height // otherwise, add the atomic 
operations from the transaction to the uncommittedOpsMap height = binary.BigEndian.Uint64(iter.Key()) - txs, err := ExtractAtomicTxs(iter.Value(), true, a.codec) + txs, err := atomic.ExtractAtomicTxs(iter.Value(), true, a.codec) if err != nil { return err } @@ -360,12 +362,6 @@ func (a *atomicBackend) MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight return database.PutUInt64(a.metadataDB, appliedSharedMemoryCursorKey, previousLastAcceptedHeight+1) } -// Syncer creates and returns a new Syncer object that can be used to sync the -// state of the atomic trie from peers -func (a *atomicBackend) Syncer(client syncclient.LeafClient, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (Syncer, error) { - return newAtomicSyncer(client, a, targetRoot, targetHeight, requestSize) -} - func (a *atomicBackend) GetVerifiedAtomicState(blockHash common.Hash) (AtomicState, error) { if state, ok := a.verifiedRoots[blockHash]; ok { return state, nil @@ -401,7 +397,7 @@ func (a *atomicBackend) SetLastAccepted(lastAcceptedHash common.Hash) { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. -func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*Tx) (common.Hash, error) { +func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) { // access the atomic trie at the parent block parentRoot, err := a.getAtomicRootAt(parentHash) if err != nil { @@ -464,11 +460,11 @@ func (a *atomicBackend) AtomicTrie() AtomicTrie { // mergeAtomicOps merges atomic requests represented by [txs] // to the [output] map, depending on whether [chainID] is present in the map. 
-func mergeAtomicOps(txs []*Tx) (map[ids.ID]*avalancheatomic.Requests, error) { +func mergeAtomicOps(txs []*atomic.Tx) (map[ids.ID]*avalancheatomic.Requests, error) { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. - copyTxs := make([]*Tx, len(txs)) + copyTxs := make([]*atomic.Tx, len(txs)) copy(copyTxs, txs) utils.Sort(copyTxs) txs = copyTxs diff --git a/plugin/evm/atomic/atomic_state.go b/plugin/evm/atomic/state/atomic_state.go similarity index 97% rename from plugin/evm/atomic/atomic_state.go rename to plugin/evm/atomic/state/atomic_state.go index 5b64145d62..672c57d7e0 100644 --- a/plugin/evm/atomic/atomic_state.go +++ b/plugin/evm/atomic/state/atomic_state.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package state import ( "fmt" @@ -9,6 +9,7 @@ import ( avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -36,7 +37,7 @@ type atomicState struct { backend *atomicBackend blockHash common.Hash blockHeight uint64 - txs []*Tx + txs []*atomic.Tx atomicOps map[ids.ID]*avalancheatomic.Requests atomicRoot common.Hash } diff --git a/plugin/evm/atomic/atomic_trie.go b/plugin/evm/atomic/state/atomic_trie.go similarity index 87% rename from plugin/evm/atomic/atomic_trie.go rename to plugin/evm/atomic/state/atomic_trie.go index bbb299a391..844a0d5c56 100644 --- a/plugin/evm/atomic/atomic_trie.go +++ b/plugin/evm/atomic/state/atomic_trie.go @@ -1,11 +1,10 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package atomic +package state import ( "fmt" - "time" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" @@ -17,30 +16,28 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/trie/trienode" "github.com/ava-labs/coreth/triedb" + "github.com/ava-labs/coreth/triedb/hashdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) +var _ AtomicTrie = &atomicTrie{} + const ( - progressLogFrequency = 30 * time.Second - atomicKeyLength = wrappers.LongLen + common.HashLength - sharedMemoryApplyBatchSize = 10_000 // specifies the number of atomic operations to batch progress updates + AtomicTrieKeyLength = wrappers.LongLen + common.HashLength atomicTrieTipBufferSize = 1 // No need to support a buffer of previously accepted tries for the atomic trie atomicTrieMemoryCap = 64 * units.MiB ) -var ( - _ AtomicTrie = &atomicTrie{} - lastCommittedKey = []byte("atomicTrieLastCommittedBlock") - appliedSharedMemoryCursorKey = []byte("atomicTrieLastAppliedToSharedMemory") -) +var lastCommittedKey = []byte("atomicTrieLastCommittedBlock") // AtomicTrie maintains an index of atomic operations by blockchainIDs for every block // height containing atomic transactions. 
The backing data structure for this index is @@ -88,33 +85,6 @@ type AtomicTrie interface { RejectTrie(root common.Hash) error } -// AtomicTrieIterator is a stateful iterator that iterates the leafs of an AtomicTrie -type AtomicTrieIterator interface { - // Next advances the iterator to the next node in the atomic trie and - // returns true if there are more leaves to iterate - Next() bool - - // Key returns the current database key that the iterator is iterating - // returned []byte can be freely modified - Key() []byte - - // Value returns the current database value that the iterator is iterating - Value() []byte - - // BlockNumber returns the current block number - BlockNumber() uint64 - - // BlockchainID returns the current blockchain ID at the current block number - BlockchainID() ids.ID - - // AtomicOps returns a map of blockchainIDs to the set of atomic requests - // for that blockchainID at the current block number - AtomicOps() *avalancheatomic.Requests - - // Error returns error, if any encountered during this iteration - Error() error -} - // atomicTrie implements the AtomicTrie interface type atomicTrie struct { commitInterval uint64 // commit interval, same as commitHeightInterval by default @@ -125,12 +95,13 @@ type atomicTrie struct { lastAcceptedRoot common.Hash // most recent trie root passed to accept trie or the root of the atomic trie on intialization. codec codec.Manager memoryCap common.StorageSize - tipBuffer *core.BoundedBuffer[common.Hash] + // TODO: we don't really need this to be imported from core package + tipBuffer *core.BoundedBuffer[common.Hash] } -// newAtomicTrie returns a new instance of a atomicTrie with a configurable commitHeightInterval, used in testing. +// NewAtomicTrie returns a new instance of a atomicTrie with a configurable commitHeightInterval, used in testing. // Initializes the trie before returning it. 
-func newAtomicTrie( +func NewAtomicTrie( atomicTrieDB avalanchedatabase.Database, metadataDB avalanchedatabase.Database, codec codec.Manager, lastAcceptedHeight uint64, commitHeightInterval uint64, ) (*atomicTrie, error) { @@ -224,7 +195,7 @@ func (a *atomicTrie) commit(height uint64, root common.Hash) error { func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { for blockchainID, requests := range atomicOps { - valueBytes, err := a.codec.Marshal(CodecVersion, requests) + valueBytes, err := a.codec.Marshal(atomic.CodecVersion, requests) if err != nil { // highly unlikely but possible if atomic.Element // has a change that is unsupported by the codec @@ -232,7 +203,7 @@ } // key is [height]+[blockchainID] - keyPacker := wrappers.Packer{Bytes: make([]byte, atomicKeyLength)} + keyPacker := wrappers.Packer{Bytes: make([]byte, AtomicTrieKeyLength)} keyPacker.PackLong(height) keyPacker.PackFixedBytes(blockchainID[:]) if err := trie.Update(keyPacker.Bytes, valueBytes); err != nil { @@ -268,7 +239,7 @@ return nil } -// Iterator returns a types.AtomicTrieIterator that iterates the trie from the given +// Iterator returns an AtomicTrieIterator that iterates the trie from the given // atomic trie root, starting at the specified [cursor].
func (a *atomicTrie) Iterator(root common.Hash, cursor []byte) (AtomicTrieIterator, error) { t, err := trie.New(trie.TrieID(root), a.trieDB) diff --git a/plugin/evm/atomic/atomic_trie_iterator.go b/plugin/evm/atomic/state/atomic_trie_iterator.go similarity index 76% rename from plugin/evm/atomic/atomic_trie_iterator.go rename to plugin/evm/atomic/state/atomic_trie_iterator.go index 20be76416e..719ff10941 100644 --- a/plugin/evm/atomic/atomic_trie_iterator.go +++ b/plugin/evm/atomic/state/atomic_trie_iterator.go @@ -1,22 +1,49 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package state import ( "encoding/binary" "fmt" "github.com/ava-labs/avalanchego/chains/atomic" + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/trie" - "github.com/ethereum/go-ethereum/common" ) -const atomicTrieKeyLen = wrappers.LongLen + common.HashLength +var _ AtomicTrieIterator = &atomicTrieIterator{} + +// AtomicTrieIterator is a stateful iterator that iterates the leafs of an AtomicTrie +type AtomicTrieIterator interface { + // Next advances the iterator to the next node in the atomic trie and + // returns true if there are more leaves to iterate + Next() bool + + // Key returns the current database key that the iterator is iterating + // returned []byte can be freely modified + Key() []byte + + // Value returns the current database value that the iterator is iterating + Value() []byte + + // BlockNumber returns the current block number + BlockNumber() uint64 + + // BlockchainID returns the current blockchain ID at the current block number + BlockchainID() ids.ID + + // AtomicOps returns a map of blockchainIDs to the set of atomic requests + // for that blockchainID at the current block number + AtomicOps() *avalancheatomic.Requests + + // Error 
returns error, if any encountered during this iteration + Error() error +} // atomicTrieIterator is an implementation of types.AtomicTrieIterator that serves // parsed data with each iteration @@ -30,7 +57,7 @@ type atomicTrieIterator struct { err error // error if any has occurred } -func NewAtomicTrieIterator(trieIterator *trie.Iterator, codec codec.Manager) AtomicTrieIterator { +func NewAtomicTrieIterator(trieIterator *trie.Iterator, codec codec.Manager) *atomicTrieIterator { return &atomicTrieIterator{trieIterator: trieIterator, codec: codec} } @@ -55,8 +82,8 @@ func (a *atomicTrieIterator) Next() bool { keyLen := len(a.trieIterator.Key) // If the key has an unexpected length, set the error and stop the iteration since the data is // no longer reliable. - if keyLen != atomicTrieKeyLen { - a.resetFields(fmt.Errorf("expected atomic trie key length to be %d but was %d", atomicTrieKeyLen, keyLen)) + if keyLen != AtomicTrieKeyLength { + a.resetFields(fmt.Errorf("expected atomic trie key length to be %d but was %d", AtomicTrieKeyLength, keyLen)) return false } diff --git a/plugin/evm/atomic/atomic_trie_iterator_test.go b/plugin/evm/atomic/state/atomic_trie_iterator_test.go similarity index 88% rename from plugin/evm/atomic/atomic_trie_iterator_test.go rename to plugin/evm/atomic/state/atomic_trie_iterator_test.go index d2810f14eb..71aa2f6dd7 100644 --- a/plugin/evm/atomic/atomic_trie_iterator_test.go +++ b/plugin/evm/atomic/state/atomic_trie_iterator_test.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package atomic +package state import ( "testing" @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" avalancheutils "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" @@ -20,7 +21,7 @@ import ( func TestIteratorCanIterate(t *testing.T) { lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, lastAcceptedHeight) assert.NoError(t, err) // create state with multiple transactions @@ -42,7 +43,7 @@ func TestIteratorCanIterate(t *testing.T) { assert.NotEqual(t, common.Hash{}, lastCommittedHash1) assert.EqualValues(t, 1000, lastCommittedHeight1) - verifyOperations(t, atomicTrie1, testTxCodec, lastCommittedHash1, 1, 1000, operationsMap) + verifyOperations(t, atomicTrie1, atomictest.TestTxCodec, lastCommittedHash1, 1, 1000, operationsMap) // iterate on a new atomic trie to make sure there is no resident state affecting the data and the // iterator @@ -54,14 +55,14 @@ func TestIteratorCanIterate(t *testing.T) { assert.NotEqual(t, common.Hash{}, lastCommittedHash2) assert.EqualValues(t, 1000, lastCommittedHeight2) - verifyOperations(t, atomicTrie2, testTxCodec, lastCommittedHash1, 1, 1000, operationsMap) + verifyOperations(t, atomicTrie2, atomictest.TestTxCodec, lastCommittedHash1, 1, 1000, operationsMap) } func TestIteratorHandlesInvalidData(t *testing.T) { require := require.New(t) lastAcceptedHeight := uint64(1000) db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, lastAcceptedHeight) require.NoError(err) // create state with multiple transactions @@ -84,7 +85,7 @@ func 
TestIteratorHandlesInvalidData(t *testing.T) { require.NotEqual(common.Hash{}, lastCommittedHash) require.EqualValues(1000, lastCommittedHeight) - verifyOperations(t, atomicTrie, testTxCodec, lastCommittedHash, 1, 1000, operationsMap) + verifyOperations(t, atomicTrie, atomictest.TestTxCodec, lastCommittedHash, 1, 1000, operationsMap) // Add a random key-value pair to the atomic trie in order to test that the iterator correctly // handles an error when it runs into an unexpected key-value pair in the trie. diff --git a/plugin/evm/atomic/atomic_trie_test.go b/plugin/evm/atomic/state/atomic_trie_test.go similarity index 80% rename from plugin/evm/atomic/atomic_trie_test.go rename to plugin/evm/atomic/state/atomic_trie_test.go index 9e29c6aa83..281dd4c0a0 100644 --- a/plugin/evm/atomic/atomic_trie_test.go +++ b/plugin/evm/atomic/state/atomic_trie_test.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package state import ( "encoding/binary" @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/assert" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" @@ -19,6 +20,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" @@ -131,7 +134,7 @@ func TestAtomicTrieInitialize(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec, test.lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, 
test.lastAcceptedHeight) if err != nil { t.Fatal(err) } @@ -152,7 +155,7 @@ func TestAtomicTrieInitialize(t *testing.T) { } // Verify the operations up to the expected commit height - verifyOperations(t, atomicTrie1, testTxCodec, rootHash1, 1, test.expectedCommitHeight, operationsMap) + verifyOperations(t, atomicTrie1, atomictest.TestTxCodec, rootHash1, 1, test.expectedCommitHeight, operationsMap) // Construct the atomic trie again (on the same database) and ensure the last accepted root is correct. atomicBackend2, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) @@ -179,7 +182,7 @@ func TestAtomicTrieInitialize(t *testing.T) { // during the initialization phase will cause an invalid root when indexing continues. nextCommitHeight := nearestCommitHeight(test.lastAcceptedHeight+test.commitInterval, test.commitInterval) for i := test.lastAcceptedHeight + 1; i <= nextCommitHeight; i++ { - txs := NewTestTxs(test.numTxsPerBlock(i)) + txs := atomictest.NewTestTxs(test.numTxsPerBlock(i)) if err := repo.Write(i, txs); err != nil { t.Fatal(err) } @@ -198,7 +201,7 @@ func TestAtomicTrieInitialize(t *testing.T) { assert.NotEqual(t, common.Hash{}, updatedRoot) // Verify the operations up to the new expected commit height - verifyOperations(t, atomicTrie1, testTxCodec, updatedRoot, 1, updatedLastCommitHeight, operationsMap) + verifyOperations(t, atomicTrie1, atomictest.TestTxCodec, updatedRoot, 1, updatedLastCommitHeight, operationsMap) // Generate a new atomic trie to compare the root against. 
atomicBackend4, err := NewAtomicBackend( @@ -219,7 +222,7 @@ func TestAtomicTrieInitialize(t *testing.T) { func TestIndexerInitializesOnlyOnce(t *testing.T) { lastAcceptedHeight := uint64(25) db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) @@ -237,7 +240,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { // re-initialize the atomic trie since initialize is not supposed to run again the height // at the trie should still be the old height with the old commit hash without any changes. // This scenario is not realistic, but is used to test potential double initialization behavior. - err = repo.Write(15, []*Tx{GenerateTestExportTx()}) + err = repo.Write(15, []*atomic.Tx{atomictest.GenerateTestExportTx()}) assert.NoError(t, err) // Re-initialize the atomic trie @@ -252,7 +255,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { func newTestAtomicTrie(t *testing.T) AtomicTrie { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec, 0) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, 0) if err != nil { t.Fatal(err) } @@ -272,7 +275,7 @@ func TestIndexerWriteAndRead(t *testing.T) { // process 305 blocks so that we get three commits (100, 200, 300) for height := uint64(1); height <= testCommitInterval*3+5; /*=305*/ height++ { - atomicRequests, err := ConvertToAtomicOps(GenerateTestImportTx()) + atomicRequests, err := atomictest.ConvertToAtomicOps(atomictest.GenerateTestImportTx()) assert.NoError(t, err) err = indexAtomicTxs(atomicTrie, height, atomicRequests) assert.NoError(t, err) @@ -304,11 +307,11 @@ func TestAtomicOpsAreNotTxOrderDependent(t *testing.T) { atomicTrie2 := newTestAtomicTrie(t) for 
height := uint64(0); height <= testCommitInterval; /*=205*/ height++ { - tx1 := GenerateTestImportTx() - tx2 := GenerateTestImportTx() - atomicRequests1, err := mergeAtomicOps([]*Tx{tx1, tx2}) + tx1 := atomictest.GenerateTestImportTx() + tx2 := atomictest.GenerateTestImportTx() + atomicRequests1, err := mergeAtomicOps([]*atomic.Tx{tx1, tx2}) assert.NoError(t, err) - atomicRequests2, err := mergeAtomicOps([]*Tx{tx2, tx1}) + atomicRequests2, err := mergeAtomicOps([]*atomic.Tx{tx2, tx1}) assert.NoError(t, err) err = indexAtomicTxs(atomicTrie1, height, atomicRequests1) @@ -330,7 +333,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { commitInterval := uint64(10) expectedCommitHeight := uint64(100) db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, lastAcceptedHeight) if err != nil { t.Fatal(err) } @@ -354,14 +357,14 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { assert.NotEqual(t, common.Hash{}, rootHash) // Verify the operations are as expected - verifyOperations(t, atomicTrie, testTxCodec, rootHash, 1, expectedCommitHeight, operationsMap) + verifyOperations(t, atomicTrie, atomictest.TestTxCodec, rootHash, 1, expectedCommitHeight, operationsMap) } func TestIndexingNilShouldNotImpactTrie(t *testing.T) { // operations to index ops := make([]map[ids.ID]*avalancheatomic.Requests, 0) for i := 0; i <= testCommitInterval; i++ { - atomicOps, err := ConvertToAtomicOps(GenerateTestImportTx()) + atomicOps, err := atomictest.ConvertToAtomicOps(atomictest.GenerateTestImportTx()) assert.NoError(t, err) ops = append(ops, atomicOps) } @@ -434,9 +437,9 @@ func TestApplyToSharedMemory(t *testing.T) { commitInterval: 10, lastAcceptedHeight: 25, setMarker: func(a *atomicBackend) error { - cursor := make([]byte, wrappers.LongLen+len(TestBlockchainID[:])) + cursor := make([]byte, wrappers.LongLen+len(atomictest.TestBlockchainID[:])) 
binary.BigEndian.PutUint64(cursor, 10) - copy(cursor[wrappers.LongLen:], TestBlockchainID[:]) + copy(cursor[wrappers.LongLen:], atomictest.TestBlockchainID[:]) return a.metadataDB.Put(appliedSharedMemoryCursorKey, cursor) }, expectOpsApplied: func(height uint64) bool { return height > 10 && height <= 20 }, @@ -450,15 +453,15 @@ func TestApplyToSharedMemory(t *testing.T) { } { t.Run(name, func(t *testing.T) { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec, test.lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, test.lastAcceptedHeight) assert.NoError(t, err) operationsMap := make(map[uint64]map[ids.ID]*avalancheatomic.Requests) writeTxs(t, repo, 1, test.lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository m := avalancheatomic.NewMemory(db) - sharedMemories := NewSharedMemories(m, ids.GenerateTestID(), TestBlockchainID) - backend, err := NewAtomicBackend(db, sharedMemories.thisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + sharedMemories := atomictest.NewSharedMemories(m, ids.GenerateTestID(), atomictest.TestBlockchainID) + backend, err := NewAtomicBackend(db, sharedMemories.ThisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) atomicTrie := backend.AtomicTrie().(*atomicTrie) @@ -468,12 +471,12 @@ func TestApplyToSharedMemory(t *testing.T) { // prepare peer chain's shared memory by applying items we expect to remove as puts for _, ops := range operationsMap { - if err := sharedMemories.addItemsToBeRemovedToPeerChain(ops); err != nil { + if err := sharedMemories.AddItemsToBeRemovedToPeerChain(ops); err != nil { t.Fatal(err) } } - assert.NoError(t, test.setMarker(backend.(*atomicBackend))) + assert.NoError(t, test.setMarker(backend)) assert.NoError(t, db.Commit()) assert.NoError(t, 
backend.ApplyToSharedMemory(test.lastAcceptedHeight)) @@ -482,7 +485,7 @@ func TestApplyToSharedMemory(t *testing.T) { if test.expectOpsApplied(height) { sharedMemories.AssertOpsApplied(t, ops) } else { - sharedMemories.assertOpsNotApplied(t, ops) + sharedMemories.AssertOpsNotApplied(t, ops) } } @@ -492,7 +495,7 @@ func TestApplyToSharedMemory(t *testing.T) { assert.False(t, hasMarker) // reinitialize the atomic trie backend, err = NewAtomicBackend( - db, sharedMemories.thisChain, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, + db, sharedMemories.ThisChain, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, ) assert.NoError(t, err) // no further changes should have occurred in shared memory @@ -501,7 +504,7 @@ func TestApplyToSharedMemory(t *testing.T) { if test.expectOpsApplied(height) { sharedMemories.AssertOpsApplied(t, ops) } else { - sharedMemories.assertOpsNotApplied(t, ops) + sharedMemories.AssertOpsNotApplied(t, ops) } } @@ -520,7 +523,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { lastAcceptedHeight := uint64(25000) // add 25000 * 3 = 75000 transactions - repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, lastAcceptedHeight) assert.NoError(b, err) writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) @@ -544,7 +547,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { b.StopTimer() // Verify operations - verifyOperations(b, atomicTrie, testTxCodec, hash, 1, lastAcceptedHeight, operationsMap) + verifyOperations(b, atomicTrie, atomictest.TestTxCodec, hash, 1, lastAcceptedHeight, operationsMap) } func BenchmarkAtomicTrieIterate(b *testing.B) { @@ -554,7 +557,7 @@ func BenchmarkAtomicTrieIterate(b *testing.B) { lastAcceptedHeight := uint64(25_000) // add 25000 * 3 = 75000 transactions - repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, 
atomictest.TestTxCodec, lastAcceptedHeight) assert.NoError(b, err) writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) @@ -630,7 +633,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u sharedMemory := utils.TestSnowContext().SharedMemory lastAcceptedHeight := blocks - repo, err := NewAtomicTxRepository(db, testTxCodec, lastAcceptedHeight) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, lastAcceptedHeight) assert.NoError(b, err) backend, err := NewAtomicBackend(db, sharedMemory, nil, repo, 0, common.Hash{}, 5000) @@ -639,7 +642,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u } trie := backend.AtomicTrie() for height := uint64(1); height <= lastAcceptedHeight; height++ { - txs := NewTestTxs(constTxsPerHeight(3)(height)) + txs := atomictest.NewTestTxs(constTxsPerHeight(3)(height)) ops, err := mergeAtomicOps(txs) assert.NoError(b, err) assert.NoError(b, indexAtomicTxs(trie, height, ops)) @@ -652,9 +655,76 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - backend.(*atomicBackend).sharedMemory = utils.TestSnowContext().SharedMemory + backend.sharedMemory = utils.TestSnowContext().SharedMemory assert.NoError(b, backend.MarkApplyToSharedMemoryCursor(0)) assert.NoError(b, db.Commit()) assert.NoError(b, backend.ApplyToSharedMemory(lastAcceptedHeight)) } } + +// verifyOperations creates an iterator over the atomicTrie at [rootHash] and verifies that all of the operations in the trie in the interval [from, to] are identical to +// the atomic operations contained in [operationsMap] on the same interval.
+func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { + t.Helper() + + // Start the iterator at [from] + fromBytes := make([]byte, wrappers.LongLen) + binary.BigEndian.PutUint64(fromBytes, from) + iter, err := atomicTrie.Iterator(rootHash, fromBytes) + if err != nil { + t.Fatal(err) + } + + // Generate map of the marshalled atomic operations on the interval [from, to] + // based on [operationsMap]. + marshalledOperationsMap := make(map[uint64]map[ids.ID][]byte) + for height, blockRequests := range operationsMap { + if height < from || height > to { + continue + } + for blockchainID, atomicRequests := range blockRequests { + b, err := codec.Marshal(0, atomicRequests) + if err != nil { + t.Fatal(err) + } + if requestsMap, exists := marshalledOperationsMap[height]; exists { + requestsMap[blockchainID] = b + } else { + requestsMap = make(map[ids.ID][]byte) + requestsMap[blockchainID] = b + marshalledOperationsMap[height] = requestsMap + } + } + } + + // Generate map of marshalled atomic operations on the interval [from, to] + // based on the contents of the trie. 
+ iteratorMarshalledOperationsMap := make(map[uint64]map[ids.ID][]byte) + for iter.Next() { + height := iter.BlockNumber() + if height < from { + t.Fatalf("Iterator starting at (%d) found value at block height (%d)", from, height) + } + if height > to { + continue + } + + blockchainID := iter.BlockchainID() + b, err := codec.Marshal(0, iter.AtomicOps()) + if err != nil { + t.Fatal(err) + } + if requestsMap, exists := iteratorMarshalledOperationsMap[height]; exists { + requestsMap[blockchainID] = b + } else { + requestsMap = make(map[ids.ID][]byte) + requestsMap[blockchainID] = b + iteratorMarshalledOperationsMap[height] = requestsMap + } + } + if err := iter.Error(); err != nil { + t.Fatal(err) + } + + assert.Equal(t, marshalledOperationsMap, iteratorMarshalledOperationsMap) +} diff --git a/plugin/evm/atomic/atomic_tx_repository.go b/plugin/evm/atomic/state/atomic_tx_repository.go similarity index 91% rename from plugin/evm/atomic/atomic_tx_repository.go rename to plugin/evm/atomic/state/atomic_tx_repository.go index d451ce9d86..e9dc5363e3 100644 --- a/plugin/evm/atomic/atomic_tx_repository.go +++ b/plugin/evm/atomic/state/atomic_tx_repository.go @@ -1,7 +1,7 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package atomic +package state import ( "encoding/binary" @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) const ( @@ -39,10 +40,10 @@ var ( // atomic transactions type AtomicTxRepository interface { GetIndexHeight() (uint64, error) - GetByTxID(txID ids.ID) (*Tx, uint64, error) - GetByHeight(height uint64) ([]*Tx, error) - Write(height uint64, txs []*Tx) error - WriteBonus(height uint64, txs []*Tx) error + GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) + GetByHeight(height uint64) ([]*atomic.Tx, error) + Write(height uint64, txs []*atomic.Tx) error + WriteBonus(height uint64, txs []*atomic.Tx) error IterateByHeight(start uint64) database.Iterator Codec() codec.Manager @@ -136,7 +137,7 @@ func (a *atomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) er // Get the tx iter is pointing to, len(txs) == 1 is expected here. txBytes := iterValue[wrappers.LongLen+wrappers.IntLen:] - tx, err := ExtractAtomicTx(txBytes, a.codec) + tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) if err != nil { return err } @@ -198,10 +199,10 @@ func (a *atomicTxRepository) GetIndexHeight() (uint64, error) { return indexHeight, nil } -// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*Tx] object +// GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*atomic.Tx] object // if an entry is found, and returns it with the block height the atomic tx it // represents was accepted on, along with an optional error. 
-func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { +func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) { indexedTxBytes, err := a.acceptedAtomicTxDB.Get(txID[:]) if err != nil { return nil, 0, err @@ -215,7 +216,7 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { packer := wrappers.Packer{Bytes: indexedTxBytes} height := packer.UnpackLong() txBytes := packer.UnpackBytes() - tx, err := ExtractAtomicTx(txBytes, a.codec) + tx, err := atomic.ExtractAtomicTx(txBytes, a.codec) if err != nil { return nil, 0, err } @@ -229,40 +230,40 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*Tx, uint64, error) { // no atomic transactions in the block accepted at [height]. // If [height] is greater than the last accepted height, then this will always return // [database.ErrNotFound] -func (a *atomicTxRepository) GetByHeight(height uint64) ([]*Tx, error) { +func (a *atomicTxRepository) GetByHeight(height uint64) ([]*atomic.Tx, error) { heightBytes := make([]byte, wrappers.LongLen) binary.BigEndian.PutUint64(heightBytes, height) return a.getByHeightBytes(heightBytes) } -func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*Tx, error) { +func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, error) { txsBytes, err := a.acceptedAtomicTxByHeightDB.Get(heightBytes) if err != nil { return nil, err } - return ExtractAtomicTxsBatch(txsBytes, a.codec) + return atomic.ExtractAtomicTxsBatch(txsBytes, a.codec) } // Write updates indexes maintained on atomic txs, so they can be queried // by txID or height. This method must be called only once per height, // and [txs] must include all atomic txs for the block accepted at the // corresponding height. 
-func (a *atomicTxRepository) Write(height uint64, txs []*Tx) error { +func (a *atomicTxRepository) Write(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, false) } // WriteBonus is similar to Write, except the [txID] => [height] is not // overwritten if already exists. -func (a *atomicTxRepository) WriteBonus(height uint64, txs []*Tx) error { +func (a *atomicTxRepository) WriteBonus(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, true) } -func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error { +func (a *atomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) error { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. - copyTxs := make([]*Tx, len(txs)) + copyTxs := make([]*atomic.Tx, len(txs)) copy(copyTxs, txs) utils.Sort(copyTxs) txs = copyTxs @@ -300,8 +301,8 @@ func (a *atomicTxRepository) write(height uint64, txs []*Tx, bonus bool) error { // indexTxByID writes [tx] into the [acceptedAtomicTxDB] stored as // [height] + [tx bytes] -func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *Tx) error { - txBytes, err := a.codec.Marshal(CodecVersion, tx) +func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) error { + txBytes, err := a.codec.Marshal(atomic.CodecVersion, tx) if err != nil { return err } @@ -320,8 +321,8 @@ func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *Tx) error { } // indexTxsAtHeight adds [height] -> [txs] to the [acceptedAtomicTxByHeightDB] -func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*Tx) error { - txsBytes, err := a.codec.Marshal(CodecVersion, txs) +func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic.Tx) error { + txsBytes, err := a.codec.Marshal(atomic.CodecVersion, txs) if err != nil { return err } @@ -335,7 +336,7 @@ func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs 
[]*Tx) err // [tx] to the slice of transactions stored there. // This function is used while initializing the atomic repository to re-index the atomic transactions // by txID into the height -> txs index. -func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *Tx) error { +func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomic.Tx) error { txs, err := a.getByHeightBytes(heightBytes) if err != nil && err != database.ErrNotFound { return err diff --git a/plugin/evm/atomic/atomic_tx_repository_test.go b/plugin/evm/atomic/state/atomic_tx_repository_test.go similarity index 63% rename from plugin/evm/atomic/atomic_tx_repository_test.go rename to plugin/evm/atomic/state/atomic_tx_repository_test.go index fcde4f01dd..47f30a5acf 100644 --- a/plugin/evm/atomic/atomic_tx_repository_test.go +++ b/plugin/evm/atomic/state/atomic_tx_repository_test.go @@ -1,10 +1,9 @@ // (c) 2020-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package state import ( - "encoding/binary" "testing" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" @@ -12,7 +11,8 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/utils" - "github.com/ethereum/go-ethereum/common" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/utils/set" @@ -27,13 +27,13 @@ import ( // addTxs writes [txsPerHeight] txs for heights ranging in [fromHeight, toHeight) directly to [acceptedAtomicTxDB], // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
-func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { +func addTxs(t testing.TB, codec codec.Manager, acceptedAtomicTxDB database.Database, fromHeight uint64, toHeight uint64, txsPerHeight int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { for height := fromHeight; height < toHeight; height++ { - txs := make([]*Tx, 0, txsPerHeight) + txs := make([]*atomic.Tx, 0, txsPerHeight) for i := 0; i < txsPerHeight; i++ { - tx := NewTestTx() + tx := atomictest.NewTestTx() txs = append(txs, tx) - txBytes, err := codec.Marshal(CodecVersion, tx) + txBytes, err := codec.Marshal(atomic.CodecVersion, tx) assert.NoError(t, err) // Write atomic transactions to the [acceptedAtomicTxDB] @@ -70,10 +70,10 @@ func constTxsPerHeight(txCount int) func(uint64) int { // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight uint64, - txsPerHeight func(height uint64) int, txMap map[uint64][]*Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, + txsPerHeight func(height uint64) int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, ) { for height := fromHeight; height < toHeight; height++ { - txs := NewTestTxs(txsPerHeight(height)) + txs := atomictest.NewTestTxs(txsPerHeight(height)) if err := repo.Write(height, txs); err != nil { t.Fatal(err) } @@ -95,7 +95,7 @@ func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight } // verifyTxs asserts [repo] can find all txs in [txMap] by height and txID -func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) { +func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*atomic.Tx) { // We should be able to fetch indexed txs by height: for height, expectedTxs := range txMap { txs, err := repo.GetByHeight(height) @@ -113,80 +113,13 @@ func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) { } } -// verifyOperations creates an iterator over the atomicTrie at [rootHash] and verifies that the all of the operations in the trie in the interval [from, to] are identical to -// the atomic operations contained in [operationsMap] on the same interval. -func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { - t.Helper() - - // Start the iterator at [from] - fromBytes := make([]byte, wrappers.LongLen) - binary.BigEndian.PutUint64(fromBytes, from) - iter, err := atomicTrie.Iterator(rootHash, fromBytes) - if err != nil { - t.Fatal(err) - } - - // Generate map of the marshalled atomic operations on the interval [from, to] - // based on [operationsMap]. 
- marshalledOperationsMap := make(map[uint64]map[ids.ID][]byte) - for height, blockRequests := range operationsMap { - if height < from || height > to { - continue - } - for blockchainID, atomicRequests := range blockRequests { - b, err := codec.Marshal(0, atomicRequests) - if err != nil { - t.Fatal(err) - } - if requestsMap, exists := marshalledOperationsMap[height]; exists { - requestsMap[blockchainID] = b - } else { - requestsMap = make(map[ids.ID][]byte) - requestsMap[blockchainID] = b - marshalledOperationsMap[height] = requestsMap - } - } - } - - // Generate map of marshalled atomic operations on the interval [from, to] - // based on the contents of the trie. - iteratorMarshalledOperationsMap := make(map[uint64]map[ids.ID][]byte) - for iter.Next() { - height := iter.BlockNumber() - if height < from { - t.Fatalf("Iterator starting at (%d) found value at block height (%d)", from, height) - } - if height > to { - continue - } - - blockchainID := iter.BlockchainID() - b, err := codec.Marshal(0, iter.AtomicOps()) - if err != nil { - t.Fatal(err) - } - if requestsMap, exists := iteratorMarshalledOperationsMap[height]; exists { - requestsMap[blockchainID] = b - } else { - requestsMap = make(map[ids.ID][]byte) - requestsMap[blockchainID] = b - iteratorMarshalledOperationsMap[height] = requestsMap - } - } - if err := iter.Error(); err != nil { - t.Fatal(err) - } - - assert.Equal(t, marshalledOperationsMap, iteratorMarshalledOperationsMap) -} - func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec, 0) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, 0) if err != nil { t.Fatal(err) } - txMap := make(map[uint64][]*Tx) + txMap := make(map[uint64][]*atomic.Tx) writeTxs(t, repo, 1, 100, constTxsPerHeight(1), txMap, nil) verifyTxs(t, repo, txMap) @@ -194,11 +127,11 @@ func TestAtomicRepositoryReadWriteSingleTx(t *testing.T) { func 
TestAtomicRepositoryReadWriteMultipleTxs(t *testing.T) { db := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(db, testTxCodec, 0) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, 0) if err != nil { t.Fatal(err) } - txMap := make(map[uint64][]*Tx) + txMap := make(map[uint64][]*atomic.Tx) writeTxs(t, repo, 1, 100, constTxsPerHeight(10), txMap, nil) verifyTxs(t, repo, txMap) @@ -208,15 +141,15 @@ func TestAtomicRepositoryPreAP5Migration(t *testing.T) { db := versiondb.New(memdb.New()) acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*Tx) - addTxs(t, testTxCodec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) + txMap := make(map[uint64][]*atomic.Tx) + addTxs(t, atomictest.TestTxCodec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) if err := db.Commit(); err != nil { t.Fatal(err) } // Ensure the atomic repository can correctly migrate the transactions // from the old accepted atomic tx DB to add the height index. - repo, err := NewAtomicTxRepository(db, testTxCodec, 100) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, 100) if err != nil { t.Fatal(err) } @@ -232,16 +165,16 @@ func TestAtomicRepositoryPostAP5Migration(t *testing.T) { db := versiondb.New(memdb.New()) acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*Tx) - addTxs(t, testTxCodec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) - addTxs(t, testTxCodec, acceptedAtomicTxDB, 100, 200, 10, txMap, nil) + txMap := make(map[uint64][]*atomic.Tx) + addTxs(t, atomictest.TestTxCodec, acceptedAtomicTxDB, 1, 100, 1, txMap, nil) + addTxs(t, atomictest.TestTxCodec, acceptedAtomicTxDB, 100, 200, 10, txMap, nil) if err := db.Commit(); err != nil { t.Fatal(err) } // Ensure the atomic repository can correctly migrate the transactions // from the old accepted atomic tx DB to add the height index. 
- repo, err := NewAtomicTxRepository(db, testTxCodec, 200) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, 200) if err != nil { t.Fatal(err) } @@ -256,13 +189,13 @@ func benchAtomicRepositoryIndex10_000(b *testing.B, maxHeight uint64, txsPerHeig db := versiondb.New(memdb.New()) acceptedAtomicTxDB := prefixdb.New(atomicTxIDDBPrefix, db) - txMap := make(map[uint64][]*Tx) + txMap := make(map[uint64][]*atomic.Tx) - addTxs(b, testTxCodec, acceptedAtomicTxDB, 0, maxHeight, txsPerHeight, txMap, nil) + addTxs(b, atomictest.TestTxCodec, acceptedAtomicTxDB, 0, maxHeight, txsPerHeight, txMap, nil) if err := db.Commit(); err != nil { b.Fatal(err) } - repo, err := NewAtomicTxRepository(db, testTxCodec, maxHeight) + repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, maxHeight) if err != nil { b.Fatal(err) } diff --git a/plugin/evm/atomic/atomic_syncer.go b/plugin/evm/atomic/sync/atomic_syncer.go similarity index 93% rename from plugin/evm/atomic/atomic_syncer.go rename to plugin/evm/atomic/sync/atomic_syncer.go index 52a8376319..0580c5399f 100644 --- a/plugin/evm/atomic/atomic_syncer.go +++ b/plugin/evm/atomic/sync/atomic_syncer.go @@ -1,7 +1,7 @@ // (c) 2019-2022, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package sync import ( "bytes" @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" + "github.com/ava-labs/coreth/plugin/evm/atomic/state" "github.com/ava-labs/coreth/plugin/evm/message" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/trie" @@ -38,7 +39,7 @@ type Syncer interface { // the state of progress and writing the actual atomic trie to the trieDB. 
type atomicSyncer struct { db *versiondb.Database - atomicTrie AtomicTrie + atomicTrie state.AtomicTrie trie *trie.Trie // used to update the atomic trie targetRoot common.Hash targetHeight uint64 @@ -53,14 +54,13 @@ type atomicSyncer struct { // addZeros adds [common.HashLenth] zeros to [height] and returns the result as []byte func addZeroes(height uint64) []byte { - packer := wrappers.Packer{Bytes: make([]byte, atomicKeyLength)} + packer := wrappers.Packer{Bytes: make([]byte, state.AtomicTrieKeyLength)} packer.PackLong(height) packer.PackFixedBytes(bytes.Repeat([]byte{0x00}, common.HashLength)) return packer.Bytes } -func newAtomicSyncer(client syncclient.LeafClient, atomicBackend *atomicBackend, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { - atomicTrie := atomicBackend.AtomicTrie() +func NewAtomicSyncer(client syncclient.LeafClient, vdb *versiondb.Database, atomicTrie state.AtomicTrie, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { lastCommittedRoot, lastCommit := atomicTrie.LastCommitted() trie, err := atomicTrie.OpenTrie(lastCommittedRoot) if err != nil { @@ -68,7 +68,7 @@ func newAtomicSyncer(client syncclient.LeafClient, atomicBackend *atomicBackend, } atomicSyncer := &atomicSyncer{ - db: atomicBackend.db, + db: vdb, atomicTrie: atomicTrie, trie: trie, targetRoot: targetRoot, @@ -91,7 +91,7 @@ func (s *atomicSyncer) Start(ctx context.Context) error { // onLeafs is the callback for the leaf syncer, which will insert the key-value pairs into the trie. 
func (s *atomicSyncer) onLeafs(keys [][]byte, values [][]byte) error { for i, key := range keys { - if len(key) != atomicKeyLength { + if len(key) != state.AtomicTrieKeyLength { return fmt.Errorf("unexpected key len (%d) in atomic trie sync", len(key)) } // key = height + blockchainID diff --git a/plugin/evm/atomic/atomic_syncer_test.go b/plugin/evm/atomic/sync/atomic_syncer_test.go similarity index 90% rename from plugin/evm/atomic/atomic_syncer_test.go rename to plugin/evm/atomic/sync/atomic_syncer_test.go index 6ff0da417a..533e2daaa2 100644 --- a/plugin/evm/atomic/atomic_syncer_test.go +++ b/plugin/evm/atomic/sync/atomic_syncer_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package sync import ( "bytes" @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/plugin/evm/atomic/state" "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/plugin/evm/message" syncclient "github.com/ava-labs/coreth/sync/client" @@ -53,11 +54,11 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight ) clientDB := versiondb.New(memdb.New()) - repo, err := NewAtomicTxRepository(clientDB, message.Codec, 0) + repo, err := state.NewAtomicTxRepository(clientDB, message.Codec, 0) if err != nil { t.Fatal("could not initialize atomix tx repository", err) } - atomicBackend, err := NewAtomicBackend(clientDB, utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, commitInterval) + atomicBackend, err := state.NewAtomicBackend(clientDB, utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, commitInterval) if err != nil { t.Fatal("could not initialize atomic backend", err) } @@ -66,7 +67,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight // next trie. 
for i, checkpoint := range checkpoints { // Create syncer targeting the current [syncTrie]. - syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, config.DefaultStateSyncRequestSize) + syncer, err := NewAtomicSyncer(mockClient, clientDB, atomicBackend.AtomicTrie(), targetRoot, targetHeight, config.DefaultStateSyncRequestSize) if err != nil { t.Fatal(err) } @@ -93,7 +94,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight } // Create syncer targeting the current [targetRoot]. - syncer, err := atomicBackend.Syncer(mockClient, targetRoot, targetHeight, config.DefaultStateSyncRequestSize) + syncer, err := NewAtomicSyncer(mockClient, clientDB, atomicBackend.AtomicTrie(), targetRoot, targetHeight, config.DefaultStateSyncRequestSize) if err != nil { t.Fatal(err) } @@ -157,7 +158,7 @@ func TestAtomicSyncer(t *testing.T) { rand.Seed(1) targetHeight := 10 * uint64(commitInterval) serverTrieDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil) - root, _, _ := syncutils.GenerateTrie(t, serverTrieDB, int(targetHeight), atomicKeyLength) + root, _, _ := syncutils.GenerateTrie(t, serverTrieDB, int(targetHeight), state.AtomicTrieKeyLength) testAtomicSyncer(t, serverTrieDB, targetHeight, root, nil, int64(targetHeight)) } @@ -167,7 +168,7 @@ func TestAtomicSyncerResume(t *testing.T) { targetHeight := 10 * uint64(commitInterval) serverTrieDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil) numTrieKeys := int(targetHeight) - 1 // no atomic ops for genesis - root, _, _ := syncutils.GenerateTrie(t, serverTrieDB, numTrieKeys, atomicKeyLength) + root, _, _ := syncutils.GenerateTrie(t, serverTrieDB, numTrieKeys, state.AtomicTrieKeyLength) testAtomicSyncer(t, serverTrieDB, targetHeight, root, []atomicSyncTestCheckpoint{ { @@ -184,12 +185,12 @@ func TestAtomicSyncerResumeNewRootCheckpoint(t *testing.T) { targetHeight1 := 10 * uint64(commitInterval) serverTrieDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil) numTrieKeys1 := 
int(targetHeight1) - 1 // no atomic ops for genesis - root1, _, _ := syncutils.GenerateTrie(t, serverTrieDB, numTrieKeys1, atomicKeyLength) + root1, _, _ := syncutils.GenerateTrie(t, serverTrieDB, numTrieKeys1, state.AtomicTrieKeyLength) targetHeight2 := 20 * uint64(commitInterval) numTrieKeys2 := int(targetHeight2) - 1 // no atomic ops for genesis root2, _, _ := syncutils.FillTrie( - t, numTrieKeys1, numTrieKeys2, atomicKeyLength, serverTrieDB, root1, + t, numTrieKeys1, numTrieKeys2, state.AtomicTrieKeyLength, serverTrieDB, root1, ) testAtomicSyncer(t, serverTrieDB, targetHeight1, root1, []atomicSyncTestCheckpoint{ diff --git a/plugin/evm/atomic/mempool.go b/plugin/evm/atomic/txpool/mempool.go similarity index 89% rename from plugin/evm/atomic/mempool.go rename to plugin/evm/atomic/txpool/mempool.go index f74eacda7e..df5bb22ec6 100644 --- a/plugin/evm/atomic/mempool.go +++ b/plugin/evm/atomic/txpool/mempool.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package txpool import ( "errors" @@ -15,6 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ethereum/go-ethereum/log" ) @@ -30,7 +31,7 @@ var ( ErrInsufficientAtomicTxFee = errors.New("atomic tx fee too low for atomic mempool") ErrTooManyAtomicTx = errors.New("too many atomic tx") - _ gossip.Set[*GossipAtomicTx] = (*Mempool)(nil) + _ gossip.Set[*atomic.GossipAtomicTx] = (*Mempool)(nil) ) // mempoolMetrics defines the metrics for the atomic mempool @@ -62,12 +63,12 @@ type Mempool struct { // maxSize is the maximum number of transactions allowed to be kept in mempool maxSize int // currentTxs is the set of transactions about to be added to a block. 
- currentTxs map[ids.ID]*Tx + currentTxs map[ids.ID]*atomic.Tx // issuedTxs is the set of transactions that have been issued into a new block - issuedTxs map[ids.ID]*Tx + issuedTxs map[ids.ID]*atomic.Tx // discardedTxs is an LRU Cache of transactions that have been discarded after failing // verification. - discardedTxs *cache.LRU[ids.ID, *Tx] + discardedTxs *cache.LRU[ids.ID, *atomic.Tx] // Pending is a channel of length one, which the mempool ensures has an item on // it as long as there is an unissued transaction remaining in [txs] Pending chan struct{} @@ -75,17 +76,17 @@ type Mempool struct { // NOTE: [txHeap] ONLY contains pending txs txHeap *txHeap // utxoSpenders maps utxoIDs to the transaction consuming them in the mempool - utxoSpenders map[ids.ID]*Tx + utxoSpenders map[ids.ID]*atomic.Tx // bloom is a bloom filter containing the txs in the mempool bloom *gossip.BloomFilter metrics *mempoolMetrics - verify func(tx *Tx) error + verify func(tx *atomic.Tx) error } // NewMempool returns a Mempool with [maxSize] -func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *Tx) error) (*Mempool, error) { +func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *atomic.Tx) error) (*Mempool, error) { bloom, err := gossip.NewBloomFilter(registerer, "atomic_mempool_bloom_filter", config.TxGossipBloomMinTargetElements, config.TxGossipBloomTargetFalsePositiveRate, @@ -97,13 +98,13 @@ func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int return &Mempool{ ctx: ctx, - issuedTxs: make(map[ids.ID]*Tx), - discardedTxs: &cache.LRU[ids.ID, *Tx]{Size: discardedTxsCacheSize}, - currentTxs: make(map[ids.ID]*Tx), + issuedTxs: make(map[ids.ID]*atomic.Tx), + discardedTxs: &cache.LRU[ids.ID, *atomic.Tx]{Size: discardedTxsCacheSize}, + currentTxs: make(map[ids.ID]*atomic.Tx), Pending: make(chan struct{}, 1), txHeap: newTxHeap(maxSize), maxSize: maxSize, - utxoSpenders: 
make(map[ids.ID]*Tx), + utxoSpenders: make(map[ids.ID]*atomic.Tx), bloom: bloom, metrics: newMempoolMetrics(), verify: verify, @@ -125,7 +126,7 @@ func (m *Mempool) length() int { // atomicTxGasPrice is the [gasPrice] paid by a transaction to burn a given // amount of [AVAXAssetID] given the value of [gasUsed]. -func (m *Mempool) atomicTxGasPrice(tx *Tx) (uint64, error) { +func (m *Mempool) atomicTxGasPrice(tx *atomic.Tx) (uint64, error) { gasUsed, err := tx.GasUsed(true) if err != nil { return 0, err @@ -140,7 +141,7 @@ func (m *Mempool) atomicTxGasPrice(tx *Tx) (uint64, error) { return burned / gasUsed, nil } -func (m *Mempool) Add(tx *GossipAtomicTx) error { +func (m *Mempool) Add(tx *atomic.GossipAtomicTx) error { m.ctx.Lock.RLock() defer m.ctx.Lock.RUnlock() @@ -149,7 +150,7 @@ func (m *Mempool) Add(tx *GossipAtomicTx) error { // AddRemoteTx attempts to add [tx] to the mempool and returns an error if // it could not be added to the mempool. -func (m *Mempool) AddRemoteTx(tx *Tx) error { +func (m *Mempool) AddRemoteTx(tx *atomic.Tx) error { m.lock.Lock() defer m.lock.Unlock() @@ -172,7 +173,7 @@ func (m *Mempool) AddRemoteTx(tx *Tx) error { return err } -func (m *Mempool) AddLocalTx(tx *Tx) error { +func (m *Mempool) AddLocalTx(tx *atomic.Tx) error { m.lock.Lock() defer m.lock.Unlock() @@ -184,8 +185,8 @@ func (m *Mempool) AddLocalTx(tx *Tx) error { return err } -// ForceAddTx forcibly adds a *Tx to the mempool and bypasses all verification. -func (m *Mempool) ForceAddTx(tx *Tx) error { +// ForceAddTx forcibly adds a *atomic.Tx to the mempool and bypasses all verification. +func (m *Mempool) ForceAddTx(tx *atomic.Tx) error { m.lock.Lock() defer m.lock.Unlock() @@ -195,13 +196,13 @@ func (m *Mempool) ForceAddTx(tx *Tx) error { // checkConflictTx checks for any transactions in the mempool that spend the same input UTXOs as [tx]. 
// If any conflicts are present, it returns the highest gas price of any conflicting transaction, the // txID of the corresponding tx and the full list of transactions that conflict with [tx]. -func (m *Mempool) checkConflictTx(tx *Tx) (uint64, ids.ID, []*Tx, error) { +func (m *Mempool) checkConflictTx(tx *atomic.Tx) (uint64, ids.ID, []*atomic.Tx, error) { utxoSet := tx.InputUTXOs() var ( - highestGasPrice uint64 = 0 - conflictingTxs []*Tx = make([]*Tx, 0) - highestGasPriceConflictTxID ids.ID = ids.ID{} + highestGasPrice uint64 = 0 + conflictingTxs []*atomic.Tx = make([]*atomic.Tx, 0) + highestGasPriceConflictTxID ids.ID = ids.ID{} ) for utxoID := range utxoSet { // Get current gas price of the existing tx in the mempool @@ -226,7 +227,7 @@ func (m *Mempool) checkConflictTx(tx *Tx) (uint64, ids.ID, []*Tx, error) { // addTx adds [tx] to the mempool. Assumes [m.lock] is held. // If [force], skips conflict checks within the mempool. -func (m *Mempool) addTx(tx *Tx, local bool, force bool) error { +func (m *Mempool) addTx(tx *atomic.Tx, local bool, force bool) error { txID := tx.ID() // If [txID] has already been issued or is in the currentTxs map // there's no need to add it. 
@@ -320,7 +321,7 @@ func (m *Mempool) addTx(tx *Tx, local bool, force bool) error { m.utxoSpenders[utxoID] = tx } - m.bloom.Add(&GossipAtomicTx{Tx: tx}) + m.bloom.Add(&atomic.GossipAtomicTx{Tx: tx}) reset, err := gossip.ResetBloomFilterIfNeeded(m.bloom, m.length()*config.TxGossipBloomChurnMultiplier) if err != nil { return err @@ -330,7 +331,7 @@ func (m *Mempool) addTx(tx *Tx, local bool, force bool) error { log.Debug("resetting bloom filter", "reason", "reached max filled ratio") for _, pendingTx := range m.txHeap.minHeap.items { - m.bloom.Add(&GossipAtomicTx{Tx: pendingTx.tx}) + m.bloom.Add(&atomic.GossipAtomicTx{Tx: pendingTx.tx}) } } @@ -344,12 +345,12 @@ func (m *Mempool) addTx(tx *Tx, local bool, force bool) error { return nil } -func (m *Mempool) Iterate(f func(tx *GossipAtomicTx) bool) { +func (m *Mempool) Iterate(f func(tx *atomic.GossipAtomicTx) bool) { m.lock.RLock() defer m.lock.RUnlock() for _, item := range m.txHeap.maxHeap.items { - if !f(&GossipAtomicTx{Tx: item.tx}) { + if !f(&atomic.GossipAtomicTx{Tx: item.tx}) { return } } @@ -363,7 +364,7 @@ func (m *Mempool) GetFilter() ([]byte, []byte) { } // NextTx returns a transaction to be issued from the mempool. -func (m *Mempool) NextTx() (*Tx, bool) { +func (m *Mempool) NextTx() (*atomic.Tx, bool) { m.lock.Lock() defer m.lock.Unlock() @@ -383,7 +384,7 @@ func (m *Mempool) NextTx() (*Tx, bool) { // GetPendingTx returns the transaction [txID] and true if it is // currently in the [txHeap] waiting to be issued into a block. // Returns nil, false otherwise. -func (m *Mempool) GetPendingTx(txID ids.ID) (*Tx, bool) { +func (m *Mempool) GetPendingTx(txID ids.ID) (*atomic.Tx, bool) { m.lock.RLock() defer m.lock.RUnlock() @@ -393,7 +394,7 @@ func (m *Mempool) GetPendingTx(txID ids.ID) (*Tx, bool) { // GetTx returns the transaction [txID] if it was issued // by this node and returns whether it was dropped and whether // it exists. 
-func (m *Mempool) GetTx(txID ids.ID) (*Tx, bool, bool) { +func (m *Mempool) GetTx(txID ids.ID) (*atomic.Tx, bool, bool) { m.lock.RLock() defer m.lock.RUnlock() @@ -476,7 +477,7 @@ func (m *Mempool) CancelCurrentTxs() { // cancelTx removes [tx] from current transactions and moves it back into the // tx heap. // assumes the lock is held. -func (m *Mempool) cancelTx(tx *Tx) { +func (m *Mempool) cancelTx(tx *atomic.Tx) { // Add tx to heap sorted by gasPrice gasPrice, err := m.atomicTxGasPrice(tx) if err == nil { @@ -518,7 +519,7 @@ func (m *Mempool) DiscardCurrentTxs() { // discardCurrentTx discards [tx] from the set of current transactions. // Assumes the lock is held. -func (m *Mempool) discardCurrentTx(tx *Tx) { +func (m *Mempool) discardCurrentTx(tx *atomic.Tx) { m.removeSpenders(tx) m.discardedTxs.Put(tx.ID(), tx) delete(m.currentTxs, tx.ID()) @@ -532,7 +533,7 @@ func (m *Mempool) discardCurrentTx(tx *Tx) { // removeTx must be called for all conflicts before overwriting the utxoSpenders // map. // Assumes lock is held. -func (m *Mempool) removeTx(tx *Tx, discard bool) { +func (m *Mempool) removeTx(tx *atomic.Tx, discard bool) { txID := tx.ID() // Remove from [currentTxs], [txHeap], and [issuedTxs]. @@ -557,7 +558,7 @@ func (m *Mempool) removeTx(tx *Tx, discard bool) { // removeSpenders deletes the entries for all input UTXOs of [tx] from the // [utxoSpenders] map. // Assumes the lock is held. -func (m *Mempool) removeSpenders(tx *Tx) { +func (m *Mempool) removeSpenders(tx *atomic.Tx) { for utxoID := range tx.InputUTXOs() { delete(m.utxoSpenders, utxoID) } @@ -565,7 +566,7 @@ func (m *Mempool) removeSpenders(tx *Tx) { // RemoveTx removes [txID] from the mempool completely. // Evicts [tx] from the discarded cache if present. 
-func (m *Mempool) RemoveTx(tx *Tx) { +func (m *Mempool) RemoveTx(tx *atomic.Tx) { m.lock.Lock() defer m.lock.Unlock() diff --git a/plugin/evm/atomic/txpool/mempool_test.go b/plugin/evm/atomic/txpool/mempool_test.go new file mode 100644 index 0000000000..2d70b58489 --- /dev/null +++ b/plugin/evm/atomic/txpool/mempool_test.go @@ -0,0 +1,136 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package txpool + +import ( + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" +) + +func TestMempoolAddTx(t *testing.T) { + require := require.New(t) + m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) + require.NoError(err) + + txs := make([]*atomic.GossipAtomicTx, 0) + for i := 0; i < 3_000; i++ { + tx := &atomic.GossipAtomicTx{ + Tx: &atomic.Tx{ + UnsignedAtomicTx: &atomictest.TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + } + + txs = append(txs, tx) + require.NoError(m.Add(tx)) + } + + for _, tx := range txs { + require.True(m.bloom.Has(tx)) + } +} + +// Add should return an error if a tx is already known +func TestMempoolAdd(t *testing.T) { + require := require.New(t) + m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) + require.NoError(err) + + tx := &atomic.GossipAtomicTx{ + Tx: &atomic.Tx{ + UnsignedAtomicTx: &atomictest.TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + } + + require.NoError(m.Add(tx)) + err = m.Add(tx) + require.ErrorIs(err, errTxAlreadyKnown) +} + +func TestAtomicMempoolIterate(t *testing.T) { + txs := []*atomic.GossipAtomicTx{ + { + Tx: &atomic.Tx{ + UnsignedAtomicTx: &atomictest.TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + }, + { + Tx: &atomic.Tx{ + UnsignedAtomicTx: 
&atomictest.TestUnsignedTx{ + IDV: ids.GenerateTestID(), + }, + }, + }, + } + + tests := []struct { + name string + add []*atomic.GossipAtomicTx + f func(tx *atomic.GossipAtomicTx) bool + expectedTxs []*atomic.GossipAtomicTx + }{ + { + name: "func matches nothing", + add: txs, + f: func(*atomic.GossipAtomicTx) bool { + return false + }, + expectedTxs: []*atomic.GossipAtomicTx{}, + }, + { + name: "func matches all", + add: txs, + f: func(*atomic.GossipAtomicTx) bool { + return true + }, + expectedTxs: txs, + }, + { + name: "func matches subset", + add: txs, + f: func(tx *atomic.GossipAtomicTx) bool { + return tx.Tx == txs[0].Tx + }, + expectedTxs: []*atomic.GossipAtomicTx{txs[0]}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 10, nil) + require.NoError(err) + + for _, add := range tt.add { + require.NoError(m.Add(add)) + } + + matches := make([]*atomic.GossipAtomicTx, 0) + f := func(tx *atomic.GossipAtomicTx) bool { + match := tt.f(tx) + + if match { + matches = append(matches, tx) + } + + return match + } + + m.Iterate(f) + + require.ElementsMatch(tt.expectedTxs, matches) + }) + } +} diff --git a/plugin/evm/atomic/tx_heap.go b/plugin/evm/atomic/txpool/tx_heap.go similarity index 88% rename from plugin/evm/atomic/tx_heap.go rename to plugin/evm/atomic/txpool/tx_heap.go index bcec314cd7..5cd2e04a94 100644 --- a/plugin/evm/atomic/tx_heap.go +++ b/plugin/evm/atomic/txpool/tx_heap.go @@ -1,12 +1,13 @@ // (c) 2020-2025, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package atomic +package txpool import ( "container/heap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/plugin/evm/atomic" ) // txEntry is used to track the [gasPrice] transactions pay to be included in @@ -14,7 +15,7 @@ import ( type txEntry struct { id ids.ID gasPrice uint64 - tx *Tx + tx *atomic.Tx index int } @@ -91,7 +92,7 @@ func newTxHeap(maxSize int) *txHeap { } } -func (th *txHeap) Push(tx *Tx, gasPrice uint64) { +func (th *txHeap) Push(tx *atomic.Tx, gasPrice uint64) { txID := tx.ID() oldLen := th.Len() heap.Push(th.maxHeap, &txEntry{ @@ -109,28 +110,28 @@ func (th *txHeap) Push(tx *Tx, gasPrice uint64) { } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PeekMax() (*Tx, uint64) { +func (th *txHeap) PeekMax() (*atomic.Tx, uint64) { txEntry := th.maxHeap.items[0] return txEntry.tx, txEntry.gasPrice } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PeekMin() (*Tx, uint64) { +func (th *txHeap) PeekMin() (*atomic.Tx, uint64) { txEntry := th.minHeap.items[0] return txEntry.tx, txEntry.gasPrice } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMax() *Tx { +func (th *txHeap) PopMax() *atomic.Tx { return th.Remove(th.maxHeap.items[0].id) } // Assumes there is non-zero items in [txHeap] -func (th *txHeap) PopMin() *Tx { +func (th *txHeap) PopMin() *atomic.Tx { return th.Remove(th.minHeap.items[0].id) } -func (th *txHeap) Remove(id ids.ID) *Tx { +func (th *txHeap) Remove(id ids.ID) *atomic.Tx { maxEntry, ok := th.maxHeap.Get(id) if !ok { return nil @@ -150,7 +151,7 @@ func (th *txHeap) Len() int { return th.maxHeap.Len() } -func (th *txHeap) Get(id ids.ID) (*Tx, bool) { +func (th *txHeap) Get(id ids.ID) (*atomic.Tx, bool) { txEntry, ok := th.maxHeap.Get(id) if !ok { return nil, false diff --git a/plugin/evm/atomic/tx_heap_test.go b/plugin/evm/atomic/txpool/tx_heap_test.go similarity index 90% rename from plugin/evm/atomic/tx_heap_test.go rename to plugin/evm/atomic/txpool/tx_heap_test.go 
index c9f602ccea..9ebd54b52b 100644 --- a/plugin/evm/atomic/tx_heap_test.go +++ b/plugin/evm/atomic/txpool/tx_heap_test.go @@ -1,32 +1,33 @@ // (c) 2019-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package atomic +package txpool import ( "testing" + "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/stretchr/testify/assert" ) func TestTxHeap(t *testing.T) { var ( - tx0 = &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{ + tx0 = &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 0, }, } tx0Bytes = []byte{0} - tx1 = &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{ + tx1 = &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 1, }, } tx1Bytes = []byte{1} - tx2 = &Tx{ - UnsignedAtomicTx: &UnsignedImportTx{ + tx2 = &atomic.Tx{ + UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: 2, }, } diff --git a/plugin/evm/block.go b/plugin/evm/block.go index d139b640a0..80bd07de7b 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -143,7 +143,7 @@ func (b *Block) Accept(context.Context) error { // Although returning an error from Accept is considered fatal, it is good // practice to cleanup the batch we were modifying in the case of an error. - defer vm.db.Abort() + defer vm.versiondb.Abort() log.Debug(fmt.Sprintf("Accepting block %s (%s) at height %d", b.ID().Hex(), b.ID(), b.Height())) @@ -176,7 +176,7 @@ func (b *Block) Accept(context.Context) error { } // Get pending operations on the vm's versionDB so we can apply them atomically // with the shared memory changes. 
- vdbBatch, err := b.vm.db.CommitBatch() + vdbBatch, err := b.vm.versiondb.CommitBatch() if err != nil { return fmt.Errorf("could not create commit batch processing block[%s]: %w", b.ID(), err) } diff --git a/plugin/evm/block_builder.go b/plugin/evm/block_builder.go index 721561ff40..967444b0d0 100644 --- a/plugin/evm/block_builder.go +++ b/plugin/evm/block_builder.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic" + atomictxpool "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/holiman/uint256" "github.com/ava-labs/avalanchego/snow" @@ -30,7 +30,7 @@ type blockBuilder struct { chainConfig *params.ChainConfig txPool *txpool.TxPool - mempool *atomic.Mempool + mempool *atomictxpool.Mempool shutdownChan <-chan struct{} shutdownWg *sync.WaitGroup diff --git a/plugin/evm/config.go b/plugin/evm/config.go new file mode 100644 index 0000000000..5ad18aa2f2 --- /dev/null +++ b/plugin/evm/config.go @@ -0,0 +1,21 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "github.com/ava-labs/coreth/core/txpool/legacypool" + "github.com/ava-labs/coreth/plugin/evm/config" +) + +// defaultTxPoolConfig uses [legacypool.DefaultConfig] to make a [config.TxPoolConfig] +// that can be passed to [config.Config.SetDefaults]. 
+var defaultTxPoolConfig = config.TxPoolConfig{ + PriceLimit: legacypool.DefaultConfig.PriceLimit, + PriceBump: legacypool.DefaultConfig.PriceBump, + AccountSlots: legacypool.DefaultConfig.AccountSlots, + GlobalSlots: legacypool.DefaultConfig.GlobalSlots, + AccountQueue: legacypool.DefaultConfig.AccountQueue, + GlobalQueue: legacypool.DefaultConfig.GlobalQueue, + Lifetime: legacypool.DefaultConfig.Lifetime, +} diff --git a/plugin/evm/config/config.go b/plugin/evm/config/config.go index a92405bcb5..f5bf644657 100644 --- a/plugin/evm/config/config.go +++ b/plugin/evm/config/config.go @@ -9,8 +9,6 @@ import ( "time" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/coreth/core/txpool/legacypool" - "github.com/ava-labs/coreth/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/spf13/cast" @@ -224,28 +222,37 @@ type Config struct { HttpBodyLimit uint64 `json:"http-body-limit"` } +// TxPoolConfig contains the transaction pool config to be passed +// to [Config.SetDefaults]. 
+type TxPoolConfig struct { + PriceLimit uint64 + PriceBump uint64 + AccountSlots uint64 + GlobalSlots uint64 + AccountQueue uint64 + GlobalQueue uint64 + Lifetime time.Duration +} + // EthAPIs returns an array of strings representing the Eth APIs that should be enabled func (c Config) EthAPIs() []string { return c.EnabledEthAPIs } -func (c Config) EthBackendSettings() eth.Settings { - return eth.Settings{MaxBlocksPerRequest: c.MaxBlocksPerRequest} -} - -func (c *Config) SetDefaults() { +func (c *Config) SetDefaults(txPoolConfig TxPoolConfig) { c.EnabledEthAPIs = defaultEnabledAPIs c.RPCGasCap = defaultRpcGasCap c.RPCTxFeeCap = defaultRpcTxFeeCap c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled - c.TxPoolPriceLimit = legacypool.DefaultConfig.PriceLimit - c.TxPoolPriceBump = legacypool.DefaultConfig.PriceBump - c.TxPoolAccountSlots = legacypool.DefaultConfig.AccountSlots - c.TxPoolGlobalSlots = legacypool.DefaultConfig.GlobalSlots - c.TxPoolAccountQueue = legacypool.DefaultConfig.AccountQueue - c.TxPoolGlobalQueue = legacypool.DefaultConfig.GlobalQueue - c.TxPoolLifetime.Duration = legacypool.DefaultConfig.Lifetime + // TxPool settings + c.TxPoolPriceLimit = txPoolConfig.PriceLimit + c.TxPoolPriceBump = txPoolConfig.PriceBump + c.TxPoolAccountSlots = txPoolConfig.AccountSlots + c.TxPoolGlobalSlots = txPoolConfig.GlobalSlots + c.TxPoolAccountQueue = txPoolConfig.AccountQueue + c.TxPoolGlobalQueue = txPoolConfig.GlobalQueue + c.TxPoolLifetime.Duration = txPoolConfig.Lifetime c.APIMaxDuration.Duration = defaultApiMaxDuration c.WSCPURefillRate.Duration = defaultWsCpuRefillRate diff --git a/plugin/evm/database/wrapped_database.go b/plugin/evm/database/wrapped_database.go index 9421e514a8..f8a36913bb 100644 --- a/plugin/evm/database/wrapped_database.go +++ b/plugin/evm/database/wrapped_database.go @@ -17,23 +17,15 @@ var ( ) // ethDbWrapper implements ethdb.Database -type ethDbWrapper struct { - database.Database -} +type ethDbWrapper struct{ database.Database } 
-func WrapDatabase(db database.Database) ethdb.KeyValueStore { - return ethDbWrapper{db} -} +func WrapDatabase(db database.Database) ethdb.KeyValueStore { return ethDbWrapper{db} } // Stat implements ethdb.Database -func (db ethDbWrapper) Stat(string) (string, error) { - return "", database.ErrNotFound -} +func (db ethDbWrapper) Stat(string) (string, error) { return "", database.ErrNotFound } // NewBatch implements ethdb.Database -func (db ethDbWrapper) NewBatch() ethdb.Batch { - return wrappedBatch{db.Database.NewBatch()} -} +func (db ethDbWrapper) NewBatch() ethdb.Batch { return wrappedBatch{db.Database.NewBatch()} } // NewBatchWithSize implements ethdb.Database // TODO: propagate size through avalanchego Database interface @@ -67,16 +59,10 @@ func (db ethDbWrapper) NewIteratorWithStart(start []byte) ethdb.Iterator { } // wrappedBatch implements ethdb.wrappedBatch -type wrappedBatch struct { - database.Batch -} +type wrappedBatch struct{ database.Batch } // ValueSize implements ethdb.Batch -func (batch wrappedBatch) ValueSize() int { - return batch.Batch.Size() -} +func (batch wrappedBatch) ValueSize() int { return batch.Batch.Size() } // Replay implements ethdb.Batch -func (batch wrappedBatch) Replay(w ethdb.KeyValueWriter) error { - return batch.Batch.Replay(w) -} +func (batch wrappedBatch) Replay(w ethdb.KeyValueWriter) error { return batch.Batch.Replay(w) } diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 1c5507d218..643eef391c 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -1012,7 +1012,7 @@ func TestExportTxAccept(t *testing.T) { t.Fatal(err) } - commitBatch, err := vm.db.CommitBatch() + commitBatch, err := vm.versiondb.CommitBatch() if err != nil { t.Fatalf("Failed to create commit batch for VM due to %s", err) } @@ -1800,7 +1800,7 @@ func TestNewExportTx(t *testing.T) { t.Fatalf("burned wrong amount of AVAX - expected %d burned %d", test.expectedBurnedAVAX, burnedAVAX) } - commitBatch, err := 
vm.db.CommitBatch() + commitBatch, err := vm.versiondb.CommitBatch() if err != nil { t.Fatalf("Failed to create commit batch for VM due to %s", err) } @@ -2000,7 +2000,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal("newExportTx created an invalid transaction", err) } - commitBatch, err := vm.db.CommitBatch() + commitBatch, err := vm.versiondb.CommitBatch() if err != nil { t.Fatalf("Failed to create commit batch for VM due to %s", err) } diff --git a/plugin/evm/imports_test.go b/plugin/evm/imports_test.go new file mode 100644 index 0000000000..8b7ac4c4a9 --- /dev/null +++ b/plugin/evm/imports_test.go @@ -0,0 +1,72 @@ +// (c) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/tools/go/packages" +) + +// getDependencies takes a fully qualified package name and returns a map of all +// its recursive package imports (including itself) in the same format. 
+func getDependencies(packageName string) (map[string]struct{}, error) { + // Configure the load mode to include dependencies + cfg := &packages.Config{Mode: packages.NeedDeps | packages.NeedImports | packages.NeedName | packages.NeedModule} + pkgs, err := packages.Load(cfg, packageName) + if err != nil { + return nil, fmt.Errorf("failed to load package: %v", err) + } + + if len(pkgs) == 0 || pkgs[0].Errors != nil { + return nil, fmt.Errorf("failed to load package %s", packageName) + } + + deps := make(map[string]struct{}) + var collectDeps func(pkg *packages.Package) + collectDeps = func(pkg *packages.Package) { + if _, ok := deps[pkg.PkgPath]; ok { + return // Avoid re-processing the same dependency + } + deps[pkg.PkgPath] = struct{}{} + for _, dep := range pkg.Imports { + collectDeps(dep) + } + } + + // Start collecting dependencies + collectDeps(pkgs[0]) + return deps, nil +} + +func TestMustNotImport(t *testing.T) { + withRepo := func(pkg string) string { + const repo = "github.com/ava-labs/coreth" + return fmt.Sprintf("%s/%s", repo, pkg) + } + mustNotImport := map[string][]string{ + // The following sub-packages of plugin/evm must not import core, core/vm + // so clients (e.g., wallets, e2e tests) can import them without pulling in + // the entire VM logic. + // Importing these packages configures libevm globally and it is not + // possible to do so for both coreth and subnet-evm, where the client may + // wish to connect to multiple chains. 
+ "plugin/evm/atomic": {"core", "core/vm"}, + "plugin/evm/client": {"core", "core/vm"}, + "plugin/evm/config": {"core", "core/vm"}, + } + + for packageName, forbiddenImports := range mustNotImport { + imports, err := getDependencies(withRepo(packageName)) + require.NoError(t, err) + + for _, forbiddenImport := range forbiddenImports { + fullForbiddenImport := withRepo(forbiddenImport) + _, found := imports[fullForbiddenImport] + require.False(t, found, "package %s must not import %s, check output of go list -f '{{ .Deps }}' \"%s\" ", packageName, fullForbiddenImport, withRepo(packageName)) + } + } +} diff --git a/plugin/evm/mempool_atomic_gossiping_test.go b/plugin/evm/mempool_atomic_gossiping_test.go index 9f2cc89535..ae0be940e0 100644 --- a/plugin/evm/mempool_atomic_gossiping_test.go +++ b/plugin/evm/mempool_atomic_gossiping_test.go @@ -15,6 +15,9 @@ import ( "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" + atomictxpool "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" + "github.com/stretchr/testify/assert" ) @@ -55,7 +58,7 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { // try to add a conflicting tx err = vm.mempool.AddLocalTx(conflictingTx) - assert.ErrorIs(err, atomic.ErrConflictingAtomicTx) + assert.ErrorIs(err, atomictxpool.ErrConflictingAtomicTx) has = mempool.Has(conflictingTxID) assert.False(has, "conflicting tx in mempool") @@ -94,10 +97,10 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { func TestMempoolMaxMempoolSizeHandling(t *testing.T) { assert := assert.New(t) - mempool, err := atomic.NewMempool(&snow.Context{}, prometheus.NewRegistry(), 1, nil) + mempool, err := atomictxpool.NewMempool(&snow.Context{}, prometheus.NewRegistry(), 1, nil) assert.NoError(err) // create candidate tx (we will drop before validation) - tx := atomic.GenerateTestImportTx() + tx := atomictest.GenerateTestImportTx() 
assert.NoError(mempool.AddRemoteTx(tx)) assert.True(mempool.Has(tx.ID())) @@ -107,8 +110,8 @@ func TestMempoolMaxMempoolSizeHandling(t *testing.T) { mempool.IssueCurrentTxs() // try to add one more tx - tx2 := atomic.GenerateTestImportTx() - assert.ErrorIs(mempool.AddRemoteTx(tx2), atomic.ErrTooManyAtomicTx) + tx2 := atomictest.GenerateTestImportTx() + assert.ErrorIs(mempool.AddRemoteTx(tx2), atomictxpool.ErrTooManyAtomicTx) assert.False(mempool.Has(tx2.ID())) } @@ -126,7 +129,7 @@ func TestMempoolPriorityDrop(t *testing.T) { err := vm.Shutdown(context.Background()) assert.NoError(err) }() - mempool, err := atomic.NewMempool(vm.ctx, prometheus.NewRegistry(), 1, vm.verifyTxAtTip) + mempool, err := atomictxpool.NewMempool(vm.ctx, prometheus.NewRegistry(), 1, vm.verifyTxAtTip) assert.NoError(err) tx1, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) @@ -140,7 +143,7 @@ func TestMempoolPriorityDrop(t *testing.T) { if err != nil { t.Fatal(err) } - assert.ErrorIs(mempool.AddRemoteTx(tx2), atomic.ErrInsufficientAtomicTxFee) + assert.ErrorIs(mempool.AddRemoteTx(tx2), atomictxpool.ErrInsufficientAtomicTxFee) assert.True(mempool.Has(tx1.ID())) assert.False(mempool.Has(tx2.ID())) diff --git a/plugin/evm/syncervm_client.go b/plugin/evm/syncervm_client.go index 4649e33952..c286d8d4cc 100644 --- a/plugin/evm/syncervm_client.go +++ b/plugin/evm/syncervm_client.go @@ -11,7 +11,6 @@ import ( syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" @@ -22,7 +21,8 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic" + 
"github.com/ava-labs/coreth/plugin/evm/atomic/state" + atomicsync "github.com/ava-labs/coreth/plugin/evm/atomic/sync" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/statesync" "github.com/ethereum/go-ethereum/common" @@ -63,8 +63,9 @@ type StateSyncClientConfig struct { State *chain.State ChaindDB ethdb.Database Acceptor BlockAcceptor - DB *versiondb.Database - AtomicBackend atomic.AtomicBackend + VerDB *versiondb.Database + MetadataDB database.Database + AtomicBackend state.AtomicBackend Client syncclient.Client @@ -82,13 +83,11 @@ type stateSyncerClient struct { // State Sync results syncSummary message.SyncSummary stateSyncErr error - metadataDB database.Database } func NewStateSyncClient(config *StateSyncClientConfig) StateSyncClient { return &stateSyncerClient{ StateSyncClientConfig: config, - metadataDB: prefixdb.New(metadataPrefix, config.DB), } } @@ -126,7 +125,7 @@ func (client *stateSyncerClient) GetOngoingSyncStateSummary(context.Context) (bl return nil, database.ErrNotFound } - summaryBytes, err := client.metadataDB.Get(stateSyncSummaryKey) + summaryBytes, err := client.MetadataDB.Get(stateSyncSummaryKey) if err != nil { return nil, err // includes the [database.ErrNotFound] case } @@ -141,10 +140,10 @@ func (client *stateSyncerClient) GetOngoingSyncStateSummary(context.Context) (bl // ClearOngoingSummary clears any marker of an ongoing state sync summary func (client *stateSyncerClient) ClearOngoingSummary() error { - if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { + if err := client.MetadataDB.Delete(stateSyncSummaryKey); err != nil { return fmt.Errorf("failed to clear ongoing summary: %w", err) } - if err := client.DB.Commit(); err != nil { + if err := client.VerDB.Commit(); err != nil { return fmt.Errorf("failed to commit db while clearing ongoing summary: %w", err) } @@ -206,10 +205,10 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS // Update the current 
state sync summary key in the database // Note: this must be performed after WipeSnapshot finishes so that we do not start a state sync // session from a partially wiped snapshot. - if err := client.metadataDB.Put(stateSyncSummaryKey, proposedSummary.Bytes()); err != nil { + if err := client.MetadataDB.Put(stateSyncSummaryKey, proposedSummary.Bytes()); err != nil { return block.StateSyncSkipped, fmt.Errorf("failed to write state sync summary key to disk: %w", err) } - if err := client.DB.Commit(); err != nil { + if err := client.VerDB.Commit(); err != nil { return block.StateSyncSkipped, fmt.Errorf("failed to commit db: %w", err) } @@ -290,7 +289,14 @@ func (client *stateSyncerClient) syncBlocks(ctx context.Context, fromHash common func (client *stateSyncerClient) syncAtomicTrie(ctx context.Context) error { log.Info("atomic tx: sync starting", "root", client.syncSummary.AtomicRoot) - atomicSyncer, err := client.AtomicBackend.Syncer(client.Client, client.syncSummary.AtomicRoot, client.syncSummary.BlockNumber, client.StateSyncRequestSize) + atomicSyncer, err := atomicsync.NewAtomicSyncer( + client.Client, + client.VerDB, + client.AtomicBackend.AtomicTrie(), + client.syncSummary.AtomicRoot, + client.syncSummary.BlockNumber, + client.StateSyncRequestSize, + ) if err != nil { return err } @@ -414,10 +420,10 @@ func (client *stateSyncerClient) updateVMMarkers() error { if err := client.Acceptor.PutLastAcceptedID(id); err != nil { return err } - if err := client.metadataDB.Delete(stateSyncSummaryKey); err != nil { + if err := client.MetadataDB.Delete(stateSyncSummaryKey); err != nil { return err } - return client.DB.Commit() + return client.VerDB.Commit() } // Error returns a non-nil error if one occurred during the sync. 
diff --git a/plugin/evm/syncervm_server.go b/plugin/evm/syncervm_server.go index f434f4ae7d..c3bfaaeccd 100644 --- a/plugin/evm/syncervm_server.go +++ b/plugin/evm/syncervm_server.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/state" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -19,7 +19,7 @@ import ( type StateSyncServerConfig struct { Chain *core.BlockChain - AtomicTrie atomic.AtomicTrie + AtomicTrie state.AtomicTrie // SyncableInterval is the interval at which blocks are eligible to provide syncable block summaries. SyncableInterval uint64 @@ -27,7 +27,7 @@ type StateSyncServerConfig struct { type stateSyncServer struct { chain *core.BlockChain - atomicTrie atomic.AtomicTrie + atomicTrie state.AtomicTrie syncableInterval uint64 } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 10f55031ba..c3a270e06d 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -36,6 +36,7 @@ import ( "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/predicate" statesyncclient "github.com/ava-labs/coreth/sync/client" @@ -336,12 +337,12 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s serverAtomicTrie := serverVM.atomicTrie require.True(serverAtomicTrie.AcceptTrie(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) - require.NoError(serverVM.db.Commit()) + require.NoError(serverVM.versiondb.Commit()) - serverSharedMemories := atomic.NewSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) - importOps, err := 
atomic.ConvertToAtomicOps(importTx) + serverSharedMemories := atomictest.NewSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) + importOps, err := atomictest.ConvertToAtomicOps(importTx) require.NoError(err) - exportOps, err := atomic.ConvertToAtomicOps(exportTx) + exportOps, err := atomictest.ConvertToAtomicOps(exportTx) require.NoError(err) serverSharedMemories.AssertOpsApplied(t, importOps) serverSharedMemories.AssertOpsApplied(t, exportOps) @@ -562,10 +563,10 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { require.True(syncerVM.bootstrapped.Get()) // check atomic memory was synced properly - syncerSharedMemories := atomic.NewSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) + syncerSharedMemories := atomictest.NewSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) for _, tx := range includedAtomicTxs { - ops, err := atomic.ConvertToAtomicOps(tx) + ops, err := atomictest.ConvertToAtomicOps(tx) require.NoError(err) syncerSharedMemories.AssertOpsApplied(t, ops) } diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index e2e53ca124..e36488a12a 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -41,17 +41,17 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/peer" "github.com/ava-labs/coreth/plugin/evm/atomic" + atomicstate "github.com/ava-labs/coreth/plugin/evm/atomic/state" + atomictxpool "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" - "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/triedb" - "github.com/ava-labs/coreth/triedb/hashdb" - "github.com/ava-labs/coreth/utils" - warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp" "github.com/ava-labs/coreth/rpc" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/client/stats" + 
"github.com/ava-labs/coreth/triedb" + "github.com/ava-labs/coreth/triedb/hashdb" + "github.com/ava-labs/coreth/utils" "github.com/ava-labs/coreth/warp" // Force-load tracer engine to trigger registration @@ -77,7 +77,6 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" avalanchedatabase "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -222,8 +221,11 @@ type VM struct { blockChain *core.BlockChain miner *miner.Miner - // [db] is the VM's current database managed by ChainState - db *versiondb.Database + // [versiondb] is the VM's current versioned database + versiondb *versiondb.Database + + // [db] is the VM's current database + db avalanchedatabase.Database // [chaindb] is the database supplied to the Ethereum backend chaindb ethdb.Database @@ -236,6 +238,8 @@ type VM struct { // set to a prefixDB with the prefix [warpPrefix] warpDB avalanchedatabase.Database + metadataDB avalanchedatabase.Database + toEngine chan<- commonEng.Message syntacticBlockValidator BlockValidator @@ -243,17 +247,17 @@ type VM struct { // [atomicTxRepository] maintains two indexes on accepted atomic txs. // - txID to accepted atomic tx // - block height to list of atomic txs accepted on block at that height - atomicTxRepository atomic.AtomicTxRepository + atomicTxRepository atomicstate.AtomicTxRepository // [atomicTrie] maintains a merkle forest of [height]=>[atomic txs]. 
- atomicTrie atomic.AtomicTrie + atomicTrie atomicstate.AtomicTrie // [atomicBackend] abstracts verification and processing of atomic transactions - atomicBackend atomic.AtomicBackend + atomicBackend atomicstate.AtomicBackend builder *blockBuilder baseCodec codec.Registry clock mockable.Clock - mempool *atomic.Mempool + mempool *atomictxpool.Mempool shutdownChan chan struct{} shutdownWg sync.WaitGroup @@ -268,7 +272,7 @@ type VM struct { client peer.NetworkClient networkCodec codec.Manager - validators *p2p.Validators + p2pValidators *p2p.Validators // Metrics sdkMetrics *prometheus.Registry @@ -331,7 +335,7 @@ func (vm *VM) Initialize( fxs []*commonEng.Fx, appSender commonEng.AppSender, ) error { - vm.config.SetDefaults() + vm.config.SetDefaults(defaultTxPoolConfig) if len(configBytes) > 0 { if err := json.Unmarshal(configBytes, &vm.config); err != nil { return fmt.Errorf("failed to unmarshal config %s: %w", string(configBytes), err) @@ -390,14 +394,10 @@ func (vm *VM) Initialize( if err := vm.initializeDBs(db); err != nil { return fmt.Errorf("failed to initialize databases: %w", err) } - if vm.config.InspectDatabase { - start := time.Now() - log.Info("Starting database inspection") - if err := rawdb.InspectDatabase(vm.chaindb, nil, nil); err != nil { + if err := vm.inspectDatabases(); err != nil { return err } - log.Info("Completed database inspection", "elapsed", time.Since(start)) } g := new(core.Genesis) @@ -521,7 +521,7 @@ func (vm *VM) Initialize( } // TODO: read size from settings - vm.mempool, err = atomic.NewMempool(chainCtx, vm.sdkMetrics, defaultMempoolSize, vm.verifyTxAtTip) + vm.mempool, err = atomictxpool.NewMempool(chainCtx, vm.sdkMetrics, defaultMempoolSize, vm.verifyTxAtTip) if err != nil { return fmt.Errorf("failed to initialize mempool: %w", err) } @@ -536,7 +536,7 @@ func (vm *VM) Initialize( if err != nil { return fmt.Errorf("failed to initialize p2p network: %w", err) } - vm.validators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, 
vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) + vm.p2pValidators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) vm.networkCodec = message.Codec vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) vm.client = peer.NewNetworkClient(vm.Network) @@ -586,12 +586,12 @@ func (vm *VM) Initialize( } // initialize atomic repository - vm.atomicTxRepository, err = atomic.NewAtomicTxRepository(vm.db, atomic.Codec, lastAcceptedHeight) + vm.atomicTxRepository, err = atomicstate.NewAtomicTxRepository(vm.versiondb, atomic.Codec, lastAcceptedHeight) if err != nil { return fmt.Errorf("failed to create atomic repository: %w", err) } - vm.atomicBackend, err = atomic.NewAtomicBackend( - vm.db, vm.ctx.SharedMemory, bonusBlockHeights, + vm.atomicBackend, err = atomicstate.NewAtomicBackend( + vm.versiondb, vm.ctx.SharedMemory, bonusBlockHeights, vm.atomicTxRepository, lastAcceptedHeight, lastAcceptedHash, vm.config.CommitInterval, ) @@ -656,7 +656,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { &vm.ethConfig, &EthPushGossiper{vm: vm}, vm.chaindb, - vm.config.EthBackendSettings(), + eth.Settings{MaxBlocksPerRequest: vm.config.MaxBlocksPerRequest}, lastAcceptedHash, dummy.NewFakerWithClock(callbacks, &vm.clock), &vm.clock, @@ -715,7 +715,8 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { StateSyncRequestSize: vm.config.StateSyncRequestSize, LastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around ChaindDB: vm.chaindb, - DB: vm.db, + VerDB: vm.versiondb, + MetadataDB: vm.metadataDB, AtomicBackend: vm.atomicBackend, ToEngine: vm.toEngine, Acceptor: vm, @@ -1040,7 +1041,7 @@ func (vm *VM) initBlockBuilding() error { vm.cancel = cancel ethTxGossipMarshaller := GossipEthTxMarshaller{} - ethTxGossipClient := vm.Network.NewClient(p2p.TxGossipHandlerID, 
p2p.WithValidatorSampling(vm.validators)) + ethTxGossipClient := vm.Network.NewClient(p2p.TxGossipHandlerID, p2p.WithValidatorSampling(vm.p2pValidators)) ethTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, ethTxGossipNamespace) if err != nil { return fmt.Errorf("failed to initialize eth tx gossip metrics: %w", err) @@ -1056,7 +1057,7 @@ func (vm *VM) initBlockBuilding() error { }() atomicTxGossipMarshaller := atomic.GossipAtomicTxMarshaller{} - atomicTxGossipClient := vm.Network.NewClient(p2p.AtomicTxGossipHandlerID, p2p.WithValidatorSampling(vm.validators)) + atomicTxGossipClient := vm.Network.NewClient(p2p.AtomicTxGossipHandlerID, p2p.WithValidatorSampling(vm.p2pValidators)) atomicTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, atomicTxGossipNamespace) if err != nil { return fmt.Errorf("failed to initialize atomic tx gossip metrics: %w", err) @@ -1077,7 +1078,7 @@ func (vm *VM) initBlockBuilding() error { ethTxPushGossiper, err = gossip.NewPushGossiper[*GossipEthTx]( ethTxGossipMarshaller, ethTxPool, - vm.validators, + vm.p2pValidators, ethTxGossipClient, ethTxGossipMetrics, pushGossipParams, @@ -1096,7 +1097,7 @@ func (vm *VM) initBlockBuilding() error { vm.atomicTxPushGossiper, err = gossip.NewPushGossiper[*atomic.GossipAtomicTx]( atomicTxGossipMarshaller, vm.mempool, - vm.validators, + vm.p2pValidators, atomicTxGossipClient, atomicTxGossipMetrics, pushGossipParams, @@ -1123,7 +1124,7 @@ func (vm *VM) initBlockBuilding() error { txGossipTargetMessageSize, txGossipThrottlingPeriod, txGossipThrottlingLimit, - vm.validators, + vm.p2pValidators, ) } @@ -1140,7 +1141,7 @@ func (vm *VM) initBlockBuilding() error { txGossipTargetMessageSize, txGossipThrottlingPeriod, txGossipThrottlingLimit, - vm.validators, + vm.p2pValidators, ) } @@ -1161,7 +1162,7 @@ func (vm *VM) initBlockBuilding() error { vm.ethTxPullGossiper = gossip.ValidatorGossiper{ Gossiper: ethTxPullGossiper, NodeID: vm.ctx.NodeID, - Validators: vm.validators, + Validators: 
vm.p2pValidators, } } @@ -1188,7 +1189,7 @@ func (vm *VM) initBlockBuilding() error { vm.atomicTxPullGossiper = &gossip.ValidatorGossiper{ Gossiper: atomicTxPullGossiper, NodeID: vm.ctx.NodeID, - Validators: vm.validators, + Validators: vm.p2pValidators, } } @@ -1450,6 +1451,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { enabledAPIs = append(enabledAPIs, "coreth-admin") } + // RPC APIs if vm.config.SnowmanAPIEnabled { if err := handler.RegisterName("snowman", &SnowmanAPI{vm}); err != nil { return nil, err @@ -1477,21 +1479,6 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { return apis, nil } -// initializeDBs initializes the databases used by the VM. -// coreth always uses the avalanchego provided avalanchedatabase. -func (vm *VM) initializeDBs(db avalanchedatabase.Database) error { - // Use NewNested rather than New so that the structure of the database - // remains the same regardless of the provided baseDB type. - vm.chaindb = rawdb.NewDatabase(database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, db))) - vm.db = versiondb.New(db) - vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.db) - // Note warpDB is not part of versiondb because it is not necessary - // that warp signatures are committed to the database atomically with - // the last accepted block. - vm.warpDB = prefixdb.New(warpPrefix, db) - return nil -} - // CreateStaticHandlers makes new http handlers that can handle API calls func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, error) { handler := rpc.NewServer(0) diff --git a/plugin/evm/vm_database.go b/plugin/evm/vm_database.go new file mode 100644 index 0000000000..f2a5b4c344 --- /dev/null +++ b/plugin/evm/vm_database.go @@ -0,0 +1,82 @@ +// (c) 2019-2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "time" + + avalanchedatabase "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/plugin/evm/database" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +// initializeDBs initializes the databases used by the VM. +// coreth always uses the avalanchego provided database. +func (vm *VM) initializeDBs(db avalanchedatabase.Database) error { + // Use NewNested rather than New so that the structure of the database + // remains the same regardless of the provided baseDB type. + vm.chaindb = rawdb.NewDatabase(database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, db))) + vm.versiondb = versiondb.New(db) + vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.versiondb) + vm.metadataDB = prefixdb.New(metadataPrefix, vm.versiondb) + vm.db = db + // Note warpDB is not part of versiondb because it is not necessary + // that warp signatures are committed to the database atomically with + // the last accepted block. 
+ vm.warpDB = prefixdb.New(warpPrefix, db) + return nil +} + +func (vm *VM) inspectDatabases() error { + start := time.Now() + log.Info("Starting database inspection") + if err := rawdb.InspectDatabase(vm.chaindb, nil, nil); err != nil { + return err + } + if err := inspectDB(vm.acceptedBlockDB, "acceptedBlockDB"); err != nil { + return err + } + if err := inspectDB(vm.metadataDB, "metadataDB"); err != nil { + return err + } + if err := inspectDB(vm.warpDB, "warpDB"); err != nil { + return err + } + log.Info("Completed database inspection", "elapsed", time.Since(start)) + return nil +} + +func inspectDB(db avalanchedatabase.Database, label string) error { + it := db.NewIterator() + defer it.Release() + + var ( + count int64 + start = time.Now() + logged = time.Now() + + // Totals + total common.StorageSize + ) + // Inspect key-value database first. + for it.Next() { + var ( + key = it.Key() + size = common.StorageSize(len(key) + len(it.Value())) + ) + total += size + count++ + if count%1000 == 0 && time.Since(logged) > 8*time.Second { + log.Info("Inspecting database", "label", label, "count", count, "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + } + // Display the database statistic. 
+ log.Info("Database statistics", "label", label, "total", total.String(), "count", count) + return nil +} diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 3a2c3a0de9..fa34e01444 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -25,6 +25,8 @@ import ( "github.com/ava-labs/coreth/eth/filters" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/state" + "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" @@ -360,7 +362,7 @@ func TestVMConfigDefaults(t *testing.T) { _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") var vmConfig config.Config - vmConfig.SetDefaults() + vmConfig.SetDefaults(defaultTxPoolConfig) vmConfig.RPCTxFeeCap = txFeeCap vmConfig.EnabledEthAPIs = enabledEthAPIs require.Equal(t, vmConfig, vm.config, "VM Config should match default with overrides") @@ -372,7 +374,7 @@ func TestVMNilConfig(t *testing.T) { // VM Config should match defaults if no config is passed in var vmConfig config.Config - vmConfig.SetDefaults() + vmConfig.SetDefaults(defaultTxPoolConfig) require.Equal(t, vmConfig, vm.config, "VM Config should match default config") require.NoError(t, vm.Shutdown(context.Background())) } @@ -1053,8 +1055,8 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, atomic.ErrConflictingAtomicTx) { - t.Fatalf("Expected to fail with err: %s, but found err: %s", atomic.ErrConflictingAtomicTx, err) + if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, txpool.ErrConflictingAtomicTx) { + t.Fatalf("Expected to fail with err: %s, but found err: %s", txpool.ErrConflictingAtomicTx, err) } assert.True(t, vm.mempool.Has(importTx1.ID())) @@ -1468,7 +1470,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { } type 
wrappedBackend struct { - atomic.AtomicBackend + state.AtomicBackend registeredBonusBlocks map[uint64]common.Hash } @@ -3142,7 +3144,7 @@ func TestConfigureLogLevel(t *testing.T) { } } - // If the VM was not initialized, do not attept to shut it down + // If the VM was not initialized, do not attempt to shut it down if err == nil { shutdownChan := make(chan error, 1) shutdownFunc := func() { diff --git a/precompile/contract/interfaces.go b/precompile/contract/interfaces.go index b3ffb02fe2..44c3cfa633 100644 --- a/precompile/contract/interfaces.go +++ b/precompile/contract/interfaces.go @@ -62,7 +62,7 @@ type ConfigurationBlockContext interface { type BlockContext interface { ConfigurationBlockContext - // GetResults returns an arbitrary byte array result of verifying the predicates + // GetPredicateResults returns an arbitrary byte array result of verifying the predicates // of the given transaction, precompile address pair. GetPredicateResults(txHash common.Hash, precompileAddress common.Address) []byte } diff --git a/precompile/contract/mocks.go b/precompile/contract/mocks.go index 94bf9c5ba0..546090339f 100644 --- a/precompile/contract/mocks.go +++ b/precompile/contract/mocks.go @@ -3,7 +3,7 @@ // // Generated by this command: // -// mockgen -package=contract -destination=precompile/contract/mocks.go github.com/ava-labs/coreth/precompile/contract BlockContext,AccessibleState,StateDB +// mockgen -package=contract -destination=mocks.go . BlockContext,AccessibleState,StateDB // // Package contract is a generated GoMock package. @@ -24,6 +24,7 @@ import ( type MockBlockContext struct { ctrl *gomock.Controller recorder *MockBlockContextMockRecorder + isgomock struct{} } // MockBlockContextMockRecorder is the mock recorder for MockBlockContext. @@ -44,17 +45,17 @@ func (m *MockBlockContext) EXPECT() *MockBlockContextMockRecorder { } // GetPredicateResults mocks base method. 
-func (m *MockBlockContext) GetPredicateResults(arg0 common.Hash, arg1 common.Address) []byte { +func (m *MockBlockContext) GetPredicateResults(txHash common.Hash, precompileAddress common.Address) []byte { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPredicateResults", arg0, arg1) + ret := m.ctrl.Call(m, "GetPredicateResults", txHash, precompileAddress) ret0, _ := ret[0].([]byte) return ret0 } // GetPredicateResults indicates an expected call of GetPredicateResults. -func (mr *MockBlockContextMockRecorder) GetPredicateResults(arg0, arg1 any) *gomock.Call { +func (mr *MockBlockContextMockRecorder) GetPredicateResults(txHash, precompileAddress any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateResults", reflect.TypeOf((*MockBlockContext)(nil).GetPredicateResults), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateResults", reflect.TypeOf((*MockBlockContext)(nil).GetPredicateResults), txHash, precompileAddress) } // Number mocks base method. @@ -89,6 +90,7 @@ func (mr *MockBlockContextMockRecorder) Timestamp() *gomock.Call { type MockAccessibleState struct { ctrl *gomock.Controller recorder *MockAccessibleStateMockRecorder + isgomock struct{} } // MockAccessibleStateMockRecorder is the mock recorder for MockAccessibleState. @@ -165,9 +167,9 @@ func (mr *MockAccessibleStateMockRecorder) GetStateDB() *gomock.Call { } // NativeAssetCall mocks base method. 
-func (m *MockAccessibleState) NativeAssetCall(arg0 common.Address, arg1 []byte, arg2, arg3 uint64, arg4 bool) ([]byte, uint64, error) { +func (m *MockAccessibleState) NativeAssetCall(caller common.Address, input []byte, suppliedGas, gasCost uint64, readOnly bool) ([]byte, uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NativeAssetCall", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "NativeAssetCall", caller, input, suppliedGas, gasCost, readOnly) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(uint64) ret2, _ := ret[2].(error) @@ -175,15 +177,16 @@ func (m *MockAccessibleState) NativeAssetCall(arg0 common.Address, arg1 []byte, } // NativeAssetCall indicates an expected call of NativeAssetCall. -func (mr *MockAccessibleStateMockRecorder) NativeAssetCall(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { +func (mr *MockAccessibleStateMockRecorder) NativeAssetCall(caller, input, suppliedGas, gasCost, readOnly any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NativeAssetCall", reflect.TypeOf((*MockAccessibleState)(nil).NativeAssetCall), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NativeAssetCall", reflect.TypeOf((*MockAccessibleState)(nil).NativeAssetCall), caller, input, suppliedGas, gasCost, readOnly) } // MockStateDB is a mock of StateDB interface. type MockStateDB struct { ctrl *gomock.Controller recorder *MockStateDBMockRecorder + isgomock struct{} } // MockStateDBMockRecorder is the mock recorder for MockStateDB. @@ -216,15 +219,15 @@ func (mr *MockStateDBMockRecorder) AddBalance(arg0, arg1 any) *gomock.Call { } // AddLog mocks base method. 
-func (m *MockStateDB) AddLog(arg0 common.Address, arg1 []common.Hash, arg2 []byte, arg3 uint64) { +func (m *MockStateDB) AddLog(addr common.Address, topics []common.Hash, data []byte, blockNumber uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "AddLog", arg0, arg1, arg2, arg3) + m.ctrl.Call(m, "AddLog", addr, topics, data, blockNumber) } // AddLog indicates an expected call of AddLog. -func (mr *MockStateDBMockRecorder) AddLog(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockStateDBMockRecorder) AddLog(addr, topics, data, blockNumber any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLog", reflect.TypeOf((*MockStateDB)(nil).AddLog), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLog", reflect.TypeOf((*MockStateDB)(nil).AddLog), addr, topics, data, blockNumber) } // CreateAccount mocks base method. @@ -311,18 +314,18 @@ func (mr *MockStateDBMockRecorder) GetNonce(arg0 any) *gomock.Call { } // GetPredicateStorageSlots mocks base method. -func (m *MockStateDB) GetPredicateStorageSlots(arg0 common.Address, arg1 int) ([]byte, bool) { +func (m *MockStateDB) GetPredicateStorageSlots(address common.Address, index int) ([]byte, bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPredicateStorageSlots", arg0, arg1) + ret := m.ctrl.Call(m, "GetPredicateStorageSlots", address, index) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(bool) return ret0, ret1 } // GetPredicateStorageSlots indicates an expected call of GetPredicateStorageSlots. 
-func (mr *MockStateDBMockRecorder) GetPredicateStorageSlots(arg0, arg1 any) *gomock.Call { +func (mr *MockStateDBMockRecorder) GetPredicateStorageSlots(address, index any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).GetPredicateStorageSlots), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).GetPredicateStorageSlots), address, index) } // GetState mocks base method. @@ -378,15 +381,15 @@ func (mr *MockStateDBMockRecorder) SetNonce(arg0, arg1 any) *gomock.Call { } // SetPredicateStorageSlots mocks base method. -func (m *MockStateDB) SetPredicateStorageSlots(arg0 common.Address, arg1 [][]byte) { +func (m *MockStateDB) SetPredicateStorageSlots(address common.Address, predicates [][]byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SetPredicateStorageSlots", arg0, arg1) + m.ctrl.Call(m, "SetPredicateStorageSlots", address, predicates) } // SetPredicateStorageSlots indicates an expected call of SetPredicateStorageSlots. -func (mr *MockStateDBMockRecorder) SetPredicateStorageSlots(arg0, arg1 any) *gomock.Call { +func (mr *MockStateDBMockRecorder) SetPredicateStorageSlots(address, predicates any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).SetPredicateStorageSlots), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPredicateStorageSlots", reflect.TypeOf((*MockStateDB)(nil).SetPredicateStorageSlots), address, predicates) } // SetState mocks base method. diff --git a/precompile/contract/mocks_generate_test.go b/precompile/contract/mocks_generate_test.go new file mode 100644 index 0000000000..ab422832da --- /dev/null +++ b/precompile/contract/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright (C) 2025, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package contract + +//go:generate go run go.uber.org/mock/mockgen -package=$GOPACKAGE -destination=mocks.go . BlockContext,AccessibleState,StateDB diff --git a/precompile/contracts/warp/README.md b/precompile/contracts/warp/README.md index 10e1daaa38..73ca165224 100644 --- a/precompile/contracts/warp/README.md +++ b/precompile/contracts/warp/README.md @@ -57,7 +57,7 @@ To use this function, the transaction must include the signed Avalanche Warp Mes This leads to the following advantages: 1. The EVM execution does not need to verify the Warp Message at runtime (no signature verification or external calls to the P-Chain) -2. The EVM can deterministically re-execute and re-verify blocks assuming the predicate was verified by the network (eg., in bootstrapping) +2. The EVM can deterministically re-execute and re-verify blocks assuming the predicate was verified by the network (e.g., in bootstrapping) This pre-verification is performed using the ProposerVM Block header during [block verification](../../../plugin/evm/block.go#L220) and [block building](../../../miner/worker.go#L200). diff --git a/precompile/precompileconfig/mocks.go b/precompile/precompileconfig/mocks.go index 4be7f046ec..cba1ca13a3 100644 --- a/precompile/precompileconfig/mocks.go +++ b/precompile/precompileconfig/mocks.go @@ -3,7 +3,7 @@ // // Generated by this command: // -// mockgen -package=precompileconfig -destination=precompile/precompileconfig/mocks.go github.com/ava-labs/coreth/precompile/precompileconfig Predicater,Config,ChainConfig,Accepter +// mockgen -package=precompileconfig -destination=mocks.go . Predicater,Config,ChainConfig,Accepter // // Package precompileconfig is a generated GoMock package. @@ -20,6 +20,7 @@ import ( type MockPredicater struct { ctrl *gomock.Controller recorder *MockPredicaterMockRecorder + isgomock struct{} } // MockPredicaterMockRecorder is the mock recorder for MockPredicater. 
@@ -40,38 +41,39 @@ func (m *MockPredicater) EXPECT() *MockPredicaterMockRecorder { } // PredicateGas mocks base method. -func (m *MockPredicater) PredicateGas(arg0 []byte) (uint64, error) { +func (m *MockPredicater) PredicateGas(predicateBytes []byte) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PredicateGas", arg0) + ret := m.ctrl.Call(m, "PredicateGas", predicateBytes) ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } // PredicateGas indicates an expected call of PredicateGas. -func (mr *MockPredicaterMockRecorder) PredicateGas(arg0 any) *gomock.Call { +func (mr *MockPredicaterMockRecorder) PredicateGas(predicateBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PredicateGas", reflect.TypeOf((*MockPredicater)(nil).PredicateGas), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PredicateGas", reflect.TypeOf((*MockPredicater)(nil).PredicateGas), predicateBytes) } // VerifyPredicate mocks base method. -func (m *MockPredicater) VerifyPredicate(arg0 *PredicateContext, arg1 []byte) error { +func (m *MockPredicater) VerifyPredicate(predicateContext *PredicateContext, predicateBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyPredicate", arg0, arg1) + ret := m.ctrl.Call(m, "VerifyPredicate", predicateContext, predicateBytes) ret0, _ := ret[0].(error) return ret0 } // VerifyPredicate indicates an expected call of VerifyPredicate. 
-func (mr *MockPredicaterMockRecorder) VerifyPredicate(arg0, arg1 any) *gomock.Call { +func (mr *MockPredicaterMockRecorder) VerifyPredicate(predicateContext, predicateBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPredicate", reflect.TypeOf((*MockPredicater)(nil).VerifyPredicate), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPredicate", reflect.TypeOf((*MockPredicater)(nil).VerifyPredicate), predicateContext, predicateBytes) } // MockConfig is a mock of Config interface. type MockConfig struct { ctrl *gomock.Controller recorder *MockConfigMockRecorder + isgomock struct{} } // MockConfigMockRecorder is the mock recorder for MockConfig. @@ -165,6 +167,7 @@ func (mr *MockConfigMockRecorder) Verify(arg0 any) *gomock.Call { type MockChainConfig struct { ctrl *gomock.Controller recorder *MockChainConfigMockRecorder + isgomock struct{} } // MockChainConfigMockRecorder is the mock recorder for MockChainConfig. @@ -185,23 +188,24 @@ func (m *MockChainConfig) EXPECT() *MockChainConfigMockRecorder { } // IsDurango mocks base method. -func (m *MockChainConfig) IsDurango(arg0 uint64) bool { +func (m *MockChainConfig) IsDurango(time uint64) bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsDurango", arg0) + ret := m.ctrl.Call(m, "IsDurango", time) ret0, _ := ret[0].(bool) return ret0 } // IsDurango indicates an expected call of IsDurango. -func (mr *MockChainConfigMockRecorder) IsDurango(arg0 any) *gomock.Call { +func (mr *MockChainConfigMockRecorder) IsDurango(time any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDurango", reflect.TypeOf((*MockChainConfig)(nil).IsDurango), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsDurango", reflect.TypeOf((*MockChainConfig)(nil).IsDurango), time) } // MockAccepter is a mock of Accepter interface. 
type MockAccepter struct { ctrl *gomock.Controller recorder *MockAccepterMockRecorder + isgomock struct{} } // MockAccepterMockRecorder is the mock recorder for MockAccepter. @@ -222,15 +226,15 @@ func (m *MockAccepter) EXPECT() *MockAccepterMockRecorder { } // Accept mocks base method. -func (m *MockAccepter) Accept(arg0 *AcceptContext, arg1 common.Hash, arg2 uint64, arg3 common.Hash, arg4 int, arg5 []common.Hash, arg6 []byte) error { +func (m *MockAccepter) Accept(acceptCtx *AcceptContext, blockHash common.Hash, blockNumber uint64, txHash common.Hash, logIndex int, topics []common.Hash, logData []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Accept", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret := m.ctrl.Call(m, "Accept", acceptCtx, blockHash, blockNumber, txHash, logIndex, topics, logData) ret0, _ := ret[0].(error) return ret0 } // Accept indicates an expected call of Accept. -func (mr *MockAccepterMockRecorder) Accept(arg0, arg1, arg2, arg3, arg4, arg5, arg6 any) *gomock.Call { +func (mr *MockAccepterMockRecorder) Accept(acceptCtx, blockHash, blockNumber, txHash, logIndex, topics, logData any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockAccepter)(nil).Accept), arg0, arg1, arg2, arg3, arg4, arg5, arg6) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockAccepter)(nil).Accept), acceptCtx, blockHash, blockNumber, txHash, logIndex, topics, logData) } diff --git a/precompile/precompileconfig/mocks_generate_test.go b/precompile/precompileconfig/mocks_generate_test.go new file mode 100644 index 0000000000..9ca4ea38df --- /dev/null +++ b/precompile/precompileconfig/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright (C) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package precompileconfig + +//go:generate go run go.uber.org/mock/mockgen -package=$GOPACKAGE -destination=mocks.go . 
Predicater,Config,ChainConfig,Accepter diff --git a/rpc/handler.go b/rpc/handler.go index a15f6b20be..ef35a61ca8 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -479,7 +479,7 @@ func (h *handler) startCallProc(fn func(*callProc)) { } } -// handleResponse processes method call responses. +// handleResponses processes method call responses. func (h *handler) handleResponses(batch []*jsonrpcMessage, handleCall func(*jsonrpcMessage)) { var resolvedops []*requestOp handleResp := func(msg *jsonrpcMessage) { diff --git a/rpc/types.go b/rpc/types.go index b2f5b98528..c96e7bc74f 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -62,7 +62,7 @@ type ServerCodec interface { type jsonWriter interface { // writeJSON writes a message to the connection. writeJSON(ctx context.Context, msg interface{}, isError bool) error - // writeJSON writes a message to the connection with the option of skipping the deadline. + // writeJSONSkipDeadline writes a message to the connection with the option of skipping the deadline. writeJSONSkipDeadline(ctx context.Context, msg interface{}, isError bool, skip bool) error // Closed returns a channel which is closed when the connection is closed. closed() <-chan interface{} diff --git a/scripts/build_docker_image.sh b/scripts/build_docker_image.sh deleted file mode 100755 index 9126a47a8d..0000000000 --- a/scripts/build_docker_image.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -# Avalanche root directory -CORETH_PATH=$( - cd "$(dirname "${BASH_SOURCE[0]}")" - cd .. 
&& pwd -) - -# Load the constants -source "$CORETH_PATH"/scripts/constants.sh - -# Load the versions -source "$CORETH_PATH"/scripts/versions.sh - -# WARNING: this will use the most recent commit even if there are un-committed changes present -BUILD_IMAGE_ID=${BUILD_IMAGE_ID:-"${CURRENT_BRANCH}"} -echo "Building Docker Image: $DOCKERHUB_REPO:$BUILD_IMAGE_ID based of AvalancheGo@$AVALANCHE_VERSION" -docker build -t "$DOCKERHUB_REPO:$BUILD_IMAGE_ID" "$CORETH_PATH" -f "$CORETH_PATH/Dockerfile" \ - --build-arg AVALANCHE_VERSION="$AVALANCHE_VERSION" \ - --build-arg CORETH_COMMIT="$CORETH_COMMIT" \ - --build-arg CURRENT_BRANCH="$CURRENT_BRANCH" diff --git a/scripts/known_flakes.txt b/scripts/known_flakes.txt index b28b4e710c..0f324a6714 100644 --- a/scripts/known_flakes.txt +++ b/scripts/known_flakes.txt @@ -5,6 +5,7 @@ TestMempoolAtmTxsAppGossipHandlingDiscardedTx TestMempoolEthTxsAppGossipHandling TestResumeSyncAccountsTrieInterrupted TestResyncNewRootAfterDeletes +TestTimedUnlock TestTransactionSkipIndexing TestVMShutdownWhileSyncing TestWaitDeployedCornerCases diff --git a/scripts/mock.gen.sh b/scripts/mock.gen.sh deleted file mode 100755 index 87465d43a9..0000000000 --- a/scripts/mock.gen.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -# Root directory -CORETH_PATH=$( - cd "$(dirname "${BASH_SOURCE[0]}")" - cd .. && pwd -) - -if ! [[ "$0" =~ scripts/mock.gen.sh ]]; then - echo "must be run from repository root" - exit 255 -fi - -# https://github.com/uber-go/mock -go install -v go.uber.org/mock/mockgen@v0.4.0 - -if ! command -v go-license &>/dev/null; then - echo "go-license not found, installing..." 
- # https://github.com/palantir/go-license - go install -v github.com/palantir/go-license@v1.25.0 -fi - -# Load the versions -source "$CORETH_PATH"/scripts/versions.sh - -# Load the constants -source "$CORETH_PATH"/scripts/constants.sh - -# tuples of (source interface import path, comma-separated interface names, output file path) -input="scripts/mocks.mockgen.txt" -while IFS= read -r line; do - IFS='=' read -r src_import_path interface_name output_path <<<"${line}" - package_name=$(basename "$(dirname "$output_path")") - echo "Generating ${output_path}..." - mockgen -package="${package_name}" -destination="${output_path}" "${src_import_path}" "${interface_name}" - - go-license \ - --config=./header.yml \ - "${output_path}" -done <"$input" - -echo "SUCCESS" diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt deleted file mode 100644 index 694343e40e..0000000000 --- a/scripts/mocks.mockgen.txt +++ /dev/null @@ -1,2 +0,0 @@ -github.com/ava-labs/coreth/precompile/precompileconfig=Predicater,Config,ChainConfig,Accepter=precompile/precompileconfig/mocks.go -github.com/ava-labs/coreth/precompile/contract=BlockContext,AccessibleState,StateDB=precompile/contract/mocks.go diff --git a/sync/statesync/trie_sync_stats.go b/sync/statesync/trie_sync_stats.go index bb4770e28c..c55fc8da35 100644 --- a/sync/statesync/trie_sync_stats.go +++ b/sync/statesync/trie_sync_stats.go @@ -79,7 +79,7 @@ func (t *trieSyncStats) incLeafs(segment *trieSegment, count uint64, remaining u } } -// estimateSegmentsInProgressTime retrns the ETA for all trie segments +// estimateSegmentsInProgressTime returns the ETA for all trie segments // in progress to finish (uses the one with most remaining leafs to estimate). func (t *trieSyncStats) estimateSegmentsInProgressTime() time.Duration { if len(t.remainingLeafs) == 0 { diff --git a/tools.go b/tools.go new file mode 100644 index 0000000000..d7c65266dd --- /dev/null +++ b/tools.go @@ -0,0 +1,8 @@ +// (c) 2025, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package coreth + +import ( + _ "golang.org/x/tools/imports" // golang.org/x/tools to satisfy requirement for go.uber.org/mock/mockgen@v0.5 +) diff --git a/warp/aggregator/aggregator_test.go b/warp/aggregator/aggregator_test.go index 98e90607db..055d3edfa8 100644 --- a/warp/aggregator/aggregator_test.go +++ b/warp/aggregator/aggregator_test.go @@ -230,7 +230,7 @@ func TestAggregateSignatures(t *testing.T) { expectedErr: nil, }, { - name: "early termination of signature fetching on parent context cancelation", + name: "early termination of signature fetching on parent context cancellation", contextWithCancelFunc: func() (context.Context, context.CancelFunc) { ctx, cancel := context.WithCancel(context.Background()) cancel() diff --git a/warp/aggregator/mock_signature_getter.go b/warp/aggregator/mock_signature_getter.go index 144d8a2f98..70365da0ce 100644 --- a/warp/aggregator/mock_signature_getter.go +++ b/warp/aggregator/mock_signature_getter.go @@ -1,5 +1,10 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/coreth/warp/aggregator (interfaces: SignatureGetter) +// Source: signature_getter.go +// +// Generated by this command: +// +// mockgen -package=aggregator -source=signature_getter.go -destination=mock_signature_getter.go +// // Package aggregator is a generated GoMock package. package aggregator @@ -18,6 +23,7 @@ import ( type MockSignatureGetter struct { ctrl *gomock.Controller recorder *MockSignatureGetterMockRecorder + isgomock struct{} } // MockSignatureGetterMockRecorder is the mock recorder for MockSignatureGetter. @@ -38,16 +44,55 @@ func (m *MockSignatureGetter) EXPECT() *MockSignatureGetterMockRecorder { } // GetSignature mocks base method. 
-func (m *MockSignatureGetter) GetSignature(arg0 context.Context, arg1 ids.NodeID, arg2 *warp.UnsignedMessage) (*bls.Signature, error) { +func (m *MockSignatureGetter) GetSignature(ctx context.Context, nodeID ids.NodeID, unsignedWarpMessage *warp.UnsignedMessage) (*bls.Signature, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSignature", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "GetSignature", ctx, nodeID, unsignedWarpMessage) ret0, _ := ret[0].(*bls.Signature) ret1, _ := ret[1].(error) return ret0, ret1 } // GetSignature indicates an expected call of GetSignature. -func (mr *MockSignatureGetterMockRecorder) GetSignature(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockSignatureGetterMockRecorder) GetSignature(ctx, nodeID, unsignedWarpMessage any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSignature", reflect.TypeOf((*MockSignatureGetter)(nil).GetSignature), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSignature", reflect.TypeOf((*MockSignatureGetter)(nil).GetSignature), ctx, nodeID, unsignedWarpMessage) +} + +// MockNetworkClient is a mock of NetworkClient interface. +type MockNetworkClient struct { + ctrl *gomock.Controller + recorder *MockNetworkClientMockRecorder + isgomock struct{} +} + +// MockNetworkClientMockRecorder is the mock recorder for MockNetworkClient. +type MockNetworkClientMockRecorder struct { + mock *MockNetworkClient +} + +// NewMockNetworkClient creates a new mock instance. +func NewMockNetworkClient(ctrl *gomock.Controller) *MockNetworkClient { + mock := &MockNetworkClient{ctrl: ctrl} + mock.recorder = &MockNetworkClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNetworkClient) EXPECT() *MockNetworkClientMockRecorder { + return m.recorder +} + +// SendAppRequest mocks base method. 
+func (m *MockNetworkClient) SendAppRequest(ctx context.Context, nodeID ids.NodeID, message []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendAppRequest", ctx, nodeID, message) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendAppRequest indicates an expected call of SendAppRequest. +func (mr *MockNetworkClientMockRecorder) SendAppRequest(ctx, nodeID, message any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppRequest", reflect.TypeOf((*MockNetworkClient)(nil).SendAppRequest), ctx, nodeID, message) } diff --git a/warp/aggregator/mocks_generate_test.go b/warp/aggregator/mocks_generate_test.go new file mode 100644 index 0000000000..46388a6c7c --- /dev/null +++ b/warp/aggregator/mocks_generate_test.go @@ -0,0 +1,6 @@ +// Copyright (C) 2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package aggregator + +//go:generate go run go.uber.org/mock/mockgen -package=$GOPACKAGE -source=signature_getter.go -destination=mock_signature_getter.go diff --git a/warp/backend.go b/warp/backend.go index 6e1f6a9553..d35c96b8fb 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -180,7 +180,7 @@ func (b *backend) GetMessage(messageID ids.ID) (*avalancheWarp.UnsignedMessage, unsignedMessageBytes, err := b.db.Get(messageID[:]) if err != nil { - return nil, fmt.Errorf("failed to get warp message %s from db: %w", messageID.String(), err) + return nil, err } unsignedMessage, err := avalancheWarp.ParseUnsignedMessage(unsignedMessageBytes) diff --git a/warp/backend_test.go b/warp/backend_test.go index 4935875ece..cd7aa1ea76 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" @@ 
-157,6 +158,12 @@ func TestOffChainMessages(t *testing.T) { require.Equal(expectedSignatureBytes, signature[:]) }, }, + "unknown message": { + check: func(require *require.Assertions, b Backend) { + _, err := b.GetMessage(testUnsignedMessage.ID()) + require.ErrorIs(err, database.ErrNotFound) + }, + }, "invalid message": { offchainMessages: [][]byte{{1, 2, 3}}, err: errParsingOffChainMessage, diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index c70563c585..3c8427b8ff 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -7,6 +7,7 @@ import ( "context" "fmt" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow/engine/common" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" @@ -24,6 +25,11 @@ func (b *backend) Verify(ctx context.Context, unsignedMessage *avalancheWarp.Uns // Known on-chain messages should be signed if _, err := b.GetMessage(messageID); err == nil { return nil + } else if err != database.ErrNotFound { + return &common.AppError{ + Code: ParseErrCode, + Message: fmt.Sprintf("failed to get message %s: %s", messageID, err.Error()), + } } parsed, err := payload.Parse(unsignedMessage.Payload) @@ -53,7 +59,7 @@ func (b *backend) verifyBlockMessage(ctx context.Context, blockHashPayload *payl blockID := blockHashPayload.Hash _, err := b.blockClient.GetAcceptedBlock(ctx, blockID) if err != nil { - b.stats.IncBlockSignatureValidationFail() + b.stats.IncBlockValidationFail() return &common.AppError{ Code: VerifyErrCode, Message: fmt.Sprintf("failed to get block %s: %s", blockID, err.Error()), diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index 4bf11541b0..a58726aa0f 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -56,7 +56,7 @@ func TestAddressedCallSignatures(t *testing.T) { }, verifyStats: func(t *testing.T, stats *verifierStats) { require.EqualValues(t, 
0, stats.messageParseFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockValidationFail.Snapshot().Count()) }, }, "offchain message": { @@ -65,7 +65,7 @@ func TestAddressedCallSignatures(t *testing.T) { }, verifyStats: func(t *testing.T, stats *verifierStats) { require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockValidationFail.Snapshot().Count()) }, }, "unknown message": { @@ -78,7 +78,7 @@ func TestAddressedCallSignatures(t *testing.T) { }, verifyStats: func(t *testing.T, stats *verifierStats) { require.EqualValues(t, 1, stats.messageParseFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockValidationFail.Snapshot().Count()) }, err: &common.AppError{Code: ParseErrCode}, }, @@ -177,7 +177,7 @@ func TestBlockSignatures(t *testing.T) { return toMessageBytes(knownBlkID), signature[:] }, verifyStats: func(t *testing.T, stats *verifierStats) { - require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockValidationFail.Snapshot().Count()) require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) }, }, @@ -187,7 +187,7 @@ func TestBlockSignatures(t *testing.T) { return toMessageBytes(unknownBlockID), nil }, verifyStats: func(t *testing.T, stats *verifierStats) { - require.EqualValues(t, 1, stats.blockSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 1, stats.blockValidationFail.Snapshot().Count()) require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) }, err: &common.AppError{Code: VerifyErrCode}, diff --git a/warp/verifier_stats.go b/warp/verifier_stats.go index 3ee90312d9..980d464429 100644 --- a/warp/verifier_stats.go +++ 
b/warp/verifier_stats.go @@ -10,18 +10,18 @@ import ( type verifierStats struct { messageParseFail metrics.Counter // BlockRequest metrics - blockSignatureValidationFail metrics.Counter + blockValidationFail metrics.Counter } func newVerifierStats() *verifierStats { return &verifierStats{ - messageParseFail: metrics.NewRegisteredCounter("message_parse_fail", nil), - blockSignatureValidationFail: metrics.NewRegisteredCounter("block_signature_validation_fail", nil), + messageParseFail: metrics.NewRegisteredCounter("warp_backend_message_parse_fail", nil), + blockValidationFail: metrics.NewRegisteredCounter("warp_backend_block_validation_fail", nil), } } -func (h *verifierStats) IncBlockSignatureValidationFail() { - h.blockSignatureValidationFail.Inc(1) +func (h *verifierStats) IncBlockValidationFail() { + h.blockValidationFail.Inc(1) } func (h *verifierStats) IncMessageParseFail() { From 072dcf168440b028843021cb655abb0dba87220b Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 15 Jan 2025 16:17:50 +0300 Subject: [PATCH 42/91] Revert removed test --- plugin/evm/api.go | 3 --- plugin/evm/atomic/export_tx.go | 42 +++++++++++++++++++++++----------- plugin/evm/export_tx_test.go | 4 ++-- plugin/evm/syncervm_test.go | 1 + plugin/evm/vm_test.go | 2 ++ 5 files changed, 34 insertions(+), 18 deletions(-) diff --git a/plugin/evm/api.go b/plugin/evm/api.go index 12791d4e08..0c63d438eb 100644 --- a/plugin/evm/api.go +++ b/plugin/evm/api.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/client" "github.com/ethereum/go-ethereum/common" @@ -35,8 +34,6 @@ var ( errNoAddresses = errors.New("no addresses provided") errNoSourceChain = errors.New("no source chain provided") errNilTxID = errors.New("nil transaction ID") - - initialBaseFee = 
big.NewInt(params.ApricotPhase3InitialBaseFee) ) // SnowmanAPI introduces snowman specific functionality to the evm diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index 3b61ea71b8..819b921a4e 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -287,14 +287,15 @@ func NewExportTx( ctx *snow.Context, rules params.Rules, state StateDB, - amount uint64, // Amount of AVAX to export + assetID ids.ID, // AssetID of the tokens to export + amount uint64, // Amount of tokens to export chainID ids.ID, // Chain to send the UTXOs to to ids.ShortID, // Address of chain recipient baseFee *big.Int, // fee to use post-AP3 keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens ) (*Tx, error) { outs := []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: ctx.AVAXAssetID}, + Asset: avax.Asset{ID: assetID}, Out: &secp256k1fx.TransferOutput{ Amt: amount, OutputOwners: secp256k1fx.OutputOwners{ @@ -306,11 +307,22 @@ func NewExportTx( }} var ( + avaxNeeded uint64 = 0 ins, avaxIns []EVMInput signers, avaxSigners [][]*secp256k1.PrivateKey err error ) + // consume non-AVAX + if assetID != ctx.AVAXAssetID { + ins, signers, err = GetSpendableFunds(ctx, state, keys, assetID, amount) + if err != nil { + return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) + } + } else { + avaxNeeded = amount + } + switch { case rules.IsApricotPhase3: utx := &UnsignedExportTx{ @@ -331,14 +343,14 @@ func NewExportTx( return nil, err } - avaxIns, avaxSigners, err = GetSpendableAVAXWithFee(ctx, state, keys, amount, cost, baseFee) + avaxIns, avaxSigners, err = getSpendableAVAXWithFee(ctx, state, keys, avaxNeeded, cost, baseFee) default: - var avaxNeeded uint64 - avaxNeeded, err = math.Add(amount, params.AvalancheAtomicTxFee) + var newAvaxNeeded uint64 + newAvaxNeeded, err = math.Add64(avaxNeeded, params.AvalancheAtomicTxFee) if err != nil { return nil, errOverflowExport } - avaxIns, avaxSigners, err = GetSpendableFunds(ctx, 
state, keys, avaxNeeded) + avaxIns, avaxSigners, err = GetSpendableFunds(ctx, state, keys, ctx.AVAXAssetID, newAvaxNeeded) } if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) @@ -408,6 +420,7 @@ func GetSpendableFunds( ctx *snow.Context, state StateDB, keys []*secp256k1.PrivateKey, + assetID ids.ID, amount uint64, ) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { inputs := []EVMInput{} @@ -420,10 +433,13 @@ func GetSpendableFunds( } addr := key.EthAddress() var balance uint64 - // we divide by the x2cRate to convert back to the correct - // denomination of AVAX that can be exported. - balance = new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() - + if assetID == ctx.AVAXAssetID { + // If the asset is AVAX, we divide by the x2cRate to convert back to the correct + // denomination of AVAX that can be exported. + balance = new(uint256.Int).Div(state.GetBalance(addr), X2CRate).Uint64() + } else { + balance = state.GetBalanceMultiCoin(addr, common.Hash(assetID)).Uint64() + } if balance == 0 { continue } @@ -435,7 +451,7 @@ func GetSpendableFunds( inputs = append(inputs, EVMInput{ Address: addr, Amount: balance, - AssetID: ctx.AVAXAssetID, + AssetID: assetID, Nonce: nonce, }) signers = append(signers, []*secp256k1.PrivateKey{key}) @@ -449,7 +465,7 @@ func GetSpendableFunds( return inputs, signers, nil } -// GetSpendableAVAXWithFee returns a list of EVMInputs and keys (in corresponding +// getSpendableAVAXWithFee returns a list of EVMInputs and keys (in corresponding // order) to total [amount] + [fee] of [AVAX] owned by [keys]. 
// This function accounts for the added cost of the additional inputs needed to // create the transaction and makes sure to skip any keys with a balance that is @@ -457,7 +473,7 @@ func GetSpendableFunds( // Note: we return [][]*secp256k1.PrivateKey even though each input // corresponds to a single key, so that the signers can be passed in to // [tx.Sign] which supports multiple keys on a single input. -func GetSpendableAVAXWithFee( +func getSpendableAVAXWithFee( ctx *snow.Context, state StateDB, keys []*secp256k1.PrivateKey, diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 3a8383b8bc..643eef391c 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -92,7 +92,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, t.Fatal(err) } for _, addr := range testShortIDAddrs { - exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1772,7 +1772,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 0a90bd8be0..00b6846332 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -324,6 +324,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s serverVM.ctx, 
serverVM.currentRules(), state, + serverVM.ctx.AVAXAssetID, importAmount/2, serverVM.ctx.XChainID, testShortIDAddrs[0], diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 2f802e49b8..ca11654e4a 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -75,6 +75,7 @@ var ( testShortIDAddrs []ids.ShortID username = "Johns" password = "CjasdjhiPeirbSenfeI13" // #nosec G101 + initialBaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) genesisJSON = func(cfg *params.ChainConfig) string { g := new(core.Genesis) @@ -613,6 +614,7 @@ func TestIssueAtomicTxs(t *testing.T) { vm.ctx, vm.currentRules(), state, + vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], From 7a3b1b75fd0870b83cc16644253b9625a068c823 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 15 Jan 2025 17:39:46 +0300 Subject: [PATCH 43/91] Reviews --- plugin/evm/atomic/state/atomic_backend.go | 55 +------ plugin/evm/atomic/state/atomic_state.go | 18 +- plugin/evm/atomic/state/atomic_trie.go | 51 +----- .../evm/atomic/state/atomic_trie_iterator.go | 31 +--- plugin/evm/atomic/state/atomic_trie_test.go | 9 +- .../evm/atomic/state/atomic_tx_repository.go | 13 -- .../atomic/state/atomic_tx_repository_test.go | 4 +- .../evm/atomic/state/interfaces/interfaces.go | 155 ++++++++++++++++++ .../evm/atomic/sync/atomic_sync_extender.go | 10 +- .../evm/atomic/sync/atomic_sync_provider.go | 6 +- plugin/evm/atomic/sync/atomic_syncer.go | 5 +- plugin/evm/sync/syncervm_client.go | 35 ++-- plugin/evm/sync/syncervm_server.go | 4 +- plugin/evm/syncervm_test.go | 16 +- plugin/evm/vm.go | 28 ++-- plugin/evm/vm_test.go | 4 +- 16 files changed, 231 insertions(+), 213 deletions(-) create mode 100644 plugin/evm/atomic/state/interfaces/interfaces.go diff --git a/plugin/evm/atomic/state/atomic_backend.go b/plugin/evm/atomic/state/atomic_backend.go index 14e49a8d73..bc0526aeeb 100644 --- a/plugin/evm/atomic/state/atomic_backend.go +++ 
b/plugin/evm/atomic/state/atomic_backend.go @@ -17,11 +17,12 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -var _ AtomicBackend = &atomicBackend{} +var _ interfaces.AtomicBackend = &atomicBackend{} var ( atomicTrieDBPrefix = []byte("atomicTrieDB") @@ -34,46 +35,6 @@ const ( progressLogFrequency = 30 * time.Second ) -// AtomicBackend abstracts the verification and processing -// of atomic transactions -type AtomicBackend interface { - // InsertTxs calculates the root of the atomic trie that would - // result from applying [txs] to the atomic trie, starting at the state - // corresponding to previously verified block [parentHash]. - // If [blockHash] is provided, the modified atomic trie is pinned in memory - // and it's the caller's responsibility to call either Accept or Reject on - // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the - // changes or abort them and free memory. - InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) - - // Returns an AtomicState corresponding to a block hash that has been inserted - // but not Accepted or Rejected yet. - GetVerifiedAtomicState(blockHash common.Hash) (AtomicState, error) - - // AtomicTrie returns the atomic trie managed by this backend. - AtomicTrie() AtomicTrie - - // ApplyToSharedMemory applies the atomic operations that have been indexed into the trie - // but not yet applied to shared memory for heights less than or equal to [lastAcceptedBlock]. - // This executes operations in the range [cursorHeight+1, lastAcceptedBlock]. 
- // The cursor is initially set by MarkApplyToSharedMemoryCursor to signal to the atomic trie - // the range of operations that were added to the trie without being executed on shared memory. - ApplyToSharedMemory(lastAcceptedBlock uint64) error - - // MarkApplyToSharedMemoryCursor marks the atomic trie as containing atomic ops that - // have not been executed on shared memory starting at [previousLastAcceptedHeight+1]. - // This is used when state sync syncs the atomic trie, such that the atomic operations - // from [previousLastAcceptedHeight+1] to the [lastAcceptedHeight] set by state sync - // will not have been executed on shared memory. - MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error - - // SetLastAccepted is used after state-sync to reset the last accepted block. - SetLastAccepted(lastAcceptedHash common.Hash) - - // IsBonus returns true if the block for atomicState is a bonus block - IsBonus(blockHeight uint64, blockHash common.Hash) bool -} - // atomicBackend implements the AtomicBackend interface using // the AtomicTrie, AtomicTxRepository, and the VM's shared memory. 
type atomicBackend struct { @@ -83,17 +44,17 @@ type atomicBackend struct { metadataDB database.Database // Underlying database containing the atomic trie metadata sharedMemory avalancheatomic.SharedMemory - repo AtomicTxRepository + repo interfaces.AtomicTxRepository atomicTrie *atomicTrie lastAcceptedHash common.Hash - verifiedRoots map[common.Hash]AtomicState + verifiedRoots map[common.Hash]*atomicState } // NewAtomicBackend creates an AtomicBackend from the specified dependencies func NewAtomicBackend( db *versiondb.Database, sharedMemory avalancheatomic.SharedMemory, - bonusBlocks map[uint64]ids.ID, repo AtomicTxRepository, + bonusBlocks map[uint64]ids.ID, repo interfaces.AtomicTxRepository, lastAcceptedHeight uint64, lastAcceptedHash common.Hash, commitInterval uint64, ) (*atomicBackend, error) { atomicTrieDB := prefixdb.New(atomicTrieDBPrefix, db) @@ -113,7 +74,7 @@ func NewAtomicBackend( repo: repo, atomicTrie: atomicTrie, lastAcceptedHash: lastAcceptedHash, - verifiedRoots: make(map[common.Hash]AtomicState), + verifiedRoots: make(map[common.Hash]*atomicState), } // We call ApplyToSharedMemory here to ensure that if the node was shut down in the middle @@ -362,7 +323,7 @@ func (a *atomicBackend) MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight return database.PutUInt64(a.metadataDB, appliedSharedMemoryCursorKey, previousLastAcceptedHeight+1) } -func (a *atomicBackend) GetVerifiedAtomicState(blockHash common.Hash) (AtomicState, error) { +func (a *atomicBackend) GetVerifiedAtomicState(blockHash common.Hash) (interfaces.AtomicState, error) { if state, ok := a.verifiedRoots[blockHash]; ok { return state, nil } @@ -454,7 +415,7 @@ func (a *atomicBackend) IsBonus(blockHeight uint64, blockHash common.Hash) bool return false } -func (a *atomicBackend) AtomicTrie() AtomicTrie { +func (a *atomicBackend) AtomicTrie() interfaces.AtomicTrie { return a.atomicTrie } diff --git a/plugin/evm/atomic/state/atomic_state.go b/plugin/evm/atomic/state/atomic_state.go 
index 672c57d7e0..e867ff57a2 100644 --- a/plugin/evm/atomic/state/atomic_state.go +++ b/plugin/evm/atomic/state/atomic_state.go @@ -10,26 +10,12 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -var _ AtomicState = &atomicState{} - -// AtomicState is an abstraction created through AtomicBackend -// and can be used to apply the VM's state change for atomic txs -// or reject them to free memory. -// The root of the atomic trie after applying the state change -// is accessible through this interface as well. -type AtomicState interface { - // Root of the atomic trie after applying the state change. - Root() common.Hash - // Accept applies the state change to VM's persistent storage - // Changes are persisted atomically along with the provided [commitBatch]. - Accept(commitBatch database.Batch, requests map[ids.ID]*avalancheatomic.Requests) error - // Reject frees memory associated with the state change. - Reject() error -} +var _ interfaces.AtomicState = &atomicState{} // atomicState implements the AtomicState interface using // a pointer to the atomicBackend. 
diff --git a/plugin/evm/atomic/state/atomic_trie.go b/plugin/evm/atomic/state/atomic_trie.go index 844a0d5c56..1390773f6e 100644 --- a/plugin/evm/atomic/state/atomic_trie.go +++ b/plugin/evm/atomic/state/atomic_trie.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/trie/trienode" @@ -28,7 +29,7 @@ import ( "github.com/ethereum/go-ethereum/log" ) -var _ AtomicTrie = &atomicTrie{} +var _ interfaces.AtomicTrie = &atomicTrie{} const ( AtomicTrieKeyLength = wrappers.LongLen + common.HashLength @@ -39,52 +40,6 @@ const ( var lastCommittedKey = []byte("atomicTrieLastCommittedBlock") -// AtomicTrie maintains an index of atomic operations by blockchainIDs for every block -// height containing atomic transactions. The backing data structure for this index is -// a Trie. The keys of the trie are block heights and the values (leaf nodes) -// are the atomic operations applied to shared memory while processing the block accepted -// at the corresponding height. -type AtomicTrie interface { - // OpenTrie returns a modifiable instance of the atomic trie backed by trieDB - // opened at hash. - OpenTrie(hash common.Hash) (*trie.Trie, error) - - // UpdateTrie updates [tr] to inlude atomicOps for height. - UpdateTrie(tr *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error - - // Iterator returns an AtomicTrieIterator to iterate the trie at the given - // root hash starting at [cursor]. 
- Iterator(hash common.Hash, cursor []byte) (AtomicTrieIterator, error) - - // LastCommitted returns the last committed hash and corresponding block height - LastCommitted() (common.Hash, uint64) - - // TrieDB returns the underlying trie database - TrieDB() *triedb.Database - - // Root returns hash if it exists at specified height - // if trie was not committed at provided height, it returns - // common.Hash{} instead - Root(height uint64) (common.Hash, error) - - // LastAcceptedRoot returns the most recent accepted root of the atomic trie, - // or the root it was initialized to if no new tries were accepted yet. - LastAcceptedRoot() common.Hash - - // InsertTrie updates the trieDB with the provided node set and adds a reference - // to root in the trieDB. Once InsertTrie is called, it is expected either - // AcceptTrie or RejectTrie be called for the same root. - InsertTrie(nodes *trienode.NodeSet, root common.Hash) error - - // AcceptTrie marks root as the last accepted atomic trie root, and - // commits the trie to persistent storage if height is divisible by - // the commit interval. Returns true if the trie was committed. - AcceptTrie(height uint64, root common.Hash) (bool, error) - - // RejectTrie dereferences root from the trieDB, freeing memory. - RejectTrie(root common.Hash) error -} - // atomicTrie implements the AtomicTrie interface type atomicTrie struct { commitInterval uint64 // commit interval, same as commitHeightInterval by default @@ -241,7 +196,7 @@ func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error // Iterator returns a AtomicTrieIterator that iterates the trie from the given // atomic trie root, starting at the specified [cursor]. 
-func (a *atomicTrie) Iterator(root common.Hash, cursor []byte) (AtomicTrieIterator, error) { +func (a *atomicTrie) Iterator(root common.Hash, cursor []byte) (interfaces.AtomicTrieIterator, error) { t, err := trie.New(trie.TrieID(root), a.trieDB) if err != nil { return nil, err diff --git a/plugin/evm/atomic/state/atomic_trie_iterator.go b/plugin/evm/atomic/state/atomic_trie_iterator.go index 719ff10941..4ba56dc251 100644 --- a/plugin/evm/atomic/state/atomic_trie_iterator.go +++ b/plugin/evm/atomic/state/atomic_trie_iterator.go @@ -8,42 +8,15 @@ import ( "fmt" "github.com/ava-labs/avalanchego/chains/atomic" - avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/trie" ) -var _ AtomicTrieIterator = &atomicTrieIterator{} - -// AtomicTrieIterator is a stateful iterator that iterates the leafs of an AtomicTrie -type AtomicTrieIterator interface { - // Next advances the iterator to the next node in the atomic trie and - // returns true if there are more leaves to iterate - Next() bool - - // Key returns the current database key that the iterator is iterating - // returned []byte can be freely modified - Key() []byte - - // Value returns the current database value that the iterator is iterating - Value() []byte - - // BlockNumber returns the current block number - BlockNumber() uint64 - - // BlockchainID returns the current blockchain ID at the current block number - BlockchainID() ids.ID - - // AtomicOps returns a map of blockchainIDs to the set of atomic requests - // for that blockchainID at the current block number - AtomicOps() *avalancheatomic.Requests - - // Error returns error, if any encountered during this iteration - Error() error -} +var _ interfaces.AtomicTrieIterator = &atomicTrieIterator{} // atomicTrieIterator is an 
implementation of types.AtomicTrieIterator that serves // parsed data with each iteration diff --git a/plugin/evm/atomic/state/atomic_trie_test.go b/plugin/evm/atomic/state/atomic_trie_test.go index 281dd4c0a0..a7990ad4c7 100644 --- a/plugin/evm/atomic/state/atomic_trie_test.go +++ b/plugin/evm/atomic/state/atomic_trie_test.go @@ -22,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" + "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" @@ -32,7 +33,7 @@ const testCommitInterval = 100 // indexAtomicTxs updates [tr] with entries in [atomicOps] at height by creating // a new snapshot, calculating a new root, and calling InsertTrie followed // by AcceptTrie on the new root. -func indexAtomicTxs(tr AtomicTrie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { +func indexAtomicTxs(tr interfaces.AtomicTrie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { snapshot, err := tr.OpenTrie(tr.LastAcceptedRoot()) if err != nil { return err @@ -253,7 +254,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { assert.Equal(t, hash, newHash, "hash should be the same") } -func newTestAtomicTrie(t *testing.T) AtomicTrie { +func newTestAtomicTrie(t *testing.T) interfaces.AtomicTrie { db := versiondb.New(memdb.New()) repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, 0) if err != nil { @@ -528,7 +529,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) var ( - atomicTrie AtomicTrie + atomicTrie interfaces.AtomicTrie hash common.Hash height uint64 ) @@ -664,7 +665,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u // verifyOperations creates an iterator over the atomicTrie at [rootHash] and verifies that the 
all of the operations in the trie in the interval [from, to] are identical to // the atomic operations contained in [operationsMap] on the same interval. -func verifyOperations(t testing.TB, atomicTrie AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { +func verifyOperations(t testing.TB, atomicTrie interfaces.AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { t.Helper() // Start the iterator at [from] diff --git a/plugin/evm/atomic/state/atomic_tx_repository.go b/plugin/evm/atomic/state/atomic_tx_repository.go index e9dc5363e3..d84b3697a6 100644 --- a/plugin/evm/atomic/state/atomic_tx_repository.go +++ b/plugin/evm/atomic/state/atomic_tx_repository.go @@ -36,19 +36,6 @@ var ( // bonusBlocksRepairedKey = []byte("bonusBlocksRepaired") ) -// AtomicTxRepository defines an entity that manages storage and indexing of -// atomic transactions -type AtomicTxRepository interface { - GetIndexHeight() (uint64, error) - GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) - GetByHeight(height uint64) ([]*atomic.Tx, error) - Write(height uint64, txs []*atomic.Tx) error - WriteBonus(height uint64, txs []*atomic.Tx) error - - IterateByHeight(start uint64) database.Iterator - Codec() codec.Manager -} - // atomicTxRepository is a prefixdb implementation of the AtomicTxRepository interface type atomicTxRepository struct { // [acceptedAtomicTxDB] maintains an index of [txID] => [height]+[atomic tx] for all accepted atomic txs. 
diff --git a/plugin/evm/atomic/state/atomic_tx_repository_test.go b/plugin/evm/atomic/state/atomic_tx_repository_test.go index 47f30a5acf..d9c6b150c8 100644 --- a/plugin/evm/atomic/state/atomic_tx_repository_test.go +++ b/plugin/evm/atomic/state/atomic_tx_repository_test.go @@ -69,7 +69,7 @@ func constTxsPerHeight(txCount int) func(uint64) int { // writeTxs writes [txsPerHeight] txs for heights ranging in [fromHeight, toHeight) through the Write call on [repo], // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. -func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight uint64, +func writeTxs(t testing.TB, repo *atomicTxRepository, fromHeight uint64, toHeight uint64, txsPerHeight func(height uint64) int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, ) { for height := fromHeight; height < toHeight; height++ { @@ -95,7 +95,7 @@ func writeTxs(t testing.TB, repo AtomicTxRepository, fromHeight uint64, toHeight } // verifyTxs asserts [repo] can find all txs in [txMap] by height and txID -func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*atomic.Tx) { +func verifyTxs(t testing.TB, repo *atomicTxRepository, txMap map[uint64][]*atomic.Tx) { // We should be able to fetch indexed txs by height: for height, expectedTxs := range txMap { txs, err := repo.GetByHeight(height) diff --git a/plugin/evm/atomic/state/interfaces/interfaces.go b/plugin/evm/atomic/state/interfaces/interfaces.go new file mode 100644 index 0000000000..9c05c5c963 --- /dev/null +++ b/plugin/evm/atomic/state/interfaces/interfaces.go @@ -0,0 +1,155 @@ +package interfaces + +import ( + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/trie/trienode" + 
"github.com/ava-labs/coreth/triedb" + "github.com/ethereum/go-ethereum/common" + + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" +) + +// AtomicTrie maintains an index of atomic operations by blockchainIDs for every block +// height containing atomic transactions. The backing data structure for this index is +// a Trie. The keys of the trie are block heights and the values (leaf nodes) +// are the atomic operations applied to shared memory while processing the block accepted +// at the corresponding height. +type AtomicTrie interface { + // OpenTrie returns a modifiable instance of the atomic trie backed by trieDB + // opened at hash. + OpenTrie(hash common.Hash) (*trie.Trie, error) + + // UpdateTrie updates [tr] to include atomicOps for height. + UpdateTrie(tr *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error + + // Iterator returns an AtomicTrieIterator to iterate the trie at the given + // root hash starting at [cursor]. + Iterator(hash common.Hash, cursor []byte) (AtomicTrieIterator, error) + + // LastCommitted returns the last committed hash and corresponding block height + LastCommitted() (common.Hash, uint64) + + // TrieDB returns the underlying trie database + TrieDB() *triedb.Database + + // Root returns hash if it exists at specified height + // if trie was not committed at provided height, it returns + // common.Hash{} instead + Root(height uint64) (common.Hash, error) + + // LastAcceptedRoot returns the most recent accepted root of the atomic trie, + // or the root it was initialized to if no new tries were accepted yet. + LastAcceptedRoot() common.Hash + + // InsertTrie updates the trieDB with the provided node set and adds a reference + // to root in the trieDB. Once InsertTrie is called, it is expected either + // AcceptTrie or RejectTrie be called for the same root.
+ InsertTrie(nodes *trienode.NodeSet, root common.Hash) error + + // AcceptTrie marks root as the last accepted atomic trie root, and + // commits the trie to persistent storage if height is divisible by + // the commit interval. Returns true if the trie was committed. + AcceptTrie(height uint64, root common.Hash) (bool, error) + + // RejectTrie dereferences root from the trieDB, freeing memory. + RejectTrie(root common.Hash) error +} + +// AtomicBackend abstracts the verification and processing +// of atomic transactions +type AtomicBackend interface { + // InsertTxs calculates the root of the atomic trie that would + // result from applying [txs] to the atomic trie, starting at the state + // corresponding to previously verified block [parentHash]. + // If [blockHash] is provided, the modified atomic trie is pinned in memory + // and it's the caller's responsibility to call either Accept or Reject on + // the AtomicState which can be retrieved from GetVerifiedAtomicState to commit the + // changes or abort them and free memory. + InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) + + // GetVerifiedAtomicState returns an AtomicState corresponding to a block hash that has been inserted + // but not Accepted or Rejected yet. + GetVerifiedAtomicState(blockHash common.Hash) (AtomicState, error) + + // AtomicTrie returns the atomic trie managed by this backend. + AtomicTrie() AtomicTrie + + // ApplyToSharedMemory applies the atomic operations that have been indexed into the trie + // but not yet applied to shared memory for heights less than or equal to [lastAcceptedBlock]. + // This executes operations in the range [cursorHeight+1, lastAcceptedBlock]. + // The cursor is initially set by MarkApplyToSharedMemoryCursor to signal to the atomic trie + // the range of operations that were added to the trie without being executed on shared memory.
+ ApplyToSharedMemory(lastAcceptedBlock uint64) error + + // MarkApplyToSharedMemoryCursor marks the atomic trie as containing atomic ops that + // have not been executed on shared memory starting at [previousLastAcceptedHeight+1]. + // This is used when state sync syncs the atomic trie, such that the atomic operations + // from [previousLastAcceptedHeight+1] to the [lastAcceptedHeight] set by state sync + // will not have been executed on shared memory. + MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error + + // SetLastAccepted is used after state-sync to reset the last accepted block. + SetLastAccepted(lastAcceptedHash common.Hash) + + // IsBonus returns true if the block for atomicState is a bonus block + IsBonus(blockHeight uint64, blockHash common.Hash) bool +} + +// AtomicTxRepository defines an entity that manages storage and indexing of +// atomic transactions +type AtomicTxRepository interface { + GetIndexHeight() (uint64, error) + GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) + GetByHeight(height uint64) ([]*atomic.Tx, error) + Write(height uint64, txs []*atomic.Tx) error + WriteBonus(height uint64, txs []*atomic.Tx) error + + IterateByHeight(start uint64) database.Iterator + Codec() codec.Manager +} + +// AtomicState is an abstraction created through AtomicBackend +// and can be used to apply the VM's state change for atomic txs +// or reject them to free memory. +// The root of the atomic trie after applying the state change +// is accessible through this interface as well. +type AtomicState interface { + // Root of the atomic trie after applying the state change. + Root() common.Hash + // Accept applies the state change to VM's persistent storage + // Changes are persisted atomically along with the provided [commitBatch]. + Accept(commitBatch database.Batch, requests map[ids.ID]*avalancheatomic.Requests) error + // Reject frees memory associated with the state change. 
+ Reject() error +} + +// AtomicTrieIterator is a stateful iterator that iterates the leafs of an AtomicTrie +type AtomicTrieIterator interface { + // Next advances the iterator to the next node in the atomic trie and + // returns true if there are more leaves to iterate + Next() bool + + // Key returns the current database key that the iterator is iterating + // returned []byte can be freely modified + Key() []byte + + // Value returns the current database value that the iterator is iterating + Value() []byte + + // BlockNumber returns the current block number + BlockNumber() uint64 + + // BlockchainID returns the current blockchain ID at the current block number + BlockchainID() ids.ID + + // AtomicOps returns a map of blockchainIDs to the set of atomic requests + // for that blockchainID at the current block number + AtomicOps() *avalancheatomic.Requests + + // Error returns error, if any encountered during this iteration + Error() error +} diff --git a/plugin/evm/atomic/sync/atomic_sync_extender.go b/plugin/evm/atomic/sync/atomic_sync_extender.go index 2a2517e4ef..56735eb509 100644 --- a/plugin/evm/atomic/sync/atomic_sync_extender.go +++ b/plugin/evm/atomic/sync/atomic_sync_extender.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" syncclient "github.com/ava-labs/coreth/sync/client" - "github.com/ava-labs/coreth/plugin/evm/atomic/state" + "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/plugin/evm/sync" "github.com/ethereum/go-ethereum/log" @@ -18,18 +18,18 @@ import ( var _ sync.Extender = (*AtomicSyncExtender)(nil) type AtomicSyncExtender struct { - backend state.AtomicBackend + backend interfaces.AtomicBackend stateSyncRequestSize uint16 } -func NewAtomicSyncExtender(backend state.AtomicBackend, stateSyncRequestSize uint16) *AtomicSyncExtender { +func NewAtomicSyncExtender(backend interfaces.AtomicBackend, stateSyncRequestSize uint16) 
*AtomicSyncExtender { return &AtomicSyncExtender{ backend: backend, stateSyncRequestSize: stateSyncRequestSize, } } -func (a *AtomicSyncExtender) Sync(ctx context.Context, client syncclient.Client, verDB *versiondb.Database, syncSummary message.Syncable) error { +func (a *AtomicSyncExtender) Sync(ctx context.Context, client syncclient.LeafClient, verDB *versiondb.Database, syncSummary message.Syncable) error { atomicSyncSummary, ok := syncSummary.(*AtomicBlockSyncSummary) if !ok { return fmt.Errorf("expected *AtomicBlockSyncSummary, got %T", syncSummary) @@ -44,7 +44,7 @@ func (a *AtomicSyncExtender) Sync(ctx context.Context, client syncclient.Client, a.stateSyncRequestSize, ) if err != nil { - return err + return fmt.Errorf("failed to create atomic syncer: %w", err) } if err := atomicSyncer.Start(ctx); err != nil { return fmt.Errorf("failed to start atomic syncer: %w", err) diff --git a/plugin/evm/atomic/sync/atomic_sync_provider.go b/plugin/evm/atomic/sync/atomic_sync_provider.go index f11c4ebaad..7c5883d999 100644 --- a/plugin/evm/atomic/sync/atomic_sync_provider.go +++ b/plugin/evm/atomic/sync/atomic_sync_provider.go @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/plugin/evm/atomic/state" + "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/sync" "github.com/ethereum/go-ethereum/common" ) @@ -17,10 +17,10 @@ var _ sync.SummaryProvider = &AtomicSyncProvider{} type AtomicSyncProvider struct { chain *core.BlockChain - atomicTrie state.AtomicTrie + atomicTrie interfaces.AtomicTrie } -func NewAtomicProvider(chain *core.BlockChain, atomicTrie state.AtomicTrie) *AtomicSyncProvider { +func NewAtomicProvider(chain *core.BlockChain, atomicTrie interfaces.AtomicTrie) *AtomicSyncProvider { return &AtomicSyncProvider{chain: chain, atomicTrie: atomicTrie} } diff --git a/plugin/evm/atomic/sync/atomic_syncer.go 
b/plugin/evm/atomic/sync/atomic_syncer.go index 0580c5399f..d557232665 100644 --- a/plugin/evm/atomic/sync/atomic_syncer.go +++ b/plugin/evm/atomic/sync/atomic_syncer.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/coreth/plugin/evm/atomic/state" + "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/message" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/trie" @@ -39,7 +40,7 @@ type Syncer interface { // the state of progress and writing the actual atomic trie to the trieDB. type atomicSyncer struct { db *versiondb.Database - atomicTrie state.AtomicTrie + atomicTrie interfaces.AtomicTrie trie *trie.Trie // used to update the atomic trie targetRoot common.Hash targetHeight uint64 @@ -60,7 +61,7 @@ func addZeroes(height uint64) []byte { return packer.Bytes } -func NewAtomicSyncer(client syncclient.LeafClient, vdb *versiondb.Database, atomicTrie state.AtomicTrie, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { +func NewAtomicSyncer(client syncclient.LeafClient, vdb *versiondb.Database, atomicTrie interfaces.AtomicTrie, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { lastCommittedRoot, lastCommit := atomicTrie.LastCommitted() trie, err := atomicTrie.OpenTrie(lastCommittedRoot) if err != nil { diff --git a/plugin/evm/sync/syncervm_client.go b/plugin/evm/sync/syncervm_client.go index 61a6187b0b..f2e8dac7d9 100644 --- a/plugin/evm/sync/syncervm_client.go +++ b/plugin/evm/sync/syncervm_client.go @@ -27,9 +27,9 @@ import ( "github.com/ethereum/go-ethereum/log" ) -// StateSyncParentsToFetch is the number of the block parents the state syncs to. +// ParentsToFetch is the number of the block parents the state syncs to. // The last 256 block hashes are necessary to support the BLOCKHASH opcode. 
-const StateSyncParentsToFetch = 256 +const ParentsToFetch = 256 var stateSyncSummaryKey = []byte("stateSyncSummary") @@ -42,13 +42,13 @@ type EthBlockWrapper interface { } type Extender interface { - Sync(ctx context.Context, client syncclient.Client, verdb *versiondb.Database, syncSummary message.Syncable) error + Sync(ctx context.Context, client syncclient.LeafClient, verdb *versiondb.Database, syncSummary message.Syncable) error OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error OnFinishAfterCommit(summaryHeight uint64) error } -// StateSyncClientConfig defines the options and dependencies needed to construct a StateSyncerClient -type StateSyncClientConfig struct { +// ClientConfig defines the options and dependencies needed to construct a Client +type ClientConfig struct { Enabled bool SkipResume bool // Specifies the number of blocks behind the latest state summary that the chain must be @@ -76,7 +76,7 @@ type StateSyncClientConfig struct { } type stateSyncerClient struct { - *StateSyncClientConfig + *ClientConfig resumableSummary message.Syncable @@ -88,13 +88,13 @@ type stateSyncerClient struct { stateSyncErr error } -func NewStateSyncClient(config *StateSyncClientConfig) StateSyncClient { +func NewClient(config *ClientConfig) Client { return &stateSyncerClient{ - StateSyncClientConfig: config, + ClientConfig: config, } } -type StateSyncClient interface { +type Client interface { // methods that implement the client side of [block.StateSyncableVM] StateSyncEnabled(context.Context) (bool, error) GetOngoingSyncStateSummary(context.Context) (block.StateSummary, error) @@ -161,7 +161,7 @@ func (client *stateSyncerClient) ParseStateSummary(_ context.Context, summaryByt // stateSync blockingly performs the state sync for the EVM state and the atomic state // to [client.syncSummary]. returns an error if one occurred. 
func (client *stateSyncerClient) stateSync(ctx context.Context) error { - if err := client.syncBlocks(ctx, client.syncSummary.GetBlockHash(), client.syncSummary.GetBlockNumber(), StateSyncParentsToFetch); err != nil { + if err := client.syncBlocks(ctx, client.syncSummary.GetBlockHash(), client.syncSummary.GetBlockNumber(), ParentsToFetch); err != nil { return err } @@ -171,7 +171,7 @@ func (client *stateSyncerClient) stateSync(ctx context.Context) error { return err } - return client.StateSyncClientConfig.ExtraSyncer.Sync(ctx, client.Client, client.VerDB, client.syncSummary) + return client.ClientConfig.ExtraSyncer.Sync(ctx, client.Client, client.VerDB, client.syncSummary) } // acceptSyncSummary returns true if sync will be performed and launches the state sync process @@ -365,7 +365,11 @@ func (client *stateSyncerClient) finishSync() error { return err } - if err := client.updateVMMarkers(); err != nil { + if err := client.ExtraSyncer.OnFinishBeforeCommit(client.LastAcceptedHeight, client.syncSummary); err != nil { + return err + } + + if err := client.commitVMMarkers(); err != nil { return fmt.Errorf("error updating vm markers, height=%d, hash=%s, err=%w", block.NumberU64(), block.Hash(), err) } @@ -376,19 +380,16 @@ func (client *stateSyncerClient) finishSync() error { return client.ExtraSyncer.OnFinishAfterCommit(block.NumberU64()) } -// updateVMMarkers updates the following markers in the VM's database +// commitVMMarkers updates the following markers in the VM's database // and commits them atomically: // - updates atomic trie so it will have necessary metadata for the last committed root // - updates atomic trie so it will resume applying operations to shared memory on initialize // - updates lastAcceptedKey // - removes state sync progress markers -func (client *stateSyncerClient) updateVMMarkers() error { +func (client *stateSyncerClient) commitVMMarkers() error { // Mark the previously last accepted block for the shared memory cursor, so that we will 
execute shared // memory operations from the previously last accepted block to [vm.syncSummary] when ApplyToSharedMemory // is called. - if err := client.ExtraSyncer.OnFinishBeforeCommit(client.LastAcceptedHeight, client.syncSummary); err != nil { - return err - } id, err := ids.ToID(client.syncSummary.GetBlockHash().Bytes()) if err != nil { return err diff --git a/plugin/evm/sync/syncervm_server.go b/plugin/evm/sync/syncervm_server.go index 249719adc0..215e3a19b9 100644 --- a/plugin/evm/sync/syncervm_server.go +++ b/plugin/evm/sync/syncervm_server.go @@ -23,12 +23,12 @@ type stateSyncServer struct { syncableInterval uint64 } -type StateSyncServer interface { +type Server interface { GetLastStateSummary(context.Context) (block.StateSummary, error) GetStateSummary(context.Context, uint64) (block.StateSummary, error) } -func NewStateSyncServer(chain *core.BlockChain, provider SummaryProvider, syncableInterval uint64) StateSyncServer { +func SyncServer(chain *core.BlockChain, provider SummaryProvider, syncableInterval uint64) Server { return &stateSyncServer{ chain: chain, provider: provider, diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index c13f204543..64565b6c65 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -57,7 +57,7 @@ func TestSkipStateSync(t *testing.T) { stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync syncMode: block.StateSyncSkipped, } - vmSetup := createSyncServerAndClientVMs(t, test, vmsync.StateSyncParentsToFetch) + vmSetup := createSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch) testSyncerVM(t, vmSetup, test) } @@ -69,14 +69,14 @@ func TestStateSyncFromScratch(t *testing.T) { stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync syncMode: block.StateSyncStatic, } - vmSetup := createSyncServerAndClientVMs(t, test, vmsync.StateSyncParentsToFetch) + vmSetup := createSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch) testSyncerVM(t, 
vmSetup, test) } func TestStateSyncFromScratchExceedParent(t *testing.T) { rand.Seed(1) - numToGen := vmsync.StateSyncParentsToFetch + uint64(32) + numToGen := vmsync.ParentsToFetch + uint64(32) test := syncTest{ syncableInterval: numToGen, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync @@ -111,7 +111,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout); err != nil { panic(err) } - if err := syncerVM.StateSyncClient.Shutdown(); err != nil { + if err := syncerVM.Client.Shutdown(); err != nil { panic(err) } } else { @@ -120,7 +120,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup := createSyncServerAndClientVMs(t, test, vmsync.StateSyncParentsToFetch) + vmSetup := createSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch) // Perform sync resulting in early termination. testSyncerVM(t, vmSetup, test) @@ -271,7 +271,7 @@ func TestVMShutdownWhileSyncing(t *testing.T) { }, expectedErr: context.Canceled, } - vmSetup = createSyncServerAndClientVMs(t, test, vmsync.StateSyncParentsToFetch) + vmSetup = createSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch) // Perform sync resulting in early termination. testSyncerVM(t, vmSetup, test) } @@ -332,7 +332,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s } }, nil) - serverAtomicTrie := serverVM.atomicTrie + serverAtomicTrie := serverVM.atomicBackend.AtomicTrie() require.True(serverAtomicTrie.AcceptTrie(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) require.NoError(serverVM.versiondb.Commit()) @@ -486,7 +486,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { require.Equal(commonEng.StateSyncDone, msg) // If the test is expected to error, assert the correct error is returned and finish the test. 
- err = syncerVM.StateSyncClient.Error() + err = syncerVM.Client.Error() if test.expectedErr != nil { require.ErrorIs(err, test.expectedErr) // Note we re-open the database here to avoid a closed error when the test is for a shutdown VM. diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index b2f73f4b57..16ecb8faef 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -42,6 +42,7 @@ import ( "github.com/ava-labs/coreth/peer" "github.com/ava-labs/coreth/plugin/evm/atomic" atomicstate "github.com/ava-labs/coreth/plugin/evm/atomic/state" + atomicstateinterfaces "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" atomicsync "github.com/ava-labs/coreth/plugin/evm/atomic/sync" atomictxpool "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" @@ -250,11 +251,9 @@ type VM struct { // [atomicTxRepository] maintains two indexes on accepted atomic txs. // - txID to accepted atomic tx // - block height to list of atomic txs accepted on block at that height - atomicTxRepository atomicstate.AtomicTxRepository - // [atomicTrie] maintains a merkle forest of [height]=>[atomic txs]. 
- atomicTrie atomicstate.AtomicTrie + atomicTxRepository atomicstateinterfaces.AtomicTxRepository // [atomicBackend] abstracts verification and processing of atomic transactions - atomicBackend atomicstate.AtomicBackend + atomicBackend atomicstateinterfaces.AtomicBackend builder *blockBuilder @@ -285,8 +284,8 @@ type VM struct { logger CorethLogger // State sync server and client - vmsync.StateSyncServer - vmsync.StateSyncClient + vmsync.Server + vmsync.Client // Avalanche Warp Messaging backend // Used to serve BLS signatures of warp messages over RPC @@ -601,7 +600,6 @@ func (vm *VM) Initialize( if err != nil { return fmt.Errorf("failed to create atomic backend: %w", err) } - vm.atomicTrie = vm.atomicBackend.AtomicTrie() go vm.ctx.Log.RecoverAndPanic(vm.startContinuousProfiler) @@ -620,8 +618,8 @@ func (vm *VM) Initialize( vm.setAppRequestHandlers() - atomicProvider := atomicsync.NewAtomicProvider(vm.blockChain, vm.atomicTrie) - vm.StateSyncServer = vmsync.NewStateSyncServer(vm.blockChain, atomicProvider, vm.config.StateSyncCommitInterval) + atomicProvider := atomicsync.NewAtomicProvider(vm.blockChain, vm.atomicBackend.AtomicTrie()) + vm.Server = vmsync.SyncServer(vm.blockChain, atomicProvider, vm.config.StateSyncCommitInterval) return vm.initializeStateSyncClient(lastAcceptedHeight) } @@ -697,7 +695,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } - vm.StateSyncClient = vmsync.NewStateSyncClient(&vmsync.StateSyncClientConfig{ + vm.Client = vmsync.NewClient(&vmsync.ClientConfig{ Chain: vm.eth, State: vm.State, ExtraSyncer: atomicsync.NewAtomicSyncExtender(vm.atomicBackend, vm.config.StateSyncRequestSize), @@ -726,7 +724,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { // If StateSync is disabled, clear any ongoing summary so that we will not attempt to resume // sync using a snapshot that has been modified by the node running normal operations. 
if !stateSyncEnabled { - return vm.StateSyncClient.ClearOngoingSummary() + return vm.Client.ClearOngoingSummary() } return nil @@ -1008,11 +1006,11 @@ func (vm *VM) SetState(_ context.Context, state snow.State) error { // onBootstrapStarted marks this VM as bootstrapping func (vm *VM) onBootstrapStarted() error { vm.bootstrapped.Set(false) - if err := vm.StateSyncClient.Error(); err != nil { + if err := vm.Client.Error(); err != nil { return err } // After starting bootstrapping, do not attempt to resume a previous state sync. - if err := vm.StateSyncClient.ClearOngoingSummary(); err != nil { + if err := vm.Client.ClearOngoingSummary(); err != nil { return err } // Ensure snapshots are initialized before bootstrapping (i.e., if state sync is skipped). @@ -1225,7 +1223,7 @@ func (vm *VM) setAppRequestHandlers() { vm.blockChain, vm.chaindb, evmTrieDB, - vm.atomicTrie.TrieDB(), + vm.atomicBackend.AtomicTrie().TrieDB(), vm.warpBackend, vm.networkCodec, ) @@ -1241,7 +1239,7 @@ func (vm *VM) Shutdown(context.Context) error { vm.cancel() } vm.Network.Shutdown() - if err := vm.StateSyncClient.Shutdown(); err != nil { + if err := vm.Client.Shutdown(); err != nil { log.Error("error stopping state syncer", "err", err) } close(vm.shutdownChan) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index fa34e01444..750ec8b0c8 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -25,7 +25,7 @@ import ( "github.com/ava-labs/coreth/eth/filters" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/plugin/evm/atomic/state" + stateinterfaces "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/trie" @@ -1470,7 +1470,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { } type wrappedBackend struct { - state.AtomicBackend + stateinterfaces.AtomicBackend 
registeredBonusBlocks map[uint64]common.Hash } From f56376add7724f5fc9c68f5cb8d3561b6fb743a4 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 16 Jan 2025 20:09:51 +0300 Subject: [PATCH 44/91] move node types and handlings to separate packages (#729) --- peer/network.go | 1 + plugin/evm/atomic/sync/atomic_syncer.go | 7 +- plugin/evm/atomic/sync/atomic_syncer_test.go | 2 +- plugin/evm/message/handler.go | 10 +- plugin/evm/message/leafs_request.go | 32 +----- plugin/evm/message/leafs_request_test.go | 115 +------------------ plugin/evm/network_handler.go | 54 ++++++--- plugin/evm/vm.go | 42 ++++++- sync/README.md | 4 +- sync/client/client_test.go | 6 +- sync/client/stats/stats.go | 34 ++---- sync/handlers/leafs_request.go | 34 ++---- sync/handlers/leafs_request_test.go | 2 +- sync/statesync/sync_test.go | 2 +- 14 files changed, 116 insertions(+), 229 deletions(-) diff --git a/peer/network.go b/peer/network.go index a4dfd015f6..6631c0a90a 100644 --- a/peer/network.go +++ b/peer/network.go @@ -228,6 +228,7 @@ func (n *network) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID u log.Debug("received AppRequest from node", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request)) + // TODO: investigate if we can move all these network logic to new SDK handlers var req message.Request if _, err := n.codec.Unmarshal(request, &req); err != nil { log.Debug("forwarding AppRequest to SDK network", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request), "err", err) diff --git a/plugin/evm/atomic/sync/atomic_syncer.go b/plugin/evm/atomic/sync/atomic_syncer.go index d557232665..d0f05bddae 100644 --- a/plugin/evm/atomic/sync/atomic_syncer.go +++ b/plugin/evm/atomic/sync/atomic_syncer.go @@ -21,6 +21,11 @@ import ( "github.com/ava-labs/coreth/trie" ) +const ( + // AtomicTrieNode represents a leaf node that belongs to the atomic trie. 
+ AtomicTrieNode message.NodeType = 2 +) + var ( _ Syncer = &atomicSyncer{} _ syncclient.LeafSyncTask = &atomicSyncerLeafTask{} @@ -177,7 +182,7 @@ type atomicSyncerLeafTask struct { func (a *atomicSyncerLeafTask) Start() []byte { return addZeroes(a.atomicSyncer.lastHeight + 1) } func (a *atomicSyncerLeafTask) End() []byte { return nil } -func (a *atomicSyncerLeafTask) NodeType() message.NodeType { return message.AtomicTrieNode } +func (a *atomicSyncerLeafTask) NodeType() message.NodeType { return AtomicTrieNode } func (a *atomicSyncerLeafTask) OnFinish(context.Context) error { return a.atomicSyncer.onFinish() } func (a *atomicSyncerLeafTask) OnStart() (bool, error) { return false, nil } func (a *atomicSyncerLeafTask) Root() common.Hash { return a.atomicSyncer.targetRoot } diff --git a/plugin/evm/atomic/sync/atomic_syncer_test.go b/plugin/evm/atomic/sync/atomic_syncer_test.go index 533e2daaa2..fa47e52279 100644 --- a/plugin/evm/atomic/sync/atomic_syncer_test.go +++ b/plugin/evm/atomic/sync/atomic_syncer_test.go @@ -48,7 +48,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight numLeaves := 0 mockClient := syncclient.NewMockClient( message.Codec, - handlers.NewLeafsRequestHandler(serverTrieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()), + handlers.NewLeafsRequestHandler(serverTrieDB, state.AtomicTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()), nil, nil, ) diff --git a/plugin/evm/message/handler.go b/plugin/evm/message/handler.go index 1b910e3826..d95d373fbd 100644 --- a/plugin/evm/message/handler.go +++ b/plugin/evm/message/handler.go @@ -15,10 +15,8 @@ var _ RequestHandler = NoopRequestHandler{} // Must have methods in format of handleType(context.Context, ids.NodeID, uint32, request Type) error // so that the Request object of relevant Type can invoke its respective handle method // on this struct. -// Also see GossipHandler for implementation style. 
type RequestHandler interface { - HandleStateTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) - HandleAtomicTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) + HandleLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) HandleBlockRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, request BlockRequest) ([]byte, error) HandleCodeRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, codeRequest CodeRequest) ([]byte, error) HandleMessageSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest MessageSignatureRequest) ([]byte, error) @@ -36,11 +34,7 @@ type ResponseHandler interface { type NoopRequestHandler struct{} -func (NoopRequestHandler) HandleStateTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) { - return nil, nil -} - -func (NoopRequestHandler) HandleAtomicTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) { +func (NoopRequestHandler) HandleLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest LeafsRequest) ([]byte, error) { return nil, nil } diff --git a/plugin/evm/message/leafs_request.go b/plugin/evm/message/leafs_request.go index 22629e62ef..26f9590975 100644 --- a/plugin/evm/message/leafs_request.go +++ b/plugin/evm/message/leafs_request.go @@ -9,7 +9,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" ) const MaxCodeHashesPerRequest = 5 @@ -18,27 +17,14 @@ var _ Request = LeafsRequest{} // NodeType outlines the trie that a leaf node belongs to // handlers.LeafsRequestHandler uses this information to determine -// which of the two tries (state/atomic) to 
fetch the information from +// which trie type to fetch the information from type NodeType uint8 const ( - // StateTrieNode represents a leaf node that belongs to the coreth State trie - StateTrieNode NodeType = iota + 1 - // AtomicTrieNode represents a leaf node that belongs to the coreth evm.AtomicTrie - AtomicTrieNode + StateTrieNode = NodeType(1) + StateTrieKeyLength = common.HashLength ) -func (nt NodeType) String() string { - switch nt { - case StateTrieNode: - return "StateTrie" - case AtomicTrieNode: - return "AtomicTrie" - default: - return "Unknown" - } -} - // LeafsRequest is a request to receive trie leaves at specified Root within Start and End byte range // Limit outlines maximum number of leaves to returns starting at Start // NodeType outlines which trie to read from state/atomic. @@ -53,21 +39,13 @@ type LeafsRequest struct { func (l LeafsRequest) String() string { return fmt.Sprintf( - "LeafsRequest(Root=%s, Account=%s, Start=%s, End=%s, Limit=%d, NodeType=%s)", + "LeafsRequest(Root=%s, Account=%s, Start=%s, End=%s, Limit=%d, NodeType=%d)", l.Root, l.Account, common.Bytes2Hex(l.Start), common.Bytes2Hex(l.End), l.Limit, l.NodeType, ) } func (l LeafsRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, handler RequestHandler) ([]byte, error) { - switch l.NodeType { - case StateTrieNode: - return handler.HandleStateTrieLeafsRequest(ctx, nodeID, requestID, l) - case AtomicTrieNode: - return handler.HandleAtomicTrieLeafsRequest(ctx, nodeID, requestID, l) - } - - log.Debug("node type is not recognised, dropping request", "nodeID", nodeID, "requestID", requestID, "nodeType", l.NodeType) - return nil, nil + return handler.HandleLeafsRequest(ctx, nodeID, requestID, l) } // LeafsResponse is a response to a LeafsRequest diff --git a/plugin/evm/message/leafs_request_test.go b/plugin/evm/message/leafs_request_test.go index ab6cab5124..f70aad7bba 100644 --- a/plugin/evm/message/leafs_request_test.go +++ 
b/plugin/evm/message/leafs_request_test.go @@ -4,14 +4,10 @@ package message import ( - "bytes" - "context" "encoding/base64" "math/rand" "testing" - "github.com/ava-labs/avalanchego/ids" - "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" ) @@ -37,7 +33,7 @@ func TestMarshalLeafsRequest(t *testing.T) { Start: startBytes, End: endBytes, Limit: 1024, - NodeType: StateTrieNode, + NodeType: NodeType(1), } base64LeafsRequest := "AAAAAAAAAAAAAAAAAAAAAABpbSBST09UaW5nIGZvciB5YQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIFL9/AchgmVPFj9fD5piHXKVZsdNEAN8TXu7BAfR4sZJAAAAIIGFWthoHQ2G0ekeABZ5OctmlNLEIqzSCKAHKTlIf2mZBAAB" @@ -108,112 +104,3 @@ func TestMarshalLeafsResponse(t *testing.T) { assert.False(t, l.More) // make sure it is not serialized assert.Equal(t, leafsResponse.ProofVals, l.ProofVals) } - -func TestLeafsRequestValidation(t *testing.T) { - mockRequestHandler := &mockHandler{} - - tests := map[string]struct { - request LeafsRequest - assertResponse func(t *testing.T) - }{ - "node type StateTrieNode": { - request: LeafsRequest{ - Root: common.BytesToHash([]byte("some hash goes here")), - Start: bytes.Repeat([]byte{0x00}, common.HashLength), - End: bytes.Repeat([]byte{0xff}, common.HashLength), - Limit: 10, - NodeType: StateTrieNode, - }, - assertResponse: func(t *testing.T) { - assert.True(t, mockRequestHandler.handleStateTrieCalled) - assert.False(t, mockRequestHandler.handleAtomicTrieCalled) - assert.False(t, mockRequestHandler.handleBlockRequestCalled) - assert.False(t, mockRequestHandler.handleCodeRequestCalled) - }, - }, - "node type AtomicTrieNode": { - request: LeafsRequest{ - Root: common.BytesToHash([]byte("some hash goes here")), - Start: bytes.Repeat([]byte{0x00}, common.HashLength), - End: bytes.Repeat([]byte{0xff}, common.HashLength), - Limit: 10, - NodeType: AtomicTrieNode, - }, - assertResponse: func(t *testing.T) { - assert.False(t, mockRequestHandler.handleStateTrieCalled) - assert.True(t, 
mockRequestHandler.handleAtomicTrieCalled) - assert.False(t, mockRequestHandler.handleBlockRequestCalled) - assert.False(t, mockRequestHandler.handleCodeRequestCalled) - }, - }, - "unknown node type": { - request: LeafsRequest{ - Root: common.BytesToHash([]byte("some hash goes here")), - Start: bytes.Repeat([]byte{0x00}, common.HashLength), - End: bytes.Repeat([]byte{0xff}, common.HashLength), - Limit: 10, - NodeType: NodeType(11), - }, - assertResponse: func(t *testing.T) { - assert.False(t, mockRequestHandler.handleStateTrieCalled) - assert.False(t, mockRequestHandler.handleAtomicTrieCalled) - assert.False(t, mockRequestHandler.handleBlockRequestCalled) - assert.False(t, mockRequestHandler.handleCodeRequestCalled) - }, - }, - } - for name, test := range tests { - t.Run(name, func(t *testing.T) { - _, _ = test.request.Handle(context.Background(), ids.GenerateTestNodeID(), 1, mockRequestHandler) - test.assertResponse(t) - mockRequestHandler.reset() - }) - } -} - -var _ RequestHandler = &mockHandler{} - -type mockHandler struct { - handleStateTrieCalled, - handleAtomicTrieCalled, - handleBlockRequestCalled, - handleCodeRequestCalled, - handleMessageSignatureCalled, - handleBlockSignatureCalled bool -} - -func (m *mockHandler) HandleStateTrieLeafsRequest(context.Context, ids.NodeID, uint32, LeafsRequest) ([]byte, error) { - m.handleStateTrieCalled = true - return nil, nil -} - -func (m *mockHandler) HandleAtomicTrieLeafsRequest(context.Context, ids.NodeID, uint32, LeafsRequest) ([]byte, error) { - m.handleAtomicTrieCalled = true - return nil, nil -} - -func (m *mockHandler) HandleBlockRequest(context.Context, ids.NodeID, uint32, BlockRequest) ([]byte, error) { - m.handleBlockRequestCalled = true - return nil, nil -} - -func (m *mockHandler) HandleCodeRequest(context.Context, ids.NodeID, uint32, CodeRequest) ([]byte, error) { - m.handleCodeRequestCalled = true - return nil, nil -} - -func (m *mockHandler) HandleMessageSignatureRequest(ctx context.Context, nodeID 
ids.NodeID, requestID uint32, signatureRequest MessageSignatureRequest) ([]byte, error) { - m.handleMessageSignatureCalled = true - return nil, nil -} -func (m *mockHandler) HandleBlockSignatureRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, signatureRequest BlockSignatureRequest) ([]byte, error) { - m.handleBlockSignatureCalled = true - return nil, nil -} - -func (m *mockHandler) reset() { - m.handleStateTrieCalled = false - m.handleAtomicTrieCalled = false - m.handleBlockRequestCalled = false - m.handleCodeRequestCalled = false -} diff --git a/plugin/evm/network_handler.go b/plugin/evm/network_handler.go index aa4b728ed5..048f288797 100644 --- a/plugin/evm/network_handler.go +++ b/plugin/evm/network_handler.go @@ -16,43 +16,61 @@ import ( "github.com/ava-labs/coreth/warp" warpHandlers "github.com/ava-labs/coreth/warp/handlers" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" ) var _ message.RequestHandler = &networkHandler{} +type LeafHandlers map[message.NodeType]*syncHandlers.LeafsRequestHandler + type networkHandler struct { - stateTrieLeafsRequestHandler *syncHandlers.LeafsRequestHandler - atomicTrieLeafsRequestHandler *syncHandlers.LeafsRequestHandler - blockRequestHandler *syncHandlers.BlockRequestHandler - codeRequestHandler *syncHandlers.CodeRequestHandler - signatureRequestHandler *warpHandlers.SignatureRequestHandler + leafRequestHandlers LeafHandlers + blockRequestHandler *syncHandlers.BlockRequestHandler + codeRequestHandler *syncHandlers.CodeRequestHandler + signatureRequestHandler *warpHandlers.SignatureRequestHandler +} + +type LeafRequestTypeConfig struct { + NodeType message.NodeType + NodeKeyLen int + TrieDB *triedb.Database + UseSnapshots bool + MetricName string } // newNetworkHandler constructs the handler for serving network requests. 
func newNetworkHandler( provider syncHandlers.SyncDataProvider, diskDB ethdb.KeyValueReader, - evmTrieDB *triedb.Database, - atomicTrieDB *triedb.Database, warpBackend warp.Backend, networkCodec codec.Manager, + leafRequesTypeConfigs map[message.NodeType]LeafRequestTypeConfig, ) message.RequestHandler { syncStats := syncStats.NewHandlerStats(metrics.Enabled) + leafRequestHandlers := make(LeafHandlers) + for _, config := range leafRequesTypeConfigs { + snapshotProvider := provider + if !config.UseSnapshots { + snapshotProvider = nil + } + leafRequestHandler := syncHandlers.NewLeafsRequestHandler(config.TrieDB, config.NodeKeyLen, snapshotProvider, networkCodec, syncStats) + leafRequestHandlers[config.NodeType] = leafRequestHandler + } return &networkHandler{ - stateTrieLeafsRequestHandler: syncHandlers.NewLeafsRequestHandler(evmTrieDB, provider, networkCodec, syncStats), - atomicTrieLeafsRequestHandler: syncHandlers.NewLeafsRequestHandler(atomicTrieDB, nil, networkCodec, syncStats), - blockRequestHandler: syncHandlers.NewBlockRequestHandler(provider, networkCodec, syncStats), - codeRequestHandler: syncHandlers.NewCodeRequestHandler(diskDB, networkCodec, syncStats), - signatureRequestHandler: warpHandlers.NewSignatureRequestHandler(warpBackend, networkCodec), + leafRequestHandlers: leafRequestHandlers, + blockRequestHandler: syncHandlers.NewBlockRequestHandler(provider, networkCodec, syncStats), + codeRequestHandler: syncHandlers.NewCodeRequestHandler(diskDB, networkCodec, syncStats), + signatureRequestHandler: warpHandlers.NewSignatureRequestHandler(warpBackend, networkCodec), } } -func (n networkHandler) HandleStateTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { - return n.stateTrieLeafsRequestHandler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest) -} - -func (n networkHandler) HandleAtomicTrieLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest 
message.LeafsRequest) ([]byte, error) { - return n.atomicTrieLeafsRequestHandler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest) +func (n networkHandler) HandleLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { + handler, ok := n.leafRequestHandlers[leafsRequest.NodeType] + if !ok { + log.Debug("node type is not recognised, dropping request", "nodeID", nodeID, "requestID", requestID, "nodeType", leafsRequest.NodeType) + return nil, nil + } + return handler.OnLeafsRequest(ctx, nodeID, requestID, leafsRequest) } func (n networkHandler) HandleBlockRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, blockRequest message.BlockRequest) ([]byte, error) { diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 16ecb8faef..85c1530d00 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -287,6 +287,8 @@ type VM struct { vmsync.Server vmsync.Client + leafRequestTypeConfigs map[message.NodeType]LeafRequestTypeConfig + // Avalanche Warp Messaging backend // Used to serve BLS signatures of warp messages over RPC warpBackend warp.Backend @@ -695,6 +697,12 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } + // Get leaf metrics from config + leafMetricsNames := make(map[message.NodeType]string, len(vm.leafRequestTypeConfigs)) + for _, nodeType := range vm.leafRequestTypeConfigs { + leafMetricsNames[nodeType.NodeType] = nodeType.MetricName + } + vm.Client = vmsync.NewClient(&vmsync.ClientConfig{ Chain: vm.eth, State: vm.State, @@ -703,7 +711,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { &statesyncclient.ClientConfig{ NetworkClient: vm.client, Codec: vm.networkCodec, - Stats: stats.NewClientSyncerStats(), + Stats: stats.NewClientSyncerStats(leafMetricsNames), StateSyncNodeIDs: stateSyncIDs, BlockParser: vm, }, @@ -1207,7 +1215,7 @@ func (vm *VM) initBlockBuilding() error { // setAppRequestHandlers sets the request handlers for 
the VM to serve state sync // requests. -func (vm *VM) setAppRequestHandlers() { +func (vm *VM) setAppRequestHandlers() error { // Create standalone EVM TrieDB (read only) for serving leafs requests. // We create a standalone TrieDB here, so that it has a standalone cache from the one // used by the node when processing blocks. @@ -1219,15 +1227,41 @@ func (vm *VM) setAppRequestHandlers() { }, }, ) + if err := vm.RegisterLeafRequestHandler(message.StateTrieNode, "sync_state_trie_leaves", evmTrieDB, message.StateTrieKeyLength, true); err != nil { + return fmt.Errorf("failed to register leaf request handler for state trie: %w", err) + } + // Register atomic trieDB for serving atomic leafs requests. + if err := vm.RegisterLeafRequestHandler(atomicsync.AtomicTrieNode, "sync_atomic_trie_leaves", vm.atomicBackend.AtomicTrie().TrieDB(), atomicstate.AtomicTrieKeyLength, false); err != nil { + return fmt.Errorf("failed to register leaf request handler for atomic trie: %w", err) + } + networkHandler := newNetworkHandler( vm.blockChain, vm.chaindb, - evmTrieDB, - vm.atomicBackend.AtomicTrie().TrieDB(), vm.warpBackend, vm.networkCodec, + vm.leafRequestTypeConfigs, ) vm.Network.SetRequestHandler(networkHandler) + return nil +} + +func (vm *VM) RegisterLeafRequestHandler(nodeType message.NodeType, metricName string, trieDB *triedb.Database, trieKeyLen int, useSnapshot bool) error { + if vm.leafRequestTypeConfigs == nil { + vm.leafRequestTypeConfigs = make(map[message.NodeType]LeafRequestTypeConfig) + } + if _, ok := vm.leafRequestTypeConfigs[nodeType]; ok { + return fmt.Errorf("leaf request handler for node type %d already registered", nodeType) + } + handlerConfig := LeafRequestTypeConfig{ + NodeType: nodeType, + TrieDB: trieDB, + UseSnapshots: useSnapshot, + NodeKeyLen: trieKeyLen, + MetricName: metricName, + } + vm.leafRequestTypeConfigs[nodeType] = handlerConfig + return nil } // Shutdown implements the snowman.ChainVM interface diff --git a/sync/README.md b/sync/README.md 
index 2e6437a71f..5cf88bcd2e 100644 --- a/sync/README.md +++ b/sync/README.md @@ -65,8 +65,8 @@ The following steps are executed by the VM to sync its state from peers (see `st Steps 3 and 4 involve syncing tries. To sync trie data, the VM will send a series of `LeafRequests` to its peers. Each request specifies: - Type of trie (`NodeType`): - - `message.StateTrieNode` (account trie and storage tries share the same database) - - `message.AtomicTrieNode` (atomic trie has an independent database) + - `statesync.StateTrieNode` (account trie and storage tries share the same database) + - `atomic.AtomicTrieNode` (atomic trie has an independent database) - `Root` of the trie to sync, - `Start` and `End` specify a range of keys. diff --git a/sync/client/client_test.go b/sync/client/client_test.go index 9902c4e694..167b5ce120 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -139,7 +139,7 @@ func TestGetBlocks(t *testing.T) { // set random seed for deterministic tests rand.Seed(1) - var gspec = &core.Genesis{ + gspec := &core.Genesis{ Config: params.TestChainConfig, } memdb := rawdb.NewMemoryDatabase() @@ -415,7 +415,7 @@ func TestGetLeafs(t *testing.T) { largeTrieRoot, largeTrieKeys, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) smallTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, leafsLimit, common.HashLength) - handler := handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) + handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) client := NewClient(&ClientConfig{ NetworkClient: &mockNetwork{}, Codec: message.Codec, @@ -797,7 +797,7 @@ func TestGetLeafsRetries(t *testing.T) { trieDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil) root, _, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) - handler := handlers.NewLeafsRequestHandler(trieDB, nil, message.Codec, 
handlerstats.NewNoopHandlerStats()) + handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) mockNetClient := &mockNetwork{} const maxAttempts = 8 diff --git a/sync/client/stats/stats.go b/sync/client/stats/stats.go index 14af154423..f8c7d2e3e9 100644 --- a/sync/client/stats/stats.go +++ b/sync/client/stats/stats.go @@ -75,19 +75,21 @@ func (m *messageMetric) UpdateRequestLatency(duration time.Duration) { } type clientSyncerStats struct { - atomicTrieLeavesMetric, - stateTrieLeavesMetric, + leafMetrics map[message.NodeType]MessageMetric codeRequestMetric, blockRequestMetric MessageMetric } // NewClientSyncerStats returns stats for the client syncer -func NewClientSyncerStats() ClientSyncerStats { +func NewClientSyncerStats(leafMetricNames map[message.NodeType]string) ClientSyncerStats { + leafMetrics := make(map[message.NodeType]MessageMetric, len(leafMetricNames)) + for nodeType, name := range leafMetricNames { + leafMetrics[nodeType] = NewMessageMetric(name) + } return &clientSyncerStats{ - atomicTrieLeavesMetric: NewMessageMetric("sync_atomic_trie_leaves"), - stateTrieLeavesMetric: NewMessageMetric("sync_state_trie_leaves"), - codeRequestMetric: NewMessageMetric("sync_code"), - blockRequestMetric: NewMessageMetric("sync_blocks"), + leafMetrics: leafMetrics, + codeRequestMetric: NewMessageMetric("sync_code"), + blockRequestMetric: NewMessageMetric("sync_blocks"), } } @@ -99,14 +101,11 @@ func (c *clientSyncerStats) GetMetric(msgIntf message.Request) (MessageMetric, e case message.CodeRequest: return c.codeRequestMetric, nil case message.LeafsRequest: - switch msg.NodeType { - case message.StateTrieNode: - return c.stateTrieLeavesMetric, nil - case message.AtomicTrieNode: - return c.atomicTrieLeavesMetric, nil - default: + metric, ok := c.leafMetrics[msg.NodeType] + if !ok { return nil, fmt.Errorf("invalid leafs request for node type: %T", msg.NodeType) } + return metric, nil default: 
return nil, fmt.Errorf("attempted to get metric for invalid request with type %T", msg) } @@ -133,12 +132,3 @@ func NewNoOpStats() ClientSyncerStats { func (n noopStats) GetMetric(_ message.Request) (MessageMetric, error) { return n.noop, nil } - -// NewStats returns syncer stats if enabled or a no-op version if disabled. -func NewStats(enabled bool) ClientSyncerStats { - if enabled { - return NewClientSyncerStats() - } else { - return NewNoOpStats() - } -} diff --git a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go index 4bb0fcfb61..a3d6453416 100644 --- a/sync/handlers/leafs_request.go +++ b/sync/handlers/leafs_request.go @@ -6,13 +6,11 @@ package handlers import ( "bytes" "context" - "fmt" "sync" "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/message" @@ -48,14 +46,16 @@ type LeafsRequestHandler struct { codec codec.Manager stats stats.LeafsRequestHandlerStats pool sync.Pool + trieKeyLength int } -func NewLeafsRequestHandler(trieDB *triedb.Database, snapshotProvider SnapshotProvider, codec codec.Manager, syncerStats stats.LeafsRequestHandlerStats) *LeafsRequestHandler { +func NewLeafsRequestHandler(trieDB *triedb.Database, trieKeyLength int, snapshotProvider SnapshotProvider, codec codec.Manager, syncerStats stats.LeafsRequestHandlerStats) *LeafsRequestHandler { return &LeafsRequestHandler{ trieDB: trieDB, snapshotProvider: snapshotProvider, codec: codec, stats: syncerStats, + trieKeyLength: trieKeyLength, pool: sync.Pool{ New: func() interface{} { return make([][]byte, 0, maxLeavesLimit) }, }, @@ -70,7 +70,6 @@ func NewLeafsRequestHandler(trieDB *triedb.Database, snapshotProvider SnapshotPr // Specified Limit in message.LeafsRequest is overridden to maxLeavesLimit if it is greater than maxLeavesLimit // Expects returned errors to 
be treated as FATAL // Never returns errors -// Expects NodeType to be one of message.AtomicTrieNode or message.StateTrieNode // Returns nothing if NodeType is invalid or requested trie root is not found // Assumes ctx is active func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { @@ -85,16 +84,9 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N lrh.stats.IncInvalidLeafsRequest() return nil, nil } - keyLength, err := getKeyLength(leafsRequest.NodeType) - if err != nil { - // Note: LeafsRequest.Handle checks NodeType's validity so clients cannot cause the server to spam this error - log.Error("Failed to get key length for leafs request", "err", err) - lrh.stats.IncInvalidLeafsRequest() - return nil, nil - } - if len(leafsRequest.Start) != 0 && len(leafsRequest.Start) != keyLength || - len(leafsRequest.End) != 0 && len(leafsRequest.End) != keyLength { - log.Debug("invalid length for leafs request range, dropping request", "startLen", len(leafsRequest.Start), "endLen", len(leafsRequest.End), "expected", keyLength) + if (len(leafsRequest.Start) != 0 && len(leafsRequest.Start) != lrh.trieKeyLength) || + (len(leafsRequest.End) != 0 && len(leafsRequest.End) != lrh.trieKeyLength) { + log.Debug("invalid length for leafs request range, dropping request", "startLen", len(leafsRequest.Start), "endLen", len(leafsRequest.End), "expected", lrh.trieKeyLength) lrh.stats.IncInvalidLeafsRequest() return nil, nil } @@ -134,7 +126,7 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N request: &leafsRequest, response: &leafsResponse, t: t, - keyLength: keyLength, + keyLength: lrh.trieKeyLength, limit: limit, stats: lrh.stats, } @@ -455,18 +447,6 @@ func (rb *responseBuilder) fillFromTrie(ctx context.Context, end []byte) (bool, return more, it.Err } -// getKeyLength returns trie key length for given nodeType -// expects 
nodeType to be one of message.AtomicTrieNode or message.StateTrieNode -func getKeyLength(nodeType message.NodeType) (int, error) { - switch nodeType { - case message.AtomicTrieNode: - return wrappers.LongLen + common.HashLength, nil - case message.StateTrieNode: - return common.HashLength, nil - } - return 0, fmt.Errorf("cannot get key length for unknown node type: %s", nodeType) -} - // readLeafsFromSnapshot iterates the storage snapshot of the requested account // (or the main account trie if account is empty). Returns up to [rb.limit] key/value // pairs for keys that are in the request's range (inclusive). diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go index 1c8201fdad..296e87371f 100644 --- a/sync/handlers/leafs_request_test.go +++ b/sync/handlers/leafs_request_test.go @@ -74,7 +74,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { } } snapshotProvider := &TestSnapshotProvider{} - leafsHandler := NewLeafsRequestHandler(trieDB, snapshotProvider, message.Codec, mockHandlerStats) + leafsHandler := NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, snapshotProvider, message.Codec, mockHandlerStats) snapConfig := snapshot.Config{ CacheSize: 64, AsyncBuild: false, diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index 31b2518187..c64d36faa5 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -49,7 +49,7 @@ func testSync(t *testing.T, test syncTest) { ctx = test.ctx } clientDB, serverDB, serverTrieDB, root := test.prepareForTest(t) - leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) + leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats()) mockClient := 
statesyncclient.NewMockClient(message.Codec, leafsRequestHandler, codeRequestHandler, nil) // Set intercept functions for the mock client From dfd2e9be952eecd06db86e849f05580ed2b3602f Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 16 Jan 2025 22:48:23 +0300 Subject: [PATCH 45/91] create new codec with correct type --- ...ncable.go => atomic_block_sync_summary.go} | 18 ++- plugin/evm/atomic/sync/atomic_syncer_test.go | 6 +- plugin/evm/message/block_request.go | 4 +- plugin/evm/message/block_request_test.go | 8 +- plugin/evm/message/block_sync_summary.go | 123 ++++++++++++++++++ plugin/evm/message/code_request_test.go | 8 +- plugin/evm/message/codec.go | 21 ++- plugin/evm/message/leafs_request_test.go | 8 +- plugin/evm/message/signature_request_test.go | 12 +- plugin/evm/message/syncable.go | 97 -------------- plugin/evm/vm.go | 15 ++- plugin/evm/vm_warp_test.go | 8 +- sync/client/client_test.go | 61 +++++---- sync/handlers/block_request_test.go | 12 +- sync/handlers/code_request_test.go | 4 +- sync/handlers/leafs_request_test.go | 43 +++--- sync/statesync/code_syncer_test.go | 5 +- sync/statesync/sync_test.go | 17 ++- warp/aggregator/signature_getter.go | 15 ++- warp/handlers/signature_request_test.go | 19 ++- warp/service.go | 34 +++-- 21 files changed, 308 insertions(+), 230 deletions(-) rename plugin/evm/atomic/sync/{syncable.go => atomic_block_sync_summary.go} (86%) create mode 100644 plugin/evm/message/block_sync_summary.go diff --git a/plugin/evm/atomic/sync/syncable.go b/plugin/evm/atomic/sync/atomic_block_sync_summary.go similarity index 86% rename from plugin/evm/atomic/sync/syncable.go rename to plugin/evm/atomic/sync/atomic_block_sync_summary.go index 8a6e574983..7b5f12d6fe 100644 --- a/plugin/evm/atomic/sync/syncable.go +++ b/plugin/evm/atomic/sync/atomic_block_sync_summary.go @@ -7,6 +7,7 @@ import ( "context" "fmt" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/plugin/evm/atomic" 
"github.com/ava-labs/coreth/plugin/evm/message" @@ -21,6 +22,19 @@ var ( _ message.SyncableParser = (*AtomicSyncSummaryParser)(nil) ) +// CodecWithAtomicSync is the codec manager that contains the codec for AtomicBlockSyncSummary and +// other message types that are used in the network protocol. This is to ensure that the codec +// version is consistent across all message types and includes the codec for AtomicBlockSyncSummary. +var CodecWithAtomicSync codec.Manager + +func init() { + var err error + CodecWithAtomicSync, err = message.NewCodec(AtomicBlockSyncSummary{}) + if err != nil { + panic(fmt.Errorf("failed to create codec manager: %w", err)) + } +} + // AtomicBlockSyncSummary provides the information necessary to sync a node starting // at the given block. type AtomicBlockSyncSummary struct { @@ -34,10 +48,6 @@ type AtomicBlockSyncSummary struct { acceptImpl message.AcceptImplFn } -func init() { - message.SyncSummaryType = &AtomicBlockSyncSummary{} -} - type AtomicSyncSummaryParser struct{} func NewAtomicSyncSummaryParser() *AtomicSyncSummaryParser { diff --git a/plugin/evm/atomic/sync/atomic_syncer_test.go b/plugin/evm/atomic/sync/atomic_syncer_test.go index fa47e52279..e95798a630 100644 --- a/plugin/evm/atomic/sync/atomic_syncer_test.go +++ b/plugin/evm/atomic/sync/atomic_syncer_test.go @@ -47,14 +47,14 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight numLeaves := 0 mockClient := syncclient.NewMockClient( - message.Codec, - handlers.NewLeafsRequestHandler(serverTrieDB, state.AtomicTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()), + CodecWithAtomicSync, + handlers.NewLeafsRequestHandler(serverTrieDB, state.AtomicTrieKeyLength, nil, CodecWithAtomicSync, handlerstats.NewNoopHandlerStats()), nil, nil, ) clientDB := versiondb.New(memdb.New()) - repo, err := state.NewAtomicTxRepository(clientDB, message.Codec, 0) + repo, err := state.NewAtomicTxRepository(clientDB, CodecWithAtomicSync, 0) if err != nil { 
t.Fatal("could not initialize atomix tx repository", err) } diff --git a/plugin/evm/message/block_request.go b/plugin/evm/message/block_request.go index f1f353f2f7..9bb053bf07 100644 --- a/plugin/evm/message/block_request.go +++ b/plugin/evm/message/block_request.go @@ -12,9 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" ) -var ( - _ Request = BlockRequest{} -) +var _ Request = BlockRequest{} // BlockRequest is a request to retrieve Parents number of blocks starting from Hash from newest-oldest manner type BlockRequest struct { diff --git a/plugin/evm/message/block_request_test.go b/plugin/evm/message/block_request_test.go index cd9070117d..9e519676d9 100644 --- a/plugin/evm/message/block_request_test.go +++ b/plugin/evm/message/block_request_test.go @@ -23,12 +23,12 @@ func TestMarshalBlockRequest(t *testing.T) { base64BlockRequest := "AAAAAAAAAAAAAAAAAABzb21lIGhhc2ggaXMgaGVyZSB5bwAAAAAAAAU5AEA=" - blockRequestBytes, err := Codec.Marshal(Version, blockRequest) + blockRequestBytes, err := codecWithBlockSync.Marshal(Version, blockRequest) assert.NoError(t, err) assert.Equal(t, base64BlockRequest, base64.StdEncoding.EncodeToString(blockRequestBytes)) var b BlockRequest - _, err = Codec.Unmarshal(blockRequestBytes, &b) + _, err = codecWithBlockSync.Unmarshal(blockRequestBytes, &b) assert.NoError(t, err) assert.Equal(t, blockRequest.Hash, b.Hash) assert.Equal(t, blockRequest.Height, b.Height) @@ -54,12 +54,12 @@ func TestMarshalBlockResponse(t *testing.T) { base64BlockResponse := 
"AAAAAAAgAAAAIU8WP18PmmIdcpVmx00QA3xNe7sEB9HixkmBhVrYaB0NhgAAADnR6ZTSxCKs0gigByk5SH9pmeudGKRHhARdh/PGfPInRumVr1olNnlRuqL/bNRxxIPxX7kLrbN8WCEAAAA6tmgLTnyLdjobHUnUlVyEhiFjJSU/7HON16nii/khEZwWDwcCRIYVu9oIMT9qjrZo0gv1BZh1kh5migAAACtb3yx/xIRo0tbFL1BU4tCDa/hMcXTLdHY2TMPb2Wiw9xcu2FeUuzWLDDtSAAAAO12heG+f69ehnQ97usvgJVqlt9RL7ED4TIkrm//UNimwIjvupfT3Q5H0RdFa/UKUBAN09pJLmMv4cT+NAAAAMpYtJOLK/Mrjph+1hrFDI6a8j5598dkpMz/5k5M76m9bOvbeA3Q2bEcZ5DobBn2JvH8BAAAAOfHxekxyFaO1OeseWEnGB327VyL1cXoomiZvl2R5gZmOvqicC0s3OXARXoLtb0ElyPpzEeTX3vqSLQAAACc2zU8kq/ffhmuqVgODZ61hRd4e6PSosJk+vfiIOgrYvpw5eLBIg+UAAAAkahVqnexqQOmh0AfwM8KCMGG90Oqln45NpkMBBSINCyloi3NLAAAAKI6gENd8luqAp6Zl9gb2pjt/Pf0lZ8GJeeTWDyZobZvy+ybJAf81TN4AAAA8FgfuKbpk+Eq0PKDG5rkcH9O+iZBDQXnTr0SRo2kBLbktGE/DnRc0/1cWQolTu2hl/PkrDDoXyQKL6ZFOAAAAMwl50YMDVvKlTD3qsqS0R11jr76PtWmHx39YGFJvGBS+gjNQ6rE5NfMdhEhFF+kkrveK4QAAADhRwAdVkgww7CmjcDk0v1CijaECl13tp351hXnqPf5BNqv3UrO4Jx0D6USzyds2a3UEX479adIq5QAAADpBGUfLVbzqQGsy1hCL1oWE9X43yqxuM/6qMmOjmUNwJLqcmxRniidPAakQrilfbvv+X1q/RMzeJjtWAAAAKAZjPn05Bp8BojnENlhUw69/a0HWMfkrmo0S9BJXMl//My91drBiBVYAAAAqMEo+Pq6QGlJyDahcoeSzjq8/RMbG74Ni8vVPwA4J1vwlZAhUwV38rKqKAAAAOyzszlo6lLTTOKUUPmNAjYcksM8/rhej95vhBy+2PDXWBCxBYPOO6eKp8/tP+wAZtFTVIrX/oXYEGT+4AAAAMpZnz1PD9SDIibeb9QTPtXx2ASMtWJuszqnW4mPiXCd0HT9sYsu7FdmvvL9/faQasECOAAAALzk4vxd0rOdwmk8JHpqD/erg7FXrIzqbU5TLPHhWtUbTE8ijtMHA4FRH9Lo3DrNtAAAAPLz97PUi4qbx7Qr+wfjiD6q+32sWLnF9OnSKWGd6DFY0j4khomaxHQ8zTGL+UrpTrxl3nLKUi2Vw/6C3cwAAADqWPBMK15dRJSEPDvHDFAkPB8eab1ccJG8+msC3QT7xEL1YsAznO/9wb3/0tvRAkKMnEfMgjk5LictRAAAAJ2XOZAA98kaJKNWiO5ynQPgMk4LZxgNK0pYMeWUD4c4iFyX1DK8fvwAAADtcR6U9v459yvyeE4ZHpLRO1LzpZO1H90qllEaM7TI8t28NP6xHbJ+wP8kij7roj9WAZjoEVLaDEiB/CgAAADc7WExi1QJ84VpPClglDY+1Dnfyv08BUuXUlDWAf51Ll75vt3lwRmpWJv4zQIz56I4seXQIoy0pAAAAKkFrryBqmDIJgsharXA4SFnAWksTodWy9b/vWm7ZLaSCyqlWjltv6dip3QAAAC7Z6wkne1AJRMvoAKCxUn6mRymoYdL2SXoyNcN/QZJ3nsHZazscVCT84LcnsDByAAAAI+ZAq8lEj93rIZHZRcBHZ6+Eev0O212IV7eZrLGOSv+r4wN/AAAAL/7MQW5zTTc8Xr68nNzFlbzOPHvT2N+T+rfhJd3rr+ZaMb1dQeLSzpwrF4kvD+oZAAAAMTG
ikNy/poQG6HcHP/CINOGXpANKpIr6P4W4picIyuu6yIC1uJuT2lOBAWRAIQTmSLYAAAA1ImobDzE6id38RUxfj3KsibOLGfU3hMGem+rAPIdaJ9sCneN643pCMYgTSHaFkpNZyoxeuU4AAAA9FS3Br0LquOKSXG2u5N5e+fnc8I38vQK4CAk5hYWSig995QvhptwdV2joU3mI/dzlYum5SMkYu6PpM+XEAAAAAC3Nrne6HSWbGIpLIchvvCPXKLRTR+raZQryTFbQgAqGkTMgiKgFvVXERuJesHU=" - blockResponseBytes, err := Codec.Marshal(Version, blockResponse) + blockResponseBytes, err := codecWithBlockSync.Marshal(Version, blockResponse) assert.NoError(t, err) assert.Equal(t, base64BlockResponse, base64.StdEncoding.EncodeToString(blockResponseBytes)) var b BlockResponse - _, err = Codec.Unmarshal(blockResponseBytes, &b) + _, err = codecWithBlockSync.Unmarshal(blockResponseBytes, &b) assert.NoError(t, err) assert.Equal(t, blockResponse.Blocks, b.Blocks) } diff --git a/plugin/evm/message/block_sync_summary.go b/plugin/evm/message/block_sync_summary.go new file mode 100644 index 0000000000..fa404e177e --- /dev/null +++ b/plugin/evm/message/block_sync_summary.go @@ -0,0 +1,123 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +var _ Syncable = (*BlockSyncSummary)(nil) + +// codecWithBlockSync is the codec manager that contains the codec for BlockSyncSummary and +// other message types that are used in the network protocol. This is to ensure that the codec +// version is consistent across all message types and includes the codec for BlockSyncSummary. 
+var codecWithBlockSync codec.Manager + +func init() { + var err error + codecWithBlockSync, err = NewCodec(BlockSyncSummary{}) + if err != nil { + panic(fmt.Errorf("failed to create codec manager: %w", err)) + } +} + +// BlockSyncSummary provides the information necessary to sync a node starting +// at the given block. +type BlockSyncSummary struct { + BlockNumber uint64 `serialize:"true"` + BlockHash common.Hash `serialize:"true"` + BlockRoot common.Hash `serialize:"true"` + + summaryID ids.ID + bytes []byte + acceptImpl AcceptImplFn +} + +type BlockSyncSummaryParser struct{} + +func NewBlockSyncSummaryParser() *BlockSyncSummaryParser { + return &BlockSyncSummaryParser{} +} + +func (b *BlockSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) { + summary := BlockSyncSummary{} + if codecVersion, err := codecWithBlockSync.Unmarshal(summaryBytes, &summary); err != nil { + return nil, fmt.Errorf("failed to parse syncable summary: %w", err) + } else if codecVersion != Version { + return nil, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (%d != %d)", codecVersion, Version) + } + + summary.bytes = summaryBytes + summaryID, err := ids.ToID(crypto.Keccak256(summaryBytes)) + if err != nil { + return nil, fmt.Errorf("failed to compute summary ID: %w", err) + } + summary.summaryID = summaryID + summary.acceptImpl = acceptImpl + return &summary, nil +} + +func NewBlockSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash) (*BlockSyncSummary, error) { + summary := BlockSyncSummary{ + BlockNumber: blockNumber, + BlockHash: blockHash, + BlockRoot: blockRoot, + } + bytes, err := codecWithBlockSync.Marshal(Version, &summary) + if err != nil { + return nil, fmt.Errorf("failed to marshal syncable summary: %w", err) + } + + summary.bytes = bytes + summaryID, err := ids.ToID(crypto.Keccak256(bytes)) + if err != nil { + return nil, fmt.Errorf("failed to compute summary ID: %w", err) + } + 
summary.summaryID = summaryID + + return &summary, nil +} + +func (s *BlockSyncSummary) GetBlockNumber() uint64 { + return s.BlockNumber +} + +func (s *BlockSyncSummary) GetBlockHash() common.Hash { + return s.BlockHash +} + +func (s *BlockSyncSummary) GetBlockRoot() common.Hash { + return s.BlockRoot +} + +func (s *BlockSyncSummary) Bytes() []byte { + return s.bytes +} + +func (s *BlockSyncSummary) Height() uint64 { + return s.BlockNumber +} + +func (s *BlockSyncSummary) ID() ids.ID { + return s.summaryID +} + +func (s *BlockSyncSummary) String() string { + return fmt.Sprintf("BlockSyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot) +} + +func (s *BlockSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { + if s.acceptImpl == nil { + return block.StateSyncSkipped, fmt.Errorf("accept implementation not specified for summary: %s", s) + } + return s.acceptImpl(s) +} diff --git a/plugin/evm/message/code_request_test.go b/plugin/evm/message/code_request_test.go index 88cedb54d4..10321a389d 100644 --- a/plugin/evm/message/code_request_test.go +++ b/plugin/evm/message/code_request_test.go @@ -21,12 +21,12 @@ func TestMarshalCodeRequest(t *testing.T) { base64CodeRequest := "AAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAHNvbWUgY29kZSBwbHM=" - codeRequestBytes, err := Codec.Marshal(Version, codeRequest) + codeRequestBytes, err := codecWithBlockSync.Marshal(Version, codeRequest) assert.NoError(t, err) assert.Equal(t, base64CodeRequest, base64.StdEncoding.EncodeToString(codeRequestBytes)) var c CodeRequest - _, err = Codec.Unmarshal(codeRequestBytes, &c) + _, err = codecWithBlockSync.Unmarshal(codeRequestBytes, &c) assert.NoError(t, err) assert.Equal(t, codeRequest.Hashes, c.Hashes) } @@ -47,12 +47,12 @@ func TestMarshalCodeResponse(t *testing.T) { base64CodeResponse := "AAAAAAABAAAAMlL9/AchgmVPFj9fD5piHXKVZsdNEAN8TXu7BAfR4sZJgYVa2GgdDYbR6R4AFnk5y2aU" - codeResponseBytes, err := Codec.Marshal(Version, codeResponse) + 
codeResponseBytes, err := codecWithBlockSync.Marshal(Version, codeResponse) assert.NoError(t, err) assert.Equal(t, base64CodeResponse, base64.StdEncoding.EncodeToString(codeResponseBytes)) var c CodeResponse - _, err = Codec.Unmarshal(codeResponseBytes, &c) + _, err = codecWithBlockSync.Unmarshal(codeResponseBytes, &c) assert.NoError(t, err) assert.Equal(t, codeResponse.Data, c.Data) } diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go index d7de1820c6..d2599647a3 100644 --- a/plugin/evm/message/codec.go +++ b/plugin/evm/message/codec.go @@ -15,14 +15,11 @@ const ( maxMessageSize = 2*units.MiB - 64*units.KiB // Subtract 64 KiB from p2p network cap to leave room for encoding overhead from AvalancheGo ) -var ( - Codec codec.Manager - // TODO: Remove this once we have a better way to register types (i.e use a different codec version or use build flags) - SyncSummaryType interface{} = BlockSyncSummary{} -) - -func init() { - Codec = codec.NewManager(maxMessageSize) +// NewCodec returns a codec manager that can be used to marshal and unmarshal +// messages, including the provided syncSummaryType. syncSummaryType can be used +// to register a type for sync summaries. 
+func NewCodec(syncSummaryType interface{}) (codec.Manager, error) { + codec := codec.NewManager(maxMessageSize) c := linearcodec.NewDefault() errs := wrappers.Errs{} @@ -30,7 +27,7 @@ func init() { c.SkipRegistrations(2) errs.Add( // Types for state sync frontier consensus - c.RegisterType(SyncSummaryType), + c.RegisterType(syncSummaryType), // state sync types c.RegisterType(BlockRequest{}), @@ -45,10 +42,12 @@ func init() { c.RegisterType(BlockSignatureRequest{}), c.RegisterType(SignatureResponse{}), - Codec.RegisterCodec(Version, c), + codec.RegisterCodec(Version, c), ) if errs.Errored() { - panic(errs.Err) + return nil, errs.Err } + + return codec, nil } diff --git a/plugin/evm/message/leafs_request_test.go b/plugin/evm/message/leafs_request_test.go index f70aad7bba..9b98c45aba 100644 --- a/plugin/evm/message/leafs_request_test.go +++ b/plugin/evm/message/leafs_request_test.go @@ -38,12 +38,12 @@ func TestMarshalLeafsRequest(t *testing.T) { base64LeafsRequest := "AAAAAAAAAAAAAAAAAAAAAABpbSBST09UaW5nIGZvciB5YQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIFL9/AchgmVPFj9fD5piHXKVZsdNEAN8TXu7BAfR4sZJAAAAIIGFWthoHQ2G0ekeABZ5OctmlNLEIqzSCKAHKTlIf2mZBAAB" - leafsRequestBytes, err := Codec.Marshal(Version, leafsRequest) + leafsRequestBytes, err := codecWithBlockSync.Marshal(Version, leafsRequest) assert.NoError(t, err) assert.Equal(t, base64LeafsRequest, base64.StdEncoding.EncodeToString(leafsRequestBytes)) var l LeafsRequest - _, err = Codec.Unmarshal(leafsRequestBytes, &l) + _, err = codecWithBlockSync.Unmarshal(leafsRequestBytes, &l) assert.NoError(t, err) assert.Equal(t, leafsRequest.Root, l.Root) assert.Equal(t, leafsRequest.Start, l.Start) @@ -92,12 +92,12 @@ func TestMarshalLeafsResponse(t *testing.T) { base64LeafsResponse := 
"AAAAAAAQAAAAIE8WP18PmmIdcpVmx00QA3xNe7sEB9HixkmBhVrYaB0NAAAAIGagByk5SH9pmeudGKRHhARdh/PGfPInRumVr1olNnlRAAAAIK2zfFghtmgLTnyLdjobHUnUlVyEhiFjJSU/7HON16niAAAAIIYVu9oIMfUFmHWSHmaKW98sf8SERZLSVyvNBmjS1sUvAAAAIHHb2Wiw9xcu2FeUuzWLDDtSXaF4b5//CUJ52xlE69ehAAAAIPhMiSs77qX090OR9EXRWv1ClAQDdPaSS5jL+HE/jZYtAAAAIMr8yuOmvI+effHZKTM/+ZOTO+pvWzr23gN0NmxHGeQ6AAAAIBZZpE856x5YScYHfbtXIvVxeiiaJm+XZHmBmY6+qJwLAAAAIHOq53hmZ/fpNs1PJKv334ZrqlYDg2etYUXeHuj0qLCZAAAAIHiN5WOvpGfUnexqQOmh0AfwM8KCMGG90Oqln45NpkMBAAAAIKAQ13yW6oCnpmX2BvamO389/SVnwYl55NYPJmhtm/L7AAAAIAfuKbpk+Eq0PKDG5rkcH9O+iZBDQXnTr0SRo2kBLbktAAAAILsXyQKL6ZFOt2ScbJNHgAl50YMDVvKlTD3qsqS0R11jAAAAIOqxOTXzHYRIRRfpJK73iuFRwAdVklg2twdYhWUMMOwpAAAAIHnqPf5BNqv3UrO4Jx0D6USzyds2a3UEX479adIq5UEZAAAAIDLWEMqsbjP+qjJjo5lDcCS6nJsUZ4onTwGpEK4pX277AAAAEAAAAAmG0ekeABZ5OcsAAAAMuqL/bNRxxIPxX7kLAAAACov5IRGcFg8HAkQAAAAIUFTi0INr+EwAAAAOnQ97usvgJVqlt9RL7EAAAAAJfI0BkZLCQiTiAAAACxsGfYm8fwHx9XOYAAAADUs3OXARXoLtb0ElyPoAAAAKPr34iDoK2L6cOQAAAAoFIg0LKWiLc0uOAAAACCbJAf81TN4WAAAADBhPw50XNP9XFkKJUwAAAAuvvo+1aYfHf1gYUgAAAAqjcDk0v1CijaECAAAADkfLVT12lCZ670686kBrAAAADf5fWr9EzN4mO1YGYz4AAAAEAAAADlcyXwVWMEo+Pq4Uwo0MAAAADeo50qHks46vP0TGxu8AAAAOg2Ly9WQIVMFd/KyqiiwAAAAL7M5aOpS00zilFD4=" - leafsResponseBytes, err := Codec.Marshal(Version, leafsResponse) + leafsResponseBytes, err := codecWithBlockSync.Marshal(Version, leafsResponse) assert.NoError(t, err) assert.Equal(t, base64LeafsResponse, base64.StdEncoding.EncodeToString(leafsResponseBytes)) var l LeafsResponse - _, err = Codec.Unmarshal(leafsResponseBytes, &l) + _, err = codecWithBlockSync.Unmarshal(leafsResponseBytes, &l) assert.NoError(t, err) assert.Equal(t, leafsResponse.Keys, l.Keys) assert.Equal(t, leafsResponse.Vals, l.Vals) diff --git a/plugin/evm/message/signature_request_test.go b/plugin/evm/message/signature_request_test.go index 59614fbb2e..a9fb5470c0 100644 --- a/plugin/evm/message/signature_request_test.go +++ b/plugin/evm/message/signature_request_test.go @@ -21,12 +21,12 @@ func 
TestMarshalMessageSignatureRequest(t *testing.T) { } base64MessageSignatureRequest := "AABET0ZBSElAawAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" - signatureRequestBytes, err := Codec.Marshal(Version, signatureRequest) + signatureRequestBytes, err := codecWithBlockSync.Marshal(Version, signatureRequest) require.NoError(t, err) require.Equal(t, base64MessageSignatureRequest, base64.StdEncoding.EncodeToString(signatureRequestBytes)) var s MessageSignatureRequest - _, err = Codec.Unmarshal(signatureRequestBytes, &s) + _, err = codecWithBlockSync.Unmarshal(signatureRequestBytes, &s) require.NoError(t, err) require.Equal(t, signatureRequest.MessageID, s.MessageID) } @@ -39,12 +39,12 @@ func TestMarshalBlockSignatureRequest(t *testing.T) { } base64BlockSignatureRequest := "AABET0ZBSElAawAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" - signatureRequestBytes, err := Codec.Marshal(Version, signatureRequest) + signatureRequestBytes, err := codecWithBlockSync.Marshal(Version, signatureRequest) require.NoError(t, err) require.Equal(t, base64BlockSignatureRequest, base64.StdEncoding.EncodeToString(signatureRequestBytes)) var s BlockSignatureRequest - _, err = Codec.Unmarshal(signatureRequestBytes, &s) + _, err = codecWithBlockSync.Unmarshal(signatureRequestBytes, &s) require.NoError(t, err) require.Equal(t, signatureRequest.BlockID, s.BlockID) } @@ -62,12 +62,12 @@ func TestMarshalSignatureResponse(t *testing.T) { } base64SignatureResponse := "AAABI0VniavN7wEjRWeJq83vASNFZ4mrze8BI0VniavN7wEjRWeJq83vASNFZ4mrze8BI0VniavN7wEjRWeJq83vASNFZ4mrze8BI0VniavN7wEjRWeJq83vASNFZ4mrze8=" - signatureResponseBytes, err := Codec.Marshal(Version, signatureResponse) + signatureResponseBytes, err := codecWithBlockSync.Marshal(Version, signatureResponse) require.NoError(t, err) require.Equal(t, base64SignatureResponse, base64.StdEncoding.EncodeToString(signatureResponseBytes)) var s SignatureResponse - _, err = Codec.Unmarshal(signatureResponseBytes, &s) + _, err = codecWithBlockSync.Unmarshal(signatureResponseBytes, 
&s) require.NoError(t, err) require.Equal(t, signatureResponse.Signature, s.Signature) } diff --git a/plugin/evm/message/syncable.go b/plugin/evm/message/syncable.go index 8a8918b891..4ae81b823d 100644 --- a/plugin/evm/message/syncable.go +++ b/plugin/evm/message/syncable.go @@ -4,12 +4,7 @@ package message import ( - "context" - "fmt" - - "github.com/ava-labs/avalanchego/ids" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) @@ -28,95 +23,3 @@ type SyncableParser interface { } type AcceptImplFn func(Syncable) (block.StateSyncMode, error) - -// BlockSyncSummary provides the information necessary to sync a node starting -// at the given block. -type BlockSyncSummary struct { - BlockNumber uint64 `serialize:"true"` - BlockHash common.Hash `serialize:"true"` - BlockRoot common.Hash `serialize:"true"` - - summaryID ids.ID - bytes []byte - acceptImpl AcceptImplFn -} - -type BlockSyncSummaryParser struct{} - -func NewBlockSyncSummaryParser() *BlockSyncSummaryParser { - return &BlockSyncSummaryParser{} -} - -func (b *BlockSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl AcceptImplFn) (Syncable, error) { - summary := BlockSyncSummary{} - if codecVersion, err := Codec.Unmarshal(summaryBytes, &summary); err != nil { - return nil, fmt.Errorf("failed to parse syncable summary: %w", err) - } else if codecVersion != Version { - return nil, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (%d != %d)", codecVersion, Version) - } - - summary.bytes = summaryBytes - summaryID, err := ids.ToID(crypto.Keccak256(summaryBytes)) - if err != nil { - return nil, fmt.Errorf("failed to compute summary ID: %w", err) - } - summary.summaryID = summaryID - summary.acceptImpl = acceptImpl - return &summary, nil -} - -func NewBlockSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash) (*BlockSyncSummary, error) { - summary := 
BlockSyncSummary{ - BlockNumber: blockNumber, - BlockHash: blockHash, - BlockRoot: blockRoot, - } - bytes, err := Codec.Marshal(Version, &summary) - if err != nil { - return nil, fmt.Errorf("failed to marshal syncable summary: %w", err) - } - - summary.bytes = bytes - summaryID, err := ids.ToID(crypto.Keccak256(bytes)) - if err != nil { - return nil, fmt.Errorf("failed to compute summary ID: %w", err) - } - summary.summaryID = summaryID - - return &summary, nil -} - -func (s *BlockSyncSummary) GetBlockNumber() uint64 { - return s.BlockNumber -} - -func (s *BlockSyncSummary) GetBlockHash() common.Hash { - return s.BlockHash -} - -func (s *BlockSyncSummary) GetBlockRoot() common.Hash { - return s.BlockRoot -} - -func (s *BlockSyncSummary) Bytes() []byte { - return s.bytes -} - -func (s *BlockSyncSummary) Height() uint64 { - return s.BlockNumber -} - -func (s *BlockSyncSummary) ID() ids.ID { - return s.summaryID -} - -func (s *BlockSyncSummary) String() string { - return fmt.Sprintf("BlockSyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot) -} - -func (s *BlockSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { - if s.acceptImpl == nil { - return block.StateSyncSkipped, fmt.Errorf("accept implementation not specified for summary: %s", s) - } - return s.acceptImpl(s) -} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 85c1530d00..1bf36001bb 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -159,6 +159,8 @@ var ( metadataPrefix = []byte("metadata") warpPrefix = []byte("warp") ethDBPrefix = []byte("ethdb") + + networkCodec = atomicsync.CodecWithAtomicSync ) var ( @@ -271,8 +273,7 @@ type VM struct { profiler profiler.ContinuousProfiler peer.Network - client peer.NetworkClient - networkCodec codec.Manager + client peer.NetworkClient p2pValidators *p2p.Validators @@ -541,8 +542,7 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to initialize p2p network: %w", err) } vm.p2pValidators = 
p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) - vm.networkCodec = message.Codec - vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) + vm.Network = peer.NewNetwork(p2pNetwork, appSender, networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) vm.client = peer.NewNetworkClient(vm.Network) // Initialize warp backend @@ -710,7 +710,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.client, - Codec: vm.networkCodec, + Codec: networkCodec, Stats: stats.NewClientSyncerStats(leafMetricsNames), StateSyncNodeIDs: stateSyncIDs, BlockParser: vm, @@ -1239,7 +1239,7 @@ func (vm *VM) setAppRequestHandlers() error { vm.blockChain, vm.chaindb, vm.warpBackend, - vm.networkCodec, + networkCodec, vm.leafRequestTypeConfigs, ) vm.Network.SetRequestHandler(networkHandler) @@ -1493,7 +1493,8 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { } if vm.config.WarpAPIEnabled { - if err := handler.RegisterName("warp", warp.NewAPI(vm.ctx.NetworkID, vm.ctx.SubnetID, vm.ctx.ChainID, vm.ctx.ValidatorState, vm.warpBackend, vm.client, vm.requirePrimaryNetworkSigners)); err != nil { + warpAPI := warp.NewAPI(vm.ctx, networkCodec, vm.warpBackend, vm.client, vm.requirePrimaryNetworkSigners) + if err := handler.RegisterName("warp", warpAPI); err != nil { return nil, err } enabledAPIs = append(enabledAPIs, "warp") diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index e44f5ba606..b2d7dd46d8 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -753,7 +753,7 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { calledSendAppResponseFn = true var 
response message.SignatureResponse - _, err := message.Codec.Unmarshal(responseBytes, &response) + _, err := networkCodec.Unmarshal(responseBytes, &response) require.NoError(t, err) require.Equal(t, test.expectedResponse, response.Signature) @@ -764,7 +764,7 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { MessageID: test.messageID, } - requestBytes, err := message.Codec.Marshal(message.Version, &signatureRequest) + requestBytes, err := networkCodec.Marshal(message.Version, &signatureRequest) require.NoError(t, err) // Send the app request and make sure we called SendAppResponseFn @@ -811,7 +811,7 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { calledSendAppResponseFn = true var response message.SignatureResponse - _, err := message.Codec.Unmarshal(responseBytes, &response) + _, err := networkCodec.Unmarshal(responseBytes, &response) require.NoError(t, err) require.Equal(t, test.expectedResponse, response.Signature) @@ -822,7 +822,7 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { BlockID: test.blockID, } - requestBytes, err := message.Codec.Marshal(message.Version, &signatureRequest) + requestBytes, err := networkCodec.Marshal(message.Version, &signatureRequest) require.NoError(t, err) // Send the app request and make sure we called SendAppResponseFn diff --git a/sync/client/client_test.go b/sync/client/client_test.go index 167b5ce120..31658dafb4 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/assert" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/consensus/dummy" @@ -30,6 +31,16 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) +var networkCodec codec.Manager + +func init() { + var err error + networkCodec, err = message.NewCodec(message.BlockSyncSummary{}) + if err != nil { + 
panic(err) + } +} + func TestGetCode(t *testing.T) { mockNetClient := &mockNetwork{} @@ -86,7 +97,7 @@ func TestGetCode(t *testing.T) { stateSyncClient := NewClient(&ClientConfig{ NetworkClient: mockNetClient, - Codec: message.Codec, + Codec: networkCodec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: nil, BlockParser: mockBlockParser, @@ -98,7 +109,7 @@ func TestGetCode(t *testing.T) { defer cancel() codeHashes, res, expectedCode := test.setupRequest() - responseBytes, err := message.Codec.Marshal(message.Version, res) + responseBytes, err := networkCodec.Marshal(message.Version, res) if err != nil { t.Fatal(err) } @@ -157,13 +168,13 @@ func TestGetBlocks(t *testing.T) { mockNetClient := &mockNetwork{} stateSyncClient := NewClient(&ClientConfig{ NetworkClient: mockNetClient, - Codec: message.Codec, + Codec: networkCodec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: nil, BlockParser: mockBlockParser, }) - blocksRequestHandler := handlers.NewBlockRequestHandler(buildGetter(blocks), message.Codec, handlerstats.NewNoopHandlerStats()) + blocksRequestHandler := handlers.NewBlockRequestHandler(buildGetter(blocks), networkCodec, handlerstats.NewNoopHandlerStats()) // encodeBlockSlice takes a slice of blocks that are ordered in increasing height order // and returns a slice of byte slices with those blocks encoded in reverse order @@ -254,12 +265,12 @@ func TestGetBlocks(t *testing.T) { t.Fatalf("failed to get block response: %s", err) } var blockResponse message.BlockResponse - if _, err = message.Codec.Unmarshal(response, &blockResponse); err != nil { + if _, err = networkCodec.Unmarshal(response, &blockResponse); err != nil { t.Fatalf("failed to marshal block response: %s", err) } // Replace middle value with garbage data blockResponse.Blocks[10] = []byte("invalid value replacing block bytes") - responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) + responseBytes, err := networkCodec.Marshal(message.Version, blockResponse) if err != 
nil { t.Fatalf("failed to marshal block response: %s", err) } @@ -308,7 +319,7 @@ func TestGetBlocks(t *testing.T) { blockResponse := message.BlockResponse{ Blocks: blockBytes, } - responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) + responseBytes, err := networkCodec.Marshal(message.Version, blockResponse) if err != nil { t.Fatalf("failed to marshal block response: %s", err) } @@ -327,7 +338,7 @@ func TestGetBlocks(t *testing.T) { blockResponse := message.BlockResponse{ Blocks: nil, } - responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) + responseBytes, err := networkCodec.Marshal(message.Version, blockResponse) if err != nil { t.Fatalf("failed to marshal block response: %s", err) } @@ -348,7 +359,7 @@ func TestGetBlocks(t *testing.T) { blockResponse := message.BlockResponse{ Blocks: blockBytes, } - responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) + responseBytes, err := networkCodec.Marshal(message.Version, blockResponse) if err != nil { t.Fatalf("failed to marshal block response: %s", err) } @@ -415,10 +426,10 @@ func TestGetLeafs(t *testing.T) { largeTrieRoot, largeTrieKeys, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) smallTrieRoot, _, _ := syncutils.GenerateTrie(t, trieDB, leafsLimit, common.HashLength) - handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) + handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, networkCodec, handlerstats.NewNoopHandlerStats()) client := NewClient(&ClientConfig{ NetworkClient: &mockNetwork{}, - Codec: message.Codec, + Codec: networkCodec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: nil, BlockParser: mockBlockParser, @@ -594,13 +605,13 @@ func TestGetLeafs(t *testing.T) { t.Fatal("Failed to create valid response") } var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, 
&leafResponse); err != nil { + if _, err := networkCodec.Unmarshal(response, &leafResponse); err != nil { t.Fatal(err) } leafResponse.Keys = leafResponse.Keys[1:] leafResponse.Vals = leafResponse.Vals[1:] - modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + modifiedResponse, err := networkCodec.Marshal(message.Version, leafResponse) if err != nil { t.Fatal(err) } @@ -625,7 +636,7 @@ func TestGetLeafs(t *testing.T) { t.Fatal("Failed to create valid response") } var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { + if _, err := networkCodec.Unmarshal(response, &leafResponse); err != nil { t.Fatal(err) } modifiedRequest := request @@ -655,13 +666,13 @@ func TestGetLeafs(t *testing.T) { t.Fatal("Failed to create valid response") } var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { + if _, err := networkCodec.Unmarshal(response, &leafResponse); err != nil { t.Fatal(err) } leafResponse.Keys = leafResponse.Keys[:len(leafResponse.Keys)-2] leafResponse.Vals = leafResponse.Vals[:len(leafResponse.Vals)-2] - modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + modifiedResponse, err := networkCodec.Marshal(message.Version, leafResponse) if err != nil { t.Fatal(err) } @@ -686,14 +697,14 @@ func TestGetLeafs(t *testing.T) { t.Fatal("Failed to create valid response") } var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { + if _, err := networkCodec.Unmarshal(response, &leafResponse); err != nil { t.Fatal(err) } // Remove middle key-value pair response leafResponse.Keys = append(leafResponse.Keys[:100], leafResponse.Keys[101:]...) leafResponse.Vals = append(leafResponse.Vals[:100], leafResponse.Vals[101:]...) 
- modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + modifiedResponse, err := networkCodec.Marshal(message.Version, leafResponse) if err != nil { t.Fatal(err) } @@ -718,13 +729,13 @@ func TestGetLeafs(t *testing.T) { t.Fatal("Failed to create valid response") } var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { + if _, err := networkCodec.Unmarshal(response, &leafResponse); err != nil { t.Fatal(err) } // Remove middle key-value pair response leafResponse.Vals[100] = []byte("garbage value data") - modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + modifiedResponse, err := networkCodec.Marshal(message.Version, leafResponse) if err != nil { t.Fatal(err) } @@ -750,13 +761,13 @@ func TestGetLeafs(t *testing.T) { } var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { + if _, err := networkCodec.Unmarshal(response, &leafResponse); err != nil { t.Fatal(err) } // Remove the proof leafResponse.ProofVals = nil - modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + modifiedResponse, err := networkCodec.Marshal(message.Version, leafResponse) if err != nil { t.Fatal(err) } @@ -797,13 +808,13 @@ func TestGetLeafsRetries(t *testing.T) { trieDB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil) root, _, _ := syncutils.GenerateTrie(t, trieDB, 100_000, common.HashLength) - handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) + handler := handlers.NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, nil, networkCodec, handlerstats.NewNoopHandlerStats()) mockNetClient := &mockNetwork{} const maxAttempts = 8 client := NewClient(&ClientConfig{ NetworkClient: mockNetClient, - Codec: message.Codec, + Codec: networkCodec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: nil, 
BlockParser: mockBlockParser, @@ -865,7 +876,7 @@ func TestStateSyncNodes(t *testing.T) { } client := NewClient(&ClientConfig{ NetworkClient: mockNetClient, - Codec: message.Codec, + Codec: networkCodec, Stats: clientstats.NewNoOpStats(), StateSyncNodeIDs: stateSyncNodes, BlockParser: mockBlockParser, diff --git a/sync/handlers/block_request_test.go b/sync/handlers/block_request_test.go index 7b0124d8f9..aed723a4a7 100644 --- a/sync/handlers/block_request_test.go +++ b/sync/handlers/block_request_test.go @@ -55,7 +55,7 @@ func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks []*type return blk }, } - blockRequestHandler := NewBlockRequestHandler(blockProvider, message.Codec, mockHandlerStats) + blockRequestHandler := NewBlockRequestHandler(blockProvider, networkCodec, mockHandlerStats) var blockRequest message.BlockRequest if test.startBlockHash != (common.Hash{}) { @@ -84,7 +84,7 @@ func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks []*type assert.NotEmpty(t, responseBytes) var response message.BlockResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { + if _, err = networkCodec.Unmarshal(responseBytes, &response); err != nil { t.Fatal("error unmarshalling", err) } assert.Len(t, response.Blocks, test.expectedBlocks) @@ -102,7 +102,7 @@ func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks []*type } func TestBlockRequestHandler(t *testing.T) { - var gspec = &core.Genesis{ + gspec := &core.Genesis{ Config: params.TestChainConfig, } memdb := rawdb.NewMemoryDatabase() @@ -214,7 +214,7 @@ func TestBlockRequestHandlerLargeBlocks(t *testing.T) { } func TestBlockRequestHandlerCtxExpires(t *testing.T) { - var gspec = &core.Genesis{ + gspec := &core.Genesis{ Config: params.TestChainConfig, } memdb := rawdb.NewMemoryDatabase() @@ -252,7 +252,7 @@ func TestBlockRequestHandlerCtxExpires(t *testing.T) { return blk }, } - blockRequestHandler := NewBlockRequestHandler(blockProvider, 
message.Codec, stats.NewNoopHandlerStats()) + blockRequestHandler := NewBlockRequestHandler(blockProvider, networkCodec, stats.NewNoopHandlerStats()) responseBytes, err := blockRequestHandler.OnBlockRequest(ctx, ids.GenerateTestNodeID(), 1, message.BlockRequest{ Hash: blocks[10].Hash(), @@ -265,7 +265,7 @@ func TestBlockRequestHandlerCtxExpires(t *testing.T) { assert.NotEmpty(t, responseBytes) var response message.BlockResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { + if _, err = networkCodec.Unmarshal(responseBytes, &response); err != nil { t.Fatal("error unmarshalling", err) } // requested 8 blocks, received cancelAfterNumRequests because of timeout diff --git a/sync/handlers/code_request_test.go b/sync/handlers/code_request_test.go index 1bf5bd5223..797c191311 100644 --- a/sync/handlers/code_request_test.go +++ b/sync/handlers/code_request_test.go @@ -35,7 +35,7 @@ func TestCodeRequestHandler(t *testing.T) { rawdb.WriteCode(database, maxSizeCodeHash, maxSizeCodeBytes) mockHandlerStats := &stats.MockHandlerStats{} - codeRequestHandler := NewCodeRequestHandler(database, message.Codec, mockHandlerStats) + codeRequestHandler := NewCodeRequestHandler(database, networkCodec, mockHandlerStats) tests := map[string]struct { setup func() (request message.CodeRequest, expectedCodeResponse [][]byte) @@ -100,7 +100,7 @@ func TestCodeRequestHandler(t *testing.T) { return } var response message.CodeResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { + if _, err = networkCodec.Unmarshal(responseBytes, &response); err != nil { t.Fatal("error unmarshalling CodeResponse", err) } if len(expectedResponse) != len(response.Data) { diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go index 296e87371f..eb57362e01 100644 --- a/sync/handlers/leafs_request_test.go +++ b/sync/handlers/leafs_request_test.go @@ -9,6 +9,7 @@ import ( "math/rand" "testing" + 
"github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" @@ -24,6 +25,16 @@ import ( "github.com/stretchr/testify/assert" ) +var networkCodec codec.Manager + +func init() { + var err error + networkCodec, err = message.NewCodec(message.BlockSyncSummary{}) + if err != nil { + panic(err) + } +} + func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { rand.Seed(1) mockHandlerStats := &stats.MockHandlerStats{} @@ -74,7 +85,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { } } snapshotProvider := &TestSnapshotProvider{} - leafsHandler := NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, snapshotProvider, message.Codec, mockHandlerStats) + leafsHandler := NewLeafsRequestHandler(trieDB, message.StateTrieKeyLength, snapshotProvider, networkCodec, mockHandlerStats) snapConfig := snapshot.Config{ CacheSize: 64, AsyncBuild: false, @@ -228,7 +239,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.Len(t, leafsResponse.Keys, 500) assert.Len(t, leafsResponse.Vals, 500) @@ -248,7 +259,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, _ message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.Len(t, leafsResponse.Keys, 500) assert.Len(t, leafsResponse.Vals, 500) @@ -302,7 +313,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, 
_ message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, len(leafsResponse.Keys), maxLeavesLimit) assert.EqualValues(t, len(leafsResponse.Vals), maxLeavesLimit) @@ -323,7 +334,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, len(leafsResponse.Keys), maxLeavesLimit) assert.EqualValues(t, len(leafsResponse.Vals), maxLeavesLimit) @@ -345,7 +356,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, len(leafsResponse.Keys), maxLeavesLimit) assert.EqualValues(t, len(leafsResponse.Vals), maxLeavesLimit) @@ -370,7 +381,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, 40, len(leafsResponse.Keys)) assert.EqualValues(t, 40, len(leafsResponse.Vals)) @@ -392,7 +403,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t 
*testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, 600, len(leafsResponse.Keys)) assert.EqualValues(t, 600, len(leafsResponse.Vals)) @@ -414,7 +425,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, len(leafsResponse.Keys), 0) assert.EqualValues(t, len(leafsResponse.Vals), 0) @@ -437,7 +448,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assert.NotEmpty(t, response) var leafsResponse message.LeafsResponse - if _, err = message.Codec.Unmarshal(response, &leafsResponse); err != nil { + if _, err = networkCodec.Unmarshal(response, &leafsResponse); err != nil { t.Fatalf("unexpected error when unmarshalling LeafsResponse: %v", err) } @@ -465,7 +476,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, maxLeavesLimit, len(leafsResponse.Keys)) assert.EqualValues(t, maxLeavesLimit, len(leafsResponse.Vals)) @@ -513,7 +524,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, 
err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, maxLeavesLimit, len(leafsResponse.Keys)) assert.EqualValues(t, maxLeavesLimit, len(leafsResponse.Vals)) @@ -546,7 +557,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, maxLeavesLimit, len(leafsResponse.Keys)) assert.EqualValues(t, maxLeavesLimit, len(leafsResponse.Vals)) @@ -592,7 +603,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, maxLeavesLimit, len(leafsResponse.Keys)) assert.EqualValues(t, maxLeavesLimit, len(leafsResponse.Vals)) @@ -633,7 +644,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, 500, len(leafsResponse.Keys)) assert.EqualValues(t, 500, len(leafsResponse.Vals)) @@ -670,7 +681,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { assertResponseFn: func(t *testing.T, request message.LeafsRequest, response []byte, err error) { assert.NoError(t, err) var leafsResponse 
message.LeafsResponse - _, err = message.Codec.Unmarshal(response, &leafsResponse) + _, err = networkCodec.Unmarshal(response, &leafsResponse) assert.NoError(t, err) assert.EqualValues(t, 1, len(leafsResponse.Keys)) assert.EqualValues(t, 1, len(leafsResponse.Vals)) diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 574290e286..1fd20ba643 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/plugin/evm/message" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/handlers" handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" @@ -40,8 +39,8 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { } // Set up mockClient - codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats()) - mockClient := statesyncclient.NewMockClient(message.Codec, nil, codeRequestHandler, nil) + codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, networkCodec, handlerstats.NewNoopHandlerStats()) + mockClient := statesyncclient.NewMockClient(networkCodec, nil, codeRequestHandler, nil) mockClient.GetCodeIntercept = test.getCodeIntercept clientDB := rawdb.NewMemoryDatabase() diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index c64d36faa5..99c5d17b0e 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" @@ -34,6 +35,16 @@ const testSyncTimeout = 30 * time.Second var errInterrupted = errors.New("interrupted sync") +var networkCodec codec.Manager + +func init() { + var err error + networkCodec, err = 
message.NewCodec(message.BlockSyncSummary{}) + if err != nil { + panic(err) + } +} + type syncTest struct { ctx context.Context prepareForTest func(t *testing.T) (clientDB ethdb.Database, serverDB ethdb.Database, serverTrieDB *triedb.Database, syncRoot common.Hash) @@ -49,9 +60,9 @@ func testSync(t *testing.T, test syncTest) { ctx = test.ctx } clientDB, serverDB, serverTrieDB, root := test.prepareForTest(t) - leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, message.StateTrieKeyLength, nil, message.Codec, handlerstats.NewNoopHandlerStats()) - codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats()) - mockClient := statesyncclient.NewMockClient(message.Codec, leafsRequestHandler, codeRequestHandler, nil) + leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, message.StateTrieKeyLength, nil, networkCodec, handlerstats.NewNoopHandlerStats()) + codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, networkCodec, handlerstats.NewNoopHandlerStats()) + mockClient := statesyncclient.NewMockClient(networkCodec, leafsRequestHandler, codeRequestHandler, nil) // Set intercept functions for the mock client mockClient.GetLeafsIntercept = test.GetLeafsIntercept mockClient.GetCodeIntercept = test.GetCodeIntercept diff --git a/warp/aggregator/signature_getter.go b/warp/aggregator/signature_getter.go index 8bdb60fea1..d5e36ac972 100644 --- a/warp/aggregator/signature_getter.go +++ b/warp/aggregator/signature_getter.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" @@ -36,12 +37,14 @@ type NetworkClient interface { // NetworkSignatureGetter fetches warp signatures on behalf of the // aggregator using VM App-Specific Messaging type NetworkSignatureGetter struct { - Client NetworkClient + Client 
NetworkClient + networkCodec codec.Manager } -func NewSignatureGetter(client NetworkClient) *NetworkSignatureGetter { +func NewSignatureGetter(client NetworkClient, networkCodec codec.Manager) *NetworkSignatureGetter { return &NetworkSignatureGetter{ - Client: client, + Client: client, + networkCodec: networkCodec, } } @@ -60,7 +63,7 @@ func (s *NetworkSignatureGetter) GetSignature(ctx context.Context, nodeID ids.No signatureReq := message.MessageSignatureRequest{ MessageID: unsignedWarpMessage.ID(), } - signatureReqBytes, err = message.RequestToBytes(message.Codec, signatureReq) + signatureReqBytes, err = message.RequestToBytes(s.networkCodec, signatureReq) if err != nil { return nil, fmt.Errorf("failed to marshal signature request: %w", err) } @@ -68,7 +71,7 @@ func (s *NetworkSignatureGetter) GetSignature(ctx context.Context, nodeID ids.No signatureReq := message.BlockSignatureRequest{ BlockID: p.Hash, } - signatureReqBytes, err = message.RequestToBytes(message.Codec, signatureReq) + signatureReqBytes, err = message.RequestToBytes(s.networkCodec, signatureReq) if err != nil { return nil, fmt.Errorf("failed to marshal signature request: %w", err) } @@ -102,7 +105,7 @@ func (s *NetworkSignatureGetter) GetSignature(ctx context.Context, nodeID ids.No continue } var response message.SignatureResponse - if _, err := message.Codec.Unmarshal(signatureRes, &response); err != nil { + if _, err := s.networkCodec.Unmarshal(signatureRes, &response); err != nil { return nil, fmt.Errorf("failed to unmarshal signature res: %w", err) } if response.Signature == [bls.SignatureLen]byte{} { diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index 77a4af087e..528bf87bfd 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database/memdb" 
"github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -20,6 +21,16 @@ import ( "github.com/stretchr/testify/require" ) +var networkCodec codec.Manager + +func init() { + var err error + networkCodec, err = message.NewCodec(message.BlockSyncSummary{}) + if err != nil { + panic(err) + } +} + func TestMessageSignatureHandler(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() @@ -102,7 +113,7 @@ func TestMessageSignatureHandler(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - handler := NewSignatureRequestHandler(backend, message.Codec) + handler := NewSignatureRequestHandler(backend, networkCodec) request, expectedResponse := test.setup() responseBytes, err := handler.OnMessageSignatureRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) @@ -116,7 +127,7 @@ func TestMessageSignatureHandler(t *testing.T) { return } var response message.SignatureResponse - _, err = message.Codec.Unmarshal(responseBytes, &response) + _, err = networkCodec.Unmarshal(responseBytes, &response) require.NoError(t, err, "error unmarshalling SignatureResponse") require.Equal(t, expectedResponse, response.Signature[:]) @@ -189,7 +200,7 @@ func TestBlockSignatureHandler(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - handler := NewSignatureRequestHandler(backend, message.Codec) + handler := NewSignatureRequestHandler(backend, networkCodec) request, expectedResponse := test.setup() responseBytes, err := handler.OnBlockSignatureRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) @@ -203,7 +214,7 @@ func TestBlockSignatureHandler(t *testing.T) { return } var response message.SignatureResponse - _, err = message.Codec.Unmarshal(responseBytes, &response) + _, err = networkCodec.Unmarshal(responseBytes, &response) require.NoError(t, err, "error unmarshalling SignatureResponse") require.Equal(t, expectedResponse, response.Signature[:]) diff 
--git a/warp/service.go b/warp/service.go index 610fc85a91..d160fe6812 100644 --- a/warp/service.go +++ b/warp/service.go @@ -8,8 +8,9 @@ import ( "errors" "fmt" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/coreth/peer" @@ -23,22 +24,18 @@ var errNoValidators = errors.New("cannot aggregate signatures from subnet with n // API introduces snowman specific functionality to the evm type API struct { - networkID uint32 - sourceSubnetID, sourceChainID ids.ID - backend Backend - state validators.State - client peer.NetworkClient - requirePrimaryNetworkSigners func() bool + chainContext *snow.Context + backend Backend + signatureGetter aggregator.SignatureGetter + requirePrimaryNetworkSigners func() bool } -func NewAPI(networkID uint32, sourceSubnetID ids.ID, sourceChainID ids.ID, state validators.State, backend Backend, client peer.NetworkClient, requirePrimaryNetworkSigners func() bool) *API { +func NewAPI(chainCtx *snow.Context, networkCodec codec.Manager, backend Backend, client peer.NetworkClient, requirePrimaryNetworkSigners func() bool) *API { + signatureGetter := aggregator.NewSignatureGetter(client, networkCodec) return &API{ - networkID: networkID, - sourceSubnetID: sourceSubnetID, - sourceChainID: sourceChainID, backend: backend, - state: state, - client: client, + chainContext: chainCtx, + signatureGetter: signatureGetter, requirePrimaryNetworkSigners: requirePrimaryNetworkSigners, } } @@ -89,7 +86,7 @@ func (a *API) GetBlockAggregateSignature(ctx context.Context, blockID ids.ID, qu if err != nil { return nil, err } - unsignedMessage, err := warp.NewUnsignedMessage(a.networkID, a.sourceChainID, blockHashPayload.Bytes()) + unsignedMessage, err := warp.NewUnsignedMessage(a.chainContext.NetworkID, 
a.chainContext.ChainID, blockHashPayload.Bytes()) if err != nil { return nil, err } @@ -98,7 +95,7 @@ func (a *API) GetBlockAggregateSignature(ctx context.Context, blockID ids.ID, qu } func (a *API) aggregateSignatures(ctx context.Context, unsignedMessage *warp.UnsignedMessage, quorumNum uint64, subnetIDStr string) (hexutil.Bytes, error) { - subnetID := a.sourceSubnetID + subnetID := a.chainContext.SubnetID if len(subnetIDStr) > 0 { sid, err := ids.FromString(subnetIDStr) if err != nil { @@ -106,12 +103,13 @@ func (a *API) aggregateSignatures(ctx context.Context, unsignedMessage *warp.Uns } subnetID = sid } - pChainHeight, err := a.state.GetCurrentHeight(ctx) + validatorState := a.chainContext.ValidatorState + pChainHeight, err := validatorState.GetCurrentHeight(ctx) if err != nil { return nil, err } - state := warpValidators.NewState(a.state, a.sourceSubnetID, a.sourceChainID, a.requirePrimaryNetworkSigners()) + state := warpValidators.NewState(validatorState, subnetID, a.chainContext.ChainID, a.requirePrimaryNetworkSigners()) validators, totalWeight, err := warp.GetCanonicalValidatorSet(ctx, state, pChainHeight, subnetID) if err != nil { return nil, fmt.Errorf("failed to get validator set: %w", err) @@ -127,7 +125,7 @@ func (a *API) aggregateSignatures(ctx context.Context, unsignedMessage *warp.Uns "totalWeight", totalWeight, ) - agg := aggregator.New(aggregator.NewSignatureGetter(a.client), validators, totalWeight) + agg := aggregator.New(a.signatureGetter, validators, totalWeight) signatureResult, err := agg.AggregateSignatures(ctx, unsignedMessage, quorumNum) if err != nil { return nil, err From 1ab7a7afebddd7b154c6e1de5c6a2359f0fb1ddc Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 16 Jan 2025 22:50:47 +0300 Subject: [PATCH 46/91] unexport codec --- plugin/evm/atomic/sync/atomic_block_sync_summary.go | 6 +++--- plugin/evm/atomic/sync/atomic_syncer_test.go | 6 +++--- plugin/evm/vm.go | 9 ++++++++- 3 files changed, 14 insertions(+), 7 deletions(-) diff 
--git a/plugin/evm/atomic/sync/atomic_block_sync_summary.go b/plugin/evm/atomic/sync/atomic_block_sync_summary.go index 7b5f12d6fe..8c98db64c9 100644 --- a/plugin/evm/atomic/sync/atomic_block_sync_summary.go +++ b/plugin/evm/atomic/sync/atomic_block_sync_summary.go @@ -22,14 +22,14 @@ var ( _ message.SyncableParser = (*AtomicSyncSummaryParser)(nil) ) -// CodecWithAtomicSync is the codec manager that contains the codec for AtomicBlockSyncSummary and +// codecWithAtomicSync is the codec manager that contains the codec for AtomicBlockSyncSummary and // other message types that are used in the network protocol. This is to ensure that the codec // version is consistent across all message types and includes the codec for AtomicBlockSyncSummary. -var CodecWithAtomicSync codec.Manager +var codecWithAtomicSync codec.Manager func init() { var err error - CodecWithAtomicSync, err = message.NewCodec(AtomicBlockSyncSummary{}) + codecWithAtomicSync, err = message.NewCodec(AtomicBlockSyncSummary{}) if err != nil { panic(fmt.Errorf("failed to create codec manager: %w", err)) } diff --git a/plugin/evm/atomic/sync/atomic_syncer_test.go b/plugin/evm/atomic/sync/atomic_syncer_test.go index e95798a630..41984a7c2f 100644 --- a/plugin/evm/atomic/sync/atomic_syncer_test.go +++ b/plugin/evm/atomic/sync/atomic_syncer_test.go @@ -47,14 +47,14 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight numLeaves := 0 mockClient := syncclient.NewMockClient( - CodecWithAtomicSync, - handlers.NewLeafsRequestHandler(serverTrieDB, state.AtomicTrieKeyLength, nil, CodecWithAtomicSync, handlerstats.NewNoopHandlerStats()), + codecWithAtomicSync, + handlers.NewLeafsRequestHandler(serverTrieDB, state.AtomicTrieKeyLength, nil, codecWithAtomicSync, handlerstats.NewNoopHandlerStats()), nil, nil, ) clientDB := versiondb.New(memdb.New()) - repo, err := state.NewAtomicTxRepository(clientDB, CodecWithAtomicSync, 0) + repo, err := state.NewAtomicTxRepository(clientDB, codecWithAtomicSync, 
0) if err != nil { t.Fatal("could not initialize atomix tx repository", err) } diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 1bf36001bb..92c6f91b6d 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -160,7 +160,7 @@ var ( warpPrefix = []byte("warp") ethDBPrefix = []byte("ethdb") - networkCodec = atomicsync.CodecWithAtomicSync + networkCodec codec.Manager ) var ( @@ -202,6 +202,13 @@ func init() { // Preserving the log level allows us to update the root handler while writing to the original // [os.Stderr] that is being piped through to the logger via the rpcchainvm. originalStderr = os.Stderr + + // Register the codec for the atomic block sync summary + var err error + networkCodec, err = message.NewCodec(atomicsync.AtomicBlockSyncSummary{}) + if err != nil { + panic(fmt.Errorf("failed to create codec manager: %w", err)) + } } // VM implements the snowman.ChainVM interface From 152317dd4f40b18e1d643d32526669679ca6d983 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 17 Jan 2025 14:15:52 +0300 Subject: [PATCH 47/91] reviews --- .../evm/atomic/sync/atomic_sync_extender.go | 2 +- ...sync_summary.go => atomic_sync_summary.go} | 37 +++++++-------- .../atomic/sync/atomic_sync_summary_test.go | 46 +++++++++++++++++++ plugin/evm/message/block_sync_summary.go | 4 -- plugin/evm/message/block_sync_summary_test.go | 44 ++++++++++++++++++ plugin/evm/message/syncable.go | 3 -- plugin/evm/sync/syncervm_client.go | 6 +-- plugin/evm/vm.go | 2 +- 8 files changed, 111 insertions(+), 33 deletions(-) rename plugin/evm/atomic/sync/{atomic_block_sync_summary.go => atomic_sync_summary.go} (72%) create mode 100644 plugin/evm/atomic/sync/atomic_sync_summary_test.go create mode 100644 plugin/evm/message/block_sync_summary_test.go diff --git a/plugin/evm/atomic/sync/atomic_sync_extender.go b/plugin/evm/atomic/sync/atomic_sync_extender.go index 56735eb509..8174548da4 100644 --- a/plugin/evm/atomic/sync/atomic_sync_extender.go +++ 
b/plugin/evm/atomic/sync/atomic_sync_extender.go @@ -30,7 +30,7 @@ func NewAtomicSyncExtender(backend interfaces.AtomicBackend, stateSyncRequestSiz } func (a *AtomicSyncExtender) Sync(ctx context.Context, client syncclient.LeafClient, verDB *versiondb.Database, syncSummary message.Syncable) error { - atomicSyncSummary, ok := syncSummary.(*AtomicBlockSyncSummary) + atomicSyncSummary, ok := syncSummary.(*AtomicSyncSummary) if !ok { return fmt.Errorf("expected *AtomicBlockSyncSummary, got %T", syncSummary) } diff --git a/plugin/evm/atomic/sync/atomic_block_sync_summary.go b/plugin/evm/atomic/sync/atomic_sync_summary.go similarity index 72% rename from plugin/evm/atomic/sync/atomic_block_sync_summary.go rename to plugin/evm/atomic/sync/atomic_sync_summary.go index 8c98db64c9..fe5fb72454 100644 --- a/plugin/evm/atomic/sync/atomic_block_sync_summary.go +++ b/plugin/evm/atomic/sync/atomic_sync_summary.go @@ -9,7 +9,6 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -18,7 +17,7 @@ import ( ) var ( - _ message.Syncable = (*AtomicBlockSyncSummary)(nil) + _ message.Syncable = (*AtomicSyncSummary)(nil) _ message.SyncableParser = (*AtomicSyncSummaryParser)(nil) ) @@ -29,15 +28,15 @@ var codecWithAtomicSync codec.Manager func init() { var err error - codecWithAtomicSync, err = message.NewCodec(AtomicBlockSyncSummary{}) + codecWithAtomicSync, err = message.NewCodec(AtomicSyncSummary{}) if err != nil { panic(fmt.Errorf("failed to create codec manager: %w", err)) } } -// AtomicBlockSyncSummary provides the information necessary to sync a node starting +// AtomicSyncSummary provides the information necessary to sync a node starting // at the given block. 
-type AtomicBlockSyncSummary struct { +type AtomicSyncSummary struct { BlockNumber uint64 `serialize:"true"` BlockHash common.Hash `serialize:"true"` BlockRoot common.Hash `serialize:"true"` @@ -55,8 +54,8 @@ func NewAtomicSyncSummaryParser() *AtomicSyncSummaryParser { } func (a *AtomicSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl message.AcceptImplFn) (message.Syncable, error) { - summary := AtomicBlockSyncSummary{} - if codecVersion, err := atomic.Codec.Unmarshal(summaryBytes, &summary); err != nil { + summary := AtomicSyncSummary{} + if codecVersion, err := codecWithAtomicSync.Unmarshal(summaryBytes, &summary); err != nil { return nil, fmt.Errorf("failed to parse syncable summary: %w", err) } else if codecVersion != message.Version { return nil, fmt.Errorf("failed to parse syncable summary due to unexpected codec version (got %d, expected %d)", codecVersion, message.Version) @@ -72,14 +71,14 @@ func (a *AtomicSyncSummaryParser) ParseFromBytes(summaryBytes []byte, acceptImpl return &summary, nil } -func NewAtomicSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash, atomicRoot common.Hash) (*AtomicBlockSyncSummary, error) { - summary := AtomicBlockSyncSummary{ +func NewAtomicSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot common.Hash, atomicRoot common.Hash) (*AtomicSyncSummary, error) { + summary := AtomicSyncSummary{ BlockNumber: blockNumber, BlockHash: blockHash, BlockRoot: blockRoot, AtomicRoot: atomicRoot, } - bytes, err := atomic.Codec.Marshal(message.Version, &summary) + bytes, err := codecWithAtomicSync.Marshal(message.Version, &summary) if err != nil { return nil, fmt.Errorf("failed to marshal syncable summary: %w", err) } @@ -94,35 +93,31 @@ func NewAtomicSyncSummary(blockHash common.Hash, blockNumber uint64, blockRoot c return &summary, nil } -func (a *AtomicBlockSyncSummary) GetBlockNumber() uint64 { - return a.BlockNumber -} - -func (a *AtomicBlockSyncSummary) GetBlockHash() common.Hash { 
+func (a *AtomicSyncSummary) GetBlockHash() common.Hash { return a.BlockHash } -func (a *AtomicBlockSyncSummary) GetBlockRoot() common.Hash { +func (a *AtomicSyncSummary) GetBlockRoot() common.Hash { return a.BlockRoot } -func (a *AtomicBlockSyncSummary) Bytes() []byte { +func (a *AtomicSyncSummary) Bytes() []byte { return a.bytes } -func (a *AtomicBlockSyncSummary) Height() uint64 { +func (a *AtomicSyncSummary) Height() uint64 { return a.BlockNumber } -func (a *AtomicBlockSyncSummary) ID() ids.ID { +func (a *AtomicSyncSummary) ID() ids.ID { return a.summaryID } -func (a *AtomicBlockSyncSummary) String() string { +func (a *AtomicSyncSummary) String() string { return fmt.Sprintf("AtomicBlockSyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s, AtomicRoot=%s)", a.BlockHash, a.BlockNumber, a.BlockRoot, a.AtomicRoot) } -func (a *AtomicBlockSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { +func (a *AtomicSyncSummary) Accept(context.Context) (block.StateSyncMode, error) { if a.acceptImpl == nil { return block.StateSyncSkipped, fmt.Errorf("accept implementation not specified for summary: %s", a) } diff --git a/plugin/evm/atomic/sync/atomic_sync_summary_test.go b/plugin/evm/atomic/sync/atomic_sync_summary_test.go new file mode 100644 index 0000000000..9b534c75ed --- /dev/null +++ b/plugin/evm/atomic/sync/atomic_sync_summary_test.go @@ -0,0 +1,46 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package sync + +import ( + "context" + "encoding/base64" + "testing" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestMarshalAtomicSyncSummary(t *testing.T) { + atomicSyncSummary, err := NewAtomicSyncSummary(common.Hash{1}, 2, common.Hash{3}, common.Hash{4}) + require.NoError(t, err) + + require.Equal(t, common.Hash{1}, atomicSyncSummary.GetBlockHash()) + require.Equal(t, uint64(2), atomicSyncSummary.Height()) + require.Equal(t, common.Hash{3}, atomicSyncSummary.GetBlockRoot()) + + expectedBase64Bytes := "AAAAAAAAAAAAAgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" + require.Equal(t, expectedBase64Bytes, base64.StdEncoding.EncodeToString(atomicSyncSummary.Bytes())) + + parser := NewAtomicSyncSummaryParser() + called := false + acceptImplTest := func(message.Syncable) (block.StateSyncMode, error) { + called = true + return block.StateSyncSkipped, nil + } + s, err := parser.ParseFromBytes(atomicSyncSummary.Bytes(), acceptImplTest) + require.NoError(t, err) + require.Equal(t, atomicSyncSummary.GetBlockHash(), s.GetBlockHash()) + require.Equal(t, atomicSyncSummary.Height(), s.Height()) + require.Equal(t, atomicSyncSummary.GetBlockRoot(), s.GetBlockRoot()) + require.Equal(t, atomicSyncSummary.AtomicRoot, s.(*AtomicSyncSummary).AtomicRoot) + require.Equal(t, atomicSyncSummary.Bytes(), s.Bytes()) + + mode, err := s.Accept(context.TODO()) + require.NoError(t, err) + require.Equal(t, block.StateSyncSkipped, mode) + require.True(t, called) +} diff --git a/plugin/evm/message/block_sync_summary.go b/plugin/evm/message/block_sync_summary.go index fa404e177e..04c74bb682 100644 --- a/plugin/evm/message/block_sync_summary.go +++ b/plugin/evm/message/block_sync_summary.go @@ -87,10 +87,6 @@ func NewBlockSyncSummary(blockHash 
common.Hash, blockNumber uint64, blockRoot co return &summary, nil } -func (s *BlockSyncSummary) GetBlockNumber() uint64 { - return s.BlockNumber -} - func (s *BlockSyncSummary) GetBlockHash() common.Hash { return s.BlockHash } diff --git a/plugin/evm/message/block_sync_summary_test.go b/plugin/evm/message/block_sync_summary_test.go new file mode 100644 index 0000000000..f7a4c19975 --- /dev/null +++ b/plugin/evm/message/block_sync_summary_test.go @@ -0,0 +1,44 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "context" + "encoding/base64" + "testing" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestMarshalBlockSyncSummary(t *testing.T) { + blockSyncSummary, err := NewBlockSyncSummary(common.Hash{1}, 2, common.Hash{3}) + require.NoError(t, err) + + require.Equal(t, common.Hash{1}, blockSyncSummary.GetBlockHash()) + require.Equal(t, uint64(2), blockSyncSummary.Height()) + require.Equal(t, common.Hash{3}, blockSyncSummary.GetBlockRoot()) + + expectedBase64Bytes := "AAAAAAAAAAAAAgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + require.Equal(t, expectedBase64Bytes, base64.StdEncoding.EncodeToString(blockSyncSummary.Bytes())) + + parser := NewBlockSyncSummaryParser() + called := false + acceptImplTest := func(Syncable) (block.StateSyncMode, error) { + called = true + return block.StateSyncSkipped, nil + } + s, err := parser.ParseFromBytes(blockSyncSummary.Bytes(), acceptImplTest) + require.NoError(t, err) + require.Equal(t, blockSyncSummary.GetBlockHash(), s.GetBlockHash()) + require.Equal(t, blockSyncSummary.Height(), s.Height()) + require.Equal(t, blockSyncSummary.GetBlockRoot(), s.GetBlockRoot()) + require.Equal(t, blockSyncSummary.Bytes(), s.Bytes()) + + mode, err := s.Accept(context.TODO()) + require.NoError(t, err) + 
require.Equal(t, block.StateSyncSkipped, mode) + require.True(t, called) +} diff --git a/plugin/evm/message/syncable.go b/plugin/evm/message/syncable.go index 4ae81b823d..c83ae2d8cb 100644 --- a/plugin/evm/message/syncable.go +++ b/plugin/evm/message/syncable.go @@ -9,11 +9,8 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) -var _ Syncable = (*BlockSyncSummary)(nil) - type Syncable interface { block.StateSummary - GetBlockNumber() uint64 GetBlockHash() common.Hash GetBlockRoot() common.Hash } diff --git a/plugin/evm/sync/syncervm_client.go b/plugin/evm/sync/syncervm_client.go index f2e8dac7d9..b315895127 100644 --- a/plugin/evm/sync/syncervm_client.go +++ b/plugin/evm/sync/syncervm_client.go @@ -161,7 +161,7 @@ func (client *stateSyncerClient) ParseStateSummary(_ context.Context, summaryByt // stateSync blockingly performs the state sync for the EVM state and the atomic state // to [client.syncSummary]. returns an error if one occurred. func (client *stateSyncerClient) stateSync(ctx context.Context) error { - if err := client.syncBlocks(ctx, client.syncSummary.GetBlockHash(), client.syncSummary.GetBlockNumber(), ParentsToFetch); err != nil { + if err := client.syncBlocks(ctx, client.syncSummary.GetBlockHash(), client.syncSummary.Height(), ParentsToFetch); err != nil { return err } @@ -345,8 +345,8 @@ func (client *stateSyncerClient) finishSync() error { if block.Hash() != client.syncSummary.GetBlockHash() { return fmt.Errorf("attempted to set last summary block to unexpected block hash: (%s != %s)", block.Hash(), client.syncSummary.GetBlockHash()) } - if block.NumberU64() != client.syncSummary.GetBlockNumber() { - return fmt.Errorf("attempted to set last summary block to unexpected block number: (%d != %d)", block.NumberU64(), client.syncSummary.GetBlockNumber()) + if block.NumberU64() != client.syncSummary.Height() { + return fmt.Errorf("attempted to set last summary block to unexpected block number: (%d != %d)", block.NumberU64(), 
client.syncSummary.Height()) } // BloomIndexer needs to know that some parts of the chain are not available diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 92c6f91b6d..ef6c535d40 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -205,7 +205,7 @@ func init() { // Register the codec for the atomic block sync summary var err error - networkCodec, err = message.NewCodec(atomicsync.AtomicBlockSyncSummary{}) + networkCodec, err = message.NewCodec(atomicsync.AtomicSyncSummary{}) if err != nil { panic(fmt.Errorf("failed to create codec manager: %w", err)) } From 492b3dd924ffb4983f1b82e48fbd2d118db70e6b Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Fri, 17 Jan 2025 10:37:28 -0800 Subject: [PATCH 48/91] use concrete returns / callsite interfaces where possible (#756) --- plugin/evm/atomic/state/atomic_backend.go | 43 ++--- plugin/evm/atomic/state/atomic_state.go | 5 +- plugin/evm/atomic/state/atomic_trie.go | 35 ++-- .../evm/atomic/state/atomic_trie_iterator.go | 3 - plugin/evm/atomic/state/atomic_trie_test.go | 21 ++- .../evm/atomic/state/atomic_tx_repository.go | 32 ++-- .../atomic/state/atomic_tx_repository_test.go | 4 +- .../evm/atomic/state/interfaces/interfaces.go | 155 ------------------ .../evm/atomic/sync/atomic_sync_extender.go | 29 +++- .../evm/atomic/sync/atomic_sync_provider.go | 5 +- plugin/evm/atomic/sync/atomic_syncer.go | 35 +++- plugin/evm/vm.go | 7 +- plugin/evm/vm_test.go | 17 +- 13 files changed, 132 insertions(+), 259 deletions(-) delete mode 100644 plugin/evm/atomic/state/interfaces/interfaces.go diff --git a/plugin/evm/atomic/state/atomic_backend.go b/plugin/evm/atomic/state/atomic_backend.go index bc0526aeeb..309fbb41ca 100644 --- a/plugin/evm/atomic/state/atomic_backend.go +++ b/plugin/evm/atomic/state/atomic_backend.go @@ -17,13 +17,10 @@ import ( "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/plugin/evm/atomic" - 
"github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -var _ interfaces.AtomicBackend = &atomicBackend{} - var ( atomicTrieDBPrefix = []byte("atomicTrieDB") atomicTrieMetaDBPrefix = []byte("atomicTrieMetaDB") @@ -35,17 +32,17 @@ const ( progressLogFrequency = 30 * time.Second ) -// atomicBackend implements the AtomicBackend interface using +// AtomicBackend implements the AtomicBackend interface using // the AtomicTrie, AtomicTxRepository, and the VM's shared memory. -type atomicBackend struct { +type AtomicBackend struct { codec codec.Manager bonusBlocks map[uint64]ids.ID // Map of height to blockID for blocks to skip indexing db *versiondb.Database // Underlying database metadataDB database.Database // Underlying database containing the atomic trie metadata sharedMemory avalancheatomic.SharedMemory - repo interfaces.AtomicTxRepository - atomicTrie *atomicTrie + repo *AtomicTxRepository + atomicTrie *AtomicTrie lastAcceptedHash common.Hash verifiedRoots map[common.Hash]*atomicState @@ -54,9 +51,9 @@ type atomicBackend struct { // NewAtomicBackend creates an AtomicBackend from the specified dependencies func NewAtomicBackend( db *versiondb.Database, sharedMemory avalancheatomic.SharedMemory, - bonusBlocks map[uint64]ids.ID, repo interfaces.AtomicTxRepository, + bonusBlocks map[uint64]ids.ID, repo *AtomicTxRepository, lastAcceptedHeight uint64, lastAcceptedHash common.Hash, commitInterval uint64, -) (*atomicBackend, error) { +) (*AtomicBackend, error) { atomicTrieDB := prefixdb.New(atomicTrieDBPrefix, db) metadataDB := prefixdb.New(atomicTrieMetaDBPrefix, db) codec := repo.Codec() @@ -65,7 +62,7 @@ func NewAtomicBackend( if err != nil { return nil, err } - atomicBackend := &atomicBackend{ + atomicBackend := &AtomicBackend{ codec: codec, db: db, metadataDB: metadataDB, @@ -93,7 +90,7 @@ func NewAtomicBackend( // most recent height divisible by the commitInterval. 
// Subsequent updates to this trie are made using the Index call as blocks are accepted. // Note: this method assumes no atomic txs are applied at genesis. -func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error { +func (a *AtomicBackend) initialize(lastAcceptedHeight uint64) error { start := time.Now() // track the last committed height and last committed root @@ -196,7 +193,7 @@ func (a *atomicBackend) initialize(lastAcceptedHeight uint64) error { // This executes operations in the range [cursorHeight+1, lastAcceptedBlock]. // The cursor is initially set by MarkApplyToSharedMemoryCursor to signal to the atomic trie // the range of operations that were added to the trie without being executed on shared memory. -func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { +func (a *AtomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { sharedMemoryCursor, err := a.metadataDB.Get(appliedSharedMemoryCursorKey) if err == database.ErrNotFound { return nil @@ -317,13 +314,13 @@ func (a *atomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { // This is used when state sync syncs the atomic trie, such that the atomic operations // from [previousLastAcceptedHeight+1] to the [lastAcceptedHeight] set by state sync // will not have been executed on shared memory. -func (a *atomicBackend) MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error { +func (a *AtomicBackend) MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error { // Set the cursor to [previousLastAcceptedHeight+1] so that we begin the iteration at the // first item that has not been applied to shared memory. 
return database.PutUInt64(a.metadataDB, appliedSharedMemoryCursorKey, previousLastAcceptedHeight+1) } -func (a *atomicBackend) GetVerifiedAtomicState(blockHash common.Hash) (interfaces.AtomicState, error) { +func (a *AtomicBackend) GetVerifiedAtomicState(blockHash common.Hash) (*atomicState, error) { if state, ok := a.verifiedRoots[blockHash]; ok { return state, nil } @@ -334,7 +331,7 @@ func (a *atomicBackend) GetVerifiedAtomicState(blockHash common.Hash) (interface // - the last accepted block // - a block that has been verified but not accepted or rejected yet. // If [blockHash] is neither of the above, an error is returned. -func (a *atomicBackend) getAtomicRootAt(blockHash common.Hash) (common.Hash, error) { +func (a *AtomicBackend) getAtomicRootAt(blockHash common.Hash) (common.Hash, error) { // TODO: we can implement this in a few ways. if blockHash == a.lastAcceptedHash { return a.atomicTrie.LastAcceptedRoot(), nil @@ -347,7 +344,7 @@ func (a *atomicBackend) getAtomicRootAt(blockHash common.Hash) (common.Hash, err } // SetLastAccepted is used after state-sync to update the last accepted block hash. -func (a *atomicBackend) SetLastAccepted(lastAcceptedHash common.Hash) { +func (a *AtomicBackend) SetLastAccepted(lastAcceptedHash common.Hash) { a.lastAcceptedHash = lastAcceptedHash } @@ -358,7 +355,7 @@ func (a *atomicBackend) SetLastAccepted(lastAcceptedHash common.Hash) { // and it's the caller's responsibility to call either Accept or Reject on // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the // changes or abort them and free memory. 
-func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) { +func (a *AtomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) { // access the atomic trie at the parent block parentRoot, err := a.getAtomicRootAt(parentHash) if err != nil { @@ -408,14 +405,14 @@ func (a *atomicBackend) InsertTxs(blockHash common.Hash, blockHeight uint64, par } // IsBonus returns true if the block for atomicState is a bonus block -func (a *atomicBackend) IsBonus(blockHeight uint64, blockHash common.Hash) bool { +func (a *AtomicBackend) IsBonus(blockHeight uint64, blockHash common.Hash) bool { if bonusID, found := a.bonusBlocks[blockHeight]; found { return bonusID == ids.ID(blockHash) } return false } -func (a *atomicBackend) AtomicTrie() interfaces.AtomicTrie { +func (a *AtomicBackend) AtomicTrie() *AtomicTrie { return a.atomicTrie } @@ -451,3 +448,11 @@ func mergeAtomicOpsToMap(output map[ids.ID]*avalancheatomic.Requests, chainID id output[chainID] = requests } } + +// AddBonusBlock adds a bonus block to the atomic backend +func (a *AtomicBackend) AddBonusBlock(height uint64, blockID ids.ID) { + if a.bonusBlocks == nil { + a.bonusBlocks = make(map[uint64]ids.ID) + } + a.bonusBlocks[height] = blockID +} diff --git a/plugin/evm/atomic/state/atomic_state.go b/plugin/evm/atomic/state/atomic_state.go index e867ff57a2..07d9e806fd 100644 --- a/plugin/evm/atomic/state/atomic_state.go +++ b/plugin/evm/atomic/state/atomic_state.go @@ -10,17 +10,14 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -var _ interfaces.AtomicState = &atomicState{} - // atomicState implements the AtomicState interface using // a 
pointer to the atomicBackend. type atomicState struct { - backend *atomicBackend + backend *AtomicBackend blockHash common.Hash blockHeight uint64 txs []*atomic.Tx diff --git a/plugin/evm/atomic/state/atomic_trie.go b/plugin/evm/atomic/state/atomic_trie.go index 1390773f6e..08c93f0b91 100644 --- a/plugin/evm/atomic/state/atomic_trie.go +++ b/plugin/evm/atomic/state/atomic_trie.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/database" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/trie/trienode" @@ -29,8 +28,6 @@ import ( "github.com/ethereum/go-ethereum/log" ) -var _ interfaces.AtomicTrie = &atomicTrie{} - const ( AtomicTrieKeyLength = wrappers.LongLen + common.HashLength @@ -40,8 +37,8 @@ const ( var lastCommittedKey = []byte("atomicTrieLastCommittedBlock") -// atomicTrie implements the AtomicTrie interface -type atomicTrie struct { +// AtomicTrie implements the AtomicTrie interface +type AtomicTrie struct { commitInterval uint64 // commit interval, same as commitHeightInterval by default metadataDB avalanchedatabase.Database // Underlying database containing the atomic trie metadata trieDB *triedb.Database // Trie database @@ -59,7 +56,7 @@ type atomicTrie struct { func NewAtomicTrie( atomicTrieDB avalanchedatabase.Database, metadataDB avalanchedatabase.Database, codec codec.Manager, lastAcceptedHeight uint64, commitHeightInterval uint64, -) (*atomicTrie, error) { +) (*AtomicTrie, error) { root, height, err := lastCommittedRootIfExists(metadataDB) if err != nil { return nil, err @@ -87,7 +84,7 @@ func NewAtomicTrie( }, ) - return &atomicTrie{ + return &AtomicTrie{ commitInterval: commitHeightInterval, metadataDB: metadataDB, trieDB: trieDB, @@ -135,12 +132,12 @@ func nearestCommitHeight(blockNumber uint64, commitInterval uint64) uint64 { 
return blockNumber - (blockNumber % commitInterval) } -func (a *atomicTrie) OpenTrie(root common.Hash) (*trie.Trie, error) { +func (a *AtomicTrie) OpenTrie(root common.Hash) (*trie.Trie, error) { return trie.New(trie.TrieID(root), a.trieDB) } // commit calls commit on the underlying trieDB and updates metadata pointers. -func (a *atomicTrie) commit(height uint64, root common.Hash) error { +func (a *AtomicTrie) commit(height uint64, root common.Hash) error { if err := a.trieDB.Commit(root, false); err != nil { return err } @@ -148,7 +145,7 @@ func (a *atomicTrie) commit(height uint64, root common.Hash) error { return a.updateLastCommitted(root, height) } -func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { +func (a *AtomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { for blockchainID, requests := range atomicOps { valueBytes, err := a.codec.Marshal(atomic.CodecVersion, requests) if err != nil { @@ -170,13 +167,13 @@ func (a *atomicTrie) UpdateTrie(trie *trie.Trie, height uint64, atomicOps map[id } // LastCommitted returns the last committed trie hash and last committed height -func (a *atomicTrie) LastCommitted() (common.Hash, uint64) { +func (a *AtomicTrie) LastCommitted() (common.Hash, uint64) { return a.lastCommittedRoot, a.lastCommittedHeight } // updateLastCommitted adds [height] -> [root] to the index and marks it as the last committed // root/height pair. 
-func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error { +func (a *AtomicTrie) updateLastCommitted(root common.Hash, height uint64) error { heightBytes := avalanchedatabase.PackUInt64(height) // now save the trie hash against the height it was committed at @@ -196,7 +193,7 @@ func (a *atomicTrie) updateLastCommitted(root common.Hash, height uint64) error // Iterator returns a AtomicTrieIterator that iterates the trie from the given // atomic trie root, starting at the specified [cursor]. -func (a *atomicTrie) Iterator(root common.Hash, cursor []byte) (interfaces.AtomicTrieIterator, error) { +func (a *AtomicTrie) Iterator(root common.Hash, cursor []byte) (*atomicTrieIterator, error) { t, err := trie.New(trie.TrieID(root), a.trieDB) if err != nil { return nil, err @@ -210,14 +207,14 @@ func (a *atomicTrie) Iterator(root common.Hash, cursor []byte) (interfaces.Atomi return NewAtomicTrieIterator(iter, a.codec), iter.Err } -func (a *atomicTrie) TrieDB() *triedb.Database { +func (a *AtomicTrie) TrieDB() *triedb.Database { return a.trieDB } // Root returns hash if it exists at specified height // if trie was not committed at provided height, it returns // common.Hash{} instead -func (a *atomicTrie) Root(height uint64) (common.Hash, error) { +func (a *AtomicTrie) Root(height uint64) (common.Hash, error) { return getRoot(a.metadataDB, height) } @@ -242,11 +239,11 @@ func getRoot(metadataDB avalanchedatabase.Database, height uint64) (common.Hash, return common.BytesToHash(hash), nil } -func (a *atomicTrie) LastAcceptedRoot() common.Hash { +func (a *AtomicTrie) LastAcceptedRoot() common.Hash { return a.lastAcceptedRoot } -func (a *atomicTrie) InsertTrie(nodes *trienode.NodeSet, root common.Hash) error { +func (a *AtomicTrie) InsertTrie(nodes *trienode.NodeSet, root common.Hash) error { if nodes != nil { if err := a.trieDB.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { return err @@ -268,7 +265,7 @@ func (a 
*atomicTrie) InsertTrie(nodes *trienode.NodeSet, root common.Hash) error // AcceptTrie commits the triedb at [root] if needed and returns true if a commit // was performed. -func (a *atomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { +func (a *AtomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { hasCommitted := false // Because we do not accept the trie at every height, we may need to // populate roots at prior commit heights that were skipped. @@ -297,7 +294,7 @@ func (a *atomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { return hasCommitted, nil } -func (a *atomicTrie) RejectTrie(root common.Hash) error { +func (a *AtomicTrie) RejectTrie(root common.Hash) error { a.trieDB.Dereference(root) return nil } diff --git a/plugin/evm/atomic/state/atomic_trie_iterator.go b/plugin/evm/atomic/state/atomic_trie_iterator.go index 4ba56dc251..d7df867f8d 100644 --- a/plugin/evm/atomic/state/atomic_trie_iterator.go +++ b/plugin/evm/atomic/state/atomic_trie_iterator.go @@ -12,12 +12,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/trie" ) -var _ interfaces.AtomicTrieIterator = &atomicTrieIterator{} - // atomicTrieIterator is an implementation of types.AtomicTrieIterator that serves // parsed data with each iteration type atomicTrieIterator struct { diff --git a/plugin/evm/atomic/state/atomic_trie_test.go b/plugin/evm/atomic/state/atomic_trie_test.go index a7990ad4c7..99a72bab85 100644 --- a/plugin/evm/atomic/state/atomic_trie_test.go +++ b/plugin/evm/atomic/state/atomic_trie_test.go @@ -22,7 +22,6 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" - "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/utils" 
"github.com/ethereum/go-ethereum/common" @@ -33,7 +32,7 @@ const testCommitInterval = 100 // indexAtomicTxs updates [tr] with entries in [atomicOps] at height by creating // a new snapshot, calculating a new root, and calling InsertTrie followed // by AcceptTrie on the new root. -func indexAtomicTxs(tr interfaces.AtomicTrie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { +func indexAtomicTxs(tr *AtomicTrie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error { snapshot, err := tr.OpenTrie(tr.LastAcceptedRoot()) if err != nil { return err @@ -254,7 +253,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { assert.Equal(t, hash, newHash, "hash should be the same") } -func newTestAtomicTrie(t *testing.T) interfaces.AtomicTrie { +func newTestAtomicTrie(t *testing.T) *AtomicTrie { db := versiondb.New(memdb.New()) repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, 0) if err != nil { @@ -410,7 +409,7 @@ func TestIndexingNilShouldNotImpactTrie(t *testing.T) { func TestApplyToSharedMemory(t *testing.T) { type test struct { commitInterval, lastAcceptedHeight uint64 - setMarker func(*atomicBackend) error + setMarker func(*AtomicBackend) error expectOpsApplied func(height uint64) bool bonusBlockHeights map[uint64]ids.ID } @@ -419,13 +418,13 @@ func TestApplyToSharedMemory(t *testing.T) { "marker is set to height": { commitInterval: 10, lastAcceptedHeight: 25, - setMarker: func(a *atomicBackend) error { return a.MarkApplyToSharedMemoryCursor(10) }, + setMarker: func(a *AtomicBackend) error { return a.MarkApplyToSharedMemoryCursor(10) }, expectOpsApplied: func(height uint64) bool { return height > 10 && height <= 20 }, }, "marker is set to height, should skip bonus blocks": { commitInterval: 10, lastAcceptedHeight: 25, - setMarker: func(a *atomicBackend) error { return a.MarkApplyToSharedMemoryCursor(10) }, + setMarker: func(a *AtomicBackend) error { return a.MarkApplyToSharedMemoryCursor(10) }, bonusBlockHeights: 
map[uint64]ids.ID{15: {}}, expectOpsApplied: func(height uint64) bool { if height == 15 { @@ -437,7 +436,7 @@ func TestApplyToSharedMemory(t *testing.T) { "marker is set to height + blockchain ID": { commitInterval: 10, lastAcceptedHeight: 25, - setMarker: func(a *atomicBackend) error { + setMarker: func(a *AtomicBackend) error { cursor := make([]byte, wrappers.LongLen+len(atomictest.TestBlockchainID[:])) binary.BigEndian.PutUint64(cursor, 10) copy(cursor[wrappers.LongLen:], atomictest.TestBlockchainID[:]) @@ -448,7 +447,7 @@ func TestApplyToSharedMemory(t *testing.T) { "marker not set": { commitInterval: 10, lastAcceptedHeight: 25, - setMarker: func(*atomicBackend) error { return nil }, + setMarker: func(*AtomicBackend) error { return nil }, expectOpsApplied: func(uint64) bool { return false }, }, } { @@ -464,7 +463,7 @@ func TestApplyToSharedMemory(t *testing.T) { sharedMemories := atomictest.NewSharedMemories(m, ids.GenerateTestID(), atomictest.TestBlockchainID) backend, err := NewAtomicBackend(db, sharedMemories.ThisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) - atomicTrie := backend.AtomicTrie().(*atomicTrie) + atomicTrie := backend.AtomicTrie() hash, height := atomicTrie.LastCommitted() assert.NotEqual(t, common.Hash{}, hash) @@ -529,7 +528,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) var ( - atomicTrie interfaces.AtomicTrie + atomicTrie *AtomicTrie hash common.Hash height uint64 ) @@ -665,7 +664,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u // verifyOperations creates an iterator over the atomicTrie at [rootHash] and verifies that the all of the operations in the trie in the interval [from, to] are identical to // the atomic operations contained in [operationsMap] on the same interval. 
-func verifyOperations(t testing.TB, atomicTrie interfaces.AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { +func verifyOperations(t testing.TB, atomicTrie *AtomicTrie, codec codec.Manager, rootHash common.Hash, from, to uint64, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests) { t.Helper() // Start the iterator at [from] diff --git a/plugin/evm/atomic/state/atomic_tx_repository.go b/plugin/evm/atomic/state/atomic_tx_repository.go index d84b3697a6..6beae0ff51 100644 --- a/plugin/evm/atomic/state/atomic_tx_repository.go +++ b/plugin/evm/atomic/state/atomic_tx_repository.go @@ -37,7 +37,7 @@ var ( ) // atomicTxRepository is a prefixdb implementation of the AtomicTxRepository interface -type atomicTxRepository struct { +type AtomicTxRepository struct { // [acceptedAtomicTxDB] maintains an index of [txID] => [height]+[atomic tx] for all accepted atomic txs. acceptedAtomicTxDB database.Database @@ -57,8 +57,8 @@ type atomicTxRepository struct { func NewAtomicTxRepository( db *versiondb.Database, codec codec.Manager, lastAcceptedHeight uint64, -) (*atomicTxRepository, error) { - repo := &atomicTxRepository{ +) (*AtomicTxRepository, error) { + repo := &AtomicTxRepository{ acceptedAtomicTxDB: prefixdb.New(atomicTxIDDBPrefix, db), acceptedAtomicTxByHeightDB: prefixdb.New(atomicHeightTxDBPrefix, db), atomicRepoMetadataDB: prefixdb.New(atomicRepoMetadataDBPrefix, db), @@ -73,7 +73,7 @@ func NewAtomicTxRepository( // initializeHeightIndex initializes the atomic repository and takes care of any required migration from the previous database // format which did not have a height -> txs index. 
-func (a *atomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) error { +func (a *AtomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) error { startTime := time.Now() lastLogTime := startTime @@ -173,7 +173,7 @@ func (a *atomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) er } // GetIndexHeight returns the last height that was indexed by the atomic repository -func (a *atomicTxRepository) GetIndexHeight() (uint64, error) { +func (a *AtomicTxRepository) GetIndexHeight() (uint64, error) { indexHeightBytes, err := a.atomicRepoMetadataDB.Get(maxIndexedHeightKey) if err != nil { return 0, err @@ -189,7 +189,7 @@ func (a *atomicTxRepository) GetIndexHeight() (uint64, error) { // GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*atomic.Tx] object // if an entry is found, and returns it with the block height the atomic tx it // represents was accepted on, along with an optional error. -func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) { +func (a *AtomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) { indexedTxBytes, err := a.acceptedAtomicTxDB.Get(txID[:]) if err != nil { return nil, 0, err @@ -217,14 +217,14 @@ func (a *atomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) // no atomic transactions in the block accepted at [height]. 
// If [height] is greater than the last accepted height, then this will always return // [database.ErrNotFound] -func (a *atomicTxRepository) GetByHeight(height uint64) ([]*atomic.Tx, error) { +func (a *AtomicTxRepository) GetByHeight(height uint64) ([]*atomic.Tx, error) { heightBytes := make([]byte, wrappers.LongLen) binary.BigEndian.PutUint64(heightBytes, height) return a.getByHeightBytes(heightBytes) } -func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, error) { +func (a *AtomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, error) { txsBytes, err := a.acceptedAtomicTxByHeightDB.Get(heightBytes) if err != nil { return nil, err @@ -236,17 +236,17 @@ func (a *atomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, // by txID or height. This method must be called only once per height, // and [txs] must include all atomic txs for the block accepted at the // corresponding height. -func (a *atomicTxRepository) Write(height uint64, txs []*atomic.Tx) error { +func (a *AtomicTxRepository) Write(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, false) } // WriteBonus is similar to Write, except the [txID] => [height] is not // overwritten if already exists. -func (a *atomicTxRepository) WriteBonus(height uint64, txs []*atomic.Tx) error { +func (a *AtomicTxRepository) WriteBonus(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, true) } -func (a *atomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) error { +func (a *AtomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) error { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. 
@@ -288,7 +288,7 @@ func (a *atomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) // indexTxByID writes [tx] into the [acceptedAtomicTxDB] stored as // [height] + [tx bytes] -func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) error { +func (a *AtomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) error { txBytes, err := a.codec.Marshal(atomic.CodecVersion, tx) if err != nil { return err @@ -308,7 +308,7 @@ func (a *atomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) erro } // indexTxsAtHeight adds [height] -> [txs] to the [acceptedAtomicTxByHeightDB] -func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic.Tx) error { +func (a *AtomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic.Tx) error { txsBytes, err := a.codec.Marshal(atomic.CodecVersion, txs) if err != nil { return err @@ -323,7 +323,7 @@ func (a *atomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic. // [tx] to the slice of transactions stored there. // This function is used while initializing the atomic repository to re-index the atomic transactions // by txID into the height -> txs index. -func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomic.Tx) error { +func (a *AtomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomic.Tx) error { txs, err := a.getByHeightBytes(heightBytes) if err != nil && err != database.ErrNotFound { return err @@ -344,12 +344,12 @@ func (a *atomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomi // IterateByHeight returns an iterator beginning at [height]. // Note [height] must be greater than 0 since we assume there are no // atomic txs in genesis. 
-func (a *atomicTxRepository) IterateByHeight(height uint64) database.Iterator { +func (a *AtomicTxRepository) IterateByHeight(height uint64) database.Iterator { heightBytes := make([]byte, wrappers.LongLen) binary.BigEndian.PutUint64(heightBytes, height) return a.acceptedAtomicTxByHeightDB.NewIteratorWithStart(heightBytes) } -func (a *atomicTxRepository) Codec() codec.Manager { +func (a *AtomicTxRepository) Codec() codec.Manager { return a.codec } diff --git a/plugin/evm/atomic/state/atomic_tx_repository_test.go b/plugin/evm/atomic/state/atomic_tx_repository_test.go index d9c6b150c8..8886864dad 100644 --- a/plugin/evm/atomic/state/atomic_tx_repository_test.go +++ b/plugin/evm/atomic/state/atomic_tx_repository_test.go @@ -69,7 +69,7 @@ func constTxsPerHeight(txCount int) func(uint64) int { // writeTxs writes [txsPerHeight] txs for heights ranging in [fromHeight, toHeight) through the Write call on [repo], // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
-func writeTxs(t testing.TB, repo *atomicTxRepository, fromHeight uint64, toHeight uint64, +func writeTxs(t testing.TB, repo *AtomicTxRepository, fromHeight uint64, toHeight uint64, txsPerHeight func(height uint64) int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, ) { for height := fromHeight; height < toHeight; height++ { @@ -95,7 +95,7 @@ func writeTxs(t testing.TB, repo *atomicTxRepository, fromHeight uint64, toHeigh } // verifyTxs asserts [repo] can find all txs in [txMap] by height and txID -func verifyTxs(t testing.TB, repo *atomicTxRepository, txMap map[uint64][]*atomic.Tx) { +func verifyTxs(t testing.TB, repo *AtomicTxRepository, txMap map[uint64][]*atomic.Tx) { // We should be able to fetch indexed txs by height: for height, expectedTxs := range txMap { txs, err := repo.GetByHeight(height) diff --git a/plugin/evm/atomic/state/interfaces/interfaces.go b/plugin/evm/atomic/state/interfaces/interfaces.go deleted file mode 100644 index 9c05c5c963..0000000000 --- a/plugin/evm/atomic/state/interfaces/interfaces.go +++ /dev/null @@ -1,155 +0,0 @@ -package interfaces - -import ( - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/trie/trienode" - "github.com/ava-labs/coreth/triedb" - "github.com/ethereum/go-ethereum/common" - - avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" -) - -// AtomicTrie maintains an index of atomic operations by blockchainIDs for every block -// height containing atomic transactions. The backing data structure for this index is -// a Trie. The keys of the trie are block heights and the values (leaf nodes) -// are the atomic operations applied to shared memory while processing the block accepted -// at the corresponding height. 
-type AtomicTrie interface { - // OpenTrie returns a modifiable instance of the atomic trie backed by trieDB - // opened at hash. - OpenTrie(hash common.Hash) (*trie.Trie, error) - - // UpdateTrie updates [tr] to inlude atomicOps for height. - UpdateTrie(tr *trie.Trie, height uint64, atomicOps map[ids.ID]*avalancheatomic.Requests) error - - // Iterator returns an AtomicTrieIterator to iterate the trie at the given - // root hash starting at [cursor]. - Iterator(hash common.Hash, cursor []byte) (AtomicTrieIterator, error) - - // LastCommitted returns the last committed hash and corresponding block height - LastCommitted() (common.Hash, uint64) - - // TrieDB returns the underlying trie database - TrieDB() *triedb.Database - - // Root returns hash if it exists at specified height - // if trie was not committed at provided height, it returns - // common.Hash{} instead - Root(height uint64) (common.Hash, error) - - // LastAcceptedRoot returns the most recent accepted root of the atomic trie, - // or the root it was initialized to if no new tries were accepted yet. - LastAcceptedRoot() common.Hash - - // InsertTrie updates the trieDB with the provided node set and adds a reference - // to root in the trieDB. Once InsertTrie is called, it is expected either - // AcceptTrie or RejectTrie be called for the same root. - InsertTrie(nodes *trienode.NodeSet, root common.Hash) error - - // AcceptTrie marks root as the last accepted atomic trie root, and - // commits the trie to persistent storage if height is divisible by - // the commit interval. Returns true if the trie was committed. - AcceptTrie(height uint64, root common.Hash) (bool, error) - - // RejectTrie dereferences root from the trieDB, freeing memory. 
- RejectTrie(root common.Hash) error -} - -// AtomicBackend abstracts the verification and processing -// of atomic transactions -type AtomicBackend interface { - // InsertTxs calculates the root of the atomic trie that would - // result from applying [txs] to the atomic trie, starting at the state - // corresponding to previously verified block [parentHash]. - // If [blockHash] is provided, the modified atomic trie is pinned in memory - // and it's the caller's responsibility to call either Accept or Reject on - // the AtomicState which can be retreived from GetVerifiedAtomicState to commit the - // changes or abort them and free memory. - InsertTxs(blockHash common.Hash, blockHeight uint64, parentHash common.Hash, txs []*atomic.Tx) (common.Hash, error) - - // Returns an AtomicState corresponding to a block hash that has been inserted - // but not Accepted or Rejected yet. - GetVerifiedAtomicState(blockHash common.Hash) (AtomicState, error) - - // AtomicTrie returns the atomic trie managed by this backend. - AtomicTrie() AtomicTrie - - // ApplyToSharedMemory applies the atomic operations that have been indexed into the trie - // but not yet applied to shared memory for heights less than or equal to [lastAcceptedBlock]. - // This executes operations in the range [cursorHeight+1, lastAcceptedBlock]. - // The cursor is initially set by MarkApplyToSharedMemoryCursor to signal to the atomic trie - // the range of operations that were added to the trie without being executed on shared memory. - ApplyToSharedMemory(lastAcceptedBlock uint64) error - - // MarkApplyToSharedMemoryCursor marks the atomic trie as containing atomic ops that - // have not been executed on shared memory starting at [previousLastAcceptedHeight+1]. - // This is used when state sync syncs the atomic trie, such that the atomic operations - // from [previousLastAcceptedHeight+1] to the [lastAcceptedHeight] set by state sync - // will not have been executed on shared memory. 
- MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error - - // SetLastAccepted is used after state-sync to reset the last accepted block. - SetLastAccepted(lastAcceptedHash common.Hash) - - // IsBonus returns true if the block for atomicState is a bonus block - IsBonus(blockHeight uint64, blockHash common.Hash) bool -} - -// AtomicTxRepository defines an entity that manages storage and indexing of -// atomic transactions -type AtomicTxRepository interface { - GetIndexHeight() (uint64, error) - GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) - GetByHeight(height uint64) ([]*atomic.Tx, error) - Write(height uint64, txs []*atomic.Tx) error - WriteBonus(height uint64, txs []*atomic.Tx) error - - IterateByHeight(start uint64) database.Iterator - Codec() codec.Manager -} - -// AtomicState is an abstraction created through AtomicBackend -// and can be used to apply the VM's state change for atomic txs -// or reject them to free memory. -// The root of the atomic trie after applying the state change -// is accessible through this interface as well. -type AtomicState interface { - // Root of the atomic trie after applying the state change. - Root() common.Hash - // Accept applies the state change to VM's persistent storage - // Changes are persisted atomically along with the provided [commitBatch]. - Accept(commitBatch database.Batch, requests map[ids.ID]*avalancheatomic.Requests) error - // Reject frees memory associated with the state change. 
- Reject() error -} - -// AtomicTrieIterator is a stateful iterator that iterates the leafs of an AtomicTrie -type AtomicTrieIterator interface { - // Next advances the iterator to the next node in the atomic trie and - // returns true if there are more leaves to iterate - Next() bool - - // Key returns the current database key that the iterator is iterating - // returned []byte can be freely modified - Key() []byte - - // Value returns the current database value that the iterator is iterating - Value() []byte - - // BlockNumber returns the current block number - BlockNumber() uint64 - - // BlockchainID returns the current blockchain ID at the current block number - BlockchainID() ids.ID - - // AtomicOps returns a map of blockchainIDs to the set of atomic requests - // for that blockchainID at the current block number - AtomicOps() *avalancheatomic.Requests - - // Error returns error, if any encountered during this iteration - Error() error -} diff --git a/plugin/evm/atomic/sync/atomic_sync_extender.go b/plugin/evm/atomic/sync/atomic_sync_extender.go index 8174548da4..4d4216c18c 100644 --- a/plugin/evm/atomic/sync/atomic_sync_extender.go +++ b/plugin/evm/atomic/sync/atomic_sync_extender.go @@ -9,22 +9,43 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" syncclient "github.com/ava-labs/coreth/sync/client" - "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) var _ sync.Extender = (*AtomicSyncExtender)(nil) +type AtomicBackend interface { + // ApplyToSharedMemory applies the atomic operations that have been indexed into the trie + // but not yet applied to shared memory for heights less than or equal to [lastAcceptedBlock]. + // This executes operations in the range [cursorHeight+1, lastAcceptedBlock]. 
+ // The cursor is initially set by MarkApplyToSharedMemoryCursor to signal to the atomic trie + // the range of operations that were added to the trie without being executed on shared memory. + ApplyToSharedMemory(lastAcceptedBlock uint64) error + + // MarkApplyToSharedMemoryCursor marks the atomic trie as containing atomic ops that + // have not been executed on shared memory starting at [previousLastAcceptedHeight+1]. + // This is used when state sync syncs the atomic trie, such that the atomic operations + // from [previousLastAcceptedHeight+1] to the [lastAcceptedHeight] set by state sync + // will not have been executed on shared memory. + MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error + + // SetLastAccepted is used after state-sync to reset the last accepted block. + SetLastAccepted(lastAcceptedHash common.Hash) +} + type AtomicSyncExtender struct { - backend interfaces.AtomicBackend + backend AtomicBackend + atomicTrie AtomicTrie stateSyncRequestSize uint16 } -func NewAtomicSyncExtender(backend interfaces.AtomicBackend, stateSyncRequestSize uint16) *AtomicSyncExtender { +func NewAtomicSyncExtender(backend AtomicBackend, atomicTrie AtomicTrie, stateSyncRequestSize uint16) *AtomicSyncExtender { return &AtomicSyncExtender{ backend: backend, + atomicTrie: atomicTrie, stateSyncRequestSize: stateSyncRequestSize, } } @@ -38,7 +59,7 @@ func (a *AtomicSyncExtender) Sync(ctx context.Context, client syncclient.LeafCli atomicSyncer, err := NewAtomicSyncer( client, verDB, - a.backend.AtomicTrie(), + a.atomicTrie, atomicSyncSummary.AtomicRoot, atomicSyncSummary.BlockNumber, a.stateSyncRequestSize, diff --git a/plugin/evm/atomic/sync/atomic_sync_provider.go b/plugin/evm/atomic/sync/atomic_sync_provider.go index 7c5883d999..40060f60ed 100644 --- a/plugin/evm/atomic/sync/atomic_sync_provider.go +++ b/plugin/evm/atomic/sync/atomic_sync_provider.go @@ -8,7 +8,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" 
"github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/sync" "github.com/ethereum/go-ethereum/common" ) @@ -17,10 +16,10 @@ var _ sync.SummaryProvider = &AtomicSyncProvider{} type AtomicSyncProvider struct { chain *core.BlockChain - atomicTrie interfaces.AtomicTrie + atomicTrie AtomicTrie } -func NewAtomicProvider(chain *core.BlockChain, atomicTrie interfaces.AtomicTrie) *AtomicSyncProvider { +func NewAtomicProvider(chain *core.BlockChain, atomicTrie AtomicTrie) *AtomicSyncProvider { return &AtomicSyncProvider{chain: chain, atomicTrie: atomicTrie} } diff --git a/plugin/evm/atomic/sync/atomic_syncer.go b/plugin/evm/atomic/sync/atomic_syncer.go index d0f05bddae..ab1f563b70 100644 --- a/plugin/evm/atomic/sync/atomic_syncer.go +++ b/plugin/evm/atomic/sync/atomic_syncer.go @@ -15,10 +15,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/coreth/plugin/evm/atomic/state" - "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/message" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/trie/trienode" ) const ( @@ -31,6 +31,35 @@ var ( _ syncclient.LeafSyncTask = &atomicSyncerLeafTask{} ) +// AtomicTrie maintains an index of atomic operations by blockchainIDs for every block +// height containing atomic transactions. The backing data structure for this index is +// a Trie. The keys of the trie are block heights and the values (leaf nodes) +// are the atomic operations applied to shared memory while processing the block accepted +// at the corresponding height. +type AtomicTrie interface { + // OpenTrie returns a modifiable instance of the atomic trie backed by trieDB + // opened at hash. 
+ OpenTrie(hash common.Hash) (*trie.Trie, error) + + // LastCommitted returns the last committed hash and corresponding block height + LastCommitted() (common.Hash, uint64) + + // Root returns hash if it exists at specified height + // if trie was not committed at provided height, it returns + // common.Hash{} instead + Root(height uint64) (common.Hash, error) + + // InsertTrie updates the trieDB with the provided node set and adds a reference + // to root in the trieDB. Once InsertTrie is called, it is expected either + // AcceptTrie or RejectTrie be called for the same root. + InsertTrie(nodes *trienode.NodeSet, root common.Hash) error + + // AcceptTrie marks root as the last accepted atomic trie root, and + // commits the trie to persistent storage if height is divisible by + // the commit interval. Returns true if the trie was committed. + AcceptTrie(height uint64, root common.Hash) (bool, error) +} + // Syncer represents a step in state sync, // along with Start/Done methods to control // and monitor progress. @@ -45,7 +74,7 @@ type Syncer interface { // the state of progress and writing the actual atomic trie to the trieDB. 
type atomicSyncer struct { db *versiondb.Database - atomicTrie interfaces.AtomicTrie + atomicTrie AtomicTrie trie *trie.Trie // used to update the atomic trie targetRoot common.Hash targetHeight uint64 @@ -66,7 +95,7 @@ func addZeroes(height uint64) []byte { return packer.Bytes } -func NewAtomicSyncer(client syncclient.LeafClient, vdb *versiondb.Database, atomicTrie interfaces.AtomicTrie, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { +func NewAtomicSyncer(client syncclient.LeafClient, vdb *versiondb.Database, atomicTrie AtomicTrie, targetRoot common.Hash, targetHeight uint64, requestSize uint16) (*atomicSyncer, error) { lastCommittedRoot, lastCommit := atomicTrie.LastCommitted() trie, err := atomicTrie.OpenTrie(lastCommittedRoot) if err != nil { diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index ef6c535d40..3928f80577 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -42,7 +42,6 @@ import ( "github.com/ava-labs/coreth/peer" "github.com/ava-labs/coreth/plugin/evm/atomic" atomicstate "github.com/ava-labs/coreth/plugin/evm/atomic/state" - atomicstateinterfaces "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" atomicsync "github.com/ava-labs/coreth/plugin/evm/atomic/sync" atomictxpool "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" @@ -260,9 +259,9 @@ type VM struct { // [atomicTxRepository] maintains two indexes on accepted atomic txs. 
// - txID to accepted atomic tx // - block height to list of atomic txs accepted on block at that height - atomicTxRepository atomicstateinterfaces.AtomicTxRepository + atomicTxRepository *atomicstate.AtomicTxRepository // [atomicBackend] abstracts verification and processing of atomic transactions - atomicBackend atomicstateinterfaces.AtomicBackend + atomicBackend *atomicstate.AtomicBackend builder *blockBuilder @@ -713,7 +712,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { vm.Client = vmsync.NewClient(&vmsync.ClientConfig{ Chain: vm.eth, State: vm.State, - ExtraSyncer: atomicsync.NewAtomicSyncExtender(vm.atomicBackend, vm.config.StateSyncRequestSize), + ExtraSyncer: atomicsync.NewAtomicSyncExtender(vm.atomicBackend, vm.atomicBackend.AtomicTrie(), vm.config.StateSyncRequestSize), Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.client, diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 750ec8b0c8..fbae28b871 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -25,7 +25,6 @@ import ( "github.com/ava-labs/coreth/eth/filters" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/atomic" - stateinterfaces "github.com/ava-labs/coreth/plugin/evm/atomic/state/interfaces" "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/trie" @@ -1469,16 +1468,6 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { } } -type wrappedBackend struct { - stateinterfaces.AtomicBackend - registeredBonusBlocks map[uint64]common.Hash -} - -func (w *wrappedBackend) IsBonus(blockHeight uint64, blockHash common.Hash) bool { - hash, ok := w.registeredBonusBlocks[blockHeight] - return ok && blockHash.Cmp(hash) == 0 -} - func TestBonusBlocksTxs(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") @@ -1536,11 +1525,7 @@ func TestBonusBlocksTxs(t 
*testing.T) { } // Make [blk] a bonus block. - wrappedBackend := &wrappedBackend{ - AtomicBackend: vm.atomicBackend, - registeredBonusBlocks: map[uint64]common.Hash{blk.Height(): common.Hash(blk.ID())}, - } - vm.atomicBackend = wrappedBackend + vm.atomicBackend.AddBonusBlock(blk.Height(), blk.ID()) // Remove the UTXOs from shared memory, so that non-bonus blocks will fail verification if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil { From 6bf036c2f3422b6326a6afad9687a96272f6a331 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 20 Jan 2025 14:57:20 +0300 Subject: [PATCH 49/91] remove unused var --- plugin/evm/vm_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 2afa2d9cc8..ef0f557904 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -70,7 +70,6 @@ var ( testKeys []*secp256k1.PrivateKey testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] testShortIDAddrs []ids.ShortID - testAvaxAssetID = ids.ID{1, 2, 3} genesisJSON = func(cfg *params.ChainConfig) string { g := new(core.Genesis) From 551bd96dc2e55e6defca7633e0845bd187f6ab49 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 20 Jan 2025 15:40:02 +0300 Subject: [PATCH 50/91] remove newImportTx --- plugin/evm/export_tx_test.go | 8 +-- plugin/evm/import_tx_test.go | 4 +- plugin/evm/mempool_atomic_gossiping_test.go | 6 +- plugin/evm/syncervm_test.go | 2 +- plugin/evm/vm.go | 19 ------ plugin/evm/vm_test.go | 74 +++++++++++++-------- 6 files changed, 57 insertions(+), 56 deletions(-) diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 643eef391c..86474940f9 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -57,7 +57,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, } // Import the funds - importTx, err := 
vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -373,7 +373,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1736,7 +1736,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1939,7 +1939,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index da8a5c9db6..c4f8e6104e 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -56,7 +56,7 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.M importTxs := make([]*atomic.Tx, 0, 3) for _, ethAddr := range testEthAddrs { - importTx, err := vm.newImportTx(vm.ctx.XChainID, ethAddr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, ethAddr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -435,7 +435,7 @@ func TestNewImportTx(t *testing.T) { t.Fatal(err) } - tx, err := 
vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } diff --git a/plugin/evm/mempool_atomic_gossiping_test.go b/plugin/evm/mempool_atomic_gossiping_test.go index ae0be940e0..1fb7ac55b3 100644 --- a/plugin/evm/mempool_atomic_gossiping_test.go +++ b/plugin/evm/mempool_atomic_gossiping_test.go @@ -132,14 +132,14 @@ func TestMempoolPriorityDrop(t *testing.T) { mempool, err := atomictxpool.NewMempool(vm.ctx, prometheus.NewRegistry(), 1, vm.verifyTxAtTip) assert.NoError(err) - tx1, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx1, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } assert.NoError(mempool.AddRemoteTx(tx1)) assert.True(mempool.Has(tx1.ID())) - tx2, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[1]}) + tx2, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[1]}) if err != nil { t.Fatal(err) } @@ -147,7 +147,7 @@ func TestMempoolPriorityDrop(t *testing.T) { assert.True(mempool.Has(tx1.ID())) assert.False(mempool.Has(tx2.ID())) - tx3, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[1], new(big.Int).Mul(initialBaseFee, big.NewInt(2)), []*secp256k1.PrivateKey{testKeys[1]}) + tx3, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[1], new(big.Int).Mul(initialBaseFee, big.NewInt(2)), []*secp256k1.PrivateKey{testKeys[1]}) if err != nil { t.Fatal(err) } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index e6a1b0b25d..51b4b1efaa 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -308,7 +308,7 @@ func createSyncServerAndClientVMs(t *testing.T, test 
syncTest, numBlocks int) *s switch i { case 0: // spend the UTXOs from shared memory - importTx, err = serverVM.newImportTx(serverVM.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err = newImportTx(serverVM, serverVM.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) require.NoError(err) require.NoError(serverVM.mempool.AddLocalTx(importTx)) case 1: diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 0b0393da66..5d6099f34f 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1840,25 +1840,6 @@ func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool { return lastAcceptedHeight == 0 } -func (vm *VM) newImportTx( - chainID ids.ID, // chain to import from - to common.Address, // Address of recipient - baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Keys to import the funds -) (*atomic.Tx, error) { - kc := secp256k1fx.NewKeychain() - for _, key := range keys { - kc.Add(key) - } - - atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1) - if err != nil { - return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) - } - - return atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) -} - func (vm *VM) PutLastAcceptedID(ID ids.ID) error { return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:]) } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 5a40ee7679..2518cb063c 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -505,7 +505,7 @@ func TestImportMissingUTXOs(t *testing.T) { require.NoError(t, err) }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) require.NoError(t, err) err = vm.mempool.AddLocalTx(importTx) 
require.NoError(t, err) @@ -545,7 +545,7 @@ func TestIssueAtomicTxs(t *testing.T) { } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -668,7 +668,7 @@ func TestBuildEthTxBlock(t *testing.T) { newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -811,14 +811,14 @@ func testConflictingImportTxs(t *testing.T, genesis string) { importTxs := make([]*atomic.Tx, 0, 3) conflictTxs := make([]*atomic.Tx, 0, 3) for i, key := range testKeys { - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[i], initialBaseFee, []*secp256k1.PrivateKey{key}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[i], initialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } importTxs = append(importTxs, importTx) conflictAddr := testEthAddrs[(i+1)%len(testEthAddrs)] - conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictAddr, initialBaseFee, []*secp256k1.PrivateKey{key}) + conflictTx, err := newImportTx(vm, vm.ctx.XChainID, conflictAddr, initialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -1145,7 +1145,7 @@ func TestSetPreferenceRace(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm1, vm1.ctx.XChainID, testEthAddrs[1], 
initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1370,12 +1370,12 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - importTx0A, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key0}) + importTx0A, err := newImportTx(vm, vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key0}) if err != nil { t.Fatal(err) } // Create a conflicting transaction - importTx0B, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[2], initialBaseFee, []*secp256k1.PrivateKey{key0}) + importTx0B, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[2], initialBaseFee, []*secp256k1.PrivateKey{key0}) if err != nil { t.Fatal(err) } @@ -1433,7 +1433,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { t.Fatal(err) } - importTx1, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key1}) + importTx1, err := newImportTx(vm, vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key1}) if err != nil { t.Fatalf("Failed to issue importTx1 due to: %s", err) } @@ -1512,7 +1512,7 @@ func TestBonusBlocksTxs(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1597,7 +1597,7 @@ func TestReorgProtection(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm1, vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1765,7 +1765,7 @@ 
func TestNonCanonicalAccept(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm1, vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1960,7 +1960,7 @@ func TestStickyPreference(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm1, vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2218,7 +2218,7 @@ func TestUncleBlock(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm1, vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2386,7 +2386,7 @@ func TestEmptyBlock(t *testing.T) { } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2467,7 +2467,7 @@ func TestAcceptReorg(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm1, vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2651,7 +2651,7 @@ func TestFutureBlock(t *testing.T) { } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, 
[]*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2716,7 +2716,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2820,7 +2820,7 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2894,7 +2894,7 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -3207,7 +3207,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -3377,7 +3377,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -3568,7 
+3568,7 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { // Create a conflict set for each pair of transactions conflictSets := make([]set.Set[ids.ID], len(testKeys)) for index, key := range testKeys { - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[index], initialBaseFee, []*secp256k1.PrivateKey{key}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[index], initialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -3576,7 +3576,7 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { t.Fatal(err) } conflictSets[index].Add(importTx.ID()) - conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictKey.Address, initialBaseFee, []*secp256k1.PrivateKey{key}) + conflictTx, err := newImportTx(vm, vm.ctx.XChainID, conflictKey.Address, initialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -3696,7 +3696,7 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { // Double the initial base fee used when estimating the cost of this transaction to ensure that when it is // used in ApricotPhase5 it still pays a sufficient fee with the fixed fee per atomic transaction. - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm1, vm1.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -3755,7 +3755,7 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { // Since rewinding is permitted for last accepted height of 0, we must // accept one block to test the SkipUpgradeCheck functionality. 
- importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) require.NoError(t, err) require.NoError(t, vm.mempool.AddLocalTx(importTx)) <-issuer @@ -3848,7 +3848,7 @@ func TestParentBeaconRootBlock(t *testing.T) { } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := newImportTx(vm, vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -3946,3 +3946,23 @@ func TestNoBlobsAllowed(t *testing.T) { err = vmBlock.Verify(ctx) require.ErrorContains(err, "blobs not enabled on avalanche networks") } + +func newImportTx( + vm *VM, + chainID ids.ID, // chain to import from + to common.Address, // Address of recipient + baseFee *big.Int, // fee to use post-AP3 + keys []*secp256k1.PrivateKey, // Keys to import the funds +) (*atomic.Tx, error) { + kc := secp256k1fx.NewKeychain() + for _, key := range keys { + kc.Add(key) + } + + atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1) + if err != nil { + return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) + } + + return atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) +} From ae808819e167876832128040c6a0d9bfec581fff Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 20 Jan 2025 18:47:58 +0300 Subject: [PATCH 51/91] wrap atomic vm --- plugin/evm/atomic/vm/vm.go | 60 ++++++++++++++++++++++++++++++++++++++ plugin/evm/factory.go | 10 ++++++- plugin/main.go | 4 +-- 3 files changed, 71 insertions(+), 3 deletions(-) create mode 100644 plugin/evm/atomic/vm/vm.go diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go new file mode 100644 index 0000000000..950bf1835f --- 
/dev/null +++ b/plugin/evm/atomic/vm/vm.go @@ -0,0 +1,60 @@ +package vm + +import ( + "context" + + avalanchedatabase "github.com/ava-labs/avalanchego/database" + avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" + + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var ( + _ secp256k1fx.VM = &VM{} + _ block.ChainVM = &VM{} + _ block.BuildBlockWithContextChainVM = &VM{} + _ block.StateSyncableVM = &VM{} +) + +type innerVM interface { + avalanchecommon.VM + secp256k1fx.VM + block.ChainVM + block.BuildBlockWithContextChainVM + block.StateSyncableVM +} + +type VM struct { + innerVM // Inner EVM +} + +func WrapVM(vm innerVM) *VM { + return &VM{innerVM: vm} +} + +// Initialize implements the snowman.ChainVM interface +func (vm *VM) Initialize( + ctx context.Context, + chainCtx *snow.Context, + db avalanchedatabase.Database, + genesisBytes []byte, + upgradeBytes []byte, + configBytes []byte, + toEngine chan<- avalanchecommon.Message, + fxs []*avalanchecommon.Fx, + appSender avalanchecommon.AppSender, +) error { + return vm.innerVM.Initialize( + ctx, + chainCtx, + db, + genesisBytes, + upgradeBytes, + configBytes, + toEngine, + fxs, + appSender, + ) +} diff --git a/plugin/evm/factory.go b/plugin/evm/factory.go index a08fbc2a40..d88df59944 100644 --- a/plugin/evm/factory.go +++ b/plugin/evm/factory.go @@ -5,8 +5,11 @@ package evm import ( "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" + + atomicvm "github.com/ava-labs/coreth/plugin/evm/atomic/vm" ) var ( @@ -16,8 +19,13 @@ var ( _ vms.Factory = &Factory{} ) +// TODO: either move this from plugin or move the VM itself type Factory struct{} func (*Factory) New(logging.Logger) (interface{}, error) { - return &VM{}, nil + return atomicvm.WrapVM(&VM{}), nil +} + 
+func (*Factory) NewPlugin() block.ChainVM { + return atomicvm.WrapVM(&VM{IsPlugin: true}) } diff --git a/plugin/main.go b/plugin/main.go index 4080476d15..a1b18add0f 100644 --- a/plugin/main.go +++ b/plugin/main.go @@ -29,6 +29,6 @@ func main() { fmt.Printf("failed to set fd limit correctly due to: %s", err) os.Exit(1) } - - rpcchainvm.Serve(context.Background(), &evm.VM{IsPlugin: true}) + factory := evm.Factory{} + rpcchainvm.Serve(context.Background(), factory.NewPlugin()) } From f75f052e7a9adbcc76c908be168ffe47fb69c357 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 20 Jan 2025 19:07:19 +0300 Subject: [PATCH 52/91] set network codec --- plugin/evm/atomic/vm/vm.go | 28 +++++++++++++++++++++++----- plugin/evm/vm.go | 28 +++++++++++++++------------- plugin/evm/vm_warp_test.go | 11 +++++++++++ 3 files changed, 49 insertions(+), 18 deletions(-) diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index 950bf1835f..8b3dac493c 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -2,9 +2,13 @@ package vm import ( "context" + "fmt" + "github.com/ava-labs/avalanchego/codec" avalanchedatabase "github.com/ava-labs/avalanchego/database" avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/coreth/plugin/evm/atomic/sync" + "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" @@ -18,7 +22,12 @@ var ( _ block.StateSyncableVM = &VM{} ) -type innerVM interface { +type ExtandableEVM interface { + SetNetworkCodec(codec codec.Manager) error +} + +type InnerVM interface { + ExtandableEVM avalanchecommon.VM secp256k1fx.VM block.ChainVM @@ -27,11 +36,11 @@ type innerVM interface { } type VM struct { - innerVM // Inner EVM + InnerVM // Inner EVM } -func WrapVM(vm innerVM) *VM { - return &VM{innerVM: vm} +func WrapVM(vm InnerVM) *VM { + return &VM{InnerVM: vm} } // Initialize implements the snowman.ChainVM 
interface @@ -46,7 +55,16 @@ func (vm *VM) Initialize( fxs []*avalanchecommon.Fx, appSender avalanchecommon.AppSender, ) error { - return vm.innerVM.Initialize( + innerVM := vm.InnerVM + // Register the codec for the atomic block sync summary + networkCodec, err := message.NewCodec(sync.AtomicSyncSummary{}) + if err != nil { + return fmt.Errorf("failed to create codec manager: %w", err) + } + if err := innerVM.SetNetworkCodec(networkCodec); err != nil { + return fmt.Errorf("failed to set network codec: %w", err) + } + return innerVM.Initialize( ctx, chainCtx, db, diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index a7990e16f6..6664f8ce6f 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -158,8 +158,6 @@ var ( metadataPrefix = []byte("metadata") warpPrefix = []byte("warp") ethDBPrefix = []byte("ethdb") - - networkCodec codec.Manager ) var ( @@ -201,13 +199,6 @@ func init() { // Preserving the log level allows us to update the root handler while writing to the original // [os.Stderr] that is being piped through to the logger via the rpcchainvm. 
originalStderr = os.Stderr - - // Register the codec for the atomic block sync summary - var err error - networkCodec, err = message.NewCodec(atomicsync.AtomicSyncSummary{}) - if err != nil { - panic(fmt.Errorf("failed to create codec manager: %w", err)) - } } // VM implements the snowman.ChainVM interface @@ -227,6 +218,9 @@ type VM struct { chainConfig *params.ChainConfig ethConfig ethconfig.Config + // Extension Points + networkCodec codec.Manager + // pointers to eth constructs eth *eth.Ethereum txPool *txpool.TxPool @@ -314,6 +308,14 @@ type VM struct { rpcHandlers []interface{ Stop() } } +func (vm *VM) SetNetworkCodec(codec codec.Manager) error { + if vm.networkCodec != nil { + return errors.New("network codec already set") + } + vm.networkCodec = codec + return nil +} + // CodecRegistry implements the secp256k1fx interface func (vm *VM) CodecRegistry() codec.Registry { return vm.baseCodec } @@ -549,7 +551,7 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to initialize p2p network: %w", err) } vm.p2pValidators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) - vm.Network = peer.NewNetwork(p2pNetwork, appSender, networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) + vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) vm.client = peer.NewNetworkClient(vm.Network) // Initialize warp backend @@ -717,7 +719,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.client, - Codec: networkCodec, + Codec: vm.networkCodec, Stats: stats.NewClientSyncerStats(leafMetricsNames), StateSyncNodeIDs: stateSyncIDs, BlockParser: vm, @@ -1246,7 +1248,7 @@ func (vm *VM) setAppRequestHandlers() error { vm.blockChain, vm.chaindb, vm.warpBackend, - networkCodec, + vm.networkCodec, vm.leafRequestTypeConfigs, ) 
vm.Network.SetRequestHandler(networkHandler) @@ -1500,7 +1502,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { } if vm.config.WarpAPIEnabled { - warpAPI := warp.NewAPI(vm.ctx, networkCodec, vm.warpBackend, vm.client, vm.requirePrimaryNetworkSigners) + warpAPI := warp.NewAPI(vm.ctx, vm.networkCodec, vm.warpBackend, vm.client, vm.requirePrimaryNetworkSigners) if err := handler.RegisterName("warp", warpAPI); err != nil { return nil, err } diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index b2d7dd46d8..58dfb3761f 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -12,6 +12,7 @@ import ( _ "embed" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" @@ -62,6 +63,16 @@ const ( signersPrimary ) +var networkCodec codec.Manager + +func init() { + var err error + networkCodec, err = message.NewCodec(message.BlockSyncSummary{}) + if err != nil { + panic(err) + } +} + func TestSendWarpMessage(t *testing.T) { require := require.New(t) issuer, vm, _, _, _ := GenesisVM(t, true, genesisJSONDurango, "", "") From 96eb3ed26bac13655e2922964b1b9969298445a1 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 20 Jan 2025 19:47:58 +0300 Subject: [PATCH 53/91] unexport inner evm --- plugin/evm/admin.go | 4 +- plugin/evm/api.go | 4 +- plugin/evm/atomic/vm/vm.go | 4 +- plugin/evm/block.go | 4 +- plugin/evm/block_builder.go | 2 +- plugin/evm/export_tx_test.go | 2 +- plugin/evm/factory.go | 4 +- plugin/evm/formatting.go | 8 +-- plugin/evm/gossip.go | 2 +- plugin/evm/health.go | 2 +- plugin/evm/import_tx_test.go | 32 ++++----- plugin/evm/syncervm_test.go | 22 +++--- plugin/evm/tx_gossip_test.go | 55 +++++++-------- plugin/evm/tx_test.go | 4 +- plugin/evm/vm.go | 131 ++++++++++++++++++++--------------- plugin/evm/vm_database.go | 4 +- plugin/evm/vm_test.go | 23 
+++--- plugin/evm/vm_warp_test.go | 8 +-- 18 files changed, 165 insertions(+), 150 deletions(-) diff --git a/plugin/evm/admin.go b/plugin/evm/admin.go index 34595a0b0e..cbd7b70ba4 100644 --- a/plugin/evm/admin.go +++ b/plugin/evm/admin.go @@ -15,11 +15,11 @@ import ( // Admin is the API service for admin API calls type Admin struct { - vm *VM + vm *sharedEvm profiler profiler.Profiler } -func NewAdminService(vm *VM, performanceDir string) *Admin { +func NewAdminService(vm *sharedEvm, performanceDir string) *Admin { return &Admin{ vm: vm, profiler: profiler.New(performanceDir), diff --git a/plugin/evm/api.go b/plugin/evm/api.go index 12791d4e08..eede874958 100644 --- a/plugin/evm/api.go +++ b/plugin/evm/api.go @@ -40,7 +40,7 @@ var ( ) // SnowmanAPI introduces snowman specific functionality to the evm -type SnowmanAPI struct{ vm *VM } +type SnowmanAPI struct{ vm *sharedEvm } // GetAcceptedFrontReply defines the reply that will be sent from the // GetAcceptedFront API call @@ -67,7 +67,7 @@ func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { } // AvaxAPI offers Avalanche network related API methods -type AvaxAPI struct{ vm *VM } +type AvaxAPI struct{ vm *sharedEvm } type VersionReply struct { Version string `json:"version"` diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index 8b3dac493c..32c3fad99d 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -22,12 +22,12 @@ var ( _ block.StateSyncableVM = &VM{} ) -type ExtandableEVM interface { +type ExtensibleEVM interface { SetNetworkCodec(codec codec.Manager) error } type InnerVM interface { - ExtandableEVM + ExtensibleEVM avalanchecommon.VM secp256k1fx.VM block.ChainVM diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 80bd07de7b..8b8b2a534f 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -112,12 +112,12 @@ func readMainnetBonusBlocks() (map[uint64]ids.ID, error) { type Block struct { id ids.ID ethBlock *types.Block - vm *VM + vm *sharedEvm 
atomicTxs []*atomic.Tx } // newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface -func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { +func (vm *sharedEvm) newBlock(ethBlock *types.Block) (*Block, error) { isApricotPhase5 := vm.chainConfig.IsApricotPhase5(ethBlock.Time()) atomicTxs, err := atomic.ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, atomic.Codec) if err != nil { diff --git a/plugin/evm/block_builder.go b/plugin/evm/block_builder.go index 967444b0d0..cb437676a3 100644 --- a/plugin/evm/block_builder.go +++ b/plugin/evm/block_builder.go @@ -53,7 +53,7 @@ type blockBuilder struct { buildBlockTimer *timer.Timer } -func (vm *VM) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message) *blockBuilder { +func (vm *sharedEvm) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message) *blockBuilder { b := &blockBuilder{ ctx: vm.ctx, chainConfig: vm.chainConfig, diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/export_tx_test.go index 643eef391c..89d5dddabc 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -26,7 +26,7 @@ import ( // createExportTxOptions adds funds to shared memory, imports them, and returns a list of export transactions // that attempt to send the funds to each of the test keys (list of length 3). 
-func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { +func createExportTxOptions(t *testing.T, vm *sharedEvm, issuer chan engCommon.Message, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { // Add a UTXO to shared memory utxo := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: ids.GenerateTestID()}, diff --git a/plugin/evm/factory.go b/plugin/evm/factory.go index d88df59944..e43ea519f1 100644 --- a/plugin/evm/factory.go +++ b/plugin/evm/factory.go @@ -23,9 +23,9 @@ var ( type Factory struct{} func (*Factory) New(logging.Logger) (interface{}, error) { - return atomicvm.WrapVM(&VM{}), nil + return atomicvm.WrapVM(NewExtensibleEVM(false)), nil } func (*Factory) NewPlugin() block.ChainVM { - return atomicvm.WrapVM(&VM{IsPlugin: true}) + return atomicvm.WrapVM(NewExtensibleEVM(true)) } diff --git a/plugin/evm/formatting.go b/plugin/evm/formatting.go index feeab134b7..2127422691 100644 --- a/plugin/evm/formatting.go +++ b/plugin/evm/formatting.go @@ -14,7 +14,7 @@ import ( // ParseServiceAddress get address ID from address string, being it either localized (using address manager, // doing also components validations), or not localized. 
// If both attempts fail, reports error from localized address parsing -func (vm *VM) ParseServiceAddress(addrStr string) (ids.ShortID, error) { +func (vm *sharedEvm) ParseServiceAddress(addrStr string) (ids.ShortID, error) { addr, err := ids.ShortFromString(addrStr) if err == nil { return addr, nil @@ -23,7 +23,7 @@ func (vm *VM) ParseServiceAddress(addrStr string) (ids.ShortID, error) { } // ParseLocalAddress takes in an address for this chain and produces the ID -func (vm *VM) ParseLocalAddress(addrStr string) (ids.ShortID, error) { +func (vm *sharedEvm) ParseLocalAddress(addrStr string) (ids.ShortID, error) { chainID, addr, err := vm.ParseAddress(addrStr) if err != nil { return ids.ShortID{}, err @@ -36,13 +36,13 @@ func (vm *VM) ParseLocalAddress(addrStr string) (ids.ShortID, error) { } // FormatLocalAddress takes in a raw address and produces the formatted address -func (vm *VM) FormatLocalAddress(addr ids.ShortID) (string, error) { +func (vm *sharedEvm) FormatLocalAddress(addr ids.ShortID) (string, error) { return vm.FormatAddress(vm.ctx.ChainID, addr) } // FormatAddress takes in a chainID and a raw address and produces the formatted // address -func (vm *VM) FormatAddress(chainID ids.ID, addr ids.ShortID) (string, error) { +func (vm *sharedEvm) FormatAddress(chainID ids.ID, addr ids.ShortID) (string, error) { chainIDAlias, err := vm.ctx.BCLookup.PrimaryAlias(chainID) if err != nil { return "", err diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go index 16d632bd94..bea7115331 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/gossip.go @@ -220,7 +220,7 @@ func (tx *GossipEthTx) GossipID() ids.ID { // EthPushGossiper is used by the ETH backend to push transactions issued over // the RPC and added to the mempool to peers. 
type EthPushGossiper struct { - vm *VM + vm *sharedEvm } func (e *EthPushGossiper) Add(tx *types.Transaction) { diff --git a/plugin/evm/health.go b/plugin/evm/health.go index 116f820eb2..cd13cee44d 100644 --- a/plugin/evm/health.go +++ b/plugin/evm/health.go @@ -8,7 +8,7 @@ import "context" // Health returns nil if this chain is healthy. // Also returns details, which should be one of: // string, []byte, map[string]string -func (vm *VM) HealthCheck(context.Context) (interface{}, error) { +func (vm *sharedEvm) HealthCheck(context.Context) (interface{}, error) { // TODO perform actual health check return nil, nil } diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index da8a5c9db6..7880d32962 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -25,7 +25,7 @@ import ( // createImportTxOptions adds a UTXO to shared memory and generates a list of import transactions sending this UTXO // to each of the three test keys (conflicting transactions) -func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { +func createImportTxOptions(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: ids.GenerateTestID()}, Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, @@ -428,7 +428,7 @@ func TestNewImportTx(t *testing.T) { importAmount := uint64(5000000) // createNewImportAVAXTx adds a UTXO to shared memory and then constructs a new import transaction // and checks that it has the correct fee for the base fee that has been used - createNewImportAVAXTx := func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + createNewImportAVAXTx := func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) if err != nil { @@ -468,7 +468,7 @@ func 
TestNewImportTx(t *testing.T) { return tx } - checkState := func(t *testing.T, vm *VM) { + checkState := func(t *testing.T, vm *sharedEvm) { txs := vm.LastAcceptedBlockInternal().(*Block).atomicTxs if len(txs) != 1 { t.Fatalf("Expected one import tx to be in the last accepted block, but found %d", len(txs)) @@ -871,7 +871,7 @@ func TestImportTxGasCost(t *testing.T) { func TestImportTxSemanticVerify(t *testing.T) { tests := map[string]atomicTxTest{ "UTXO not present during bootstrapping": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, @@ -900,7 +900,7 @@ func TestImportTxSemanticVerify(t *testing.T) { bootstrapping: true, }, "UTXO not present": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, @@ -929,7 +929,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "failed to fetch import UTXOs from", }, "garbage UTXO": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxoID.InputID() @@ -969,7 +969,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "failed to unmarshal UTXO", }, "UTXO AssetID mismatch": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) 
*atomic.Tx { txID := ids.GenerateTestID() expectedAssetID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, expectedAssetID, 1, testShortIDAddrs[0]) @@ -1003,7 +1003,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: atomic.ErrAssetIDMismatch.Error(), }, "insufficient AVAX funds": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1036,7 +1036,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx flow check failed due to", }, "insufficient non-AVAX funds": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() assetID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) @@ -1070,7 +1070,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx flow check failed due to", }, "no signatures": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1103,7 +1103,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx contained mismatched number of inputs/credentials", }, "incorrect signature": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := 
ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1137,7 +1137,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx transfer failed verification", }, "non-unique EVM Outputs": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 2, testShortIDAddrs[0]) if err != nil { @@ -1190,7 +1190,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { assetID := ids.GenerateTestID() tests := map[string]atomicTxTest{ "AVAX UTXO": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1220,7 +1220,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { } return tx }, - checkState: func(t *testing.T, vm *VM) { + checkState: func(t *testing.T, vm *sharedEvm) { lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) sdb, err := vm.blockChain.StateAt(lastAcceptedBlock.ethBlock.Root()) @@ -1235,7 +1235,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { }, }, "non-AVAX UTXO": { - setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1265,7 +1265,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { } return tx }, - checkState: func(t *testing.T, vm *VM) { + checkState: func(t *testing.T, vm *sharedEvm) { 
lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) sdb, err := vm.blockChain.StateAt(lastAcceptedBlock.ethBlock.Root()) diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 64565b6c65..637b7063c4 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -101,7 +101,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { syncableInterval: 256, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync syncMode: block.StateSyncStatic, - responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { + responseIntercept: func(syncerVM *sharedEvm, nodeID ids.NodeID, requestID uint32, response []byte) { lock.Lock() defer lock.Unlock() @@ -129,7 +129,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { test.responseIntercept = nil test.expectedErr = nil - syncDisabledVM := &VM{} + syncDisabledVM := NewDefaultEVM() appSender := &enginetest.Sender{T: t} appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { @@ -199,7 +199,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { syncDisabledVM.blockChain.DrainAcceptorQueue() // Create a new VM from the same database with state sync enabled. 
- syncReEnabledVM := &VM{} + syncReEnabledVM := NewDefaultEVM() // Enable state sync in configJSON configJSON := fmt.Sprintf( `{"state-sync-enabled":true, "state-sync-min-blocks":%d}`, @@ -255,7 +255,7 @@ func TestVMShutdownWhileSyncing(t *testing.T) { syncableInterval: 256, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync syncMode: block.StateSyncStatic, - responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { + responseIntercept: func(syncerVM *sharedEvm, nodeID ids.NodeID, requestID uint32, response []byte) { lock.Lock() defer lock.Unlock() @@ -365,7 +365,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( t, false, "", stateSyncEnabledJSON, "", alloc, ) - shutdownOnceSyncerVM := &shutdownOnceVM{VM: syncerVM} + shutdownOnceSyncerVM := &shutdownOnceVM{sharedEvm: syncerVM} t.Cleanup(func() { require.NoError(shutdownOnceSyncerVM.Shutdown(context.Background())) }) @@ -422,13 +422,13 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s // syncVMSetup contains the required set up for a client VM to perform state sync // off of a server VM. 
type syncVMSetup struct { - serverVM *VM + serverVM *sharedEvm serverAppSender *enginetest.Sender includedAtomicTxs []*atomic.Tx fundedAccounts map[*keystore.Key]*types.StateAccount - syncerVM *VM + syncerVM *sharedEvm syncerDB avalanchedatabase.Database syncerEngineChan <-chan commonEng.Message syncerAtomicMemory *avalancheatomic.Memory @@ -436,19 +436,19 @@ type syncVMSetup struct { } type shutdownOnceVM struct { - *VM + *sharedEvm shutdownOnce sync.Once } func (vm *shutdownOnceVM) Shutdown(ctx context.Context) error { var err error - vm.shutdownOnce.Do(func() { err = vm.VM.Shutdown(ctx) }) + vm.shutdownOnce.Do(func() { err = vm.sharedEvm.Shutdown(ctx) }) return err } // syncTest contains both the actual VMs as well as the parameters with the expected output. type syncTest struct { - responseIntercept func(vm *VM, nodeID ids.NodeID, requestID uint32, response []byte) + responseIntercept func(vm *sharedEvm, nodeID ids.NodeID, requestID uint32, response []byte) stateSyncMinBlocks uint64 syncableInterval uint64 syncMode block.StateSyncMode @@ -617,7 +617,7 @@ func patchBlock(blk *types.Block, root common.Hash, db ethdb.Database) *types.Bl // generateAndAcceptBlocks uses [core.GenerateChain] to generate blocks, then // calls Verify and Accept on each generated block // TODO: consider using this helper function in vm_test.go and elsewhere in this package to clean up tests -func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, *core.BlockGen), accepted func(*types.Block)) { +func generateAndAcceptBlocks(t *testing.T, vm *sharedEvm, numBlocks int, gen func(int, *core.BlockGen), accepted func(*types.Block)) { t.Helper() // acceptExternalBlock defines a function to parse, verify, and accept a block once it has been diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 35527a51f7..1cdc10679e 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -57,11 +57,11 @@ func TestEthTxGossip(t *testing.T) { 
responseSender := &enginetest.SenderStub{ SentAppResponse: make(chan []byte, 1), } - vm := &VM{ - p2pSender: responseSender, - atomicTxGossipHandler: &p2p.NoOpHandler{}, - atomicTxPullGossiper: &gossip.NoOpGossiper{}, - } + vm := NewDefaultEVM() + + vm.p2pSender = responseSender + vm.atomicTxGossipHandler = &p2p.NoOpHandler{} + vm.atomicTxPullGossiper = &gossip.NoOpGossiper{} require.NoError(vm.Initialize( ctx, @@ -191,11 +191,10 @@ func TestAtomicTxGossip(t *testing.T) { responseSender := &enginetest.SenderStub{ SentAppResponse: make(chan []byte, 1), } - vm := &VM{ - p2pSender: responseSender, - ethTxGossipHandler: &p2p.NoOpHandler{}, - ethTxPullGossiper: &gossip.NoOpGossiper{}, - } + vm := NewDefaultEVM() + vm.p2pSender = responseSender + vm.ethTxGossipHandler = &p2p.NoOpHandler{} + vm.ethTxPullGossiper = &gossip.NoOpGossiper{} require.NoError(vm.Initialize( ctx, @@ -320,11 +319,10 @@ func TestEthTxPushGossipOutbound(t *testing.T) { SentAppGossip: make(chan []byte, 1), } - vm := &VM{ - p2pSender: sender, - ethTxPullGossiper: gossip.NoOpGossiper{}, - atomicTxPullGossiper: gossip.NoOpGossiper{}, - } + vm := NewDefaultEVM() + vm.p2pSender = sender + vm.ethTxPullGossiper = gossip.NoOpGossiper{} + vm.atomicTxPullGossiper = gossip.NoOpGossiper{} pk, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -380,11 +378,10 @@ func TestEthTxPushGossipInbound(t *testing.T) { snowCtx := utils.TestSnowContext() sender := &enginetest.Sender{} - vm := &VM{ - p2pSender: sender, - ethTxPullGossiper: gossip.NoOpGossiper{}, - atomicTxPullGossiper: gossip.NoOpGossiper{}, - } + vm := NewDefaultEVM() + vm.p2pSender = sender + vm.ethTxPullGossiper = gossip.NoOpGossiper{} + vm.atomicTxPullGossiper = gossip.NoOpGossiper{} pk, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -455,11 +452,10 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { sender := &enginetest.SenderStub{ SentAppGossip: make(chan []byte, 1), } - vm := &VM{ - p2pSender: sender, - ethTxPullGossiper: 
gossip.NoOpGossiper{}, - atomicTxPullGossiper: gossip.NoOpGossiper{}, - } + vm := NewDefaultEVM() + vm.p2pSender = sender + vm.ethTxPullGossiper = gossip.NoOpGossiper{} + vm.atomicTxPullGossiper = gossip.NoOpGossiper{} require.NoError(vm.Initialize( ctx, @@ -526,11 +522,10 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { require.NoError(err) sender := &enginetest.Sender{} - vm := &VM{ - p2pSender: sender, - ethTxPullGossiper: gossip.NoOpGossiper{}, - atomicTxPullGossiper: gossip.NoOpGossiper{}, - } + vm := NewDefaultEVM() + vm.p2pSender = sender + vm.ethTxPullGossiper = gossip.NoOpGossiper{} + vm.atomicTxPullGossiper = gossip.NoOpGossiper{} require.NoError(vm.Initialize( ctx, diff --git a/plugin/evm/tx_test.go b/plugin/evm/tx_test.go index 9bef967e68..689b13a1cb 100644 --- a/plugin/evm/tx_test.go +++ b/plugin/evm/tx_test.go @@ -79,14 +79,14 @@ func executeTxVerifyTest(t *testing.T, test atomicTxVerifyTest) { type atomicTxTest struct { // setup returns the atomic transaction for the test - setup func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx + setup func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx // define a string that should be contained in the error message if the tx fails verification // at some point. If the strings are empty, then the tx should pass verification at the // respective step. semanticVerifyErr, evmStateTransferErr, acceptErr string // checkState is called iff building and verifying a block containing the transaction is successful. Verifies // the state of the VM following the block's acceptance. 
- checkState func(t *testing.T, vm *VM) + checkState func(t *testing.T, vm *sharedEvm) // Whether or not the VM should be considered to still be bootstrapping bootstrapping bool diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 6664f8ce6f..24d747f53e 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -103,12 +103,12 @@ import ( ) var ( - _ block.ChainVM = &VM{} - _ block.BuildBlockWithContextChainVM = &VM{} - _ block.StateSyncableVM = &VM{} - _ statesyncclient.EthBlockParser = &VM{} - _ secp256k1fx.VM = &VM{} - _ vmsync.BlockAcceptor = &VM{} + _ block.ChainVM = &sharedEvm{} + _ block.BuildBlockWithContextChainVM = &sharedEvm{} + _ block.StateSyncableVM = &sharedEvm{} + _ statesyncclient.EthBlockParser = &sharedEvm{} + _ secp256k1fx.VM = &sharedEvm{} + _ vmsync.BlockAcceptor = &sharedEvm{} ) const ( @@ -201,8 +201,8 @@ func init() { originalStderr = os.Stderr } -// VM implements the snowman.ChainVM interface -type VM struct { +// sharedEvm implements the snowman.ChainVM interface +type sharedEvm struct { ctx *snow.Context // [cancel] may be nil until [snow.NormalOp] starts cancel context.CancelFunc @@ -308,7 +308,26 @@ type VM struct { rpcHandlers []interface{ Stop() } } -func (vm *VM) SetNetworkCodec(codec codec.Manager) error { +// NewDefaultEVM returns a new instance of the VM with default extensions +// This should not be called if the VM is being extended +func NewDefaultEVM() *sharedEvm { + vm := &sharedEvm{} + defaultCodec, err := message.NewCodec(message.BlockSyncSummary{}) + if err != nil { + panic(err) + } + if err := vm.SetNetworkCodec(defaultCodec); err != nil { + panic(err) + } + return vm +} + +func NewExtensibleEVM(isPlugin bool) *sharedEvm { + vm := &sharedEvm{IsPlugin: isPlugin} + return vm +} + +func (vm *sharedEvm) SetNetworkCodec(codec codec.Manager) error { if vm.networkCodec != nil { return errors.New("network codec already set") } @@ -317,13 +336,13 @@ func (vm *VM) SetNetworkCodec(codec codec.Manager) error { } // CodecRegistry 
implements the secp256k1fx interface -func (vm *VM) CodecRegistry() codec.Registry { return vm.baseCodec } +func (vm *sharedEvm) CodecRegistry() codec.Registry { return vm.baseCodec } // Clock implements the secp256k1fx interface -func (vm *VM) Clock() *mockable.Clock { return &vm.clock } +func (vm *sharedEvm) Clock() *mockable.Clock { return &vm.clock } // Logger implements the secp256k1fx interface -func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } +func (vm *sharedEvm) Logger() logging.Logger { return vm.ctx.Log } /* ****************************************************************************** @@ -332,12 +351,12 @@ func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } */ // implements SnowmanPlusPlusVM interface -func (vm *VM) GetActivationTime() time.Time { +func (vm *sharedEvm) GetActivationTime() time.Time { return utils.Uint64ToTime(vm.chainConfig.ApricotPhase4BlockTimestamp) } // Initialize implements the snowman.ChainVM interface -func (vm *VM) Initialize( +func (vm *sharedEvm) Initialize( _ context.Context, chainCtx *snow.Context, db avalanchedatabase.Database, @@ -634,7 +653,7 @@ func (vm *VM) Initialize( return vm.initializeStateSyncClient(lastAcceptedHeight) } -func (vm *VM) initializeMetrics() error { +func (vm *sharedEvm) initializeMetrics() error { vm.sdkMetrics = prometheus.NewRegistry() // If metrics are enabled, register the default metrics registry if !metrics.Enabled { @@ -648,7 +667,7 @@ func (vm *VM) initializeMetrics() error { return vm.ctx.Metrics.Register(sdkMetricsPrefix, vm.sdkMetrics) } -func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { +func (vm *sharedEvm) initializeChain(lastAcceptedHash common.Hash) error { nodecfg := &node.Config{ CorethVersion: Version, KeyStoreDir: vm.config.KeystoreDirectory, @@ -690,7 +709,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { // initializeStateSyncClient initializes the client for performing state sync. 
// If state sync is disabled, this function will wipe any ongoing summary from // disk to ensure that we do not continue syncing from an invalid snapshot. -func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { +func (vm *sharedEvm) initializeStateSyncClient(lastAcceptedHeight uint64) error { stateSyncEnabled := vm.stateSyncEnabled(lastAcceptedHeight) // parse nodeIDs from state sync IDs in vm config var stateSyncIDs []ids.NodeID @@ -747,7 +766,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { return nil } -func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { +func (vm *sharedEvm) initChainState(lastAcceptedBlock *types.Block) error { block, err := vm.newBlock(lastAcceptedBlock) if err != nil { return fmt.Errorf("failed to create block wrapper for the last accepted block: %w", err) @@ -780,14 +799,14 @@ func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { return vm.ctx.Metrics.Register(chainStateMetricsPrefix, chainStateRegisterer) } -func (vm *VM) createConsensusCallbacks() dummy.ConsensusCallbacks { +func (vm *sharedEvm) createConsensusCallbacks() dummy.ConsensusCallbacks { return dummy.ConsensusCallbacks{ OnFinalizeAndAssemble: vm.onFinalizeAndAssemble, OnExtraStateChange: vm.onExtraStateChange, } } -func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { +func (vm *sharedEvm) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { for { tx, exists := vm.mempool.NextTx() if !exists { @@ -834,7 +853,7 @@ func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.S } // assumes that we are in at least Apricot Phase 5. 
-func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { +func (vm *sharedEvm) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { var ( batchAtomicTxs []*atomic.Tx batchAtomicUTXOs set.Set[ids.ID] @@ -934,14 +953,14 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. return nil, nil, nil, nil } -func (vm *VM) onFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { +func (vm *sharedEvm) onFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { if !vm.chainConfig.IsApricotPhase5(header.Time) { return vm.preBatchOnFinalizeAndAssemble(header, state, txs) } return vm.postBatchOnFinalizeAndAssemble(header, state, txs) } -func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big.Int, *big.Int, error) { +func (vm *sharedEvm) onExtraStateChange(block *types.Block, state *state.StateDB) (*big.Int, *big.Int, error) { var ( batchContribution *big.Int = big.NewInt(0) batchGasUsed *big.Int = big.NewInt(0) @@ -1006,7 +1025,7 @@ func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big return batchContribution, batchGasUsed, nil } -func (vm *VM) SetState(_ context.Context, state snow.State) error { +func (vm *sharedEvm) SetState(_ context.Context, state snow.State) error { switch state { case snow.StateSyncing: vm.bootstrapped.Set(false) @@ -1021,7 +1040,7 @@ func (vm *VM) SetState(_ context.Context, state snow.State) error { } // onBootstrapStarted marks this VM as bootstrapping -func (vm *VM) onBootstrapStarted() error { +func (vm *sharedEvm) onBootstrapStarted() error { vm.bootstrapped.Set(false) if err := vm.Client.Error(); err != nil { return err @@ -1038,7 +1057,7 @@ func 
(vm *VM) onBootstrapStarted() error { } // onNormalOperationsStarted marks this VM as bootstrapped -func (vm *VM) onNormalOperationsStarted() error { +func (vm *sharedEvm) onNormalOperationsStarted() error { if vm.bootstrapped.Get() { return nil } @@ -1052,7 +1071,7 @@ func (vm *VM) onNormalOperationsStarted() error { } // initBlockBuilding starts goroutines to manage block building -func (vm *VM) initBlockBuilding() error { +func (vm *sharedEvm) initBlockBuilding() error { ctx, cancel := context.WithCancel(context.TODO()) vm.cancel = cancel @@ -1224,7 +1243,7 @@ func (vm *VM) initBlockBuilding() error { // setAppRequestHandlers sets the request handlers for the VM to serve state sync // requests. -func (vm *VM) setAppRequestHandlers() error { +func (vm *sharedEvm) setAppRequestHandlers() error { // Create standalone EVM TrieDB (read only) for serving leafs requests. // We create a standalone TrieDB here, so that it has a standalone cache from the one // used by the node when processing blocks. 
@@ -1255,7 +1274,7 @@ func (vm *VM) setAppRequestHandlers() error { return nil } -func (vm *VM) RegisterLeafRequestHandler(nodeType message.NodeType, metricName string, trieDB *triedb.Database, trieKeyLen int, useSnapshot bool) error { +func (vm *sharedEvm) RegisterLeafRequestHandler(nodeType message.NodeType, metricName string, trieDB *triedb.Database, trieKeyLen int, useSnapshot bool) error { if vm.leafRequestTypeConfigs == nil { vm.leafRequestTypeConfigs = make(map[message.NodeType]LeafRequestTypeConfig) } @@ -1274,7 +1293,7 @@ func (vm *VM) RegisterLeafRequestHandler(nodeType message.NodeType, metricName s } // Shutdown implements the snowman.ChainVM interface -func (vm *VM) Shutdown(context.Context) error { +func (vm *sharedEvm) Shutdown(context.Context) error { if vm.ctx == nil { return nil } @@ -1296,11 +1315,11 @@ func (vm *VM) Shutdown(context.Context) error { } // buildBlock builds a block to be wrapped by ChainState -func (vm *VM) buildBlock(ctx context.Context) (snowman.Block, error) { +func (vm *sharedEvm) buildBlock(ctx context.Context) (snowman.Block, error) { return vm.buildBlockWithContext(ctx, nil) } -func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) (snowman.Block, error) { +func (vm *sharedEvm) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) (snowman.Block, error) { if proposerVMBlockCtx != nil { log.Debug("Building block with context", "pChainBlockHeight", proposerVMBlockCtx.PChainHeight) } else { @@ -1351,7 +1370,7 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo } // parseBlock parses [b] into a block to be wrapped by ChainState. 
-func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { +func (vm *sharedEvm) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { ethBlock := new(types.Block) if err := rlp.DecodeBytes(b, ethBlock); err != nil { return nil, err @@ -1370,7 +1389,7 @@ func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { return block, nil } -func (vm *VM) ParseEthBlock(b []byte) (*types.Block, error) { +func (vm *sharedEvm) ParseEthBlock(b []byte) (*types.Block, error) { block, err := vm.parseBlock(context.TODO(), b) if err != nil { return nil, err @@ -1381,7 +1400,7 @@ func (vm *VM) ParseEthBlock(b []byte) (*types.Block, error) { // getBlock attempts to retrieve block [id] from the VM to be wrapped // by ChainState. -func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { +func (vm *sharedEvm) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { ethBlock := vm.blockChain.GetBlockByHash(common.Hash(id)) // If [ethBlock] is nil, return [avalanchedatabase.ErrNotFound] here // so that the miss is considered cacheable. @@ -1394,7 +1413,7 @@ func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { // GetAcceptedBlock attempts to retrieve block [blkID] from the VM. This method // only returns accepted blocks. 
-func (vm *VM) GetAcceptedBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { +func (vm *sharedEvm) GetAcceptedBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { blk, err := vm.GetBlock(ctx, blkID) if err != nil { return nil, err @@ -1414,7 +1433,7 @@ func (vm *VM) GetAcceptedBlock(ctx context.Context, blkID ids.ID) (snowman.Block } // SetPreference sets what the current tail of the chain is -func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { +func (vm *sharedEvm) SetPreference(ctx context.Context, blkID ids.ID) error { // Since each internal handler used by [vm.State] always returns a block // with non-nil ethBlock value, GetBlockInternal should never return a // (*Block) with a nil ethBlock value. @@ -1428,7 +1447,7 @@ func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { // VerifyHeightIndex always returns a nil error since the index is maintained by // vm.blockChain. -func (vm *VM) VerifyHeightIndex(context.Context) error { +func (vm *sharedEvm) VerifyHeightIndex(context.Context) error { return nil } @@ -1436,7 +1455,7 @@ func (vm *VM) VerifyHeightIndex(context.Context) error { // Note: the engine assumes that if a block is not found at [height], then // [avalanchedatabase.ErrNotFound] will be returned. This indicates that the VM has state // synced and does not have all historical blocks available. 
-func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { +func (vm *sharedEvm) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { lastAcceptedBlock := vm.LastAcceptedBlock() if lastAcceptedBlock.Height() < height { return ids.ID{}, avalanchedatabase.ErrNotFound @@ -1449,7 +1468,7 @@ func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, erro return ids.ID(hash), nil } -func (vm *VM) Version(context.Context) (string, error) { +func (vm *sharedEvm) Version(context.Context) (string, error) { return Version, nil } @@ -1465,7 +1484,7 @@ func newHandler(name string, service interface{}) (http.Handler, error) { } // CreateHandlers makes new http handlers that can handle API calls -func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { +func (vm *sharedEvm) CreateHandlers(context.Context) (map[string]http.Handler, error) { handler := rpc.NewServer(vm.config.APIMaxDuration.Duration) if vm.config.HttpBodyLimit > 0 { handler.SetHTTPBodyLimit(int(vm.config.HttpBodyLimit)) @@ -1523,7 +1542,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { } // CreateStaticHandlers makes new http handlers that can handle API calls -func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, error) { +func (vm *sharedEvm) CreateStaticHandlers(context.Context) (map[string]http.Handler, error) { handler := rpc.NewServer(0) if vm.config.HttpBodyLimit > 0 { handler.SetHTTPBodyLimit(int(vm.config.HttpBodyLimit)) @@ -1546,7 +1565,7 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er // getAtomicTx returns the requested transaction, status, and height. // If the status is Unknown, then the returned transaction will be nil. 
-func (vm *VM) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) { +func (vm *sharedEvm) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) { if tx, height, err := vm.atomicTxRepository.GetByTxID(txID); err == nil { return tx, atomic.Accepted, height, nil } else if err != avalanchedatabase.ErrNotFound { @@ -1565,7 +1584,7 @@ func (vm *VM) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error // ParseAddress takes in an address and produces the ID of the chain it's for // the ID of the address -func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { +func (vm *sharedEvm) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { chainIDAlias, hrp, addrBytes, err := address.Parse(addrStr) if err != nil { return ids.ID{}, ids.ShortID{}, err @@ -1590,7 +1609,7 @@ func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { } // verifyTxAtTip verifies that [tx] is valid to be issued on top of the currently preferred block -func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { +func (vm *sharedEvm) verifyTxAtTip(tx *atomic.Tx) error { if txByteLen := len(tx.SignedBytes()); txByteLen > targetAtomicTxsSize { return fmt.Errorf("tx size (%d) exceeds total atomic txs size target (%d)", txByteLen, targetAtomicTxsSize) } @@ -1631,7 +1650,7 @@ func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { // Note: verifyTx may modify [state]. If [state] needs to be properly maintained, the caller is responsible // for reverting to the correct snapshot after calling this function. If this function is called with a // throwaway state, then this is not necessary. 
-func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { +func (vm *sharedEvm) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { parentIntf, err := vm.GetBlockInternal(context.TODO(), ids.ID(parentHash)) if err != nil { return fmt.Errorf("failed to get parent block: %w", err) @@ -1656,7 +1675,7 @@ func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, // verifyTxs verifies that [txs] are valid to be issued into a block with parent block [parentHash] // using [rules] as the current rule set. -func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules params.Rules) error { +func (vm *sharedEvm) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules params.Rules) error { // Ensure that the parent was verified and inserted correctly. if !vm.blockChain.HasBlock(parentHash, height-1) { return errRejectedParent @@ -1703,7 +1722,7 @@ func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.I // GetAtomicUTXOs returns the utxos that at least one of the provided addresses is // referenced in. -func (vm *VM) GetAtomicUTXOs( +func (vm *sharedEvm) GetAtomicUTXOs( chainID ids.ID, addrs set.Set[ids.ShortID], startAddr ids.ShortID, @@ -1726,7 +1745,7 @@ func (vm *VM) GetAtomicUTXOs( } // currentRules returns the chain rules for the current block. -func (vm *VM) currentRules() params.Rules { +func (vm *sharedEvm) currentRules() params.Rules { header := vm.eth.APIBackend.CurrentHeader() return vm.chainConfig.Rules(header.Number, header.Time) } @@ -1734,7 +1753,7 @@ func (vm *VM) currentRules() params.Rules { // requirePrimaryNetworkSigners returns true if warp messages from the primary // network must be signed by the primary network validators. 
// This is necessary when the subnet is not validating the primary network. -func (vm *VM) requirePrimaryNetworkSigners() bool { +func (vm *sharedEvm) requirePrimaryNetworkSigners() bool { switch c := vm.currentRules().ActivePrecompiles[warpcontract.ContractAddress].(type) { case *warpcontract.Config: return c.RequirePrimaryNetworkSigners @@ -1743,7 +1762,7 @@ func (vm *VM) requirePrimaryNetworkSigners() bool { } } -func (vm *VM) startContinuousProfiler() { +func (vm *sharedEvm) startContinuousProfiler() { // If the profiler directory is empty, return immediately // without creating or starting a continuous profiler. if vm.config.ContinuousProfilerDir == "" { @@ -1773,7 +1792,7 @@ func (vm *VM) startContinuousProfiler() { // last accepted block hash and height by reading directly from [vm.chaindb] instead of relying // on [chain]. // Note: assumes [vm.chaindb] and [vm.genesisHash] have been initialized. -func (vm *VM) readLastAccepted() (common.Hash, uint64, error) { +func (vm *sharedEvm) readLastAccepted() (common.Hash, uint64, error) { // Attempt to load last accepted block to determine if it is necessary to // initialize state with the genesis block. 
lastAcceptedBytes, lastAcceptedErr := vm.acceptedBlockDB.Get(lastAcceptedKey) @@ -1832,7 +1851,7 @@ func attachEthService(handler *rpc.Server, apis []rpc.API, names []string) error return nil } -func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool { +func (vm *sharedEvm) stateSyncEnabled(lastAcceptedHeight uint64) bool { if vm.config.StateSyncEnabled != nil { // if the config is set, use that return *vm.config.StateSyncEnabled @@ -1842,7 +1861,7 @@ func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool { return lastAcceptedHeight == 0 } -func (vm *VM) newImportTx( +func (vm *sharedEvm) newImportTx( chainID ids.ID, // chain to import from to common.Address, // Address of recipient baseFee *big.Int, // fee to use post-AP3 @@ -1862,7 +1881,7 @@ func (vm *VM) newImportTx( } // newExportTx returns a new ExportTx -func (vm *VM) newExportTx( +func (vm *sharedEvm) newExportTx( assetID ids.ID, // AssetID of the tokens to export amount uint64, // Amount of tokens to export chainID ids.ID, // Chain to send the UTXOs to @@ -1894,6 +1913,6 @@ func (vm *VM) newExportTx( return tx, nil } -func (vm *VM) PutLastAcceptedID(ID ids.ID) error { +func (vm *sharedEvm) PutLastAcceptedID(ID ids.ID) error { return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:]) } diff --git a/plugin/evm/vm_database.go b/plugin/evm/vm_database.go index f2a5b4c344..22ed7a9768 100644 --- a/plugin/evm/vm_database.go +++ b/plugin/evm/vm_database.go @@ -17,7 +17,7 @@ import ( // initializeDBs initializes the databases used by the VM. // coreth always uses the avalanchego provided database. -func (vm *VM) initializeDBs(db avalanchedatabase.Database) error { +func (vm *sharedEvm) initializeDBs(db avalanchedatabase.Database) error { // Use NewNested rather than New so that the structure of the database // remains the same regardless of the provided baseDB type. 
vm.chaindb = rawdb.NewDatabase(database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, db))) @@ -32,7 +32,7 @@ func (vm *VM) initializeDBs(db avalanchedatabase.Database) error { return nil } -func (vm *VM) inspectDatabases() error { +func (vm *sharedEvm) inspectDatabases() error { start := time.Now() log.Info("Starting database inspection") if err := rawdb.InspectDatabase(vm.chaindb, nil, nil); err != nil { diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index ef0f557904..6fc52648f1 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -231,7 +231,7 @@ func GenesisVM(t *testing.T, upgradeJSON string, ) ( chan commonEng.Message, - *VM, + *sharedEvm, database.Database, *avalancheatomic.Memory, *enginetest.Sender, @@ -250,12 +250,13 @@ func GenesisVMWithClock( clock mockable.Clock, ) ( chan commonEng.Message, - *VM, + *sharedEvm, database.Database, *avalancheatomic.Memory, *enginetest.Sender, ) { - vm := &VM{clock: clock} + vm := NewDefaultEVM() + vm.clock = clock ctx, dbManager, genesisBytes, issuer, m := setupGenesis(t, genesisJSON) appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true @@ -319,7 +320,7 @@ func addUTXO(sharedMemory *avalancheatomic.Memory, ctx *snow.Context, txID ids.I // GenesisVMWithUTXOs creates a GenesisVM and generates UTXOs in the X-Chain Shared Memory containing AVAX based on the [utxos] map // Generates UTXOIDs by using a hash of the address in the [utxos] map such that the UTXOs will be generated deterministically. 
// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *avalancheatomic.Memory, *enginetest.Sender) { +func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *sharedEvm, database.Database, *avalancheatomic.Memory, *enginetest.Sender) { issuer, vm, db, sharedMemory, sender := GenesisVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON) for addr, avaxAmount := range utxos { txID, err := ids.ToID(hashing.ComputeHash256(addr.Bytes())) @@ -753,7 +754,7 @@ func TestBuildEthTxBlock(t *testing.T) { t.Fatalf("Found unexpected blkID for parent of blk2") } - restartedVM := &VM{} + restartedVM := NewDefaultEVM() if err := restartedVM.Initialize( context.Background(), utils.TestSnowContext(), @@ -961,8 +962,8 @@ func testConflictingImportTxs(t *testing.T, genesis string) { func TestReissueAtomicTxHigherGasPrice(t *testing.T) { kc := secp256k1fx.NewKeychain(testKeys...) 
- for name, issueTxs := range map[string]func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, discarded []*atomic.Tx){ - "single UTXO override": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { + for name, issueTxs := range map[string]func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, discarded []*atomic.Tx){ + "single UTXO override": func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { utxo, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) if err != nil { t.Fatal(err) @@ -985,7 +986,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} }, - "one of two UTXOs overrides": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { + "one of two UTXOs overrides": func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) if err != nil { t.Fatal(err) @@ -1012,7 +1013,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} }, - "hola": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { + "hola": func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) if err != nil { t.Fatal(err) @@ -3093,7 +3094,7 @@ func TestConfigureLogLevel(t *testing.T) { } for _, test := range configTests { t.Run(test.name, func(t *testing.T) { - vm := &VM{} + vm := 
NewDefaultEVM() ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, test.genesisJSON) appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true @@ -3752,7 +3753,7 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { require.NoError(t, vm.SetPreference(context.Background(), blk.ID())) require.NoError(t, blk.Accept(context.Background())) - reinitVM := &VM{} + reinitVM := NewDefaultEVM() // use the block's timestamp instead of 0 since rewind to genesis // is hardcoded to be allowed in core/genesis.go. genesisWithUpgrade := &core.Genesis{} diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 58dfb3761f..1c19e2da8a 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -509,7 +509,7 @@ func TestReceiveWarpMessage(t *testing.T) { } func testReceiveWarpMessage( - t *testing.T, issuer chan commonEng.Message, vm *VM, + t *testing.T, issuer chan commonEng.Message, vm *sharedEvm, sourceChainID ids.ID, msgFrom warpMsgFrom, useSigners useWarpMsgSigners, blockTime time.Time, @@ -847,7 +847,7 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { func TestClearWarpDB(t *testing.T) { ctx, db, genesisBytes, issuer, _ := setupGenesis(t, genesisJSONLatest) - vm := &VM{} + vm := NewDefaultEVM() err := vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) require.NoError(t, err) @@ -870,7 +870,7 @@ func TestClearWarpDB(t *testing.T) { require.NoError(t, vm.Shutdown(context.Background())) // Restart VM with the same database default should not prune the warp db - vm = &VM{} + vm = NewDefaultEVM() // we need new context since the previous one has registered metrics. 
ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) @@ -886,7 +886,7 @@ func TestClearWarpDB(t *testing.T) { require.NoError(t, vm.Shutdown(context.Background())) // restart the VM with pruning enabled - vm = &VM{} + vm = NewDefaultEVM() config := `{"prune-warp-db-enabled": true}` ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte(config), issuer, []*commonEng.Fx{}, &enginetest.Sender{}) From 84ddc342e3427175638f92ca650205bd894e2818 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 21 Jan 2025 12:56:11 +0300 Subject: [PATCH 54/91] Update plugin/evm/atomic/vm/vm.go Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- plugin/evm/atomic/vm/vm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index 32c3fad99d..a7a6d7da8f 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -36,7 +36,7 @@ type InnerVM interface { } type VM struct { - InnerVM // Inner EVM + InnerVM } func WrapVM(vm InnerVM) *VM { From 7ba0e386c9965ded41e044fac944e9dd88641ccd Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 21 Jan 2025 12:56:18 +0300 Subject: [PATCH 55/91] Update plugin/evm/atomic/vm/vm.go Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- plugin/evm/atomic/vm/vm.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index a7a6d7da8f..07ec0af7fe 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -16,10 +16,10 @@ import ( ) var ( - _ secp256k1fx.VM = &VM{} - _ block.ChainVM = &VM{} - _ block.BuildBlockWithContextChainVM = &VM{} - _ block.StateSyncableVM = &VM{} + _ secp256k1fx.VM = (*VM)(nil) + _ block.ChainVM = (*VM)(nil) + _ 
block.BuildBlockWithContextChainVM = (*VM)(nil) + _ block.StateSyncableVM = (*VM)(nil) ) type ExtensibleEVM interface { From 1b5c84e6091e153df51bf6bf4932d927eb258358 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 21 Jan 2025 13:51:53 +0300 Subject: [PATCH 56/91] Update plugin/evm/vm.go Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- plugin/evm/vm.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 24d747f53e..d1a9413629 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -323,8 +323,7 @@ func NewDefaultEVM() *sharedEvm { } func NewExtensibleEVM(isPlugin bool) *sharedEvm { - vm := &sharedEvm{IsPlugin: isPlugin} - return vm + return &sharedEvm{IsPlugin: isPlugin} } func (vm *sharedEvm) SetNetworkCodec(codec codec.Manager) error { From 90e248c2048d972895dc4b26c23044c8c89f9d7c Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 21 Jan 2025 13:53:49 +0300 Subject: [PATCH 57/91] Update plugin/evm/vm.go Co-authored-by: Quentin McGaw Signed-off-by: Ceyhun Onur --- plugin/evm/vm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index d1a9413629..3857a8de2a 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -328,7 +328,7 @@ func NewExtensibleEVM(isPlugin bool) *sharedEvm { func (vm *sharedEvm) SetNetworkCodec(codec codec.Manager) error { if vm.networkCodec != nil { - return errors.New("network codec already set") + return fmt.Errorf("network codec already set to %T", vm.networkCodec) } vm.networkCodec = codec return nil From 28702f3d56dca23633453cb9cc727d979e374638 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 21 Jan 2025 13:59:04 +0300 Subject: [PATCH 58/91] nits --- peer/network.go | 2 ++ plugin/evm/admin.go | 2 +- plugin/evm/factory.go | 6 +++--- plugin/evm/syncervm_test.go | 4 ++-- plugin/evm/tx_gossip_test.go | 12 ++++++------ plugin/evm/vm.go | 18 ++---------------- plugin/evm/vm_test.go | 23 +++++++++++++++++++---- 
plugin/evm/vm_warp_test.go | 6 +++--- plugin/main.go | 3 +-- 9 files changed, 39 insertions(+), 37 deletions(-) diff --git a/peer/network.go b/peer/network.go index 6631c0a90a..bff48e65fd 100644 --- a/peer/network.go +++ b/peer/network.go @@ -20,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -99,6 +100,7 @@ type network struct { } func NewNetwork(p2pNetwork *p2p.Network, appSender common.AppSender, codec codec.Manager, self ids.NodeID, maxActiveAppRequests int64) Network { + math.Add64 return &network{ appSender: appSender, codec: codec, diff --git a/plugin/evm/admin.go b/plugin/evm/admin.go index cbd7b70ba4..b638aa0583 100644 --- a/plugin/evm/admin.go +++ b/plugin/evm/admin.go @@ -19,7 +19,7 @@ type Admin struct { profiler profiler.Profiler } -func NewAdminService(vm *sharedEvm, performanceDir string) *Admin { +func newAdminService(vm *sharedEvm, performanceDir string) *Admin { return &Admin{ vm: vm, profiler: profiler.New(performanceDir), diff --git a/plugin/evm/factory.go b/plugin/evm/factory.go index e43ea519f1..72ebe8ec9a 100644 --- a/plugin/evm/factory.go +++ b/plugin/evm/factory.go @@ -23,9 +23,9 @@ var ( type Factory struct{} func (*Factory) New(logging.Logger) (interface{}, error) { - return atomicvm.WrapVM(NewExtensibleEVM(false)), nil + return atomicvm.WrapVM(newExtensibleEVM(false)), nil } -func (*Factory) NewPlugin() block.ChainVM { - return atomicvm.WrapVM(NewExtensibleEVM(true)) +func NewPluginVM() block.ChainVM { + return atomicvm.WrapVM(newExtensibleEVM(true)) } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 637b7063c4..dd9a6198d0 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -129,7 +129,7 @@ func TestStateSyncToggleEnabledToDisabled(t 
*testing.T) { test.responseIntercept = nil test.expectedErr = nil - syncDisabledVM := NewDefaultEVM() + syncDisabledVM := newDefaultTestVM() appSender := &enginetest.Sender{T: t} appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { @@ -199,7 +199,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { syncDisabledVM.blockChain.DrainAcceptorQueue() // Create a new VM from the same database with state sync enabled. - syncReEnabledVM := NewDefaultEVM() + syncReEnabledVM := newDefaultTestVM() // Enable state sync in configJSON configJSON := fmt.Sprintf( `{"state-sync-enabled":true, "state-sync-min-blocks":%d}`, diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 1cdc10679e..429aade245 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -57,7 +57,7 @@ func TestEthTxGossip(t *testing.T) { responseSender := &enginetest.SenderStub{ SentAppResponse: make(chan []byte, 1), } - vm := NewDefaultEVM() + vm := newDefaultTestVM() vm.p2pSender = responseSender vm.atomicTxGossipHandler = &p2p.NoOpHandler{} @@ -191,7 +191,7 @@ func TestAtomicTxGossip(t *testing.T) { responseSender := &enginetest.SenderStub{ SentAppResponse: make(chan []byte, 1), } - vm := NewDefaultEVM() + vm := newDefaultTestVM() vm.p2pSender = responseSender vm.ethTxGossipHandler = &p2p.NoOpHandler{} vm.ethTxPullGossiper = &gossip.NoOpGossiper{} @@ -319,7 +319,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { SentAppGossip: make(chan []byte, 1), } - vm := NewDefaultEVM() + vm := newDefaultTestVM() vm.p2pSender = sender vm.ethTxPullGossiper = gossip.NoOpGossiper{} vm.atomicTxPullGossiper = gossip.NoOpGossiper{} @@ -378,7 +378,7 @@ func TestEthTxPushGossipInbound(t *testing.T) { snowCtx := utils.TestSnowContext() sender := &enginetest.Sender{} - vm := NewDefaultEVM() + vm := 
newDefaultTestVM() vm.p2pSender = sender vm.ethTxPullGossiper = gossip.NoOpGossiper{} vm.atomicTxPullGossiper = gossip.NoOpGossiper{} @@ -452,7 +452,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { sender := &enginetest.SenderStub{ SentAppGossip: make(chan []byte, 1), } - vm := NewDefaultEVM() + vm := newDefaultTestVM() vm.p2pSender = sender vm.ethTxPullGossiper = gossip.NoOpGossiper{} vm.atomicTxPullGossiper = gossip.NoOpGossiper{} @@ -522,7 +522,7 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { require.NoError(err) sender := &enginetest.Sender{} - vm := NewDefaultEVM() + vm := newDefaultTestVM() vm.p2pSender = sender vm.ethTxPullGossiper = gossip.NoOpGossiper{} vm.atomicTxPullGossiper = gossip.NoOpGossiper{} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 24d747f53e..9404cb01b2 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -308,21 +308,7 @@ type sharedEvm struct { rpcHandlers []interface{ Stop() } } -// NewDefaultEVM returns a new instance of the VM with default extensions -// This should not be called if the VM is being extended -func NewDefaultEVM() *sharedEvm { - vm := &sharedEvm{} - defaultCodec, err := message.NewCodec(message.BlockSyncSummary{}) - if err != nil { - panic(err) - } - if err := vm.SetNetworkCodec(defaultCodec); err != nil { - panic(err) - } - return vm -} - -func NewExtensibleEVM(isPlugin bool) *sharedEvm { +func newExtensibleEVM(isPlugin bool) *sharedEvm { vm := &sharedEvm{IsPlugin: isPlugin} return vm } @@ -1504,7 +1490,7 @@ func (vm *sharedEvm) CreateHandlers(context.Context) (map[string]http.Handler, e apis[avaxEndpoint] = avaxAPI if vm.config.AdminAPIEnabled { - adminAPI, err := newHandler("admin", NewAdminService(vm, os.ExpandEnv(fmt.Sprintf("%s_coreth_performance_%s", vm.config.AdminAPIDir, vm.chainAlias)))) + adminAPI, err := newHandler("admin", newAdminService(vm, os.ExpandEnv(fmt.Sprintf("%s_coreth_performance_%s", vm.config.AdminAPIDir, vm.chainAlias)))) if err != nil { return nil, fmt.Errorf("failed 
to register service for admin API due to %w", err) } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 6fc52648f1..bd2c900467 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -27,6 +27,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" @@ -220,6 +221,20 @@ func setupGenesis( return ctx, prefixedDB, genesisBytes, issuer, atomicMemory } +// newDefaultTestVM returns a new instance of the VM with default extensions +// This should not be called if the VM is being extended +func newDefaultTestVM() *sharedEvm { + vm := &sharedEvm{} + defaultCodec, err := message.NewCodec(message.BlockSyncSummary{}) + if err != nil { + panic(err) + } + if err := vm.SetNetworkCodec(defaultCodec); err != nil { + panic(err) + } + return vm +} + // GenesisVM creates a VM instance with the genesis test bytes and returns // the channel use to send messages to the engine, the VM, database manager, // sender, and atomic memory. 
@@ -255,7 +270,7 @@ func GenesisVMWithClock( *avalancheatomic.Memory, *enginetest.Sender, ) { - vm := NewDefaultEVM() + vm := newDefaultTestVM() vm.clock = clock ctx, dbManager, genesisBytes, issuer, m := setupGenesis(t, genesisJSON) appSender := &enginetest.Sender{T: t} @@ -754,7 +769,7 @@ func TestBuildEthTxBlock(t *testing.T) { t.Fatalf("Found unexpected blkID for parent of blk2") } - restartedVM := NewDefaultEVM() + restartedVM := newDefaultTestVM() if err := restartedVM.Initialize( context.Background(), utils.TestSnowContext(), @@ -3094,7 +3109,7 @@ func TestConfigureLogLevel(t *testing.T) { } for _, test := range configTests { t.Run(test.name, func(t *testing.T) { - vm := NewDefaultEVM() + vm := newDefaultTestVM() ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, test.genesisJSON) appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true @@ -3753,7 +3768,7 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { require.NoError(t, vm.SetPreference(context.Background(), blk.ID())) require.NoError(t, blk.Accept(context.Background())) - reinitVM := NewDefaultEVM() + reinitVM := newDefaultTestVM() // use the block's timestamp instead of 0 since rewind to genesis // is hardcoded to be allowed in core/genesis.go. 
genesisWithUpgrade := &core.Genesis{} diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 1c19e2da8a..9560d7ce59 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -847,7 +847,7 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { func TestClearWarpDB(t *testing.T) { ctx, db, genesisBytes, issuer, _ := setupGenesis(t, genesisJSONLatest) - vm := NewDefaultEVM() + vm := newDefaultTestVM() err := vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) require.NoError(t, err) @@ -870,7 +870,7 @@ func TestClearWarpDB(t *testing.T) { require.NoError(t, vm.Shutdown(context.Background())) // Restart VM with the same database default should not prune the warp db - vm = NewDefaultEVM() + vm = newDefaultTestVM() // we need new context since the previous one has registered metrics. ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) @@ -886,7 +886,7 @@ func TestClearWarpDB(t *testing.T) { require.NoError(t, vm.Shutdown(context.Background())) // restart the VM with pruning enabled - vm = NewDefaultEVM() + vm = newDefaultTestVM() config := `{"prune-warp-db-enabled": true}` ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte(config), issuer, []*commonEng.Fx{}, &enginetest.Sender{}) diff --git a/plugin/main.go b/plugin/main.go index a1b18add0f..5002571705 100644 --- a/plugin/main.go +++ b/plugin/main.go @@ -29,6 +29,5 @@ func main() { fmt.Printf("failed to set fd limit correctly due to: %s", err) os.Exit(1) } - factory := evm.Factory{} - rpcchainvm.Serve(context.Background(), factory.NewPlugin()) + rpcchainvm.Serve(context.Background(), evm.NewPluginVM()) } From 75f6b0ee16594d73b6e1d8b380156e4317cbd105 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur 
Date: Tue, 21 Jan 2025 15:24:41 +0300 Subject: [PATCH 59/91] revert extra change --- peer/network.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/peer/network.go b/peer/network.go index bff48e65fd..6631c0a90a 100644 --- a/peer/network.go +++ b/peer/network.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -100,7 +99,6 @@ type network struct { } func NewNetwork(p2pNetwork *p2p.Network, appSender common.AppSender, codec codec.Manager, self ids.NodeID, maxActiveAppRequests int64) Network { - math.Add64 return &network{ appSender: appSender, codec: codec, From d2a2418604af9aa82058dc208bbfc53e2bcdbd82 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 22 Jan 2025 15:37:58 +0300 Subject: [PATCH 60/91] add extension config --- plugin/evm/admin.go | 4 +- plugin/evm/api.go | 4 +- plugin/evm/atomic/vm/vm.go | 28 +++---- plugin/evm/block.go | 4 +- plugin/evm/block_builder.go | 2 +- plugin/evm/export_tx_test.go | 2 +- plugin/evm/extension/extension.go | 7 ++ plugin/evm/factory.go | 15 +++- plugin/evm/formatting.go | 8 +- plugin/evm/gossip.go | 2 +- plugin/evm/health.go | 2 +- plugin/evm/import_tx_test.go | 32 +++---- plugin/evm/syncervm_test.go | 18 ++-- plugin/evm/tx_test.go | 4 +- plugin/evm/vm.go | 133 ++++++++++++++---------------- plugin/evm/vm_database.go | 4 +- plugin/evm/vm_test.go | 26 +++--- plugin/evm/vm_warp_test.go | 2 +- plugin/main.go | 7 +- 19 files changed, 156 insertions(+), 148 deletions(-) create mode 100644 plugin/evm/extension/extension.go diff --git a/plugin/evm/admin.go b/plugin/evm/admin.go index b638aa0583..4d999dad01 100644 --- a/plugin/evm/admin.go +++ b/plugin/evm/admin.go @@ -15,11 +15,11 @@ import ( // Admin is the API service for admin API calls type Admin struct { - vm *sharedEvm + 
vm *VM profiler profiler.Profiler } -func newAdminService(vm *sharedEvm, performanceDir string) *Admin { +func newAdminService(vm *VM, performanceDir string) *Admin { return &Admin{ vm: vm, profiler: profiler.New(performanceDir), diff --git a/plugin/evm/api.go b/plugin/evm/api.go index eede874958..12791d4e08 100644 --- a/plugin/evm/api.go +++ b/plugin/evm/api.go @@ -40,7 +40,7 @@ var ( ) // SnowmanAPI introduces snowman specific functionality to the evm -type SnowmanAPI struct{ vm *sharedEvm } +type SnowmanAPI struct{ vm *VM } // GetAcceptedFrontReply defines the reply that will be sent from the // GetAcceptedFront API call @@ -67,7 +67,7 @@ func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { } // AvaxAPI offers Avalanche network related API methods -type AvaxAPI struct{ vm *sharedEvm } +type AvaxAPI struct{ vm *VM } type VersionReply struct { Version string `json:"version"` diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index 07ec0af7fe..7212913ca4 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -4,10 +4,10 @@ import ( "context" "fmt" - "github.com/ava-labs/avalanchego/codec" avalanchedatabase "github.com/ava-labs/avalanchego/database" avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/coreth/plugin/evm/atomic/sync" + "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/avalanchego/snow" @@ -22,12 +22,7 @@ var ( _ block.StateSyncableVM = (*VM)(nil) ) -type ExtensibleEVM interface { - SetNetworkCodec(codec codec.Manager) error -} - type InnerVM interface { - ExtensibleEVM avalanchecommon.VM secp256k1fx.VM block.ChainVM @@ -39,6 +34,16 @@ type VM struct { InnerVM } +func NewAtomicExtensionConfig() (extension.ExtensionConfig, error) { + codec, err := message.NewCodec(sync.AtomicSyncSummary{}) + if err != nil { + return extension.ExtensionConfig{}, fmt.Errorf("failed to create codec manager: %w", err) + } 
+ return extension.ExtensionConfig{ + NetworkCodec: codec, + }, nil +} + func WrapVM(vm InnerVM) *VM { return &VM{InnerVM: vm} } @@ -55,16 +60,7 @@ func (vm *VM) Initialize( fxs []*avalanchecommon.Fx, appSender avalanchecommon.AppSender, ) error { - innerVM := vm.InnerVM - // Register the codec for the atomic block sync summary - networkCodec, err := message.NewCodec(sync.AtomicSyncSummary{}) - if err != nil { - return fmt.Errorf("failed to create codec manager: %w", err) - } - if err := innerVM.SetNetworkCodec(networkCodec); err != nil { - return fmt.Errorf("failed to set network codec: %w", err) - } - return innerVM.Initialize( + return vm.InnerVM.Initialize( ctx, chainCtx, db, diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 8b8b2a534f..80bd07de7b 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -112,12 +112,12 @@ func readMainnetBonusBlocks() (map[uint64]ids.ID, error) { type Block struct { id ids.ID ethBlock *types.Block - vm *sharedEvm + vm *VM atomicTxs []*atomic.Tx } // newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface -func (vm *sharedEvm) newBlock(ethBlock *types.Block) (*Block, error) { +func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { isApricotPhase5 := vm.chainConfig.IsApricotPhase5(ethBlock.Time()) atomicTxs, err := atomic.ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, atomic.Codec) if err != nil { diff --git a/plugin/evm/block_builder.go b/plugin/evm/block_builder.go index cb437676a3..967444b0d0 100644 --- a/plugin/evm/block_builder.go +++ b/plugin/evm/block_builder.go @@ -53,7 +53,7 @@ type blockBuilder struct { buildBlockTimer *timer.Timer } -func (vm *sharedEvm) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message) *blockBuilder { +func (vm *VM) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message) *blockBuilder { b := &blockBuilder{ ctx: vm.ctx, chainConfig: vm.chainConfig, diff --git a/plugin/evm/export_tx_test.go 
b/plugin/evm/export_tx_test.go index 89d5dddabc..643eef391c 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/export_tx_test.go @@ -26,7 +26,7 @@ import ( // createExportTxOptions adds funds to shared memory, imports them, and returns a list of export transactions // that attempt to send the funds to each of the test keys (list of length 3). -func createExportTxOptions(t *testing.T, vm *sharedEvm, issuer chan engCommon.Message, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { +func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { // Add a UTXO to shared memory utxo := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: ids.GenerateTestID()}, diff --git a/plugin/evm/extension/extension.go b/plugin/evm/extension/extension.go new file mode 100644 index 0000000000..416d131632 --- /dev/null +++ b/plugin/evm/extension/extension.go @@ -0,0 +1,7 @@ +package extension + +import "github.com/ava-labs/avalanchego/codec" + +type ExtensionConfig struct { + NetworkCodec codec.Manager +} diff --git a/plugin/evm/factory.go b/plugin/evm/factory.go index 72ebe8ec9a..b3b2143a58 100644 --- a/plugin/evm/factory.go +++ b/plugin/evm/factory.go @@ -19,13 +19,20 @@ var ( _ vms.Factory = &Factory{} ) -// TODO: either move this from plugin or move the VM itself type Factory struct{} func (*Factory) New(logging.Logger) (interface{}, error) { - return atomicvm.WrapVM(newExtensibleEVM(false)), nil + extensionCfg, err := atomicvm.NewAtomicExtensionConfig() + if err != nil { + return nil, err + } + return atomicvm.WrapVM(NewExtensibleEVM(false, extensionCfg)), nil } -func NewPluginVM() block.ChainVM { - return atomicvm.WrapVM(newExtensibleEVM(true)) +func NewPluginVM() (block.ChainVM, error) { + extensionCfg, err := atomicvm.NewAtomicExtensionConfig() + if err != nil { + return nil, err + } + return atomicvm.WrapVM(NewExtensibleEVM(true, extensionCfg)), nil } diff --git a/plugin/evm/formatting.go 
b/plugin/evm/formatting.go index 2127422691..feeab134b7 100644 --- a/plugin/evm/formatting.go +++ b/plugin/evm/formatting.go @@ -14,7 +14,7 @@ import ( // ParseServiceAddress get address ID from address string, being it either localized (using address manager, // doing also components validations), or not localized. // If both attempts fail, reports error from localized address parsing -func (vm *sharedEvm) ParseServiceAddress(addrStr string) (ids.ShortID, error) { +func (vm *VM) ParseServiceAddress(addrStr string) (ids.ShortID, error) { addr, err := ids.ShortFromString(addrStr) if err == nil { return addr, nil @@ -23,7 +23,7 @@ func (vm *sharedEvm) ParseServiceAddress(addrStr string) (ids.ShortID, error) { } // ParseLocalAddress takes in an address for this chain and produces the ID -func (vm *sharedEvm) ParseLocalAddress(addrStr string) (ids.ShortID, error) { +func (vm *VM) ParseLocalAddress(addrStr string) (ids.ShortID, error) { chainID, addr, err := vm.ParseAddress(addrStr) if err != nil { return ids.ShortID{}, err @@ -36,13 +36,13 @@ func (vm *sharedEvm) ParseLocalAddress(addrStr string) (ids.ShortID, error) { } // FormatLocalAddress takes in a raw address and produces the formatted address -func (vm *sharedEvm) FormatLocalAddress(addr ids.ShortID) (string, error) { +func (vm *VM) FormatLocalAddress(addr ids.ShortID) (string, error) { return vm.FormatAddress(vm.ctx.ChainID, addr) } // FormatAddress takes in a chainID and a raw address and produces the formatted // address -func (vm *sharedEvm) FormatAddress(chainID ids.ID, addr ids.ShortID) (string, error) { +func (vm *VM) FormatAddress(chainID ids.ID, addr ids.ShortID) (string, error) { chainIDAlias, err := vm.ctx.BCLookup.PrimaryAlias(chainID) if err != nil { return "", err diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go index bea7115331..16d632bd94 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/gossip.go @@ -220,7 +220,7 @@ func (tx *GossipEthTx) GossipID() ids.ID { // EthPushGossiper is used 
by the ETH backend to push transactions issued over // the RPC and added to the mempool to peers. type EthPushGossiper struct { - vm *sharedEvm + vm *VM } func (e *EthPushGossiper) Add(tx *types.Transaction) { diff --git a/plugin/evm/health.go b/plugin/evm/health.go index cd13cee44d..116f820eb2 100644 --- a/plugin/evm/health.go +++ b/plugin/evm/health.go @@ -8,7 +8,7 @@ import "context" // Health returns nil if this chain is healthy. // Also returns details, which should be one of: // string, []byte, map[string]string -func (vm *sharedEvm) HealthCheck(context.Context) (interface{}, error) { +func (vm *VM) HealthCheck(context.Context) (interface{}, error) { // TODO perform actual health check return nil, nil } diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/import_tx_test.go index 7880d32962..da8a5c9db6 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/import_tx_test.go @@ -25,7 +25,7 @@ import ( // createImportTxOptions adds a UTXO to shared memory and generates a list of import transactions sending this UTXO // to each of the three test keys (conflicting transactions) -func createImportTxOptions(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { +func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: ids.GenerateTestID()}, Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, @@ -428,7 +428,7 @@ func TestNewImportTx(t *testing.T) { importAmount := uint64(5000000) // createNewImportAVAXTx adds a UTXO to shared memory and then constructs a new import transaction // and checks that it has the correct fee for the base fee that has been used - createNewImportAVAXTx := func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + createNewImportAVAXTx := func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, 
vm.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) if err != nil { @@ -468,7 +468,7 @@ func TestNewImportTx(t *testing.T) { return tx } - checkState := func(t *testing.T, vm *sharedEvm) { + checkState := func(t *testing.T, vm *VM) { txs := vm.LastAcceptedBlockInternal().(*Block).atomicTxs if len(txs) != 1 { t.Fatalf("Expected one import tx to be in the last accepted block, but found %d", len(txs)) @@ -871,7 +871,7 @@ func TestImportTxGasCost(t *testing.T) { func TestImportTxSemanticVerify(t *testing.T) { tests := map[string]atomicTxTest{ "UTXO not present during bootstrapping": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, @@ -900,7 +900,7 @@ func TestImportTxSemanticVerify(t *testing.T) { bootstrapping: true, }, "UTXO not present": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { tx := &atomic.Tx{UnsignedAtomicTx: &atomic.UnsignedImportTx{ NetworkID: vm.ctx.NetworkID, BlockchainID: vm.ctx.ChainID, @@ -929,7 +929,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "failed to fetch import UTXOs from", }, "garbage UTXO": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) inputID := utxoID.InputID() @@ -969,7 +969,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "failed to unmarshal UTXO", }, "UTXO AssetID mismatch": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory 
*avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() expectedAssetID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, expectedAssetID, 1, testShortIDAddrs[0]) @@ -1003,7 +1003,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: atomic.ErrAssetIDMismatch.Error(), }, "insufficient AVAX funds": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1036,7 +1036,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx flow check failed due to", }, "insufficient non-AVAX funds": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() assetID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) @@ -1070,7 +1070,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx flow check failed due to", }, "no signatures": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1103,7 +1103,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx contained mismatched number of inputs/credentials", }, "incorrect signature": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { 
+ setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1137,7 +1137,7 @@ func TestImportTxSemanticVerify(t *testing.T) { semanticVerifyErr: "import tx transfer failed verification", }, "non-unique EVM Outputs": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 2, testShortIDAddrs[0]) if err != nil { @@ -1190,7 +1190,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { assetID := ids.GenerateTestID() tests := map[string]atomicTxTest{ "AVAX UTXO": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1220,7 +1220,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { } return tx }, - checkState: func(t *testing.T, vm *sharedEvm) { + checkState: func(t *testing.T, vm *VM) { lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) sdb, err := vm.blockChain.StateAt(lastAcceptedBlock.ethBlock.Root()) @@ -1235,7 +1235,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { }, }, "non-AVAX UTXO": { - setup: func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx { + setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) if err != nil { @@ -1265,7 +1265,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { } return tx }, - 
checkState: func(t *testing.T, vm *sharedEvm) { + checkState: func(t *testing.T, vm *VM) { lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) sdb, err := vm.blockChain.StateAt(lastAcceptedBlock.ethBlock.Root()) diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index dd9a6198d0..9f1bebfa48 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -101,7 +101,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { syncableInterval: 256, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync syncMode: block.StateSyncStatic, - responseIntercept: func(syncerVM *sharedEvm, nodeID ids.NodeID, requestID uint32, response []byte) { + responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { lock.Lock() defer lock.Unlock() @@ -255,7 +255,7 @@ func TestVMShutdownWhileSyncing(t *testing.T) { syncableInterval: 256, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync syncMode: block.StateSyncStatic, - responseIntercept: func(syncerVM *sharedEvm, nodeID ids.NodeID, requestID uint32, response []byte) { + responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { lock.Lock() defer lock.Unlock() @@ -365,7 +365,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( t, false, "", stateSyncEnabledJSON, "", alloc, ) - shutdownOnceSyncerVM := &shutdownOnceVM{sharedEvm: syncerVM} + shutdownOnceSyncerVM := &shutdownOnceVM{VM: syncerVM} t.Cleanup(func() { require.NoError(shutdownOnceSyncerVM.Shutdown(context.Background())) }) @@ -422,13 +422,13 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s // syncVMSetup contains the required set up for a client VM to perform state sync // off of a server VM. 
type syncVMSetup struct { - serverVM *sharedEvm + serverVM *VM serverAppSender *enginetest.Sender includedAtomicTxs []*atomic.Tx fundedAccounts map[*keystore.Key]*types.StateAccount - syncerVM *sharedEvm + syncerVM *VM syncerDB avalanchedatabase.Database syncerEngineChan <-chan commonEng.Message syncerAtomicMemory *avalancheatomic.Memory @@ -436,19 +436,19 @@ type syncVMSetup struct { } type shutdownOnceVM struct { - *sharedEvm + *VM shutdownOnce sync.Once } func (vm *shutdownOnceVM) Shutdown(ctx context.Context) error { var err error - vm.shutdownOnce.Do(func() { err = vm.sharedEvm.Shutdown(ctx) }) + vm.shutdownOnce.Do(func() { err = vm.VM.Shutdown(ctx) }) return err } // syncTest contains both the actual VMs as well as the parameters with the expected output. type syncTest struct { - responseIntercept func(vm *sharedEvm, nodeID ids.NodeID, requestID uint32, response []byte) + responseIntercept func(vm *VM, nodeID ids.NodeID, requestID uint32, response []byte) stateSyncMinBlocks uint64 syncableInterval uint64 syncMode block.StateSyncMode @@ -617,7 +617,7 @@ func patchBlock(blk *types.Block, root common.Hash, db ethdb.Database) *types.Bl // generateAndAcceptBlocks uses [core.GenerateChain] to generate blocks, then // calls Verify and Accept on each generated block // TODO: consider using this helper function in vm_test.go and elsewhere in this package to clean up tests -func generateAndAcceptBlocks(t *testing.T, vm *sharedEvm, numBlocks int, gen func(int, *core.BlockGen), accepted func(*types.Block)) { +func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, *core.BlockGen), accepted func(*types.Block)) { t.Helper() // acceptExternalBlock defines a function to parse, verify, and accept a block once it has been diff --git a/plugin/evm/tx_test.go b/plugin/evm/tx_test.go index 689b13a1cb..9bef967e68 100644 --- a/plugin/evm/tx_test.go +++ b/plugin/evm/tx_test.go @@ -79,14 +79,14 @@ func executeTxVerifyTest(t *testing.T, test atomicTxVerifyTest) 
{ type atomicTxTest struct { // setup returns the atomic transaction for the test - setup func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) *atomic.Tx + setup func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx // define a string that should be contained in the error message if the tx fails verification // at some point. If the strings are empty, then the tx should pass verification at the // respective step. semanticVerifyErr, evmStateTransferErr, acceptErr string // checkState is called iff building and verifying a block containing the transaction is successful. Verifies // the state of the VM following the block's acceptance. - checkState func(t *testing.T, vm *sharedEvm) + checkState func(t *testing.T, vm *VM) // Whether or not the VM should be considered to still be bootstrapping bootstrapping bool diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 00cd20cfa0..9690ebc70c 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -45,6 +45,7 @@ import ( atomicsync "github.com/ava-labs/coreth/plugin/evm/atomic/sync" atomictxpool "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/plugin/evm/message" vmsync "github.com/ava-labs/coreth/plugin/evm/sync" warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp" @@ -103,12 +104,12 @@ import ( ) var ( - _ block.ChainVM = &sharedEvm{} - _ block.BuildBlockWithContextChainVM = &sharedEvm{} - _ block.StateSyncableVM = &sharedEvm{} - _ statesyncclient.EthBlockParser = &sharedEvm{} - _ secp256k1fx.VM = &sharedEvm{} - _ vmsync.BlockAcceptor = &sharedEvm{} + _ block.ChainVM = &VM{} + _ block.BuildBlockWithContextChainVM = &VM{} + _ block.StateSyncableVM = &VM{} + _ statesyncclient.EthBlockParser = &VM{} + _ secp256k1fx.VM = &VM{} + _ vmsync.BlockAcceptor = &VM{} ) const ( @@ -201,8 +202,8 @@ func init() { originalStderr = os.Stderr } -// 
sharedEvm implements the snowman.ChainVM interface -type sharedEvm struct { +// VM implements the snowman.ChainVM interface +type VM struct { ctx *snow.Context // [cancel] may be nil until [snow.NormalOp] starts cancel context.CancelFunc @@ -219,7 +220,7 @@ type sharedEvm struct { ethConfig ethconfig.Config // Extension Points - networkCodec codec.Manager + extensionConfig extension.ExtensionConfig // pointers to eth constructs eth *eth.Ethereum @@ -308,26 +309,18 @@ type sharedEvm struct { rpcHandlers []interface{ Stop() } } -func newExtensibleEVM(isPlugin bool) *sharedEvm { - return &sharedEvm{IsPlugin: isPlugin} -} - -func (vm *sharedEvm) SetNetworkCodec(codec codec.Manager) error { - if vm.networkCodec != nil { - return fmt.Errorf("network codec already set to %T", vm.networkCodec) - } - vm.networkCodec = codec - return nil +func NewExtensibleEVM(isPlugin bool, extensionConfig extension.ExtensionConfig) *VM { + return &VM{IsPlugin: isPlugin, extensionConfig: extensionConfig} } // CodecRegistry implements the secp256k1fx interface -func (vm *sharedEvm) CodecRegistry() codec.Registry { return vm.baseCodec } +func (vm *VM) CodecRegistry() codec.Registry { return vm.baseCodec } // Clock implements the secp256k1fx interface -func (vm *sharedEvm) Clock() *mockable.Clock { return &vm.clock } +func (vm *VM) Clock() *mockable.Clock { return &vm.clock } // Logger implements the secp256k1fx interface -func (vm *sharedEvm) Logger() logging.Logger { return vm.ctx.Log } +func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } /* ****************************************************************************** @@ -336,12 +329,12 @@ func (vm *sharedEvm) Logger() logging.Logger { return vm.ctx.Log } */ // implements SnowmanPlusPlusVM interface -func (vm *sharedEvm) GetActivationTime() time.Time { +func (vm *VM) GetActivationTime() time.Time { return utils.Uint64ToTime(vm.chainConfig.ApricotPhase4BlockTimestamp) } // Initialize implements the snowman.ChainVM interface -func (vm 
*sharedEvm) Initialize( +func (vm *VM) Initialize( _ context.Context, chainCtx *snow.Context, db avalanchedatabase.Database, @@ -555,7 +548,7 @@ func (vm *sharedEvm) Initialize( return fmt.Errorf("failed to initialize p2p network: %w", err) } vm.p2pValidators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) - vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) + vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.extensionConfig.NetworkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) vm.client = peer.NewNetworkClient(vm.Network) // Initialize warp backend @@ -638,7 +631,7 @@ func (vm *sharedEvm) Initialize( return vm.initializeStateSyncClient(lastAcceptedHeight) } -func (vm *sharedEvm) initializeMetrics() error { +func (vm *VM) initializeMetrics() error { vm.sdkMetrics = prometheus.NewRegistry() // If metrics are enabled, register the default metrics registry if !metrics.Enabled { @@ -652,7 +645,7 @@ func (vm *sharedEvm) initializeMetrics() error { return vm.ctx.Metrics.Register(sdkMetricsPrefix, vm.sdkMetrics) } -func (vm *sharedEvm) initializeChain(lastAcceptedHash common.Hash) error { +func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { nodecfg := &node.Config{ CorethVersion: Version, KeyStoreDir: vm.config.KeystoreDirectory, @@ -694,7 +687,7 @@ func (vm *sharedEvm) initializeChain(lastAcceptedHash common.Hash) error { // initializeStateSyncClient initializes the client for performing state sync. // If state sync is disabled, this function will wipe any ongoing summary from // disk to ensure that we do not continue syncing from an invalid snapshot. 
-func (vm *sharedEvm) initializeStateSyncClient(lastAcceptedHeight uint64) error { +func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { stateSyncEnabled := vm.stateSyncEnabled(lastAcceptedHeight) // parse nodeIDs from state sync IDs in vm config var stateSyncIDs []ids.NodeID @@ -723,7 +716,7 @@ func (vm *sharedEvm) initializeStateSyncClient(lastAcceptedHeight uint64) error Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.client, - Codec: vm.networkCodec, + Codec: vm.extensionConfig.NetworkCodec, Stats: stats.NewClientSyncerStats(leafMetricsNames), StateSyncNodeIDs: stateSyncIDs, BlockParser: vm, @@ -751,7 +744,7 @@ func (vm *sharedEvm) initializeStateSyncClient(lastAcceptedHeight uint64) error return nil } -func (vm *sharedEvm) initChainState(lastAcceptedBlock *types.Block) error { +func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { block, err := vm.newBlock(lastAcceptedBlock) if err != nil { return fmt.Errorf("failed to create block wrapper for the last accepted block: %w", err) @@ -784,14 +777,14 @@ func (vm *sharedEvm) initChainState(lastAcceptedBlock *types.Block) error { return vm.ctx.Metrics.Register(chainStateMetricsPrefix, chainStateRegisterer) } -func (vm *sharedEvm) createConsensusCallbacks() dummy.ConsensusCallbacks { +func (vm *VM) createConsensusCallbacks() dummy.ConsensusCallbacks { return dummy.ConsensusCallbacks{ OnFinalizeAndAssemble: vm.onFinalizeAndAssemble, OnExtraStateChange: vm.onExtraStateChange, } } -func (vm *sharedEvm) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { +func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { for { tx, exists := vm.mempool.NextTx() if !exists { @@ -838,7 +831,7 @@ func (vm *sharedEvm) preBatchOnFinalizeAndAssemble(header *types.Header, state * } // 
assumes that we are in at least Apricot Phase 5. -func (vm *sharedEvm) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { +func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { var ( batchAtomicTxs []*atomic.Tx batchAtomicUTXOs set.Set[ids.ID] @@ -938,14 +931,14 @@ func (vm *sharedEvm) postBatchOnFinalizeAndAssemble(header *types.Header, state return nil, nil, nil, nil } -func (vm *sharedEvm) onFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { +func (vm *VM) onFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { if !vm.chainConfig.IsApricotPhase5(header.Time) { return vm.preBatchOnFinalizeAndAssemble(header, state, txs) } return vm.postBatchOnFinalizeAndAssemble(header, state, txs) } -func (vm *sharedEvm) onExtraStateChange(block *types.Block, state *state.StateDB) (*big.Int, *big.Int, error) { +func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big.Int, *big.Int, error) { var ( batchContribution *big.Int = big.NewInt(0) batchGasUsed *big.Int = big.NewInt(0) @@ -1010,7 +1003,7 @@ func (vm *sharedEvm) onExtraStateChange(block *types.Block, state *state.StateDB return batchContribution, batchGasUsed, nil } -func (vm *sharedEvm) SetState(_ context.Context, state snow.State) error { +func (vm *VM) SetState(_ context.Context, state snow.State) error { switch state { case snow.StateSyncing: vm.bootstrapped.Set(false) @@ -1025,7 +1018,7 @@ func (vm *sharedEvm) SetState(_ context.Context, state snow.State) error { } // onBootstrapStarted marks this VM as bootstrapping -func (vm *sharedEvm) onBootstrapStarted() error { +func (vm *VM) onBootstrapStarted() error { vm.bootstrapped.Set(false) if err := 
vm.Client.Error(); err != nil { return err @@ -1042,7 +1035,7 @@ func (vm *sharedEvm) onBootstrapStarted() error { } // onNormalOperationsStarted marks this VM as bootstrapped -func (vm *sharedEvm) onNormalOperationsStarted() error { +func (vm *VM) onNormalOperationsStarted() error { if vm.bootstrapped.Get() { return nil } @@ -1056,7 +1049,7 @@ func (vm *sharedEvm) onNormalOperationsStarted() error { } // initBlockBuilding starts goroutines to manage block building -func (vm *sharedEvm) initBlockBuilding() error { +func (vm *VM) initBlockBuilding() error { ctx, cancel := context.WithCancel(context.TODO()) vm.cancel = cancel @@ -1228,7 +1221,7 @@ func (vm *sharedEvm) initBlockBuilding() error { // setAppRequestHandlers sets the request handlers for the VM to serve state sync // requests. -func (vm *sharedEvm) setAppRequestHandlers() error { +func (vm *VM) setAppRequestHandlers() error { // Create standalone EVM TrieDB (read only) for serving leafs requests. // We create a standalone TrieDB here, so that it has a standalone cache from the one // used by the node when processing blocks. 
@@ -1252,14 +1245,14 @@ func (vm *sharedEvm) setAppRequestHandlers() error { vm.blockChain, vm.chaindb, vm.warpBackend, - vm.networkCodec, + vm.extensionConfig.NetworkCodec, vm.leafRequestTypeConfigs, ) vm.Network.SetRequestHandler(networkHandler) return nil } -func (vm *sharedEvm) RegisterLeafRequestHandler(nodeType message.NodeType, metricName string, trieDB *triedb.Database, trieKeyLen int, useSnapshot bool) error { +func (vm *VM) RegisterLeafRequestHandler(nodeType message.NodeType, metricName string, trieDB *triedb.Database, trieKeyLen int, useSnapshot bool) error { if vm.leafRequestTypeConfigs == nil { vm.leafRequestTypeConfigs = make(map[message.NodeType]LeafRequestTypeConfig) } @@ -1278,7 +1271,7 @@ func (vm *sharedEvm) RegisterLeafRequestHandler(nodeType message.NodeType, metri } // Shutdown implements the snowman.ChainVM interface -func (vm *sharedEvm) Shutdown(context.Context) error { +func (vm *VM) Shutdown(context.Context) error { if vm.ctx == nil { return nil } @@ -1300,11 +1293,11 @@ func (vm *sharedEvm) Shutdown(context.Context) error { } // buildBlock builds a block to be wrapped by ChainState -func (vm *sharedEvm) buildBlock(ctx context.Context) (snowman.Block, error) { +func (vm *VM) buildBlock(ctx context.Context) (snowman.Block, error) { return vm.buildBlockWithContext(ctx, nil) } -func (vm *sharedEvm) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) (snowman.Block, error) { +func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) (snowman.Block, error) { if proposerVMBlockCtx != nil { log.Debug("Building block with context", "pChainBlockHeight", proposerVMBlockCtx.PChainHeight) } else { @@ -1355,7 +1348,7 @@ func (vm *sharedEvm) buildBlockWithContext(ctx context.Context, proposerVMBlockC } // parseBlock parses [b] into a block to be wrapped by ChainState. 
-func (vm *sharedEvm) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { +func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { ethBlock := new(types.Block) if err := rlp.DecodeBytes(b, ethBlock); err != nil { return nil, err @@ -1374,7 +1367,7 @@ func (vm *sharedEvm) parseBlock(_ context.Context, b []byte) (snowman.Block, err return block, nil } -func (vm *sharedEvm) ParseEthBlock(b []byte) (*types.Block, error) { +func (vm *VM) ParseEthBlock(b []byte) (*types.Block, error) { block, err := vm.parseBlock(context.TODO(), b) if err != nil { return nil, err @@ -1385,7 +1378,7 @@ func (vm *sharedEvm) ParseEthBlock(b []byte) (*types.Block, error) { // getBlock attempts to retrieve block [id] from the VM to be wrapped // by ChainState. -func (vm *sharedEvm) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { +func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { ethBlock := vm.blockChain.GetBlockByHash(common.Hash(id)) // If [ethBlock] is nil, return [avalanchedatabase.ErrNotFound] here // so that the miss is considered cacheable. @@ -1398,7 +1391,7 @@ func (vm *sharedEvm) getBlock(_ context.Context, id ids.ID) (snowman.Block, erro // GetAcceptedBlock attempts to retrieve block [blkID] from the VM. This method // only returns accepted blocks. 
-func (vm *sharedEvm) GetAcceptedBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { +func (vm *VM) GetAcceptedBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { blk, err := vm.GetBlock(ctx, blkID) if err != nil { return nil, err @@ -1418,7 +1411,7 @@ func (vm *sharedEvm) GetAcceptedBlock(ctx context.Context, blkID ids.ID) (snowma } // SetPreference sets what the current tail of the chain is -func (vm *sharedEvm) SetPreference(ctx context.Context, blkID ids.ID) error { +func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { // Since each internal handler used by [vm.State] always returns a block // with non-nil ethBlock value, GetBlockInternal should never return a // (*Block) with a nil ethBlock value. @@ -1432,7 +1425,7 @@ func (vm *sharedEvm) SetPreference(ctx context.Context, blkID ids.ID) error { // VerifyHeightIndex always returns a nil error since the index is maintained by // vm.blockChain. -func (vm *sharedEvm) VerifyHeightIndex(context.Context) error { +func (vm *VM) VerifyHeightIndex(context.Context) error { return nil } @@ -1440,7 +1433,7 @@ func (vm *sharedEvm) VerifyHeightIndex(context.Context) error { // Note: the engine assumes that if a block is not found at [height], then // [avalanchedatabase.ErrNotFound] will be returned. This indicates that the VM has state // synced and does not have all historical blocks available. 
-func (vm *sharedEvm) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { +func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { lastAcceptedBlock := vm.LastAcceptedBlock() if lastAcceptedBlock.Height() < height { return ids.ID{}, avalanchedatabase.ErrNotFound @@ -1453,7 +1446,7 @@ func (vm *sharedEvm) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.I return ids.ID(hash), nil } -func (vm *sharedEvm) Version(context.Context) (string, error) { +func (vm *VM) Version(context.Context) (string, error) { return Version, nil } @@ -1469,7 +1462,7 @@ func newHandler(name string, service interface{}) (http.Handler, error) { } // CreateHandlers makes new http handlers that can handle API calls -func (vm *sharedEvm) CreateHandlers(context.Context) (map[string]http.Handler, error) { +func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { handler := rpc.NewServer(vm.config.APIMaxDuration.Duration) if vm.config.HttpBodyLimit > 0 { handler.SetHTTPBodyLimit(int(vm.config.HttpBodyLimit)) @@ -1506,7 +1499,7 @@ func (vm *sharedEvm) CreateHandlers(context.Context) (map[string]http.Handler, e } if vm.config.WarpAPIEnabled { - warpAPI := warp.NewAPI(vm.ctx, vm.networkCodec, vm.warpBackend, vm.client, vm.requirePrimaryNetworkSigners) + warpAPI := warp.NewAPI(vm.ctx, vm.extensionConfig.NetworkCodec, vm.warpBackend, vm.client, vm.requirePrimaryNetworkSigners) if err := handler.RegisterName("warp", warpAPI); err != nil { return nil, err } @@ -1527,7 +1520,7 @@ func (vm *sharedEvm) CreateHandlers(context.Context) (map[string]http.Handler, e } // CreateStaticHandlers makes new http handlers that can handle API calls -func (vm *sharedEvm) CreateStaticHandlers(context.Context) (map[string]http.Handler, error) { +func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, error) { handler := rpc.NewServer(0) if vm.config.HttpBodyLimit > 0 { handler.SetHTTPBodyLimit(int(vm.config.HttpBodyLimit)) 
@@ -1550,7 +1543,7 @@ func (vm *sharedEvm) CreateStaticHandlers(context.Context) (map[string]http.Hand // getAtomicTx returns the requested transaction, status, and height. // If the status is Unknown, then the returned transaction will be nil. -func (vm *sharedEvm) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) { +func (vm *VM) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) { if tx, height, err := vm.atomicTxRepository.GetByTxID(txID); err == nil { return tx, atomic.Accepted, height, nil } else if err != avalanchedatabase.ErrNotFound { @@ -1569,7 +1562,7 @@ func (vm *sharedEvm) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64 // ParseAddress takes in an address and produces the ID of the chain it's for // the ID of the address -func (vm *sharedEvm) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { +func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { chainIDAlias, hrp, addrBytes, err := address.Parse(addrStr) if err != nil { return ids.ID{}, ids.ShortID{}, err @@ -1594,7 +1587,7 @@ func (vm *sharedEvm) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { } // verifyTxAtTip verifies that [tx] is valid to be issued on top of the currently preferred block -func (vm *sharedEvm) verifyTxAtTip(tx *atomic.Tx) error { +func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { if txByteLen := len(tx.SignedBytes()); txByteLen > targetAtomicTxsSize { return fmt.Errorf("tx size (%d) exceeds total atomic txs size target (%d)", txByteLen, targetAtomicTxsSize) } @@ -1635,7 +1628,7 @@ func (vm *sharedEvm) verifyTxAtTip(tx *atomic.Tx) error { // Note: verifyTx may modify [state]. If [state] needs to be properly maintained, the caller is responsible // for reverting to the correct snapshot after calling this function. If this function is called with a // throwaway state, then this is not necessary. 
-func (vm *sharedEvm) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { +func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { parentIntf, err := vm.GetBlockInternal(context.TODO(), ids.ID(parentHash)) if err != nil { return fmt.Errorf("failed to get parent block: %w", err) @@ -1660,7 +1653,7 @@ func (vm *sharedEvm) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *bi // verifyTxs verifies that [txs] are valid to be issued into a block with parent block [parentHash] // using [rules] as the current rule set. -func (vm *sharedEvm) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules params.Rules) error { +func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules params.Rules) error { // Ensure that the parent was verified and inserted correctly. if !vm.blockChain.HasBlock(parentHash, height-1) { return errRejectedParent @@ -1707,7 +1700,7 @@ func (vm *sharedEvm) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee // GetAtomicUTXOs returns the utxos that at least one of the provided addresses is // referenced in. -func (vm *sharedEvm) GetAtomicUTXOs( +func (vm *VM) GetAtomicUTXOs( chainID ids.ID, addrs set.Set[ids.ShortID], startAddr ids.ShortID, @@ -1730,7 +1723,7 @@ func (vm *sharedEvm) GetAtomicUTXOs( } // currentRules returns the chain rules for the current block. -func (vm *sharedEvm) currentRules() params.Rules { +func (vm *VM) currentRules() params.Rules { header := vm.eth.APIBackend.CurrentHeader() return vm.chainConfig.Rules(header.Number, header.Time) } @@ -1738,7 +1731,7 @@ func (vm *sharedEvm) currentRules() params.Rules { // requirePrimaryNetworkSigners returns true if warp messages from the primary // network must be signed by the primary network validators. 
// This is necessary when the subnet is not validating the primary network. -func (vm *sharedEvm) requirePrimaryNetworkSigners() bool { +func (vm *VM) requirePrimaryNetworkSigners() bool { switch c := vm.currentRules().ActivePrecompiles[warpcontract.ContractAddress].(type) { case *warpcontract.Config: return c.RequirePrimaryNetworkSigners @@ -1747,7 +1740,7 @@ func (vm *sharedEvm) requirePrimaryNetworkSigners() bool { } } -func (vm *sharedEvm) startContinuousProfiler() { +func (vm *VM) startContinuousProfiler() { // If the profiler directory is empty, return immediately // without creating or starting a continuous profiler. if vm.config.ContinuousProfilerDir == "" { @@ -1777,7 +1770,7 @@ func (vm *sharedEvm) startContinuousProfiler() { // last accepted block hash and height by reading directly from [vm.chaindb] instead of relying // on [chain]. // Note: assumes [vm.chaindb] and [vm.genesisHash] have been initialized. -func (vm *sharedEvm) readLastAccepted() (common.Hash, uint64, error) { +func (vm *VM) readLastAccepted() (common.Hash, uint64, error) { // Attempt to load last accepted block to determine if it is necessary to // initialize state with the genesis block. 
lastAcceptedBytes, lastAcceptedErr := vm.acceptedBlockDB.Get(lastAcceptedKey) @@ -1836,7 +1829,7 @@ func attachEthService(handler *rpc.Server, apis []rpc.API, names []string) error return nil } -func (vm *sharedEvm) stateSyncEnabled(lastAcceptedHeight uint64) bool { +func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool { if vm.config.StateSyncEnabled != nil { // if the config is set, use that return *vm.config.StateSyncEnabled @@ -1846,7 +1839,7 @@ func (vm *sharedEvm) stateSyncEnabled(lastAcceptedHeight uint64) bool { return lastAcceptedHeight == 0 } -func (vm *sharedEvm) newImportTx( +func (vm *VM) newImportTx( chainID ids.ID, // chain to import from to common.Address, // Address of recipient baseFee *big.Int, // fee to use post-AP3 @@ -1866,7 +1859,7 @@ func (vm *sharedEvm) newImportTx( } // newExportTx returns a new ExportTx -func (vm *sharedEvm) newExportTx( +func (vm *VM) newExportTx( assetID ids.ID, // AssetID of the tokens to export amount uint64, // Amount of tokens to export chainID ids.ID, // Chain to send the UTXOs to @@ -1898,6 +1891,6 @@ func (vm *sharedEvm) newExportTx( return tx, nil } -func (vm *sharedEvm) PutLastAcceptedID(ID ids.ID) error { +func (vm *VM) PutLastAcceptedID(ID ids.ID) error { return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:]) } diff --git a/plugin/evm/vm_database.go b/plugin/evm/vm_database.go index 22ed7a9768..f2a5b4c344 100644 --- a/plugin/evm/vm_database.go +++ b/plugin/evm/vm_database.go @@ -17,7 +17,7 @@ import ( // initializeDBs initializes the databases used by the VM. // coreth always uses the avalanchego provided database. -func (vm *sharedEvm) initializeDBs(db avalanchedatabase.Database) error { +func (vm *VM) initializeDBs(db avalanchedatabase.Database) error { // Use NewNested rather than New so that the structure of the database // remains the same regardless of the provided baseDB type. 
vm.chaindb = rawdb.NewDatabase(database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, db))) @@ -32,7 +32,7 @@ func (vm *sharedEvm) initializeDBs(db avalanchedatabase.Database) error { return nil } -func (vm *sharedEvm) inspectDatabases() error { +func (vm *VM) inspectDatabases() error { start := time.Now() log.Info("Starting database inspection") if err := rawdb.InspectDatabase(vm.chaindb, nil, nil); err != nil { diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index bd2c900467..4816591104 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -27,6 +27,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" @@ -223,16 +224,15 @@ func setupGenesis( // newDefaultTestVM returns a new instance of the VM with default extensions // This should not be called if the VM is being extended -func newDefaultTestVM() *sharedEvm { - vm := &sharedEvm{} +func newDefaultTestVM() *VM { defaultCodec, err := message.NewCodec(message.BlockSyncSummary{}) if err != nil { panic(err) } - if err := vm.SetNetworkCodec(defaultCodec); err != nil { - panic(err) - } - return vm + + return NewExtensibleEVM(false, extension.ExtensionConfig{ + NetworkCodec: defaultCodec, + }) } // GenesisVM creates a VM instance with the genesis test bytes and returns @@ -246,7 +246,7 @@ func GenesisVM(t *testing.T, upgradeJSON string, ) ( chan commonEng.Message, - *sharedEvm, + *VM, database.Database, *avalancheatomic.Memory, *enginetest.Sender, @@ -265,7 +265,7 @@ func GenesisVMWithClock( clock mockable.Clock, ) ( chan commonEng.Message, - *sharedEvm, + *VM, database.Database, *avalancheatomic.Memory, *enginetest.Sender, @@ -335,7 +335,7 @@ func addUTXO(sharedMemory *avalancheatomic.Memory, ctx *snow.Context, txID ids.I // 
GenesisVMWithUTXOs creates a GenesisVM and generates UTXOs in the X-Chain Shared Memory containing AVAX based on the [utxos] map // Generates UTXOIDs by using a hash of the address in the [utxos] map such that the UTXOs will be generated deterministically. // If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *sharedEvm, database.Database, *avalancheatomic.Memory, *enginetest.Sender) { +func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *avalancheatomic.Memory, *enginetest.Sender) { issuer, vm, db, sharedMemory, sender := GenesisVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON) for addr, avaxAmount := range utxos { txID, err := ids.ToID(hashing.ComputeHash256(addr.Bytes())) @@ -977,8 +977,8 @@ func testConflictingImportTxs(t *testing.T, genesis string) { func TestReissueAtomicTxHigherGasPrice(t *testing.T) { kc := secp256k1fx.NewKeychain(testKeys...) 
- for name, issueTxs := range map[string]func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, discarded []*atomic.Tx){ - "single UTXO override": func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { + for name, issueTxs := range map[string]func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, discarded []*atomic.Tx){ + "single UTXO override": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { utxo, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) if err != nil { t.Fatal(err) @@ -1001,7 +1001,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} }, - "one of two UTXOs overrides": func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { + "one of two UTXOs overrides": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) if err != nil { t.Fatal(err) @@ -1028,7 +1028,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} }, - "hola": func(t *testing.T, vm *sharedEvm, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { + "hola": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) if err != nil { t.Fatal(err) diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 9560d7ce59..e69927a303 100644 --- a/plugin/evm/vm_warp_test.go +++ 
b/plugin/evm/vm_warp_test.go @@ -509,7 +509,7 @@ func TestReceiveWarpMessage(t *testing.T) { } func testReceiveWarpMessage( - t *testing.T, issuer chan commonEng.Message, vm *sharedEvm, + t *testing.T, issuer chan commonEng.Message, vm *VM, sourceChainID ids.ID, msgFrom warpMsgFrom, useSigners useWarpMsgSigners, blockTime time.Time, diff --git a/plugin/main.go b/plugin/main.go index 5002571705..6289e95446 100644 --- a/plugin/main.go +++ b/plugin/main.go @@ -29,5 +29,10 @@ func main() { fmt.Printf("failed to set fd limit correctly due to: %s", err) os.Exit(1) } - rpcchainvm.Serve(context.Background(), evm.NewPluginVM()) + vm, err := evm.NewPluginVM() + if err != nil { + fmt.Printf("couldn't create evm plugin: %s", err) + os.Exit(1) + } + rpcchainvm.Serve(context.Background(), vm) } From 1dcfa229ec9ee9816e9024534722b4d5b3d25b6a Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 28 Jan 2025 18:05:53 +0300 Subject: [PATCH 61/91] move with interfaces --- consensus/dummy/consensus.go | 8 +- params/config_extra.go | 2 +- params/network_upgrades.go | 2 +- plugin/evm/api.go | 227 ---- plugin/evm/atomic/extension/config.go | 88 ++ plugin/evm/atomic/import_tx.go | 10 +- plugin/evm/atomic/state/atomic_backend.go | 37 +- plugin/evm/atomic/state/atomic_state.go | 8 +- .../atomic/state/atomic_trie_iterator_test.go | 6 +- plugin/evm/atomic/state/atomic_trie_test.go | 34 +- .../evm/atomic/state/atomic_tx_repository.go | 21 +- ...provider.go => atomic_summary_provider.go} | 25 +- .../evm/atomic/sync/atomic_sync_extender.go | 10 +- plugin/evm/atomic/sync/atomic_syncer_test.go | 2 +- plugin/evm/atomic/tx.go | 2 +- plugin/evm/atomic/txpool/mempool.go | 39 +- plugin/evm/atomic/txpool/mempool_test.go | 9 +- plugin/evm/atomic/vm/api.go | 231 +++++ plugin/evm/atomic/vm/atomic_leaf_handler.go | 37 + plugin/evm/atomic/vm/block_extension.go | 260 +++++ plugin/evm/atomic/vm/bonus_blocks.go | 77 ++ plugin/evm/{ => atomic/vm}/export_tx_test.go | 141 +-- plugin/evm/{ => 
atomic/vm}/ext_data_hashes.go | 2 +- plugin/evm/{ => atomic/vm}/formatting.go | 28 +- .../{ => atomic/vm}/fuji_ext_data_hashes.json | 0 .../vm}/mainnet_ext_data_hashes.json | 0 plugin/evm/atomic/vm/vm.go | 787 +++++++++++++- plugin/evm/block.go | 226 +--- plugin/evm/block_builder.go | 27 +- ...block_verification.go => block_manager.go} | 121 +-- plugin/evm/client/client.go | 1 + plugin/evm/config/constants.go | 11 + plugin/evm/{gossip.go => eth_gossiper.go} | 56 - plugin/evm/extension/extension.go | 7 - plugin/evm/factory.go | 12 +- plugin/evm/gossip/handler.go | 64 ++ plugin/evm/network_handler.go | 20 +- plugin/evm/sync/extender.go | 31 + plugin/evm/sync/syncervm_client.go | 31 +- plugin/evm/sync/syncervm_server.go | 31 +- plugin/evm/syncervm_test.go | 5 +- plugin/evm/testutils/utils.go | 35 + plugin/evm/vm.go | 968 +++--------------- sync/client/mock_client.go | 4 +- sync/client/stats/stats.go | 2 +- sync/handlers/leafs_request.go | 16 +- sync/handlers/stats/stats.go | 2 + utils/handler.go | 19 + vmerrs/vmerrs.go | 3 + 49 files changed, 2122 insertions(+), 1663 deletions(-) create mode 100644 plugin/evm/atomic/extension/config.go rename plugin/evm/atomic/sync/{atomic_sync_provider.go => atomic_summary_provider.go} (51%) create mode 100644 plugin/evm/atomic/vm/api.go create mode 100644 plugin/evm/atomic/vm/atomic_leaf_handler.go create mode 100644 plugin/evm/atomic/vm/block_extension.go create mode 100644 plugin/evm/atomic/vm/bonus_blocks.go rename plugin/evm/{ => atomic/vm}/export_tx_test.go (92%) rename plugin/evm/{ => atomic/vm}/ext_data_hashes.go (98%) rename plugin/evm/{ => atomic/vm}/formatting.go (69%) rename plugin/evm/{ => atomic/vm}/fuji_ext_data_hashes.json (100%) rename plugin/evm/{ => atomic/vm}/mainnet_ext_data_hashes.json (100%) rename plugin/evm/{block_verification.go => block_manager.go} (66%) rename plugin/evm/{gossip.go => eth_gossiper.go} (76%) delete mode 100644 plugin/evm/extension/extension.go create mode 100644 plugin/evm/gossip/handler.go 
create mode 100644 plugin/evm/sync/extender.go create mode 100644 plugin/evm/testutils/utils.go create mode 100644 utils/handler.go diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index 1452335a95..adc7d0ab81 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -48,7 +48,7 @@ type ( } DummyEngine struct { - cb ConsensusCallbacks + cb *ConsensusCallbacks clock *mockable.Clock consensusMode Mode } @@ -67,21 +67,21 @@ func NewFaker() *DummyEngine { } } -func NewFakerWithClock(cb ConsensusCallbacks, clock *mockable.Clock) *DummyEngine { +func NewFakerWithClock(cb *ConsensusCallbacks, clock *mockable.Clock) *DummyEngine { return &DummyEngine{ cb: cb, clock: clock, } } -func NewFakerWithCallbacks(cb ConsensusCallbacks) *DummyEngine { +func NewFakerWithCallbacks(cb *ConsensusCallbacks) *DummyEngine { return &DummyEngine{ cb: cb, clock: &mockable.Clock{}, } } -func NewFakerWithMode(cb ConsensusCallbacks, mode Mode) *DummyEngine { +func NewFakerWithMode(cb *ConsensusCallbacks, mode Mode) *DummyEngine { return &DummyEngine{ cb: cb, clock: &mockable.Clock{}, diff --git a/params/config_extra.go b/params/config_extra.go index abe6ed7bb0..a9449d5206 100644 --- a/params/config_extra.go +++ b/params/config_extra.go @@ -192,7 +192,7 @@ func GetChainConfig(agoUpgrade upgrade.Config, chainID *big.Int) *ChainConfig { PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - NetworkUpgrades: getNetworkUpgrades(agoUpgrade), + NetworkUpgrades: GetNetworkUpgrades(agoUpgrade), } return c } diff --git a/params/network_upgrades.go b/params/network_upgrades.go index 6af886b093..2a1869e87d 100644 --- a/params/network_upgrades.go +++ b/params/network_upgrades.go @@ -194,7 +194,7 @@ func (n *NetworkUpgrades) Description() string { return banner } -func getNetworkUpgrades(agoUpgrade upgrade.Config) NetworkUpgrades { +func GetNetworkUpgrades(agoUpgrade upgrade.Config) NetworkUpgrades { return 
NetworkUpgrades{ ApricotPhase1BlockTimestamp: utils.TimeToNewUint64(agoUpgrade.ApricotPhase1Time), ApricotPhase2BlockTimestamp: utils.TimeToNewUint64(agoUpgrade.ApricotPhase2Time), diff --git a/plugin/evm/api.go b/plugin/evm/api.go index 12791d4e08..d2e286f3ff 100644 --- a/plugin/evm/api.go +++ b/plugin/evm/api.go @@ -5,40 +5,12 @@ package evm import ( "context" - "errors" - "fmt" "math/big" - "net/http" - "github.com/ava-labs/avalanchego/api" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/plugin/evm/client" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) -// test constants -const ( - GenesisTestAddr = "0x751a0b96e1042bee789452ecb20253fba40dbe85" - GenesisTestKey = "0xabd71b35d559563fea757f0f5edbde286fb8c043105b15abb7cd57189306d7d1" - - // Max number of addresses that can be passed in as argument to GetUTXOs - maxGetUTXOsAddrs = 1024 -) - -var ( - errNoAddresses = errors.New("no addresses provided") - errNoSourceChain = errors.New("no source chain provided") - errNilTxID = errors.New("nil transaction ID") - - initialBaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) -) - // SnowmanAPI introduces snowman specific functionality to the evm type SnowmanAPI struct{ vm *VM } @@ -65,202 +37,3 @@ func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { api.vm.builder.signalTxsReady() return nil } - -// AvaxAPI offers Avalanche network related API methods -type AvaxAPI struct{ vm *VM } - -type VersionReply struct { - Version string `json:"version"` -} - -// ClientVersion returns the version of the VM running -func (service *AvaxAPI) Version(r *http.Request, _ *struct{}, reply *VersionReply) error { - reply.Version = Version - return nil -} - -// GetUTXOs gets all utxos for 
passed in addresses -func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply *api.GetUTXOsReply) error { - log.Info("EVM: GetUTXOs called", "Addresses", args.Addresses) - - if len(args.Addresses) == 0 { - return errNoAddresses - } - if len(args.Addresses) > maxGetUTXOsAddrs { - return fmt.Errorf("number of addresses given, %d, exceeds maximum, %d", len(args.Addresses), maxGetUTXOsAddrs) - } - - if args.SourceChain == "" { - return errNoSourceChain - } - - chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) - if err != nil { - return fmt.Errorf("problem parsing source chainID %q: %w", args.SourceChain, err) - } - sourceChain := chainID - - addrSet := set.Set[ids.ShortID]{} - for _, addrStr := range args.Addresses { - addr, err := service.vm.ParseServiceAddress(addrStr) - if err != nil { - return fmt.Errorf("couldn't parse address %q: %w", addrStr, err) - } - addrSet.Add(addr) - } - - startAddr := ids.ShortEmpty - startUTXO := ids.Empty - if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" { - startAddr, err = service.vm.ParseServiceAddress(args.StartIndex.Address) - if err != nil { - return fmt.Errorf("couldn't parse start index address %q: %w", args.StartIndex.Address, err) - } - startUTXO, err = ids.FromString(args.StartIndex.UTXO) - if err != nil { - return fmt.Errorf("couldn't parse start index utxo: %w", err) - } - } - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - utxos, endAddr, endUTXOID, err := service.vm.GetAtomicUTXOs( - sourceChain, - addrSet, - startAddr, - startUTXO, - int(args.Limit), - ) - if err != nil { - return fmt.Errorf("problem retrieving UTXOs: %w", err) - } - - reply.UTXOs = make([]string, len(utxos)) - for i, utxo := range utxos { - b, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) - if err != nil { - return fmt.Errorf("problem marshalling UTXO: %w", err) - } - str, err := formatting.Encode(args.Encoding, b) - if err != nil { - return fmt.Errorf("problem encoding 
utxo: %w", err) - } - reply.UTXOs[i] = str - } - - endAddress, err := service.vm.FormatLocalAddress(endAddr) - if err != nil { - return fmt.Errorf("problem formatting address: %w", err) - } - - reply.EndIndex.Address = endAddress - reply.EndIndex.UTXO = endUTXOID.String() - reply.NumFetched = json.Uint64(len(utxos)) - reply.Encoding = args.Encoding - return nil -} - -func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { - log.Info("EVM: IssueTx called") - - txBytes, err := formatting.Decode(args.Encoding, args.Tx) - if err != nil { - return fmt.Errorf("problem decoding transaction: %w", err) - } - - tx := &atomic.Tx{} - if _, err := atomic.Codec.Unmarshal(txBytes, tx); err != nil { - return fmt.Errorf("problem parsing transaction: %w", err) - } - if err := tx.Sign(atomic.Codec, nil); err != nil { - return fmt.Errorf("problem initializing transaction: %w", err) - } - - response.TxID = tx.ID() - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - if err := service.vm.mempool.AddLocalTx(tx); err != nil { - return err - } - service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) - return nil -} - -// GetAtomicTxStatus returns the status of the specified transaction -func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, reply *client.GetAtomicTxStatusReply) error { - log.Info("EVM: GetAtomicTxStatus called", "txID", args.TxID) - - if args.TxID == ids.Empty { - return errNilTxID - } - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - _, status, height, _ := service.vm.getAtomicTx(args.TxID) - - reply.Status = status - if status == atomic.Accepted { - // Since chain state updates run asynchronously with VM block acceptance, - // avoid returning [Accepted] until the chain state reaches the block - // containing the atomic tx. 
- lastAccepted := service.vm.blockChain.LastAcceptedBlock() - if height > lastAccepted.NumberU64() { - reply.Status = atomic.Processing - return nil - } - - jsonHeight := json.Uint64(height) - reply.BlockHeight = &jsonHeight - } - return nil -} - -type FormattedTx struct { - api.FormattedTx - BlockHeight *json.Uint64 `json:"blockHeight,omitempty"` -} - -// GetAtomicTx returns the specified transaction -func (service *AvaxAPI) GetAtomicTx(r *http.Request, args *api.GetTxArgs, reply *FormattedTx) error { - log.Info("EVM: GetAtomicTx called", "txID", args.TxID) - - if args.TxID == ids.Empty { - return errNilTxID - } - - service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() - - tx, status, height, err := service.vm.getAtomicTx(args.TxID) - if err != nil { - return err - } - - if status == atomic.Unknown { - return fmt.Errorf("could not find tx %s", args.TxID) - } - - txBytes, err := formatting.Encode(args.Encoding, tx.SignedBytes()) - if err != nil { - return err - } - reply.Tx = txBytes - reply.Encoding = args.Encoding - if status == atomic.Accepted { - // Since chain state updates run asynchronously with VM block acceptance, - // avoid returning [Accepted] until the chain state reaches the block - // containing the atomic tx. 
- lastAccepted := service.vm.blockChain.LastAcceptedBlock() - if height > lastAccepted.NumberU64() { - return nil - } - - jsonHeight := json.Uint64(height) - reply.BlockHeight = &jsonHeight - } - return nil -} diff --git a/plugin/evm/atomic/extension/config.go b/plugin/evm/atomic/extension/config.go new file mode 100644 index 0000000000..06b018d916 --- /dev/null +++ b/plugin/evm/atomic/extension/config.go @@ -0,0 +1,88 @@ +package extension + +import ( + "context" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ava-labs/coreth/sync/handlers" + "github.com/prometheus/client_golang/prometheus" + + "github.com/ethereum/go-ethereum/common" +) + +// TODO: move this file out from atomic pkg + +type ExtensibleVM interface { + // NewClient returns a client to send messages with for the given protocol + NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client + // AddHandler registers a server handler for an application protocol + AddHandler(protocol uint64, handler p2p.Handler) error + GetBlockExtended(ctx context.Context, blkID ids.ID) (ExtendedBlock, error) + LastAcceptedBlockInternal() snowman.Block + Validators() *p2p.Validators + SetExtensionConfig(config *Config) error + Blockchain() *core.BlockChain + Config() *config.Config + MetricRegistry() *prometheus.Registry + 
ReadLastAccepted() (common.Hash, uint64, error) + VersionDB() *versiondb.Database +} + +type InnerVM interface { + ExtensibleVM + avalanchecommon.VM + block.ChainVM + block.BuildBlockWithContextChainVM + block.StateSyncableVM +} + +type ExtendedBlock interface { + snowman.Block + GetExtraData() interface{} + GetEthBlock() *types.Block +} + +type BlockExtension interface { + InitializeExtraData(ethBlock *types.Block, chainConfig *params.ChainConfig) (any, error) + SyntacticVerify(b ExtendedBlock, rules params.Rules) error + Accept(b ExtendedBlock, acceptedBatch database.Batch) error + Reject(b ExtendedBlock) error + Cleanup(b ExtendedBlock) +} + +type BuilderMempool interface { + Len() int + SubscribePendingTxs() <-chan struct{} +} + +type LeafRequestConfig struct { + LeafType message.NodeType + MetricName string + Handler handlers.LeafRequestHandler +} + +type Config struct { + NetworkCodec codec.Manager + ConsensusCallbacks *dummy.ConsensusCallbacks + SyncSummaryProvider sync.SummaryProvider + SyncExtender sync.Extender + SyncableParser message.SyncableParser + BlockExtension BlockExtension + SyncLeafType *LeafRequestConfig + ExtraMempool BuilderMempool +} diff --git a/plugin/evm/atomic/import_tx.go b/plugin/evm/atomic/import_tx.go index 5b608ca2f5..8dc0b9e68c 100644 --- a/plugin/evm/atomic/import_tx.go +++ b/plugin/evm/atomic/import_tx.go @@ -42,7 +42,7 @@ var ( ErrAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") errInsufficientFundsForFee = errors.New("insufficient AVAX funds to pay transaction fee") - errRejectedParent = errors.New("rejected parent") + ErrRejectedParent = errors.New("rejected parent") ) // UnsignedImportTx is an unsigned ImportTx @@ -463,13 +463,9 @@ func conflicts(backend *VerifierBackend, inputs set.Set[ids.ID], ancestor Atomic // will be missing. // If the ancestor is processing, then the block may have // been verified. 
- nextAncestorIntf, err := fetcher.GetBlockInternal(context.TODO(), nextAncestorID) + nextAncestor, err := fetcher.GetAtomicBlock(context.TODO(), nextAncestorID) if err != nil { - return errRejectedParent - } - nextAncestor, ok := nextAncestorIntf.(AtomicBlockContext) - if !ok { - return fmt.Errorf("ancestor block %s had unexpected type %T", nextAncestor.ID(), nextAncestorIntf) + return ErrRejectedParent } ancestor = nextAncestor } diff --git a/plugin/evm/atomic/state/atomic_backend.go b/plugin/evm/atomic/state/atomic_backend.go index 309fbb41ca..79ebae32dc 100644 --- a/plugin/evm/atomic/state/atomic_backend.go +++ b/plugin/evm/atomic/state/atomic_backend.go @@ -11,8 +11,6 @@ import ( avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -21,12 +19,7 @@ import ( "github.com/ethereum/go-ethereum/log" ) -var ( - atomicTrieDBPrefix = []byte("atomicTrieDB") - atomicTrieMetaDBPrefix = []byte("atomicTrieMetaDB") - appliedSharedMemoryCursorKey = []byte("atomicTrieLastAppliedToSharedMemory") - sharedMemoryApplyBatchSize = 10_000 // specifies the number of atomic operations to batch progress updates -) +var sharedMemoryApplyBatchSize = 10_000 // specifies the number of atomic operations to batch progress updates const ( progressLogFrequency = 30 * time.Second @@ -36,9 +29,7 @@ const ( // the AtomicTrie, AtomicTxRepository, and the VM's shared memory. 
type AtomicBackend struct { codec codec.Manager - bonusBlocks map[uint64]ids.ID // Map of height to blockID for blocks to skip indexing - db *versiondb.Database // Underlying database - metadataDB database.Database // Underlying database containing the atomic trie metadata + bonusBlocks map[uint64]ids.ID // Map of height to blockID for blocks to skip indexing sharedMemory avalancheatomic.SharedMemory repo *AtomicTxRepository @@ -50,22 +41,18 @@ type AtomicBackend struct { // NewAtomicBackend creates an AtomicBackend from the specified dependencies func NewAtomicBackend( - db *versiondb.Database, sharedMemory avalancheatomic.SharedMemory, + sharedMemory avalancheatomic.SharedMemory, bonusBlocks map[uint64]ids.ID, repo *AtomicTxRepository, lastAcceptedHeight uint64, lastAcceptedHash common.Hash, commitInterval uint64, ) (*AtomicBackend, error) { - atomicTrieDB := prefixdb.New(atomicTrieDBPrefix, db) - metadataDB := prefixdb.New(atomicTrieMetaDBPrefix, db) - codec := repo.Codec() + codec := repo.codec - atomicTrie, err := NewAtomicTrie(atomicTrieDB, metadataDB, codec, lastAcceptedHeight, commitInterval) + atomicTrie, err := NewAtomicTrie(repo.atomicTrieDB, repo.metadataDB, codec, lastAcceptedHeight, commitInterval) if err != nil { return nil, err } atomicBackend := &AtomicBackend{ codec: codec, - db: db, - metadataDB: metadataDB, sharedMemory: sharedMemory, bonusBlocks: bonusBlocks, repo: repo, @@ -144,7 +131,7 @@ func (a *AtomicBackend) initialize(lastAcceptedHeight uint64) error { return err } if isCommit { - if err := a.db.Commit(); err != nil { + if err := a.repo.db.Commit(); err != nil { return err } } @@ -194,7 +181,7 @@ func (a *AtomicBackend) initialize(lastAcceptedHeight uint64) error { // The cursor is initially set by MarkApplyToSharedMemoryCursor to signal to the atomic trie // the range of operations that were added to the trie without being executed on shared memory. 
func (a *AtomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { - sharedMemoryCursor, err := a.metadataDB.Get(appliedSharedMemoryCursorKey) + sharedMemoryCursor, err := a.repo.metadataDB.Get(appliedSharedMemoryCursorKey) if err == database.ErrNotFound { return nil } else if err != nil { @@ -266,10 +253,10 @@ func (a *AtomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { // Update the cursor to the key of the atomic operation being executed on shared memory. // If the node shuts down in the middle of this function call, ApplyToSharedMemory will // resume operation starting at the key immediately following [it.Key()]. - if err = a.metadataDB.Put(appliedSharedMemoryCursorKey, it.Key()); err != nil { + if err = a.repo.metadataDB.Put(appliedSharedMemoryCursorKey, it.Key()); err != nil { return err } - batch, err := a.db.CommitBatch() + batch, err := a.repo.db.CommitBatch() if err != nil { return err } @@ -291,10 +278,10 @@ func (a *AtomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { return err } - if err = a.metadataDB.Delete(appliedSharedMemoryCursorKey); err != nil { + if err = a.repo.metadataDB.Delete(appliedSharedMemoryCursorKey); err != nil { return err } - batch, err := a.db.CommitBatch() + batch, err := a.repo.db.CommitBatch() if err != nil { return err } @@ -317,7 +304,7 @@ func (a *AtomicBackend) ApplyToSharedMemory(lastAcceptedBlock uint64) error { func (a *AtomicBackend) MarkApplyToSharedMemoryCursor(previousLastAcceptedHeight uint64) error { // Set the cursor to [previousLastAcceptedHeight+1] so that we begin the iteration at the // first item that has not been applied to shared memory. 
- return database.PutUInt64(a.metadataDB, appliedSharedMemoryCursorKey, previousLastAcceptedHeight+1) + return database.PutUInt64(a.repo.metadataDB, appliedSharedMemoryCursorKey, previousLastAcceptedHeight+1) } func (a *AtomicBackend) GetVerifiedAtomicState(blockHash common.Hash) (*atomicState, error) { diff --git a/plugin/evm/atomic/state/atomic_state.go b/plugin/evm/atomic/state/atomic_state.go index 07d9e806fd..940c987fe5 100644 --- a/plugin/evm/atomic/state/atomic_state.go +++ b/plugin/evm/atomic/state/atomic_state.go @@ -30,11 +30,7 @@ func (a *atomicState) Root() common.Hash { } // Accept applies the state change to VM's persistent storage. -func (a *atomicState) Accept(commitBatch database.Batch, requests map[ids.ID]*avalancheatomic.Requests) error { - // Add the new requests to the batch to be accepted - for chainID, requests := range requests { - mergeAtomicOpsToMap(a.atomicOps, chainID, requests) - } +func (a *atomicState) Accept(commitBatch database.Batch) error { // Update the atomic tx repository. Note it is necessary to invoke // the correct method taking bonus blocks into consideration. if a.backend.IsBonus(a.blockHeight, a.blockHash) { @@ -58,7 +54,7 @@ func (a *atomicState) Accept(commitBatch database.Batch, requests map[ids.ID]*av // get changes from the atomic trie and repository in a batch // to be committed atomically with [commitBatch] and shared memory. 
- atomicChangesBatch, err := a.backend.db.CommitBatch() + atomicChangesBatch, err := a.backend.repo.db.CommitBatch() if err != nil { return fmt.Errorf("could not create commit batch in atomicState accept: %w", err) } diff --git a/plugin/evm/atomic/state/atomic_trie_iterator_test.go b/plugin/evm/atomic/state/atomic_trie_iterator_test.go index 71aa2f6dd7..47533e5d5b 100644 --- a/plugin/evm/atomic/state/atomic_trie_iterator_test.go +++ b/plugin/evm/atomic/state/atomic_trie_iterator_test.go @@ -34,7 +34,7 @@ func TestIteratorCanIterate(t *testing.T) { // create an atomic trie // on create it will initialize all the transactions from the above atomic repository - atomicBackend, err := NewAtomicBackend(db, snowCtx.SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 100) + atomicBackend, err := NewAtomicBackend(snowCtx.SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 100) assert.NoError(t, err) atomicTrie1 := atomicBackend.AtomicTrie() @@ -47,7 +47,7 @@ func TestIteratorCanIterate(t *testing.T) { // iterate on a new atomic trie to make sure there is no resident state affecting the data and the // iterator - atomicBackend2, err := NewAtomicBackend(db, snowCtx.SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 100) + atomicBackend2, err := NewAtomicBackend(snowCtx.SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 100) assert.NoError(t, err) atomicTrie2 := atomicBackend2.AtomicTrie() lastCommittedHash2, lastCommittedHeight2 := atomicTrie2.LastCommitted() @@ -76,7 +76,7 @@ func TestIteratorHandlesInvalidData(t *testing.T) { snowCtx := utils.TestSnowContext() commitInterval := uint64(100) - atomicBackend, err := NewAtomicBackend(db, snowCtx.SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(snowCtx.SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, commitInterval) require.NoError(err) atomicTrie := atomicBackend.AtomicTrie() diff --git 
a/plugin/evm/atomic/state/atomic_trie_test.go b/plugin/evm/atomic/state/atomic_trie_test.go index 99a72bab85..ada195e7ee 100644 --- a/plugin/evm/atomic/state/atomic_trie_test.go +++ b/plugin/evm/atomic/state/atomic_trie_test.go @@ -142,7 +142,7 @@ func TestAtomicTrieInitialize(t *testing.T) { writeTxs(t, repo, 1, test.lastAcceptedHeight+1, test.numTxsPerBlock, nil, operationsMap) // Construct the atomic trie for the first time - atomicBackend1, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + atomicBackend1, err := NewAtomicBackend(utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -158,7 +158,7 @@ func TestAtomicTrieInitialize(t *testing.T) { verifyOperations(t, atomicTrie1, atomictest.TestTxCodec, rootHash1, 1, test.expectedCommitHeight, operationsMap) // Construct the atomic trie again (on the same database) and ensure the last accepted root is correct. - atomicBackend2, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + atomicBackend2, err := NewAtomicBackend(utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -166,9 +166,7 @@ func TestAtomicTrieInitialize(t *testing.T) { assert.Equal(t, atomicTrie1.LastAcceptedRoot(), atomicTrie2.LastAcceptedRoot()) // Construct the atomic trie again (on an empty database) and ensure that it produces the same hash. 
- atomicBackend3, err := NewAtomicBackend( - versiondb.New(memdb.New()), utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, - ) + atomicBackend3, err := NewAtomicBackend(utils.TestSnowContext().SharedMemory, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -204,9 +202,7 @@ func TestAtomicTrieInitialize(t *testing.T) { verifyOperations(t, atomicTrie1, atomictest.TestTxCodec, updatedRoot, 1, updatedLastCommitHeight, operationsMap) // Generate a new atomic trie to compare the root against. - atomicBackend4, err := NewAtomicBackend( - versiondb.New(memdb.New()), utils.TestSnowContext().SharedMemory, nil, repo, nextCommitHeight, common.Hash{}, test.commitInterval, - ) + atomicBackend4, err := NewAtomicBackend(utils.TestSnowContext().SharedMemory, nil, repo, nextCommitHeight, common.Hash{}, test.commitInterval) if err != nil { t.Fatal(err) } @@ -228,7 +224,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { writeTxs(t, repo, 1, lastAcceptedHeight+1, constTxsPerHeight(2), nil, operationsMap) // Initialize atomic repository - atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval*/) + atomicBackend, err := NewAtomicBackend(utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval*/) assert.NoError(t, err) atomicTrie := atomicBackend.AtomicTrie() @@ -244,7 +240,7 @@ func TestIndexerInitializesOnlyOnce(t *testing.T) { assert.NoError(t, err) // Re-initialize the atomic trie - atomicBackend, err = NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval */) + atomicBackend, err = NewAtomicBackend(utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 10 /* commitInterval */) assert.NoError(t, err) atomicTrie = 
atomicBackend.AtomicTrie() @@ -259,7 +255,7 @@ func newTestAtomicTrie(t *testing.T) *AtomicTrie { if err != nil { t.Fatal(err) } - atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, testCommitInterval) + atomicBackend, err := NewAtomicBackend(utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, testCommitInterval) if err != nil { t.Fatal(err) } @@ -346,7 +342,7 @@ func TestAtomicTrieDoesNotSkipBonusBlocks(t *testing.T) { 14: {}, } // Construct the atomic trie for the first time - atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, bonusBlocks, repo, lastAcceptedHeight, common.Hash{}, commitInterval) + atomicBackend, err := NewAtomicBackend(utils.TestSnowContext().SharedMemory, bonusBlocks, repo, lastAcceptedHeight, common.Hash{}, commitInterval) if err != nil { t.Fatal(err) } @@ -440,7 +436,7 @@ func TestApplyToSharedMemory(t *testing.T) { cursor := make([]byte, wrappers.LongLen+len(atomictest.TestBlockchainID[:])) binary.BigEndian.PutUint64(cursor, 10) copy(cursor[wrappers.LongLen:], atomictest.TestBlockchainID[:]) - return a.metadataDB.Put(appliedSharedMemoryCursorKey, cursor) + return a.repo.metadataDB.Put(appliedSharedMemoryCursorKey, cursor) }, expectOpsApplied: func(height uint64) bool { return height > 10 && height <= 20 }, }, @@ -461,7 +457,7 @@ func TestApplyToSharedMemory(t *testing.T) { // Initialize atomic repository m := avalancheatomic.NewMemory(db) sharedMemories := atomictest.NewSharedMemories(m, ids.GenerateTestID(), atomictest.TestBlockchainID) - backend, err := NewAtomicBackend(db, sharedMemories.ThisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) + backend, err := NewAtomicBackend(sharedMemories.ThisChain, test.bonusBlockHeights, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) atomicTrie := backend.AtomicTrie() @@ -494,9 +490,7 @@ func 
TestApplyToSharedMemory(t *testing.T) { assert.NoError(t, err) assert.False(t, hasMarker) // reinitialize the atomic trie - backend, err = NewAtomicBackend( - db, sharedMemories.ThisChain, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval, - ) + backend, err = NewAtomicBackend(sharedMemories.ThisChain, nil, repo, test.lastAcceptedHeight, common.Hash{}, test.commitInterval) assert.NoError(t, err) // no further changes should have occurred in shared memory // assert they are as they were prior to reinitializing @@ -536,7 +530,7 @@ func BenchmarkAtomicTrieInit(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { sharedMemory := utils.TestSnowContext().SharedMemory - atomicBackend, err := NewAtomicBackend(db, sharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 5000) + atomicBackend, err := NewAtomicBackend(sharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 5000) assert.NoError(b, err) atomicTrie = atomicBackend.AtomicTrie() @@ -561,7 +555,7 @@ func BenchmarkAtomicTrieIterate(b *testing.B) { assert.NoError(b, err) writeTxs(b, repo, 1, lastAcceptedHeight, constTxsPerHeight(3), nil, operationsMap) - atomicBackend, err := NewAtomicBackend(db, utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 5000) + atomicBackend, err := NewAtomicBackend(utils.TestSnowContext().SharedMemory, nil, repo, lastAcceptedHeight, common.Hash{}, 5000) assert.NoError(b, err) atomicTrie := atomicBackend.AtomicTrie() @@ -636,7 +630,7 @@ func benchmarkApplyToSharedMemory(b *testing.B, disk database.Database, blocks u repo, err := NewAtomicTxRepository(db, atomictest.TestTxCodec, lastAcceptedHeight) assert.NoError(b, err) - backend, err := NewAtomicBackend(db, sharedMemory, nil, repo, 0, common.Hash{}, 5000) + backend, err := NewAtomicBackend(sharedMemory, nil, repo, 0, common.Hash{}, 5000) if err != nil { b.Fatal(err) } diff --git a/plugin/evm/atomic/state/atomic_tx_repository.go 
b/plugin/evm/atomic/state/atomic_tx_repository.go index 6beae0ff51..83794fea75 100644 --- a/plugin/evm/atomic/state/atomic_tx_repository.go +++ b/plugin/evm/atomic/state/atomic_tx_repository.go @@ -27,10 +27,13 @@ const ( ) var ( - atomicTxIDDBPrefix = []byte("atomicTxDB") - atomicHeightTxDBPrefix = []byte("atomicHeightTxDB") - atomicRepoMetadataDBPrefix = []byte("atomicRepoMetadataDB") - maxIndexedHeightKey = []byte("maxIndexedAtomicTxHeight") + atomicTxIDDBPrefix = []byte("atomicTxDB") + atomicHeightTxDBPrefix = []byte("atomicHeightTxDB") + atomicRepoMetadataDBPrefix = []byte("atomicRepoMetadataDB") + atomicTrieDBPrefix = []byte("atomicTrieDB") + atomicTrieMetaDBPrefix = []byte("atomicTrieMetaDB") + appliedSharedMemoryCursorKey = []byte("atomicTrieLastAppliedToSharedMemory") + maxIndexedHeightKey = []byte("maxIndexedAtomicTxHeight") // Historically used to track the completion of a migration // bonusBlocksRepairedKey = []byte("bonusBlocksRepaired") @@ -48,6 +51,10 @@ type AtomicTxRepository struct { // has indexed. atomicRepoMetadataDB database.Database + metadataDB database.Database // Underlying database containing the atomic trie metadata + + atomicTrieDB database.Database // Underlying database containing the atomic trie + // [db] is used to commit to the underlying versiondb. 
db *versiondb.Database @@ -59,6 +66,8 @@ func NewAtomicTxRepository( db *versiondb.Database, codec codec.Manager, lastAcceptedHeight uint64, ) (*AtomicTxRepository, error) { repo := &AtomicTxRepository{ + atomicTrieDB: prefixdb.New(atomicTrieDBPrefix, db), + metadataDB: prefixdb.New(atomicTrieMetaDBPrefix, db), acceptedAtomicTxDB: prefixdb.New(atomicTxIDDBPrefix, db), acceptedAtomicTxByHeightDB: prefixdb.New(atomicHeightTxDBPrefix, db), atomicRepoMetadataDB: prefixdb.New(atomicRepoMetadataDBPrefix, db), @@ -349,7 +358,3 @@ func (a *AtomicTxRepository) IterateByHeight(height uint64) database.Iterator { binary.BigEndian.PutUint64(heightBytes, height) return a.acceptedAtomicTxByHeightDB.NewIteratorWithStart(heightBytes) } - -func (a *AtomicTxRepository) Codec() codec.Manager { - return a.codec -} diff --git a/plugin/evm/atomic/sync/atomic_sync_provider.go b/plugin/evm/atomic/sync/atomic_summary_provider.go similarity index 51% rename from plugin/evm/atomic/sync/atomic_sync_provider.go rename to plugin/evm/atomic/sync/atomic_summary_provider.go index 40060f60ed..cddf2cdabb 100644 --- a/plugin/evm/atomic/sync/atomic_sync_provider.go +++ b/plugin/evm/atomic/sync/atomic_summary_provider.go @@ -7,24 +7,24 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/sync" "github.com/ethereum/go-ethereum/common" ) -var _ sync.SummaryProvider = &AtomicSyncProvider{} +var _ sync.SummaryProvider = &AtomicSummaryProvider{} -type AtomicSyncProvider struct { - chain *core.BlockChain +type AtomicSummaryProvider struct { atomicTrie AtomicTrie } -func NewAtomicProvider(chain *core.BlockChain, atomicTrie AtomicTrie) *AtomicSyncProvider { - return &AtomicSyncProvider{chain: chain, atomicTrie: atomicTrie} +func (a *AtomicSummaryProvider) Initialize(atomicTrie AtomicTrie) { + a.atomicTrie = atomicTrie } -// StateSummaryAtHeight returns the block state summary 
at [height] if valid and available. -func (a *AtomicSyncProvider) StateSummaryAtHeight(height uint64) (block.StateSummary, error) { +// StateSummaryAtBlock returns the block state summary at [block] if valid. +func (a *AtomicSummaryProvider) StateSummaryAtBlock(blk *types.Block) (block.StateSummary, error) { + height := blk.NumberU64() atomicRoot, err := a.atomicTrie.Root(height) if err != nil { return nil, fmt.Errorf("failed to retrieve atomic trie root for height (%d): %w", height, err) @@ -34,15 +34,6 @@ func (a *AtomicSyncProvider) StateSummaryAtHeight(height uint64) (block.StateSum return nil, fmt.Errorf("atomic trie root not found for height (%d)", height) } - blk := a.chain.GetBlockByNumber(height) - if blk == nil { - return nil, fmt.Errorf("block not found for height (%d)", height) - } - - if !a.chain.HasState(blk.Root()) { - return nil, fmt.Errorf("block root does not exist for height (%d), root (%s)", height, blk.Root()) - } - summary, err := NewAtomicSyncSummary(blk.Hash(), height, blk.Root(), atomicRoot) if err != nil { return nil, fmt.Errorf("failed to construct syncable block at height %d: %w", height, err) diff --git a/plugin/evm/atomic/sync/atomic_sync_extender.go b/plugin/evm/atomic/sync/atomic_sync_extender.go index 4d4216c18c..58c8bbed74 100644 --- a/plugin/evm/atomic/sync/atomic_sync_extender.go +++ b/plugin/evm/atomic/sync/atomic_sync_extender.go @@ -42,12 +42,10 @@ type AtomicSyncExtender struct { stateSyncRequestSize uint16 } -func NewAtomicSyncExtender(backend AtomicBackend, atomicTrie AtomicTrie, stateSyncRequestSize uint16) *AtomicSyncExtender { - return &AtomicSyncExtender{ - backend: backend, - atomicTrie: atomicTrie, - stateSyncRequestSize: stateSyncRequestSize, - } +func (a *AtomicSyncExtender) Initialize(backend AtomicBackend, atomicTrie AtomicTrie, stateSyncRequestSize uint16) { + a.backend = backend + a.atomicTrie = atomicTrie + a.stateSyncRequestSize = stateSyncRequestSize } func (a *AtomicSyncExtender) Sync(ctx context.Context, 
client syncclient.LeafClient, verDB *versiondb.Database, syncSummary message.Syncable) error { diff --git a/plugin/evm/atomic/sync/atomic_syncer_test.go b/plugin/evm/atomic/sync/atomic_syncer_test.go index 41984a7c2f..d708921604 100644 --- a/plugin/evm/atomic/sync/atomic_syncer_test.go +++ b/plugin/evm/atomic/sync/atomic_syncer_test.go @@ -58,7 +58,7 @@ func testAtomicSyncer(t *testing.T, serverTrieDB *triedb.Database, targetHeight if err != nil { t.Fatal("could not initialize atomix tx repository", err) } - atomicBackend, err := state.NewAtomicBackend(clientDB, utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, commitInterval) + atomicBackend, err := state.NewAtomicBackend(utils.TestSnowContext().SharedMemory, nil, repo, 0, common.Hash{}, commitInterval) if err != nil { t.Fatal("could not initialize atomic backend", err) } diff --git a/plugin/evm/atomic/tx.go b/plugin/evm/atomic/tx.go index 5c44b61937..75823d6ad2 100644 --- a/plugin/evm/atomic/tx.go +++ b/plugin/evm/atomic/tx.go @@ -138,7 +138,7 @@ type VerifierBackend struct { type BlockFetcher interface { LastAcceptedBlockInternal() snowman.Block - GetBlockInternal(context.Context, ids.ID) (snowman.Block, error) + GetAtomicBlock(context.Context, ids.ID) (AtomicBlockContext, error) } type AtomicBlockContext interface { diff --git a/plugin/evm/atomic/txpool/mempool.go b/plugin/evm/atomic/txpool/mempool.go index df5bb22ec6..253591d3ec 100644 --- a/plugin/evm/atomic/txpool/mempool.go +++ b/plugin/evm/atomic/txpool/mempool.go @@ -85,30 +85,29 @@ type Mempool struct { verify func(tx *atomic.Tx) error } -// NewMempool returns a Mempool with [maxSize] -func NewMempool(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *atomic.Tx) error) (*Mempool, error) { +// Initialize initializes the Mempool with [maxSize] +func (m *Mempool) Initialize(ctx *snow.Context, registerer prometheus.Registerer, maxSize int, verify func(tx *atomic.Tx) error) error { bloom, err := 
gossip.NewBloomFilter(registerer, "atomic_mempool_bloom_filter", config.TxGossipBloomMinTargetElements, config.TxGossipBloomTargetFalsePositiveRate, config.TxGossipBloomResetFalsePositiveRate, ) if err != nil { - return nil, fmt.Errorf("failed to initialize bloom filter: %w", err) - } - - return &Mempool{ - ctx: ctx, - issuedTxs: make(map[ids.ID]*atomic.Tx), - discardedTxs: &cache.LRU[ids.ID, *atomic.Tx]{Size: discardedTxsCacheSize}, - currentTxs: make(map[ids.ID]*atomic.Tx), - Pending: make(chan struct{}, 1), - txHeap: newTxHeap(maxSize), - maxSize: maxSize, - utxoSpenders: make(map[ids.ID]*atomic.Tx), - bloom: bloom, - metrics: newMempoolMetrics(), - verify: verify, - }, nil + return fmt.Errorf("failed to initialize bloom filter: %w", err) + } + + m.ctx = ctx + m.issuedTxs = make(map[ids.ID]*atomic.Tx) + m.discardedTxs = &cache.LRU[ids.ID, *atomic.Tx]{Size: discardedTxsCacheSize} + m.currentTxs = make(map[ids.ID]*atomic.Tx) + m.Pending = make(chan struct{}, 1) + m.txHeap = newTxHeap(maxSize) + m.maxSize = maxSize + m.utxoSpenders = make(map[ids.ID]*atomic.Tx) + m.bloom = bloom + m.metrics = newMempoolMetrics() + m.verify = verify + return nil } // Len returns the number of transactions in the mempool @@ -580,3 +579,7 @@ func (m *Mempool) addPending() { default: } } + +func (m *Mempool) SubscribePendingTxs() <-chan struct{} { + return m.Pending +} diff --git a/plugin/evm/atomic/txpool/mempool_test.go b/plugin/evm/atomic/txpool/mempool_test.go index 2d70b58489..9a1e9f089d 100644 --- a/plugin/evm/atomic/txpool/mempool_test.go +++ b/plugin/evm/atomic/txpool/mempool_test.go @@ -16,7 +16,8 @@ import ( func TestMempoolAddTx(t *testing.T) { require := require.New(t) - m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) + m := &Mempool{} + err := m.Initialize(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) require.NoError(err) txs := make([]*atomic.GossipAtomicTx, 0) @@ -41,7 +42,8 @@ func TestMempoolAddTx(t *testing.T) { // Add should 
return an error if a tx is already known func TestMempoolAdd(t *testing.T) { require := require.New(t) - m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) + m := &Mempool{} + err := m.Initialize(&snow.Context{}, prometheus.NewRegistry(), 5_000, nil) require.NoError(err) tx := &atomic.GossipAtomicTx{ @@ -110,7 +112,8 @@ func TestAtomicMempoolIterate(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - m, err := NewMempool(&snow.Context{}, prometheus.NewRegistry(), 10, nil) + m := &Mempool{} + err := m.Initialize(&snow.Context{}, prometheus.NewRegistry(), 10, nil) require.NoError(err) for _, add := range tt.add { diff --git a/plugin/evm/atomic/vm/api.go b/plugin/evm/atomic/vm/api.go new file mode 100644 index 0000000000..f2ade6c31f --- /dev/null +++ b/plugin/evm/atomic/vm/api.go @@ -0,0 +1,231 @@ +package vm + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/client" + "github.com/ethereum/go-ethereum/log" +) + +const ( + // Max number of addresses that can be passed in as argument to GetUTXOs + maxGetUTXOsAddrs = 1024 +) + +var ( + errNoAddresses = errors.New("no addresses provided") + errNoSourceChain = errors.New("no source chain provided") + errNilTxID = errors.New("nil transaction ID") +) + +// AvaxAPI offers Avalanche network related API methods +type AvaxAPI struct{ vm *VM } + +type VersionReply struct { + Version string `json:"version"` +} + +// ClientVersion returns the version of the VM running +func (service *AvaxAPI) Version(r *http.Request, _ *struct{}, reply *VersionReply) error { + version, err := service.vm.Version(context.Background()) + if err != nil { + 
return err + } + reply.Version = version + return nil +} + +// GetUTXOs gets all utxos for passed in addresses +func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply *api.GetUTXOsReply) error { + log.Info("EVM: GetUTXOs called", "Addresses", args.Addresses) + + if len(args.Addresses) == 0 { + return errNoAddresses + } + if len(args.Addresses) > maxGetUTXOsAddrs { + return fmt.Errorf("number of addresses given, %d, exceeds maximum, %d", len(args.Addresses), maxGetUTXOsAddrs) + } + + if args.SourceChain == "" { + return errNoSourceChain + } + + chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) + if err != nil { + return fmt.Errorf("problem parsing source chainID %q: %w", args.SourceChain, err) + } + sourceChain := chainID + + addrSet := set.Set[ids.ShortID]{} + for _, addrStr := range args.Addresses { + addr, err := service.vm.ParseServiceAddress(addrStr) + if err != nil { + return fmt.Errorf("couldn't parse address %q: %w", addrStr, err) + } + addrSet.Add(addr) + } + + startAddr := ids.ShortEmpty + startUTXO := ids.Empty + if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" { + startAddr, err = service.vm.ParseServiceAddress(args.StartIndex.Address) + if err != nil { + return fmt.Errorf("couldn't parse start index address %q: %w", args.StartIndex.Address, err) + } + startUTXO, err = ids.FromString(args.StartIndex.UTXO) + if err != nil { + return fmt.Errorf("couldn't parse start index utxo: %w", err) + } + } + + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + + utxos, endAddr, endUTXOID, err := service.vm.GetAtomicUTXOs( + sourceChain, + addrSet, + startAddr, + startUTXO, + int(args.Limit), + ) + if err != nil { + return fmt.Errorf("problem retrieving UTXOs: %w", err) + } + + reply.UTXOs = make([]string, len(utxos)) + for i, utxo := range utxos { + b, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) + if err != nil { + return fmt.Errorf("problem marshalling UTXO: %w", err) + } + str, err := 
formatting.Encode(args.Encoding, b) + if err != nil { + return fmt.Errorf("problem encoding utxo: %w", err) + } + reply.UTXOs[i] = str + } + + endAddress, err := service.vm.FormatLocalAddress(endAddr) + if err != nil { + return fmt.Errorf("problem formatting address: %w", err) + } + + reply.EndIndex.Address = endAddress + reply.EndIndex.UTXO = endUTXOID.String() + reply.NumFetched = json.Uint64(len(utxos)) + reply.Encoding = args.Encoding + return nil +} + +func (service *AvaxAPI) IssueTx(r *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { + log.Info("EVM: IssueTx called") + + txBytes, err := formatting.Decode(args.Encoding, args.Tx) + if err != nil { + return fmt.Errorf("problem decoding transaction: %w", err) + } + + tx := &atomic.Tx{} + if _, err := atomic.Codec.Unmarshal(txBytes, tx); err != nil { + return fmt.Errorf("problem parsing transaction: %w", err) + } + if err := tx.Sign(atomic.Codec, nil); err != nil { + return fmt.Errorf("problem initializing transaction: %w", err) + } + + response.TxID = tx.ID() + + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + + if err := service.vm.mempool.AddLocalTx(tx); err != nil { + return err + } + service.vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) + return nil +} + +// GetAtomicTxStatus returns the status of the specified transaction +func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, reply *client.GetAtomicTxStatusReply) error { + log.Info("EVM: GetAtomicTxStatus called", "txID", args.TxID) + + if args.TxID == ids.Empty { + return errNilTxID + } + + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + + _, status, height, _ := service.vm.getAtomicTx(args.TxID) + + reply.Status = status + if status == atomic.Accepted { + // Since chain state updates run asynchronously with VM block acceptance, + // avoid returning [Accepted] until the chain state reaches the block + // containing the atomic tx. 
+ lastAccepted := service.vm.Blockchain().LastAcceptedBlock() + if height > lastAccepted.NumberU64() { + reply.Status = atomic.Processing + return nil + } + + jsonHeight := json.Uint64(height) + reply.BlockHeight = &jsonHeight + } + return nil +} + +type FormattedTx struct { + api.FormattedTx + BlockHeight *json.Uint64 `json:"blockHeight,omitempty"` +} + +// GetAtomicTx returns the specified transaction +func (service *AvaxAPI) GetAtomicTx(r *http.Request, args *api.GetTxArgs, reply *FormattedTx) error { + log.Info("EVM: GetAtomicTx called", "txID", args.TxID) + + if args.TxID == ids.Empty { + return errNilTxID + } + + service.vm.ctx.Lock.Lock() + defer service.vm.ctx.Lock.Unlock() + + tx, status, height, err := service.vm.getAtomicTx(args.TxID) + if err != nil { + return err + } + + if status == atomic.Unknown { + return fmt.Errorf("could not find tx %s", args.TxID) + } + + txBytes, err := formatting.Encode(args.Encoding, tx.SignedBytes()) + if err != nil { + return err + } + reply.Tx = txBytes + reply.Encoding = args.Encoding + if status == atomic.Accepted { + // Since chain state updates run asynchronously with VM block acceptance, + // avoid returning [Accepted] until the chain state reaches the block + // containing the atomic tx. 
+ lastAccepted := service.vm.Blockchain().LastAcceptedBlock() + if height > lastAccepted.NumberU64() { + return nil + } + + jsonHeight := json.Uint64(height) + reply.BlockHeight = &jsonHeight + } + return nil +} diff --git a/plugin/evm/atomic/vm/atomic_leaf_handler.go b/plugin/evm/atomic/vm/atomic_leaf_handler.go new file mode 100644 index 0000000000..e47339d385 --- /dev/null +++ b/plugin/evm/atomic/vm/atomic_leaf_handler.go @@ -0,0 +1,37 @@ +package vm + +import ( + "context" + "errors" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/sync/handlers" + "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/ava-labs/coreth/triedb" +) + +var errUninitialized = errors.New("uninitialized handler") + +type uninitializedHandler struct{} + +func (h *uninitializedHandler) OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { + return nil, errUninitialized +} + +type atomicLeafHandler struct { + handlers.LeafRequestHandler +} + +func NewAtomicLeafHandler() *atomicLeafHandler { + return &atomicLeafHandler{ + LeafRequestHandler: &uninitializedHandler{}, + } +} + +func (a *atomicLeafHandler) Initialize(atomicTrieDB *triedb.Database, trieKeyLength int, networkCodec codec.Manager) { + handlerStats := stats.NewHandlerStats(metrics.Enabled) + a.LeafRequestHandler = handlers.NewLeafsRequestHandler(atomicTrieDB, trieKeyLength, nil, networkCodec, handlerStats) +} diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go new file mode 100644 index 0000000000..18079d2949 --- /dev/null +++ b/plugin/evm/atomic/vm/block_extension.go @@ -0,0 +1,260 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package vm + +import ( + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + + "github.com/ava-labs/avalanchego/database" + safemath "github.com/ava-labs/avalanchego/utils/math" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/extension" +) + +var _ extension.BlockExtension = (*blockExtension)(nil) + +var ( + errNilExtDataGasUsedApricotPhase4 = errors.New("nil extDataGasUsed is invalid after apricotPhase4") + errNilEthBlock = errors.New("nil ethBlock") + errNilExtraData = errors.New("nil extra data") + errMissingUTXOs = errors.New("missing UTXOs") + errEmptyBlock = errors.New("empty block") +) + +type blockExtension struct { + extDataHashes map[common.Hash]common.Hash + vm *VM +} + +func newBlockExtension( + extDataHashes map[common.Hash]common.Hash, + vm *VM, +) *blockExtension { + return &blockExtension{ + extDataHashes: extDataHashes, + // Note: we need VM here to access the atomic backend that + // could be initialized later in the VM. 
+ vm: vm, + } +} + +func (be *blockExtension) InitializeExtraData(ethBlock *types.Block, chainConfig *params.ChainConfig) (interface{}, error) { + isApricotPhase5 := chainConfig.IsApricotPhase5(ethBlock.Time()) + atomicTxs, err := atomic.ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, atomic.Codec) + if err != nil { + return nil, err + } + + return atomicTxs, nil +} + +func (be *blockExtension) SyntacticVerify(b extension.ExtendedBlock, rules params.Rules) error { + ethBlock := b.GetEthBlock() + if ethBlock == nil { + return errNilEthBlock + } + ethHeader := ethBlock.Header() + blockHash := ethBlock.Hash() + + if !rules.IsApricotPhase1 { + if be.extDataHashes != nil { + extData := ethBlock.ExtData() + extDataHash := types.CalcExtDataHash(extData) + // If there is no extra data, check that there is no extra data in the hash map either to ensure we do not + // have a block that is unexpectedly missing extra data. + expectedExtDataHash, ok := be.extDataHashes[blockHash] + if len(extData) == 0 { + if ok { + return fmt.Errorf("found block with unexpected missing extra data (%s, %d), expected extra data hash: %s", blockHash, b.Height(), expectedExtDataHash) + } + } else { + // If there is extra data, check to make sure that the extra data hash matches the expected extra data hash for this + // block + if extDataHash != expectedExtDataHash { + return fmt.Errorf("extra data hash in block (%s, %d): %s, did not match the expected extra data hash: %s", blockHash, b.Height(), extDataHash, expectedExtDataHash) + } + } + } + } + + // Verify the ExtDataHash field + if rules.IsApricotPhase1 { + if hash := types.CalcExtDataHash(ethBlock.ExtData()); ethHeader.ExtDataHash != hash { + return fmt.Errorf("extra data hash mismatch: have %x, want %x", ethHeader.ExtDataHash, hash) + } + } else { + if ethHeader.ExtDataHash != (common.Hash{}) { + return fmt.Errorf( + "expected ExtDataHash to be empty but got %x", + ethHeader.ExtDataHash, + ) + } + } + + // Block must not be empty + txs 
:= ethBlock.Transactions() + atomicTxs, err := getAtomicFromExtra(b) + if err != nil { + return err + } + if len(txs) == 0 && len(atomicTxs) == 0 { + return errEmptyBlock + } + + // If we are in ApricotPhase4, ensure that ExtDataGasUsed is populated correctly. + if rules.IsApricotPhase4 { + // Make sure ExtDataGasUsed is not nil and correct + if ethHeader.ExtDataGasUsed == nil { + return errNilExtDataGasUsedApricotPhase4 + } + if rules.IsApricotPhase5 { + if ethHeader.ExtDataGasUsed.Cmp(params.AtomicGasLimit) == 1 { + return fmt.Errorf("too large extDataGasUsed: %d", ethHeader.ExtDataGasUsed) + } + } else { + if !ethHeader.ExtDataGasUsed.IsUint64() { + return fmt.Errorf("too large extDataGasUsed: %d", ethHeader.ExtDataGasUsed) + } + } + var totalGasUsed uint64 + for _, atomicTx := range atomicTxs { + // We perform this check manually here to avoid the overhead of having to + // reparse the atomicTx in `CalcExtDataGasUsed`. + fixedFee := rules.IsApricotPhase5 // Charge the atomic tx fixed fee as of ApricotPhase5 + gasUsed, err := atomicTx.GasUsed(fixedFee) + if err != nil { + return err + } + totalGasUsed, err = safemath.Add(totalGasUsed, gasUsed) + if err != nil { + return err + } + } + + if ethHeader.ExtDataGasUsed.Cmp(new(big.Int).SetUint64(totalGasUsed)) != 0 { + return fmt.Errorf("invalid extDataGasUsed: have %d, want %d", ethHeader.ExtDataGasUsed, totalGasUsed) + } + } + + // if bootstrapped, verify UTXOs named in atomic txs are present in shared memory + if be.vm.bootstrapped.Get() { + return be.verifyUTXOsPresent(b, atomicTxs) + } + + return nil +} + +func (be *blockExtension) Accept(b extension.ExtendedBlock, acceptedBatch database.Batch) error { + atomicTxs, err := getAtomicFromExtra(b) + if err != nil { + return err + } + for _, tx := range atomicTxs { + // Remove the accepted transaction from the mempool + be.vm.mempool.RemoveTx(tx) + } + + // Update VM state for atomic txs in this block. 
This includes updating the + // atomic tx repo, atomic trie, and shared memory. + atomicState, err := be.vm.atomicBackend.GetVerifiedAtomicState(common.Hash(b.ID())) + if err != nil { + // should never occur since [b] must be verified before calling Accept + return err + } + // Apply any shared memory changes atomically with other pending batched changes + return atomicState.Accept(acceptedBatch) +} + +func (be *blockExtension) Reject(b extension.ExtendedBlock) error { + atomicTxs, err := getAtomicFromExtra(b) + if err != nil { + return err + } + for _, tx := range atomicTxs { + // Re-issue the transaction in the mempool, continue even if it fails + be.vm.mempool.RemoveTx(tx) + if err := be.vm.mempool.AddRemoteTx(tx); err != nil { + log.Debug("Failed to re-issue transaction in rejected block", "txID", tx.ID(), "err", err) + } + } + atomicState, err := be.vm.atomicBackend.GetVerifiedAtomicState(common.Hash(b.ID())) + if err != nil { + // should never occur since [b] must be verified before calling Reject + return err + } + return atomicState.Reject() +} + +func getAtomicFromExtra(b extension.ExtendedBlock) ([]*atomic.Tx, error) { + extraData := b.GetExtraData() + if extraData == nil { + return nil, errNilExtraData + } + + atomicTxs, ok := extraData.([]*atomic.Tx) + if !ok { + return nil, fmt.Errorf("expected extra data to be of type []*atomic.Tx but got %T", extraData) + } + + return atomicTxs, nil +} + +func (be *blockExtension) Cleanup(b extension.ExtendedBlock) { + if atomicState, err := be.vm.atomicBackend.GetVerifiedAtomicState(b.GetEthBlock().Hash()); err == nil { + atomicState.Reject() + } +} + +// verifyUTXOsPresent returns an error if any of the atomic transactions name UTXOs that +// are not present in shared memory. 
+func (be *blockExtension) verifyUTXOsPresent(b extension.ExtendedBlock, atomicTxs []*atomic.Tx) error { + blockHash := common.Hash(b.ID()) + if be.vm.atomicBackend.IsBonus(b.Height(), blockHash) { + log.Info("skipping atomic tx verification on bonus block", "block", blockHash) + return nil + } + + // verify UTXOs named in import txs are present in shared memory. + for _, atomicTx := range atomicTxs { + utx := atomicTx.UnsignedAtomicTx + chainID, requests, err := utx.AtomicOps() + if err != nil { + return err + } + if _, err := be.vm.ctx.SharedMemory.Get(chainID, requests.RemoveRequests); err != nil { + return fmt.Errorf("%w: %s", errMissingUTXOs, err) + } + } + return nil +} + +var _ atomic.AtomicBlockContext = (*atomicBlock)(nil) + +type atomicBlock struct { + extension.ExtendedBlock + atomicTxs []*atomic.Tx +} + +func wrapAtomicBlock(b extension.ExtendedBlock) (*atomicBlock, error) { + txs, err := getAtomicFromExtra(b) + if err != nil { + return nil, err + } + return &atomicBlock{ + ExtendedBlock: b, + atomicTxs: txs, + }, nil +} + +func (ab *atomicBlock) AtomicTxs() []*atomic.Tx { + return ab.atomicTxs +} diff --git a/plugin/evm/atomic/vm/bonus_blocks.go b/plugin/evm/atomic/vm/bonus_blocks.go new file mode 100644 index 0000000000..ea887d6b32 --- /dev/null +++ b/plugin/evm/atomic/vm/bonus_blocks.go @@ -0,0 +1,77 @@ +package vm + +import "github.com/ava-labs/avalanchego/ids" + +// readMainnetBonusBlocks returns maps of bonus block numbers to block IDs. +// Note bonus blocks are indexed in the atomic trie. 
+func readMainnetBonusBlocks() (map[uint64]ids.ID, error) { + mainnetBonusBlocks := map[uint64]string{ + 102972: "Njm9TcLUXRojZk8YhEM6ksvfiPdC1TME4zJvGaDXgzMCyB6oB", + 103105: "BYqLB6xpqy7HsAgP2XNfGE8Ubg1uEzse5mBPTSJH9z5s8pvMa", + 103143: "AfWvJH3rB2fdHuPWQp6qYNCFVT29MooQPRigD88rKKwUDEDhq", + 103183: "2KPW9G5tiNF14tZNfG4SqHuQrtUYVZyxuof37aZ7AnTKrQdsHn", + 103197: "pE93VXY3N5QKfwsEFcM9i59UpPFgeZ8nxpJNaGaDQyDgsscNf", + 103203: "2czmtnBS44VCWNRFUM89h4Fe9m3ZeZVYyh7Pe3FhNqjRNgPXhZ", + 103208: "esx5J962LtYm2aSrskpLai5e4CMMsaS1dsu9iuLGJ3KWgSu2M", + 103209: "DK9NqAJGry1wAo767uuYc1dYXAjUhzwka6vi8d9tNheqzGUTd", + 103259: "i1HoerJ1axognkUKKL58FvF9aLrbZKtv7TdKLkT5kgzoeU1vB", + 103261: "2DpCuBaH94zKKFNY2XTs4GeJcwsEv6qT2DHc59S8tdg97GZpcJ", + 103266: "2ez4CA7w4HHr8SSobHQUAwFgj2giRNjNFUZK9JvrZFa1AuRj6X", + 103287: "2QBNMMFJmhVHaGF45GAPszKyj1gK6ToBERRxYvXtM7yfrdUGPK", + 103339: "2pSjfo7rkFCfZ2CqAxqfw8vqM2CU2nVLHrFZe3rwxz43gkVuGo", + 103346: "2SiSziHHqPjb1qkw7CdGYupokiYpd2b7mMqRiyszurctcA5AKr", + 103350: "2F5tSQbdTfhZxvkxZqdFp7KR3FrJPKEsDLQK7KtPhNXj1EZAh4", + 103358: "2tCe88ur6MLQcVgwE5XxoaHiTGtSrthwKN3SdbHE4kWiQ7MSTV", + 103437: "21o2fVTnzzmtgXqkV1yuQeze7YEQhR5JB31jVVD9oVUnaaV8qm", + 103472: "2nG4exd9eUoAGzELfksmBR8XDCKhohY1uDKRFzEXJG4M8p3qA7", + 103478: "63YLdYXfXc5tY3mwWLaDsbXzQHYmwWVxMP7HKbRh4Du3C2iM1", + 103493: "soPweZ8DGaoUMjrnzjH3V2bypa7ZvvfqBan4UCsMUxMP759gw", + 103514: "2dNkpQF4mooveyUDfBYQTBfsGDV4wkncQPpEw4kHKfSTSTo5x", + 103536: "PJTkRrHvKZ1m4AQdPND1MBpUXpCrGN4DDmXmJQAiUrsxPoLQX", + 103545: "22ck2Z7cC38hmBfX2v3jMWxun8eD8psNaicfYeokS67DxwmPTx", + 103547: "pTf7gfk1ksj7bqMrLyMCij8FBKth1uRqQrtfykMFeXhx5xnrL", + 103554: "9oZh4qyBCcVwSGyDoUzRAuausvPJN3xH6nopKS6bwYzMfLoQ2", + 103555: "MjExz2z1qhwugc1tAyiGxRsCq4GvJwKfyyS29nr4tRVB8ooic", + 103559: "cwJusfmn98TW3DjAbfLRN9utYR24KAQ82qpAXmVSvjHyJZuM2", + 103561: "2YgxGHns7Z2hMMHJsPCgVXuJaL7x1b3gnHbmSCfCdyAcYGr6mx", + 103563: "2AXxT3PSEnaYHNtBTnYrVTf24TtKDWjky9sqoFEhydrGXE9iKH", + 103564: 
"Ry2sfjFfGEnJxRkUGFSyZNn7GR3m4aKAf1scDW2uXSNQB568Y", + 103569: "21Jys8UNURmtckKSV89S2hntEWymJszrLQbdLaNcbXcxDAsQSa", + 103570: "sg6wAwFBsPQiS5Yfyh41cVkCRQbrrXsxXmeNyQ1xkunf2sdyv", + 103575: "z3BgePPpCXq1mRBRvUi28rYYxnEtJizkUEHnDBrcZeVA7MFVk", + 103577: "uK5Ff9iBfDtREpVv9NgCQ1STD1nzLJG3yrfibHG4mGvmybw6f", + 103578: "Qv5v5Ru8ArfnWKB1w6s4G5EYPh7TybHJtF6UsVwAkfvZFoqmj", + 103582: "7KCZKBpxovtX9opb7rMRie9WmW5YbZ8A4HwBBokJ9eSHpZPqx", + 103587: "2AfTQ2FXNj9bkSUQnud9pFXULx6EbF7cbbw6i3ayvc2QNhgxfF", + 103590: "2gTygYckZgFZfN5QQWPaPBD3nabqjidV55mwy1x1Nd4JmJAwaM", + 103591: "2cUPPHy1hspr2nAKpQrrAEisLKkaWSS9iF2wjNFyFRs8vnSkKK", + 103594: "5MptSdP6dBMPSwk9GJjeVe39deZJTRh9i82cgNibjeDffrrTf", + 103597: "2J8z7HNv4nwh82wqRGyEHqQeuw4wJ6mCDCSvUgusBu35asnshK", + 103598: "2i2FP6nJyvhX9FR15qN2D9AVoK5XKgBD2i2AQ7FoSpfowxvQDX", + 103603: "2v3smb35s4GLACsK4Zkd2RcLBLdWA4huqrvq8Y3VP4CVe8kfTM", + 103604: "b7XfDDLgwB12DfL7UTWZoxwBpkLPL5mdHtXngD94Y2RoeWXSh", + 103607: "PgaRk1UAoUvRybhnXsrLq5t6imWhEa6ksNjbN6hWgs4qPrSzm", + 103612: "2oueNTj4dUE2FFtGyPpawnmCCsy6EUQeVHVLZy8NHeQmkAciP4", + 103614: "2YHZ1KymFjiBhpXzgt6HXJhLSt5SV9UQ4tJuUNjfN1nQQdm5zz", + 103617: "amgH2C1s9H3Av7vSW4y7n7TXb9tKyKHENvrDXutgNN6nsejgc", + 103618: "fV8k1U8oQDmfVwK66kAwN73aSsWiWhm8quNpVnKmSznBycV2W", + 103621: "Nzs93kFTvcXanFUp9Y8VQkKYnzmH8xykxVNFJTkdyAEeuxWbP", + 103623: "2rAsBj3emqQa13CV8r5fTtHogs4sXnjvbbXVzcKPi3WmzhpK9D", + 103624: "2JbuExUGKW5mYz5KfXATwq1ibRDimgks9wEdYGNSC6Ttey1R4U", + 103627: "tLLijh7oKfvWT1yk9zRv4FQvuQ5DAiuvb5kHCNN9zh4mqkFMG", + 103628: "dWBsRYRwFrcyi3DPdLoHsL67QkZ5h86hwtVfP94ZBaY18EkmF", + 103629: "XMoEsew2DhSgQaydcJFJUQAQYP8BTNTYbEJZvtbrV2QsX7iE3", + 103630: "2db2wMbVAoCc5EUJrsBYWvNZDekqyY8uNpaaVapdBAQZ5oRaou", + 103633: "2QiHZwLhQ3xLuyyfcdo5yCUfoSqWDvRZox5ECU19HiswfroCGp", + } + + bonusBlockMainnetHeights := make(map[uint64]ids.ID) + for height, blkIDStr := range mainnetBonusBlocks { + blkID, err := ids.FromString(blkIDStr) + if err != nil { + return nil, err + } + bonusBlockMainnetHeights[height] = 
blkID + } + return bonusBlockMainnetHeights, nil +} diff --git a/plugin/evm/export_tx_test.go b/plugin/evm/atomic/vm/export_tx_test.go similarity index 92% rename from plugin/evm/export_tx_test.go rename to plugin/evm/atomic/vm/export_tx_test.go index 643eef391c..22514dba09 100644 --- a/plugin/evm/export_tx_test.go +++ b/plugin/evm/atomic/vm/export_tx_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package evm +package vm import ( "bytes" @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" @@ -35,7 +36,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, Amt: uint64(50000000), OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, + Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, }, }, } @@ -50,14 +51,14 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].Address().Bytes(), + testutils.TestKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) } // Import the funds - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } @@ -92,7 +93,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, t.Fatal(err) } for _, addr := range testShortIDAddrs { - exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, 
initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } @@ -103,7 +104,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, } func TestExportTxEVMStateTransfer(t *testing.T) { - key := testKeys[0] + key := testutils.TestKeys[0] addr := key.Address() ethAddr := key.EthAddress() @@ -373,7 +374,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } @@ -451,9 +452,9 @@ func TestExportTxSemanticVerify(t *testing.T) { parent := vm.LastAcceptedBlockInternal().(*Block) - key := testKeys[0] + key := testutils.TestKeys[0] addr := key.Address() - ethAddr := testEthAddrs[0] + ethAddr := testutils.TestEthAddrs[0] var ( avaxBalance = 10 * units.Avax @@ -543,7 +544,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: false, }, @@ -557,7 +558,7 @@ func TestExportTxSemanticVerify(t *testing.T) { signers: [][]*secp256k1.PrivateKey{ {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -571,7 +572,7 @@ func TestExportTxSemanticVerify(t *testing.T) { signers: [][]*secp256k1.PrivateKey{ {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase5, shouldErr: false, }, @@ -585,7 +586,7 @@ func TestExportTxSemanticVerify(t *testing.T) { signers: [][]*secp256k1.PrivateKey{ {key}, }, - baseFee: 
initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase5, shouldErr: true, }, @@ -601,7 +602,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -617,7 +618,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase5, shouldErr: true, }, @@ -633,7 +634,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase5, shouldErr: true, }, @@ -649,7 +650,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -665,7 +666,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -681,7 +682,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -698,7 +699,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -723,7 +724,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -764,7 +765,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -781,7 +782,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: 
testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -808,7 +809,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -835,7 +836,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -848,7 +849,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -859,7 +860,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -867,11 +868,11 @@ func TestExportTxSemanticVerify(t *testing.T) { name: "too many signatures on credential", tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ - {key, testKeys[1]}, + {key, testutils.TestKeys[1]}, {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -883,7 +884,7 @@ func TestExportTxSemanticVerify(t *testing.T) { {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -891,11 +892,11 @@ func TestExportTxSemanticVerify(t *testing.T) { name: "wrong signature on credential", tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{ - {testKeys[1]}, + {testutils.TestKeys[1]}, {key}, {key}, }, - baseFee: initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -903,7 +904,7 @@ func TestExportTxSemanticVerify(t *testing.T) { name: "no signatures", tx: &atomic.Tx{UnsignedAtomicTx: validExportTx}, signers: [][]*secp256k1.PrivateKey{}, - baseFee: 
initialBaseFee, + baseFee: testutils.InitialBaseFee, rules: apricotRulesPhase3, shouldErr: true, }, @@ -948,9 +949,9 @@ func TestExportTxAccept(t *testing.T) { } }() - key := testKeys[0] + key := testutils.TestKeys[0] addr := key.Address() - ethAddr := testEthAddrs[0] + ethAddr := testutils.TestEthAddrs[0] var ( avaxBalance = 10 * units.Avax @@ -1095,13 +1096,13 @@ func TestExportTxVerify(t *testing.T) { DestinationChain: ctx.XChainID, Ins: []atomic.EVMInput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: exportAmount, AssetID: ctx.AVAXAssetID, Nonce: 0, }, { - Address: testEthAddrs[2], + Address: testutils.TestEthAddrs[2], Amount: exportAmount, AssetID: ctx.AVAXAssetID, Nonce: 0, @@ -1259,7 +1260,7 @@ func TestExportTxVerify(t *testing.T) { tx := *exportTx tx.Ins = []atomic.EVMInput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 0, AssetID: ctx.AVAXAssetID, Nonce: 0, @@ -1296,7 +1297,7 @@ func TestExportTxVerify(t *testing.T) { tx := *exportTx tx.Ins = []atomic.EVMInput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: nonExistentID, Nonce: 0, @@ -1335,7 +1336,7 @@ func TestExportTxVerify(t *testing.T) { tx := *exportTx tx.Ins = []atomic.EVMInput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: nonExistentID, Nonce: 0, @@ -1403,7 +1404,7 @@ func TestExportTxGasCost(t *testing.T) { DestinationChain: xChainID, Ins: []atomic.EVMInput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, @@ -1423,7 +1424,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 1, BaseFee: big.NewInt(1), @@ -1435,7 +1436,7 @@ func TestExportTxGasCost(t *testing.T) { DestinationChain: xChainID, Ins: []atomic.EVMInput{ { - Address: 
testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, @@ -1455,7 +1456,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}, ExpectedGasUsed: 11230, ExpectedFee: 1, BaseFee: big.NewInt(1), @@ -1468,7 +1469,7 @@ func TestExportTxGasCost(t *testing.T) { DestinationChain: xChainID, Ins: []atomic.EVMInput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, @@ -1488,7 +1489,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 30750, BaseFee: big.NewInt(25 * params.GWei), @@ -1500,7 +1501,7 @@ func TestExportTxGasCost(t *testing.T) { DestinationChain: xChainID, Ins: []atomic.EVMInput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, @@ -1520,7 +1521,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 276750, BaseFee: big.NewInt(225 * params.GWei), @@ -1532,19 +1533,19 @@ func TestExportTxGasCost(t *testing.T) { DestinationChain: xChainID, Ins: []atomic.EVMInput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, }, { - Address: testEthAddrs[1], + Address: testutils.TestEthAddrs[1], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, }, { - Address: testEthAddrs[2], + Address: testutils.TestEthAddrs[2], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, @@ -1564,7 +1565,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0], testKeys[0], testKeys[0]}}, + 
Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0], testutils.TestKeys[0], testutils.TestKeys[0]}}, ExpectedGasUsed: 3366, ExpectedFee: 84150, BaseFee: big.NewInt(25 * params.GWei), @@ -1576,19 +1577,19 @@ func TestExportTxGasCost(t *testing.T) { DestinationChain: xChainID, Ins: []atomic.EVMInput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, }, { - Address: testEthAddrs[1], + Address: testutils.TestEthAddrs[1], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, }, { - Address: testEthAddrs[2], + Address: testutils.TestEthAddrs[2], Amount: exportAmount, AssetID: avaxAssetID, Nonce: 0, @@ -1608,7 +1609,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0], testKeys[0], testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0], testutils.TestKeys[0], testutils.TestKeys[0]}}, ExpectedGasUsed: 3366, ExpectedFee: 757350, BaseFee: big.NewInt(225 * params.GWei), @@ -1715,7 +1716,7 @@ func TestNewExportTx(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, + Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, }, }, } @@ -1730,13 +1731,13 @@ func TestNewExportTx(t *testing.T) { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].Address().Bytes(), + testutils.TestKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } @@ -1772,7 +1773,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, 
[]*secp256k1.PrivateKey{testKeys[0]}) + tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } @@ -1822,7 +1823,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - addr := testKeys[0].EthAddress() + addr := testutils.TestKeys[0].EthAddress() if sdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), sdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } @@ -1888,7 +1889,7 @@ func TestNewExportTxMulticoin(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, + Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, }, }, } @@ -1909,7 +1910,7 @@ func TestNewExportTxMulticoin(t *testing.T) { Amt: importAmount2, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, + Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, }, }, } @@ -1925,21 +1926,21 @@ func TestNewExportTxMulticoin(t *testing.T) { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].Address().Bytes(), + testutils.TestKeys[0].Address().Bytes(), }, }, { Key: inputID2[:], Value: utxoBytes2, Traits: [][]byte{ - testKeys[0].Address().Bytes(), + testutils.TestKeys[0].Address().Bytes(), }, }, }}}); err != nil { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } @@ -1970,8 +1971,8 @@ func TestNewExportTxMulticoin(t *testing.T) { parent = vm.LastAcceptedBlockInternal().(*Block) exportAmount := uint64(5000000) - testKeys0Addr := 
testKeys[0].EthAddress() - exportId, err := ids.ToShortID(testKeys0Addr[:]) + testutils.TestKeys0Addr := testutils.TestKeys[0].EthAddress() + exportId, err := ids.ToShortID(testutils.TestKeys0Addr[:]) if err != nil { t.Fatal(err) } @@ -1981,7 +1982,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, vm.currentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err = atomic.NewExportTx(vm.ctx, vm.currentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } @@ -2022,7 +2023,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - addr := testKeys[0].EthAddress() + addr := testutils.TestKeys[0].EthAddress() if stdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { t.Fatalf("address balance %s equal %s not %s", addr.String(), stdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) } diff --git a/plugin/evm/ext_data_hashes.go b/plugin/evm/atomic/vm/ext_data_hashes.go similarity index 98% rename from plugin/evm/ext_data_hashes.go rename to plugin/evm/atomic/vm/ext_data_hashes.go index 7648c3bcdb..82748c24c0 100644 --- a/plugin/evm/ext_data_hashes.go +++ b/plugin/evm/atomic/vm/ext_data_hashes.go @@ -1,4 +1,4 @@ -package evm +package vm import ( _ "embed" diff --git a/plugin/evm/formatting.go b/plugin/evm/atomic/vm/formatting.go similarity index 69% rename from plugin/evm/formatting.go rename to plugin/evm/atomic/vm/formatting.go index feeab134b7..e1914a7299 100644 --- a/plugin/evm/formatting.go +++ b/plugin/evm/atomic/vm/formatting.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package vm import ( "fmt" @@ -50,3 +50,29 @@ func (vm *VM) FormatAddress(chainID ids.ID, addr ids.ShortID) (string, error) { hrp := constants.GetHRP(vm.ctx.NetworkID) return address.Format(chainIDAlias, hrp, addr.Bytes()) } + +// ParseAddress takes in an address and produces the ID of the chain it's for +// the ID of the address +func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { + chainIDAlias, hrp, addrBytes, err := address.Parse(addrStr) + if err != nil { + return ids.ID{}, ids.ShortID{}, err + } + + chainID, err := vm.ctx.BCLookup.Lookup(chainIDAlias) + if err != nil { + return ids.ID{}, ids.ShortID{}, err + } + + expectedHRP := constants.GetHRP(vm.ctx.NetworkID) + if hrp != expectedHRP { + return ids.ID{}, ids.ShortID{}, fmt.Errorf("expected hrp %q but got %q", + expectedHRP, hrp) + } + + addr, err := ids.ToShortID(addrBytes) + if err != nil { + return ids.ID{}, ids.ShortID{}, err + } + return chainID, addr, nil +} diff --git a/plugin/evm/fuji_ext_data_hashes.json b/plugin/evm/atomic/vm/fuji_ext_data_hashes.json similarity index 100% rename from plugin/evm/fuji_ext_data_hashes.json rename to plugin/evm/atomic/vm/fuji_ext_data_hashes.json diff --git a/plugin/evm/mainnet_ext_data_hashes.json b/plugin/evm/atomic/vm/mainnet_ext_data_hashes.json similarity index 100% rename from plugin/evm/mainnet_ext_data_hashes.json rename to plugin/evm/atomic/vm/mainnet_ext_data_hashes.json diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index 7212913ca4..b06bf4c598 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -2,49 +2,99 @@ package vm import ( "context" + "errors" "fmt" + "math/big" + "net/http" + "sync" + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" avalanchedatabase "github.com/ava-labs/avalanchego/database" - avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" - 
"github.com/ava-labs/coreth/plugin/evm/atomic/sync" - "github.com/ava-labs/coreth/plugin/evm/extension" - "github.com/ava-labs/coreth/plugin/evm/message" - + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + avalanchegossip "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + avalancheutils "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/extension" + atomicstate "github.com/ava-labs/coreth/plugin/evm/atomic/state" + atomicsync "github.com/ava-labs/coreth/plugin/evm/atomic/sync" + "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" + "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/gossip" + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/utils" + "github.com/ava-labs/coreth/vmerrs" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" ) var ( - _ secp256k1fx.VM = (*VM)(nil) - _ block.ChainVM = (*VM)(nil) - _ block.BuildBlockWithContextChainVM = (*VM)(nil) - _ block.StateSyncableVM = (*VM)(nil) + _ secp256k1fx.VM = &VM{} + _ 
block.ChainVM = &VM{} + _ block.BuildBlockWithContextChainVM = &VM{} + _ block.StateSyncableVM = &VM{} ) -type InnerVM interface { - avalanchecommon.VM - secp256k1fx.VM - block.ChainVM - block.BuildBlockWithContextChainVM - block.StateSyncableVM -} +const ( + secpCacheSize = 1024 + defaultMempoolSize = 4096 + targetAtomicTxsSize = 40 * units.KiB + maxUTXOsToFetch = 1024 + atomicTxGossipNamespace = "atomic_tx_gossip" + avaxEndpoint = "/avax" +) type VM struct { - InnerVM -} + // TODO: decide if we want to directly import the evm package and VM struct + extension.InnerVM -func NewAtomicExtensionConfig() (extension.ExtensionConfig, error) { - codec, err := message.NewCodec(sync.AtomicSyncSummary{}) - if err != nil { - return extension.ExtensionConfig{}, fmt.Errorf("failed to create codec manager: %w", err) - } - return extension.ExtensionConfig{ - NetworkCodec: codec, - }, nil + secpCache secp256k1.RecoverCache + baseCodec codec.Registry + mempool *txpool.Mempool + fx secp256k1fx.Fx + ctx *snow.Context + + bootstrapped *avalancheutils.Atomic[bool] + + // [atomicTxRepository] maintains two indexes on accepted atomic txs. 
+ // - txID to accepted atomic tx + // - block height to list of atomic txs accepted on block at that height + atomicTxRepository *atomicstate.AtomicTxRepository + // [atomicBackend] abstracts verification and processing of atomic transactions + atomicBackend *atomicstate.AtomicBackend + + atomicTxGossipHandler p2p.Handler + atomicTxPushGossiper *avalanchegossip.PushGossiper[*atomic.GossipAtomicTx] + atomicTxPullGossiper avalanchegossip.Gossiper + + // [cancel] may be nil until [snow.NormalOp] starts + cancel context.CancelFunc + shutdownWg sync.WaitGroup + + clock mockable.Clock } -func WrapVM(vm InnerVM) *VM { +func WrapVM(vm extension.InnerVM) *VM { return &VM{InnerVM: vm} } @@ -60,7 +110,54 @@ func (vm *VM) Initialize( fxs []*avalanchecommon.Fx, appSender avalanchecommon.AppSender, ) error { - return vm.InnerVM.Initialize( + innerVM := vm.InnerVM + + vm.ctx = chainCtx + + var extDataHashes map[common.Hash]common.Hash + // Set the chain config for mainnet/fuji chain IDs + switch chainCtx.NetworkID { + case constants.MainnetID: + extDataHashes = mainnetExtDataHashes + case constants.FujiID: + extDataHashes = fujiExtDataHashes + } + // Free the memory of the extDataHash map + fujiExtDataHashes = nil + mainnetExtDataHashes = nil + + codec, err := message.NewCodec(atomicsync.AtomicSyncSummary{}) + if err != nil { + return fmt.Errorf("failed to create codec manager: %w", err) + } + + blockExtension := newBlockExtension(extDataHashes, vm) + syncExtender := &atomicsync.AtomicSyncExtender{} + syncProvider := &atomicsync.AtomicSummaryProvider{} + leafHandler := NewAtomicLeafHandler() + atomicLeafTypeConfig := &extension.LeafRequestConfig{ + LeafType: atomicsync.AtomicTrieNode, + MetricName: "sync_atomic_trie_leaves", + Handler: leafHandler, + } + + vm.mempool = &txpool.Mempool{} + + extensionConfig := &extension.Config{ + NetworkCodec: codec, + ConsensusCallbacks: vm.createConsensusCallbacks(), + BlockExtension: blockExtension, + SyncableParser: 
atomicsync.NewAtomicSyncSummaryParser(), + SyncExtender: syncExtender, + SyncSummaryProvider: syncProvider, + SyncLeafType: atomicLeafTypeConfig, + ExtraMempool: vm.mempool, + } + if err := innerVM.SetExtensionConfig(extensionConfig); err != nil { + return fmt.Errorf("failed to set extension config: %w", err) + } + + innerVM.Initialize( ctx, chainCtx, db, @@ -71,4 +168,638 @@ func (vm *VM) Initialize( fxs, appSender, ) + + err = vm.mempool.Initialize(chainCtx, innerVM.MetricRegistry(), defaultMempoolSize, vm.verifyTxAtTip) + if err != nil { + return fmt.Errorf("failed to initialize mempool: %w", err) + } + + // initialize bonus blocks on mainnet + var ( + bonusBlockHeights map[uint64]ids.ID + ) + if vm.ctx.NetworkID == constants.MainnetID { + bonusBlockHeights, err = readMainnetBonusBlocks() + if err != nil { + return fmt.Errorf("failed to read mainnet bonus blocks: %w", err) + } + } + + // initialize atomic repository + lastAcceptedHash, lastAcceptedHeight, err := vm.InnerVM.ReadLastAccepted() + if err != nil { + return fmt.Errorf("failed to read last accepted block: %w", err) + } + vm.atomicTxRepository, err = atomicstate.NewAtomicTxRepository(vm.InnerVM.VersionDB(), codec, lastAcceptedHeight) + if err != nil { + return fmt.Errorf("failed to create atomic repository: %w", err) + } + vm.atomicBackend, err = atomicstate.NewAtomicBackend( + vm.ctx.SharedMemory, bonusBlockHeights, + vm.atomicTxRepository, lastAcceptedHeight, lastAcceptedHash, + vm.InnerVM.Config().CommitInterval, + ) + if err != nil { + return fmt.Errorf("failed to create atomic backend: %w", err) + } + + // Atomic backend is available now, we can initialize structs that depend on it + syncProvider.Initialize(vm.atomicBackend.AtomicTrie()) + syncExtender.Initialize(vm.atomicBackend, vm.atomicBackend.AtomicTrie(), vm.InnerVM.Config().StateSyncRequestSize) + leafHandler.Initialize(vm.atomicBackend.AtomicTrie().TrieDB(), atomicstate.AtomicTrieKeyLength, codec) + vm.secpCache = secp256k1.RecoverCache{ + 
LRU: cache.LRU[ids.ID, *secp256k1.PublicKey]{ + Size: secpCacheSize, + }, + } + + // so [vm.baseCodec] is a dummy codec use to fulfill the secp256k1fx VM + // interface. The fx will register all of its types, which can be safely + // ignored by the VM's codec. + vm.baseCodec = linearcodec.NewDefault() + + if err := vm.fx.Initialize(vm); err != nil { + return err + } + return nil +} + +func (vm *VM) SetState(ctx context.Context, state snow.State) error { + switch state { + case snow.StateSyncing: + vm.bootstrapped.Set(false) + case snow.Bootstrapping: + if err := vm.onBootstrapStarted(); err != nil { + return err + } + case snow.NormalOp: + if err := vm.onNormalOperationsStarted(); err != nil { + return err + } + default: + return snow.ErrUnknownState + } + + return vm.InnerVM.SetState(ctx, state) +} + +// onBootstrapStarted marks this VM as bootstrapping +func (vm *VM) onBootstrapStarted() error { + vm.bootstrapped.Set(false) + + return vm.fx.Bootstrapping() +} + +// onNormalOperationsStarted marks this VM as bootstrapped +func (vm *VM) onNormalOperationsStarted() error { + if vm.bootstrapped.Get() { + return nil + } + vm.bootstrapped.Set(true) + if err := vm.fx.Bootstrapped(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(context.TODO()) + vm.cancel = cancel + + atomicTxGossipMarshaller := atomic.GossipAtomicTxMarshaller{} + atomicTxGossipClient := vm.InnerVM.NewClient(p2p.AtomicTxGossipHandlerID, p2p.WithValidatorSampling(vm.InnerVM.Validators())) + atomicTxGossipMetrics, err := avalanchegossip.NewMetrics(vm.InnerVM.MetricRegistry(), atomicTxGossipNamespace) + if err != nil { + return fmt.Errorf("failed to initialize atomic tx gossip metrics: %w", err) + } + + pushGossipParams := avalanchegossip.BranchingFactor{ + StakePercentage: vm.InnerVM.Config().PushGossipPercentStake, + Validators: vm.InnerVM.Config().PushGossipNumValidators, + Peers: vm.InnerVM.Config().PushGossipNumPeers, + } + pushRegossipParams := avalanchegossip.BranchingFactor{ + 
Validators: vm.InnerVM.Config().PushRegossipNumValidators, + Peers: vm.InnerVM.Config().PushRegossipNumPeers, + } + + if vm.atomicTxPushGossiper == nil { + vm.atomicTxPushGossiper, err = avalanchegossip.NewPushGossiper[*atomic.GossipAtomicTx]( + atomicTxGossipMarshaller, + vm.mempool, + vm.InnerVM.Validators(), + atomicTxGossipClient, + atomicTxGossipMetrics, + pushGossipParams, + pushRegossipParams, + config.PushGossipDiscardedElements, + config.TxGossipTargetMessageSize, + vm.InnerVM.Config().RegossipFrequency.Duration, + ) + if err != nil { + return fmt.Errorf("failed to initialize atomic tx push gossiper: %w", err) + } + } + + if vm.atomicTxGossipHandler == nil { + vm.atomicTxGossipHandler = gossip.NewTxGossipHandler[*atomic.GossipAtomicTx]( + vm.ctx.Log, + atomicTxGossipMarshaller, + vm.mempool, + atomicTxGossipMetrics, + config.TxGossipTargetMessageSize, + config.TxGossipThrottlingPeriod, + config.TxGossipThrottlingLimit, + vm.InnerVM.Validators(), + ) + } + + if err := vm.InnerVM.AddHandler(p2p.AtomicTxGossipHandlerID, vm.atomicTxGossipHandler); err != nil { + return fmt.Errorf("failed to add atomic tx gossip handler: %w", err) + } + + if vm.atomicTxPullGossiper == nil { + atomicTxPullGossiper := avalanchegossip.NewPullGossiper[*atomic.GossipAtomicTx]( + vm.ctx.Log, + atomicTxGossipMarshaller, + vm.mempool, + atomicTxGossipClient, + atomicTxGossipMetrics, + config.TxGossipPollSize, + ) + + vm.atomicTxPullGossiper = &avalanchegossip.ValidatorGossiper{ + Gossiper: atomicTxPullGossiper, + NodeID: vm.ctx.NodeID, + Validators: vm.InnerVM.Validators(), + } + } + + vm.shutdownWg.Add(2) + go func() { + avalanchegossip.Every(ctx, vm.ctx.Log, vm.atomicTxPushGossiper, vm.InnerVM.Config().PushGossipFrequency.Duration) + vm.shutdownWg.Done() + }() + go func() { + avalanchegossip.Every(ctx, vm.ctx.Log, vm.atomicTxPullGossiper, vm.InnerVM.Config().PullGossipFrequency.Duration) + vm.shutdownWg.Done() + }() + return nil +} + +func (vm *VM) Shutdown(context.Context) error { + 
if vm.ctx == nil { + return nil + } + if vm.cancel != nil { + vm.cancel() + } + if err := vm.InnerVM.Shutdown(context.Background()); err != nil { + log.Error("failed to shutdown inner VM", "err", err) + } + vm.shutdownWg.Wait() + return nil +} + +func (vm *VM) CreateHandlers(ctx context.Context) (map[string]http.Handler, error) { + apis, err := vm.InnerVM.CreateHandlers(ctx) + if err != nil { + return nil, err + } + avaxAPI, err := utils.NewHandler("avax", &AvaxAPI{vm}) + if err != nil { + return nil, fmt.Errorf("failed to register service for AVAX API due to %w", err) + } + log.Info("AVAX API enabled") + apis[avaxEndpoint] = avaxAPI + return apis, nil +} + +// verifyTxAtTip verifies that [tx] is valid to be issued on top of the currently preferred block +func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { + if txByteLen := len(tx.SignedBytes()); txByteLen > targetAtomicTxsSize { + return fmt.Errorf("tx size (%d) exceeds total atomic txs size target (%d)", txByteLen, targetAtomicTxsSize) + } + gasUsed, err := tx.GasUsed(true) + if err != nil { + return err + } + if new(big.Int).SetUint64(gasUsed).Cmp(params.AtomicGasLimit) > 0 { + return fmt.Errorf("tx gas usage (%d) exceeds atomic gas limit (%d)", gasUsed, params.AtomicGasLimit.Uint64()) + } + + // Note: we fetch the current block and then the state at that block instead of the current state directly + // since we need the header of the current block below. 
+ preferredBlock := vm.InnerVM.Blockchain().CurrentBlock() + preferredState, err := vm.InnerVM.Blockchain().StateAt(preferredBlock.Root) + if err != nil { + return fmt.Errorf("failed to retrieve block state at tip while verifying atomic tx: %w", err) + } + chainConfig := vm.InnerVM.Blockchain().Config() + rules := vm.InnerVM.Blockchain().Config().Rules(preferredBlock.Number, preferredBlock.Time) + parentHeader := preferredBlock + var nextBaseFee *big.Int + timestamp := uint64(vm.clock.Time().Unix()) + _, nextBaseFee, err = dummy.EstimateNextBaseFee(chainConfig, parentHeader, timestamp) + if err != nil { + // Return extremely detailed error since CalcBaseFee should never encounter an issue here + return fmt.Errorf("failed to calculate base fee with parent timestamp (%d), parent ExtraData: (0x%x), and current timestamp (%d): %w", parentHeader.Time, parentHeader.Extra, timestamp, err) + } + + // We don’t need to revert the state here in case verifyTx errors, because + // [preferredState] is thrown away either way. + return vm.verifyTx(tx, parentHeader.Hash(), nextBaseFee, preferredState, rules) +} + +// verifyTx verifies that [tx] is valid to be issued into a block with parent block [parentHash] +// and validated at [state] using [rules] as the current rule set. +// Note: verifyTx may modify [state]. If [state] needs to be properly maintained, the caller is responsible +// for reverting to the correct snapshot after calling this function. If this function is called with a +// throwaway state, then this is not necessary. 
+func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { + parent, err := vm.GetAtomicBlock(context.TODO(), ids.ID(parentHash)) + if err != nil { + return fmt.Errorf("failed to get parent block: %w", err) + } + atomicBackend := &atomic.VerifierBackend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: rules, + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } + if err := tx.UnsignedAtomicTx.SemanticVerify(atomicBackend, tx, parent, baseFee); err != nil { + return err + } + return tx.UnsignedAtomicTx.EVMStateTransfer(vm.ctx, state) +} + +func (vm *VM) GetAtomicBlock(ctx context.Context, id ids.ID) (atomic.AtomicBlockContext, error) { + extendedBlock, err := vm.InnerVM.GetBlockExtended(ctx, id) + if err != nil { + return nil, err + } + + return wrapAtomicBlock(extendedBlock) +} + +// verifyTxs verifies that [txs] are valid to be issued into a block with parent block [parentHash] +// using [rules] as the current rule set. +func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules params.Rules) error { + // Ensure that the parent was verified and inserted correctly. + if !vm.InnerVM.Blockchain().HasBlock(parentHash, height-1) { + return atomic.ErrRejectedParent + } + + ancestorID := ids.ID(parentHash) + // If the ancestor is unknown, then the parent failed verification when + // it was called. + // If the ancestor is rejected, then this block shouldn't be inserted + // into the canonical chain because the parent will be missing. + ancestor, err := vm.GetAtomicBlock(context.TODO(), ancestorID) + if err != nil { + return atomic.ErrRejectedParent + } + + // Ensure each tx in [txs] doesn't conflict with any other atomic tx in + // a processing ancestor block. 
+ inputs := set.Set[ids.ID]{} + atomicBackend := &atomic.VerifierBackend{ + Ctx: vm.ctx, + Fx: &vm.fx, + Rules: rules, + Bootstrapped: vm.bootstrapped.Get(), + BlockFetcher: vm, + SecpCache: &vm.secpCache, + } + for _, atomicTx := range txs { + utx := atomicTx.UnsignedAtomicTx + if err := utx.SemanticVerify(atomicBackend, atomicTx, ancestor, baseFee); err != nil { + return fmt.Errorf("invalid block due to failed semanatic verify: %w at height %d", err, height) + } + txInputs := utx.InputUTXOs() + if inputs.Overlaps(txInputs) { + return atomic.ErrConflictingAtomicInputs + } + inputs.Union(txInputs) + } + return nil +} + +// CodecRegistry implements the secp256k1fx interface +func (vm *VM) CodecRegistry() codec.Registry { return vm.baseCodec } + +// Clock implements the secp256k1fx interface +func (vm *VM) Clock() *mockable.Clock { return &vm.clock } + +// Logger implements the secp256k1fx interface +func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } + +func (vm *VM) createConsensusCallbacks() *dummy.ConsensusCallbacks { + return &dummy.ConsensusCallbacks{ + OnFinalizeAndAssemble: vm.onFinalizeAndAssemble, + OnExtraStateChange: vm.onExtraStateChange, + } +} + +func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { + for { + tx, exists := vm.mempool.NextTx() + if !exists { + break + } + // Take a snapshot of [state] before calling verifyTx so that if the transaction fails verification + // we can revert to [snapshot]. + // Note: snapshot is taken inside the loop because you cannot revert to the same snapshot more than + // once. + snapshot := state.Snapshot() + rules := vm.InnerVM.Blockchain().Config().Rules(header.Number, header.Time) + if err := vm.verifyTx(tx, header.ParentHash, header.BaseFee, state, rules); err != nil { + // Discard the transaction from the mempool on failed verification. 
+ log.Debug("discarding tx from mempool on failed verification", "txID", tx.ID(), "err", err) + vm.mempool.DiscardCurrentTx(tx.ID()) + state.RevertToSnapshot(snapshot) + continue + } + + atomicTxBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, tx) + if err != nil { + // Discard the transaction from the mempool and error if the transaction + // cannot be marshalled. This should never happen. + log.Debug("discarding tx due to unmarshal err", "txID", tx.ID(), "err", err) + vm.mempool.DiscardCurrentTx(tx.ID()) + return nil, nil, nil, fmt.Errorf("failed to marshal atomic transaction %s due to %w", tx.ID(), err) + } + var contribution, gasUsed *big.Int + if rules.IsApricotPhase4 { + contribution, gasUsed, err = tx.BlockFeeContribution(rules.IsApricotPhase5, vm.ctx.AVAXAssetID, header.BaseFee) + if err != nil { + return nil, nil, nil, err + } + } + return atomicTxBytes, contribution, gasUsed, nil + } + + if len(txs) == 0 { + // this could happen due to the async logic of geth tx pool + return nil, nil, nil, errEmptyBlock + } + + return nil, nil, nil, nil +} + +// assumes that we are in at least Apricot Phase 5. +func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { + var ( + batchAtomicTxs []*atomic.Tx + batchAtomicUTXOs set.Set[ids.ID] + batchContribution *big.Int = new(big.Int).Set(common.Big0) + batchGasUsed *big.Int = new(big.Int).Set(common.Big0) + rules = vm.InnerVM.Blockchain().Config().Rules(header.Number, header.Time) + size int + ) + + for { + tx, exists := vm.mempool.NextTx() + if !exists { + break + } + + // Ensure that adding [tx] to the block will not exceed the block size soft limit. 
+ txSize := len(tx.SignedBytes()) + if size+txSize > targetAtomicTxsSize { + vm.mempool.CancelCurrentTx(tx.ID()) + break + } + + var ( + txGasUsed, txContribution *big.Int + err error + ) + + // Note: we do not need to check if we are in at least ApricotPhase4 here because + // we assume that this function will only be called when the block is in at least + // ApricotPhase5. + txContribution, txGasUsed, err = tx.BlockFeeContribution(true, vm.ctx.AVAXAssetID, header.BaseFee) + if err != nil { + return nil, nil, nil, err + } + // ensure [gasUsed] + [batchGasUsed] doesnt exceed the [atomicGasLimit] + if totalGasUsed := new(big.Int).Add(batchGasUsed, txGasUsed); totalGasUsed.Cmp(params.AtomicGasLimit) > 0 { + // Send [tx] back to the mempool's tx heap. + vm.mempool.CancelCurrentTx(tx.ID()) + break + } + + if batchAtomicUTXOs.Overlaps(tx.InputUTXOs()) { + // Discard the transaction from the mempool since it will fail verification + // after this block has been accepted. + // Note: if the proposed block is not accepted, the transaction may still be + // valid, but we discard it early here based on the assumption that the proposed + // block will most likely be accepted. + // Discard the transaction from the mempool on failed verification. + log.Debug("discarding tx due to overlapping input utxos", "txID", tx.ID()) + vm.mempool.DiscardCurrentTx(tx.ID()) + continue + } + + snapshot := state.Snapshot() + if err := vm.verifyTx(tx, header.ParentHash, header.BaseFee, state, rules); err != nil { + // Discard the transaction from the mempool and reset the state to [snapshot] + // if it fails verification here. + // Note: prior to this point, we have not modified [state] so there is no need to + // revert to a snapshot if we discard the transaction prior to this point. 
+ log.Debug("discarding tx from mempool due to failed verification", "txID", tx.ID(), "err", err) + vm.mempool.DiscardCurrentTx(tx.ID()) + state.RevertToSnapshot(snapshot) + continue + } + + batchAtomicTxs = append(batchAtomicTxs, tx) + batchAtomicUTXOs.Union(tx.InputUTXOs()) + // Add the [txGasUsed] to the [batchGasUsed] when the [tx] has passed verification + batchGasUsed.Add(batchGasUsed, txGasUsed) + batchContribution.Add(batchContribution, txContribution) + size += txSize + } + + // If there is a non-zero number of transactions, marshal them and return the byte slice + // for the block's extra data along with the contribution and gas used. + if len(batchAtomicTxs) > 0 { + atomicTxBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, batchAtomicTxs) + if err != nil { + // If we fail to marshal the batch of atomic transactions for any reason, + // discard the entire set of current transactions. + log.Debug("discarding txs due to error marshaling atomic transactions", "err", err) + vm.mempool.DiscardCurrentTxs() + return nil, nil, nil, fmt.Errorf("failed to marshal batch of atomic transactions due to %w", err) + } + return atomicTxBytes, batchContribution, batchGasUsed, nil + } + + // If there are no regular transactions and there were also no atomic transactions to be included, + // then the block is empty and should be considered invalid. + if len(txs) == 0 { + // this could happen due to the async logic of geth tx pool + return nil, nil, nil, errEmptyBlock + } + + // If there are no atomic transactions, but there is a non-zero number of regular transactions, then + // we return a nil slice with no contribution from the atomic transactions and a nil error. 
+ return nil, nil, nil, nil +} + +func (vm *VM) onFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { + if !vm.InnerVM.Blockchain().Config().IsApricotPhase5(header.Time) { + return vm.preBatchOnFinalizeAndAssemble(header, state, txs) + } + return vm.postBatchOnFinalizeAndAssemble(header, state, txs) +} + +func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big.Int, *big.Int, error) { + var ( + batchContribution *big.Int = big.NewInt(0) + batchGasUsed *big.Int = big.NewInt(0) + header = block.Header() + rules = vm.InnerVM.Blockchain().Config().Rules(header.Number, header.Time) + ) + + txs, err := atomic.ExtractAtomicTxs(block.ExtData(), rules.IsApricotPhase5, atomic.Codec) + if err != nil { + return nil, nil, err + } + + // If [atomicBackend] is nil, the VM is still initializing and is reprocessing accepted blocks. + if vm.atomicBackend != nil { + if vm.atomicBackend.IsBonus(block.NumberU64(), block.Hash()) { + log.Info("skipping atomic tx verification on bonus block", "block", block.Hash()) + } else { + // Verify [txs] do not conflict with themselves or ancestor blocks. + if err := vm.verifyTxs(txs, block.ParentHash(), block.BaseFee(), block.NumberU64(), rules); err != nil { + return nil, nil, err + } + } + // Update the atomic backend with [txs] from this block. + // + // Note: The atomic trie canonically contains the duplicate operations + // from any bonus blocks. + _, err := vm.atomicBackend.InsertTxs(block.Hash(), block.NumberU64(), block.ParentHash(), txs) + if err != nil { + return nil, nil, err + } + } + + // If there are no transactions, we can return early. 
+ if len(txs) == 0 { + return nil, nil, nil + } + + for _, tx := range txs { + if err := tx.UnsignedAtomicTx.EVMStateTransfer(vm.ctx, state); err != nil { + return nil, nil, err + } + // If ApricotPhase4 is enabled, calculate the block fee contribution + if rules.IsApricotPhase4 { + contribution, gasUsed, err := tx.BlockFeeContribution(rules.IsApricotPhase5, vm.ctx.AVAXAssetID, block.BaseFee()) + if err != nil { + return nil, nil, err + } + + batchContribution.Add(batchContribution, contribution) + batchGasUsed.Add(batchGasUsed, gasUsed) + } + + // If ApricotPhase5 is enabled, enforce that the atomic gas used does not exceed the + // atomic gas limit. + if rules.IsApricotPhase5 { + // Ensure that [tx] does not push [block] above the atomic gas limit. + if batchGasUsed.Cmp(params.AtomicGasLimit) == 1 { + return nil, nil, fmt.Errorf("atomic gas used (%d) by block (%s), exceeds atomic gas limit (%d)", batchGasUsed, block.Hash().Hex(), params.AtomicGasLimit) + } + } + } + return batchContribution, batchGasUsed, nil +} + +// getAtomicTx returns the requested transaction, status, and height. +// If the status is Unknown, then the returned transaction will be nil. +func (vm *VM) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) { + if tx, height, err := vm.atomicTxRepository.GetByTxID(txID); err == nil { + return tx, atomic.Accepted, height, nil + } else if err != avalanchedatabase.ErrNotFound { + return nil, atomic.Unknown, 0, err + } + tx, dropped, found := vm.mempool.GetTx(txID) + switch { + case found && dropped: + return tx, atomic.Dropped, 0, nil + case found: + return tx, atomic.Processing, 0, nil + default: + return nil, atomic.Unknown, 0, nil + } +} + +// GetAtomicUTXOs returns the utxos that at least one of the provided addresses is +// referenced in. 
+func (vm *VM) GetAtomicUTXOs( + chainID ids.ID, + addrs set.Set[ids.ShortID], + startAddr ids.ShortID, + startUTXOID ids.ID, + limit int, +) ([]*avax.UTXO, ids.ShortID, ids.ID, error) { + if limit <= 0 || limit > maxUTXOsToFetch { + limit = maxUTXOsToFetch + } + + return avax.GetAtomicUTXOs( + vm.ctx.SharedMemory, + atomic.Codec, + chainID, + addrs, + startAddr, + startUTXOID, + limit, + ) +} + +func (vm *VM) newImportTx( + chainID ids.ID, // chain to import from + to common.Address, // Address of recipient + baseFee *big.Int, // fee to use post-AP3 + keys []*secp256k1.PrivateKey, // Keys to import the funds +) (*atomic.Tx, error) { + kc := secp256k1fx.NewKeychain() + for _, key := range keys { + kc.Add(key) + } + + atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1) + if err != nil { + return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) + } + + currentHeader := vm.InnerVM.Blockchain().CurrentHeader() + currentRules := vm.InnerVM.Blockchain().Config().Rules(currentHeader.Number, currentHeader.Time) + return atomic.NewImportTx(vm.ctx, currentRules, vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) +} + +func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { + return vm.BuildBlockWithContext(ctx, nil) +} + +func (vm *VM) BuildBlockWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) (snowman.Block, error) { + blk, err := vm.InnerVM.BuildBlockWithContext(ctx, proposerVMBlockCtx) + + // Handle errors and signal the mempool to take appropriate action + switch { + case errors.Is(err, vmerrs.ErrGenerateBlockFailed), errors.Is(err, vmerrs.ErrBlockVerificationFailed): + vm.mempool.CancelCurrentTxs() + case errors.Is(err, vmerrs.ErrMakeNewBlockFailed): + vm.mempool.DiscardCurrentTxs() + case err != nil: + // Marks the current transactions from the mempool as being successfully issued + // into a block. 
+ vm.mempool.IssueCurrentTxs() + } + return blk, err } diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 80bd07de7b..98ecac21e5 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -10,7 +10,6 @@ import ( "fmt" "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -18,7 +17,6 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/precompile/precompileconfig" "github.com/ava-labs/coreth/predicate" @@ -32,114 +30,20 @@ var ( _ block.WithVerifyContext = (*Block)(nil) ) -var errMissingUTXOs = errors.New("missing UTXOs") - -// readMainnetBonusBlocks returns maps of bonus block numbers to block IDs. -// Note bonus blocks are indexed in the atomic trie. -func readMainnetBonusBlocks() (map[uint64]ids.ID, error) { - mainnetBonusBlocks := map[uint64]string{ - 102972: "Njm9TcLUXRojZk8YhEM6ksvfiPdC1TME4zJvGaDXgzMCyB6oB", - 103105: "BYqLB6xpqy7HsAgP2XNfGE8Ubg1uEzse5mBPTSJH9z5s8pvMa", - 103143: "AfWvJH3rB2fdHuPWQp6qYNCFVT29MooQPRigD88rKKwUDEDhq", - 103183: "2KPW9G5tiNF14tZNfG4SqHuQrtUYVZyxuof37aZ7AnTKrQdsHn", - 103197: "pE93VXY3N5QKfwsEFcM9i59UpPFgeZ8nxpJNaGaDQyDgsscNf", - 103203: "2czmtnBS44VCWNRFUM89h4Fe9m3ZeZVYyh7Pe3FhNqjRNgPXhZ", - 103208: "esx5J962LtYm2aSrskpLai5e4CMMsaS1dsu9iuLGJ3KWgSu2M", - 103209: "DK9NqAJGry1wAo767uuYc1dYXAjUhzwka6vi8d9tNheqzGUTd", - 103259: "i1HoerJ1axognkUKKL58FvF9aLrbZKtv7TdKLkT5kgzoeU1vB", - 103261: "2DpCuBaH94zKKFNY2XTs4GeJcwsEv6qT2DHc59S8tdg97GZpcJ", - 103266: "2ez4CA7w4HHr8SSobHQUAwFgj2giRNjNFUZK9JvrZFa1AuRj6X", - 103287: "2QBNMMFJmhVHaGF45GAPszKyj1gK6ToBERRxYvXtM7yfrdUGPK", - 103339: "2pSjfo7rkFCfZ2CqAxqfw8vqM2CU2nVLHrFZe3rwxz43gkVuGo", - 103346: "2SiSziHHqPjb1qkw7CdGYupokiYpd2b7mMqRiyszurctcA5AKr", - 103350: "2F5tSQbdTfhZxvkxZqdFp7KR3FrJPKEsDLQK7KtPhNXj1EZAh4", - 103358: 
"2tCe88ur6MLQcVgwE5XxoaHiTGtSrthwKN3SdbHE4kWiQ7MSTV", - 103437: "21o2fVTnzzmtgXqkV1yuQeze7YEQhR5JB31jVVD9oVUnaaV8qm", - 103472: "2nG4exd9eUoAGzELfksmBR8XDCKhohY1uDKRFzEXJG4M8p3qA7", - 103478: "63YLdYXfXc5tY3mwWLaDsbXzQHYmwWVxMP7HKbRh4Du3C2iM1", - 103493: "soPweZ8DGaoUMjrnzjH3V2bypa7ZvvfqBan4UCsMUxMP759gw", - 103514: "2dNkpQF4mooveyUDfBYQTBfsGDV4wkncQPpEw4kHKfSTSTo5x", - 103536: "PJTkRrHvKZ1m4AQdPND1MBpUXpCrGN4DDmXmJQAiUrsxPoLQX", - 103545: "22ck2Z7cC38hmBfX2v3jMWxun8eD8psNaicfYeokS67DxwmPTx", - 103547: "pTf7gfk1ksj7bqMrLyMCij8FBKth1uRqQrtfykMFeXhx5xnrL", - 103554: "9oZh4qyBCcVwSGyDoUzRAuausvPJN3xH6nopKS6bwYzMfLoQ2", - 103555: "MjExz2z1qhwugc1tAyiGxRsCq4GvJwKfyyS29nr4tRVB8ooic", - 103559: "cwJusfmn98TW3DjAbfLRN9utYR24KAQ82qpAXmVSvjHyJZuM2", - 103561: "2YgxGHns7Z2hMMHJsPCgVXuJaL7x1b3gnHbmSCfCdyAcYGr6mx", - 103563: "2AXxT3PSEnaYHNtBTnYrVTf24TtKDWjky9sqoFEhydrGXE9iKH", - 103564: "Ry2sfjFfGEnJxRkUGFSyZNn7GR3m4aKAf1scDW2uXSNQB568Y", - 103569: "21Jys8UNURmtckKSV89S2hntEWymJszrLQbdLaNcbXcxDAsQSa", - 103570: "sg6wAwFBsPQiS5Yfyh41cVkCRQbrrXsxXmeNyQ1xkunf2sdyv", - 103575: "z3BgePPpCXq1mRBRvUi28rYYxnEtJizkUEHnDBrcZeVA7MFVk", - 103577: "uK5Ff9iBfDtREpVv9NgCQ1STD1nzLJG3yrfibHG4mGvmybw6f", - 103578: "Qv5v5Ru8ArfnWKB1w6s4G5EYPh7TybHJtF6UsVwAkfvZFoqmj", - 103582: "7KCZKBpxovtX9opb7rMRie9WmW5YbZ8A4HwBBokJ9eSHpZPqx", - 103587: "2AfTQ2FXNj9bkSUQnud9pFXULx6EbF7cbbw6i3ayvc2QNhgxfF", - 103590: "2gTygYckZgFZfN5QQWPaPBD3nabqjidV55mwy1x1Nd4JmJAwaM", - 103591: "2cUPPHy1hspr2nAKpQrrAEisLKkaWSS9iF2wjNFyFRs8vnSkKK", - 103594: "5MptSdP6dBMPSwk9GJjeVe39deZJTRh9i82cgNibjeDffrrTf", - 103597: "2J8z7HNv4nwh82wqRGyEHqQeuw4wJ6mCDCSvUgusBu35asnshK", - 103598: "2i2FP6nJyvhX9FR15qN2D9AVoK5XKgBD2i2AQ7FoSpfowxvQDX", - 103603: "2v3smb35s4GLACsK4Zkd2RcLBLdWA4huqrvq8Y3VP4CVe8kfTM", - 103604: "b7XfDDLgwB12DfL7UTWZoxwBpkLPL5mdHtXngD94Y2RoeWXSh", - 103607: "PgaRk1UAoUvRybhnXsrLq5t6imWhEa6ksNjbN6hWgs4qPrSzm", - 103612: "2oueNTj4dUE2FFtGyPpawnmCCsy6EUQeVHVLZy8NHeQmkAciP4", - 103614: 
"2YHZ1KymFjiBhpXzgt6HXJhLSt5SV9UQ4tJuUNjfN1nQQdm5zz", - 103617: "amgH2C1s9H3Av7vSW4y7n7TXb9tKyKHENvrDXutgNN6nsejgc", - 103618: "fV8k1U8oQDmfVwK66kAwN73aSsWiWhm8quNpVnKmSznBycV2W", - 103621: "Nzs93kFTvcXanFUp9Y8VQkKYnzmH8xykxVNFJTkdyAEeuxWbP", - 103623: "2rAsBj3emqQa13CV8r5fTtHogs4sXnjvbbXVzcKPi3WmzhpK9D", - 103624: "2JbuExUGKW5mYz5KfXATwq1ibRDimgks9wEdYGNSC6Ttey1R4U", - 103627: "tLLijh7oKfvWT1yk9zRv4FQvuQ5DAiuvb5kHCNN9zh4mqkFMG", - 103628: "dWBsRYRwFrcyi3DPdLoHsL67QkZ5h86hwtVfP94ZBaY18EkmF", - 103629: "XMoEsew2DhSgQaydcJFJUQAQYP8BTNTYbEJZvtbrV2QsX7iE3", - 103630: "2db2wMbVAoCc5EUJrsBYWvNZDekqyY8uNpaaVapdBAQZ5oRaou", - 103633: "2QiHZwLhQ3xLuyyfcdo5yCUfoSqWDvRZox5ECU19HiswfroCGp", - } - - bonusBlockMainnetHeights := make(map[uint64]ids.ID) - for height, blkIDStr := range mainnetBonusBlocks { - blkID, err := ids.FromString(blkIDStr) - if err != nil { - return nil, err - } - bonusBlockMainnetHeights[height] = blkID - } - return bonusBlockMainnetHeights, nil -} - // Block implements the snowman.Block interface type Block struct { - id ids.ID - ethBlock *types.Block - vm *VM - atomicTxs []*atomic.Tx -} - -// newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface -func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { - isApricotPhase5 := vm.chainConfig.IsApricotPhase5(ethBlock.Time()) - atomicTxs, err := atomic.ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, atomic.Codec) - if err != nil { - return nil, err - } - - return &Block{ - id: ids.ID(ethBlock.Hash()), - ethBlock: ethBlock, - vm: vm, - atomicTxs: atomicTxs, - }, nil + id ids.ID + ethBlock *types.Block + blockManager *blockManager + extraData any } // ID implements the snowman.Block interface func (b *Block) ID() ids.ID { return b.id } -func (b *Block) AtomicTxs() []*atomic.Tx { return b.atomicTxs } - // Accept implements the snowman.Block interface func (b *Block) Accept(context.Context) error { - vm := b.vm + vm := b.blockManager.vm // Although 
returning an error from Accept is considered fatal, it is good // practice to cleanup the batch we were modifying in the case of an error. @@ -150,7 +54,7 @@ func (b *Block) Accept(context.Context) error { // Call Accept for relevant precompile logs. Note we do this prior to // calling Accept on the blockChain so any side effects (eg warp signatures) // take place before the accepted log is emitted to subscribers. - rules := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()) + rules := vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()) if err := b.handlePrecompileAccept(rules); err != nil { return err } @@ -162,48 +66,37 @@ func (b *Block) Accept(context.Context) error { return fmt.Errorf("failed to put %s as the last accepted block: %w", b.ID(), err) } - for _, tx := range b.atomicTxs { - // Remove the accepted transaction from the mempool - vm.mempool.RemoveTx(tx) - } - - // Update VM state for atomic txs in this block. This includes updating the - // atomic tx repo, atomic trie, and shared memory. - atomicState, err := b.vm.atomicBackend.GetVerifiedAtomicState(common.Hash(b.ID())) - if err != nil { - // should never occur since [b] must be verified before calling Accept - return err - } // Get pending operations on the vm's versionDB so we can apply them atomically - // with the shared memory changes. - vdbBatch, err := b.vm.versiondb.CommitBatch() + // with the block extension's changes. + vdbBatch, err := vm.versiondb.CommitBatch() if err != nil { return fmt.Errorf("could not create commit batch processing block[%s]: %w", b.ID(), err) } - // Apply any shared memory changes atomically with other pending changes to + // Apply any changes atomically with other pending changes to // the vm's versionDB. 
- return atomicState.Accept(vdbBatch, nil) + return b.blockManager.blockExtension.Accept(b, vdbBatch) } // handlePrecompileAccept calls Accept on any logs generated with an active precompile address that implements // contract.Accepter func (b *Block) handlePrecompileAccept(rules params.Rules) error { + vm := b.blockManager.vm // Short circuit early if there are no precompile accepters to execute if len(rules.AccepterPrecompiles) == 0 { return nil } // Read receipts from disk - receipts := rawdb.ReadReceipts(b.vm.chaindb, b.ethBlock.Hash(), b.ethBlock.NumberU64(), b.ethBlock.Time(), b.vm.chainConfig) + receipts := rawdb.ReadReceipts(vm.chaindb, b.ethBlock.Hash(), b.ethBlock.NumberU64(), b.ethBlock.Time(), vm.chainConfig) // If there are no receipts, ReadReceipts may be nil, so we check the length and confirm the ReceiptHash // is empty to ensure that missing receipts results in an error on accept. if len(receipts) == 0 && b.ethBlock.ReceiptHash() != types.EmptyRootHash { return fmt.Errorf("failed to fetch receipts for accepted block with non-empty root hash (%s) (Block: %s, Height: %d)", b.ethBlock.ReceiptHash(), b.ethBlock.Hash(), b.ethBlock.NumberU64()) } acceptCtx := &precompileconfig.AcceptContext{ - SnowCtx: b.vm.ctx, - Warp: b.vm.warpBackend, + SnowCtx: vm.ctx, + Warp: vm.warpBackend, } for _, receipt := range receipts { for logIdx, log := range receipt.Logs { @@ -223,23 +116,14 @@ func (b *Block) handlePrecompileAccept(rules params.Rules) error { // Reject implements the snowman.Block interface // If [b] contains an atomic transaction, attempt to re-issue it func (b *Block) Reject(context.Context) error { + vm := b.blockManager.vm log.Debug(fmt.Sprintf("Rejecting block %s (%s) at height %d", b.ID().Hex(), b.ID(), b.Height())) - for _, tx := range b.atomicTxs { - // Re-issue the transaction in the mempool, continue even if it fails - b.vm.mempool.RemoveTx(tx) - if err := b.vm.mempool.AddRemoteTx(tx); err != nil { - log.Debug("Failed to re-issue transaction in 
rejected block", "txID", tx.ID(), "err", err) - } - } - atomicState, err := b.vm.atomicBackend.GetVerifiedAtomicState(common.Hash(b.ID())) - if err != nil { - // should never occur since [b] must be verified before calling Reject - return err - } - if err := atomicState.Reject(); err != nil { - return err + + if err := vm.blockChain.Reject(b.ethBlock); err != nil { + return fmt.Errorf("chain could not reject %s: %w", b.ID(), err) } - return b.vm.blockChain.Reject(b.ethBlock) + + return b.blockManager.blockExtension.Reject(b) } // Parent implements the snowman.Block interface @@ -263,22 +147,29 @@ func (b *Block) syntacticVerify() error { return errInvalidBlock } + vm := b.blockManager.vm + header := b.ethBlock.Header() - rules := b.vm.chainConfig.Rules(header.Number, header.Time) - return b.vm.syntacticBlockValidator.SyntacticVerify(b, rules) + + // Skip verification of the genesis block since it should already be marked as accepted. + if b.ethBlock.Hash() == vm.genesisHash { + return nil + } + rules := vm.chainConfig.Rules(header.Number, header.Time) + return b.blockManager.SyntacticVerify(b, rules) } // Verify implements the snowman.Block interface func (b *Block) Verify(context.Context) error { return b.verify(&precompileconfig.PredicateContext{ - SnowCtx: b.vm.ctx, + SnowCtx: b.blockManager.vm.ctx, ProposerVMBlockCtx: nil, }, true) } // ShouldVerifyWithContext implements the block.WithVerifyContext interface func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) { - predicates := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()).Predicaters + predicates := b.blockManager.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()).Predicaters // Short circuit early if there are no predicates to verify if len(predicates) == 0 { return false, nil @@ -302,7 +193,7 @@ func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) { // VerifyWithContext implements the block.WithVerifyContext interface func (b *Block) 
VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) error { return b.verify(&precompileconfig.PredicateContext{ - SnowCtx: b.vm.ctx, + SnowCtx: b.blockManager.vm.ctx, ProposerVMBlockCtx: proposerVMBlockCtx, }, true) } @@ -311,6 +202,7 @@ func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block // Enforces that the predicates are valid within [predicateContext]. // Writes the block details to disk and the state to the trie manager iff writes=true. func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writes bool) error { + vm := b.blockManager.vm if predicateContext.ProposerVMBlockCtx != nil { log.Debug("Verifying block with context", "block", b.ID(), "height", b.Height()) } else { @@ -320,16 +212,11 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ return fmt.Errorf("syntactic block verification failed: %w", err) } - // verify UTXOs named in import txs are present in shared memory. - if err := b.verifyUTXOsPresent(); err != nil { - return err - } - // Only enforce predicates if the chain has already bootstrapped. // If the chain is still bootstrapping, we can assume that all blocks we are verifying have // been accepted by the network (so the predicate was validated by the network when the // block was originally verified). - if b.vm.bootstrapped.Get() { + if vm.bootstrapped.Get() { if err := b.verifyPredicates(predicateContext); err != nil { return fmt.Errorf("failed to verify predicates: %w", err) } @@ -340,25 +227,21 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ // Additionally, if a block is already in processing, then it has already passed verification and // at this point we have checked the predicates are still valid in the different context so we // can return nil. 
- if b.vm.State.IsProcessing(b.id) { + if vm.State.IsProcessing(b.id) { return nil } - err := b.vm.blockChain.InsertBlockManual(b.ethBlock, writes) + err := vm.blockChain.InsertBlockManual(b.ethBlock, writes) if err != nil || !writes { - // if an error occurred inserting the block into the chain - // or if we are not pinning to memory, unpin the atomic trie - // changes from memory (if they were pinned). - if atomicState, err := b.vm.atomicBackend.GetVerifiedAtomicState(b.ethBlock.Hash()); err == nil { - _ = atomicState.Reject() // ignore this error so we can return the original error instead. - } + b.blockManager.blockExtension.Cleanup(b) } return err } // verifyPredicates verifies the predicates in the block are valid according to predicateContext. func (b *Block) verifyPredicates(predicateContext *precompileconfig.PredicateContext) error { - rules := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()) + vm := b.blockManager.vm + rules := vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()) switch { case !rules.IsDurango && rules.PredicatersExist(): @@ -391,33 +274,6 @@ func (b *Block) verifyPredicates(predicateContext *precompileconfig.PredicateCon return nil } -// verifyUTXOsPresent returns an error if any of the atomic transactions name UTXOs that -// are not present in shared memory. -func (b *Block) verifyUTXOsPresent() error { - blockHash := common.Hash(b.ID()) - if b.vm.atomicBackend.IsBonus(b.Height(), blockHash) { - log.Info("skipping atomic tx verification on bonus block", "block", blockHash) - return nil - } - - if !b.vm.bootstrapped.Get() { - return nil - } - - // verify UTXOs named in import txs are present in shared memory. 
- for _, atomicTx := range b.atomicTxs { - utx := atomicTx.UnsignedAtomicTx - chainID, requests, err := utx.AtomicOps() - if err != nil { - return err - } - if _, err := b.vm.ctx.SharedMemory.Get(chainID, requests.RemoveRequests); err != nil { - return fmt.Errorf("%w: %s", errMissingUTXOs, err) - } - } - return nil -} - // Bytes implements the snowman.Block interface func (b *Block) Bytes() []byte { res, err := rlp.EncodeToBytes(b.ethBlock) @@ -432,3 +288,7 @@ func (b *Block) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID( func (b *Block) GetEthBlock() *types.Block { return b.ethBlock } + +func (b *Block) GetExtraData() interface{} { + return b.extraData +} diff --git a/plugin/evm/block_builder.go b/plugin/evm/block_builder.go index 967444b0d0..ad28541b42 100644 --- a/plugin/evm/block_builder.go +++ b/plugin/evm/block_builder.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/params" - atomictxpool "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/holiman/uint256" "github.com/ava-labs/avalanchego/snow" @@ -25,12 +24,17 @@ const ( minBlockBuildingRetryDelay = 500 * time.Millisecond ) +type BuilderMempool interface { + Len() int + SubscribePendingTxs() <-chan struct{} +} + type blockBuilder struct { ctx *snow.Context chainConfig *params.ChainConfig - txPool *txpool.TxPool - mempool *atomictxpool.Mempool + txPool *txpool.TxPool + extraMempool BuilderMempool shutdownChan <-chan struct{} shutdownWg *sync.WaitGroup @@ -53,12 +57,14 @@ type blockBuilder struct { buildBlockTimer *timer.Timer } -func (vm *VM) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message) *blockBuilder { +// NewBlockBuilder creates a new block builder. extraMempool is an optional mempool (can be nil) that +// can be used to add transactions to the block builder, in addition to the txPool. 
+func (vm *VM) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message, extraMempool BuilderMempool) *blockBuilder { b := &blockBuilder{ ctx: vm.ctx, chainConfig: vm.chainConfig, txPool: vm.txPool, - mempool: vm.mempool, + extraMempool: extraMempool, shutdownChan: vm.shutdownChan, shutdownWg: &vm.shutdownWg, notifyBuildBlockChan: notifyBuildBlockChan, @@ -105,7 +111,7 @@ func (b *blockBuilder) needToBuild() bool { size := b.txPool.PendingSize(txpool.PendingFilter{ MinTip: uint256.MustFromBig(b.txPool.GasTip()), }) - return size > 0 || b.mempool.Len() > 0 + return size > 0 || (b.extraMempool != nil && b.extraMempool.Len() > 0) } // markBuilding adds a PendingTxs message to the toEngine channel. @@ -150,6 +156,11 @@ func (b *blockBuilder) awaitSubmittedTxs() { txSubmitChan := make(chan core.NewTxsEvent) b.txPool.SubscribeTransactions(txSubmitChan, true) + var extraChan <-chan struct{} + if b.extraMempool != nil { + extraChan = b.extraMempool.SubscribePendingTxs() + } + b.shutdownWg.Add(1) go b.ctx.Log.RecoverAndPanic(func() { defer b.shutdownWg.Done() @@ -159,8 +170,8 @@ func (b *blockBuilder) awaitSubmittedTxs() { case <-txSubmitChan: log.Trace("New tx detected, trying to generate a block") b.signalTxsReady() - case <-b.mempool.Pending: - log.Trace("New atomic Tx detected, trying to generate a block") + case <-extraChan: + log.Trace("New extra Tx detected, trying to generate a block") b.signalTxsReady() case <-b.shutdownChan: b.buildBlockTimer.Stop() diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_manager.go similarity index 66% rename from plugin/evm/block_verification.go rename to plugin/evm/block_manager.go index f0f8db1658..f8909a6107 100644 --- a/plugin/evm/block_verification.go +++ b/plugin/evm/block_manager.go @@ -10,11 +10,12 @@ import ( "github.com/ethereum/go-ethereum/common" - safemath "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/constants" 
"github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic/extension" "github.com/ava-labs/coreth/trie" ) @@ -23,67 +24,34 @@ var ( apricotPhase1MinGasPrice = big.NewInt(params.ApricotPhase1MinGasPrice) ) -type BlockValidator interface { - SyntacticVerify(b *Block, rules params.Rules) error +type blockManager struct { + blockExtension extension.BlockExtension + vm *VM } -type blockValidator struct { - extDataHashes map[common.Hash]common.Hash -} - -func NewBlockValidator(extDataHashes map[common.Hash]common.Hash) BlockValidator { - return &blockValidator{ - extDataHashes: extDataHashes, +func newBlockManager(vm *VM, blockExtension extension.BlockExtension) *blockManager { + return &blockManager{ + blockExtension: blockExtension, + vm: vm, } } -func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { - if b == nil || b.ethBlock == nil { - return errInvalidBlock - } +// newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface +func (bm *blockManager) newBlock(ethBlock *types.Block) (*Block, error) { + extraData, err := bm.blockExtension.InitializeExtraData(ethBlock, bm.vm.chainConfig) + if err != nil { + return nil, fmt.Errorf("failed to initialize block extension: %w", err) + } + return &Block{ + id: ids.ID(ethBlock.Hash()), + ethBlock: ethBlock, + blockManager: bm, + extraData: extraData, + }, nil +} +func (bm *blockManager) SyntacticVerify(b *Block, rules params.Rules) error { ethHeader := b.ethBlock.Header() - blockHash := b.ethBlock.Hash() - - if !rules.IsApricotPhase1 { - if v.extDataHashes != nil { - extData := b.ethBlock.ExtData() - extDataHash := types.CalcExtDataHash(extData) - // If there is no extra data, check that there is no extra data in the hash map either to ensure we do not - // have a block that is unexpectedly missing extra data. 
- expectedExtDataHash, ok := v.extDataHashes[blockHash] - if len(extData) == 0 { - if ok { - return fmt.Errorf("found block with unexpected missing extra data (%s, %d), expected extra data hash: %s", blockHash, b.Height(), expectedExtDataHash) - } - } else { - // If there is extra data, check to make sure that the extra data hash matches the expected extra data hash for this - // block - if extDataHash != expectedExtDataHash { - return fmt.Errorf("extra data hash in block (%s, %d): %s, did not match the expected extra data hash: %s", blockHash, b.Height(), extDataHash, expectedExtDataHash) - } - } - } - } - - // Skip verification of the genesis block since it should already be marked as accepted. - if blockHash == b.vm.genesisHash { - return nil - } - - // Verify the ExtDataHash field - if rules.IsApricotPhase1 { - if hash := types.CalcExtDataHash(b.ethBlock.ExtData()); ethHeader.ExtDataHash != hash { - return fmt.Errorf("extra data hash mismatch: have %x, want %x", ethHeader.ExtDataHash, hash) - } - } else { - if ethHeader.ExtDataHash != (common.Hash{}) { - return fmt.Errorf( - "expected ExtDataHash to be empty but got %x", - ethHeader.ExtDataHash, - ) - } - } // Perform block and header sanity checks if !ethHeader.Number.IsUint64() { @@ -176,12 +144,6 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { return errUnclesUnsupported } - // Block must not be empty - txs := b.ethBlock.Transactions() - if len(txs) == 0 && len(b.atomicTxs) == 0 { - return errEmptyBlock - } - // Enforce minimum gas prices here prior to dynamic fees going into effect. switch { case !rules.IsApricotPhase1: @@ -203,7 +165,7 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { // Make sure the block isn't too far in the future // TODO: move this to only be part of semantic verification. 
blockTimestamp := b.ethBlock.Time() - if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { + if maxBlockTime := uint64(bm.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) } @@ -218,40 +180,8 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { } } - // If we are in ApricotPhase4, ensure that ExtDataGasUsed is populated correctly. if rules.IsApricotPhase4 { - // Make sure ExtDataGasUsed is not nil and correct - if ethHeader.ExtDataGasUsed == nil { - return errNilExtDataGasUsedApricotPhase4 - } - if rules.IsApricotPhase5 { - if ethHeader.ExtDataGasUsed.Cmp(params.AtomicGasLimit) == 1 { - return fmt.Errorf("too large extDataGasUsed: %d", ethHeader.ExtDataGasUsed) - } - } else { - if !ethHeader.ExtDataGasUsed.IsUint64() { - return fmt.Errorf("too large extDataGasUsed: %d", ethHeader.ExtDataGasUsed) - } - } - var totalGasUsed uint64 - for _, atomicTx := range b.atomicTxs { - // We perform this check manually here to avoid the overhead of having to - // reparse the atomicTx in `CalcExtDataGasUsed`. 
- fixedFee := rules.IsApricotPhase5 // Charge the atomic tx fixed fee as of ApricotPhase5 - gasUsed, err := atomicTx.GasUsed(fixedFee) - if err != nil { - return err - } - totalGasUsed, err = safemath.Add64(totalGasUsed, gasUsed) - if err != nil { - return err - } - } - switch { - case ethHeader.ExtDataGasUsed.Cmp(new(big.Int).SetUint64(totalGasUsed)) != 0: - return fmt.Errorf("invalid extDataGasUsed: have %d, want %d", ethHeader.ExtDataGasUsed, totalGasUsed) - // Make sure BlockGasCost is not nil // NOTE: ethHeader.BlockGasCost correctness is checked in header verification case ethHeader.BlockGasCost == nil: @@ -293,5 +223,6 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { return fmt.Errorf("blobs not enabled on avalanche networks: used %d blob gas, expected 0", *ethHeader.BlobGasUsed) } } - return nil + + return bm.blockExtension.SyntacticVerify(b, rules) } diff --git a/plugin/evm/client/client.go b/plugin/evm/client/client.go index b507342405..b92b8bb0c0 100644 --- a/plugin/evm/client/client.go +++ b/plugin/evm/client/client.go @@ -20,6 +20,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/config" ) +// TODO: this should be defined per chain vm (coreth, subnet-evm) in their relative pkgs. 
// Interface compliance var _ Client = (*client)(nil) diff --git a/plugin/evm/config/constants.go b/plugin/evm/config/constants.go index 2e47489f1c..b978afcc7c 100644 --- a/plugin/evm/config/constants.go +++ b/plugin/evm/config/constants.go @@ -1,8 +1,19 @@ package config +import ( + "time" + + "github.com/ava-labs/avalanchego/utils/units" +) + const ( TxGossipBloomMinTargetElements = 8 * 1024 TxGossipBloomTargetFalsePositiveRate = 0.01 TxGossipBloomResetFalsePositiveRate = 0.05 TxGossipBloomChurnMultiplier = 3 + PushGossipDiscardedElements = 16_384 + TxGossipTargetMessageSize = 20 * units.KiB + TxGossipThrottlingPeriod = 10 * time.Second + TxGossipThrottlingLimit = 2 + TxGossipPollSize = 1 ) diff --git a/plugin/evm/gossip.go b/plugin/evm/eth_gossiper.go similarity index 76% rename from plugin/evm/gossip.go rename to plugin/evm/eth_gossiper.go index 16d632bd94..d946ac91ff 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/eth_gossiper.go @@ -10,17 +10,13 @@ import ( "fmt" "sync" "sync/atomic" - "time" ethcommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/gossip" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/txpool" @@ -32,8 +28,6 @@ import ( const pendingTxsBuffer = 10 var ( - _ p2p.Handler = (*txGossipHandler)(nil) - _ gossip.Gossipable = (*GossipEthTx)(nil) _ gossip.Marshaller[*GossipEthTx] = (*GossipEthTxMarshaller)(nil) _ gossip.Set[*GossipEthTx] = (*GossipEthTxPool)(nil) @@ -41,56 +35,6 @@ var ( _ eth.PushGossiper = (*EthPushGossiper)(nil) ) -func newTxGossipHandler[T gossip.Gossipable]( - log logging.Logger, - marshaller gossip.Marshaller[T], - mempool gossip.Set[T], - metrics gossip.Metrics, - maxMessageSize int, - 
throttlingPeriod time.Duration, - throttlingLimit int, - validators *p2p.Validators, -) txGossipHandler { - // push gossip messages can be handled from any peer - handler := gossip.NewHandler( - log, - marshaller, - mempool, - metrics, - maxMessageSize, - ) - - // pull gossip requests are filtered by validators and are throttled - // to prevent spamming - validatorHandler := p2p.NewValidatorHandler( - p2p.NewThrottlerHandler( - handler, - p2p.NewSlidingWindowThrottler(throttlingPeriod, throttlingLimit), - log, - ), - validators, - log, - ) - - return txGossipHandler{ - appGossipHandler: handler, - appRequestHandler: validatorHandler, - } -} - -type txGossipHandler struct { - appGossipHandler p2p.Handler - appRequestHandler p2p.Handler -} - -func (t txGossipHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { - t.appGossipHandler.AppGossip(ctx, nodeID, gossipBytes) -} - -func (t txGossipHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { - return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) -} - func NewGossipEthTxPool(mempool *txpool.TxPool, registerer prometheus.Registerer) (*GossipEthTxPool, error) { bloom, err := gossip.NewBloomFilter( registerer, diff --git a/plugin/evm/extension/extension.go b/plugin/evm/extension/extension.go deleted file mode 100644 index 416d131632..0000000000 --- a/plugin/evm/extension/extension.go +++ /dev/null @@ -1,7 +0,0 @@ -package extension - -import "github.com/ava-labs/avalanchego/codec" - -type ExtensionConfig struct { - NetworkCodec codec.Manager -} diff --git a/plugin/evm/factory.go b/plugin/evm/factory.go index b3b2143a58..f42d8f8b41 100644 --- a/plugin/evm/factory.go +++ b/plugin/evm/factory.go @@ -22,17 +22,9 @@ var ( type Factory struct{} func (*Factory) New(logging.Logger) (interface{}, error) { - extensionCfg, err := atomicvm.NewAtomicExtensionConfig() - if err != nil { - return nil, err - } - 
return atomicvm.WrapVM(NewExtensibleEVM(false, extensionCfg)), nil + return atomicvm.WrapVM(&VM{}), nil } func NewPluginVM() (block.ChainVM, error) { - extensionCfg, err := atomicvm.NewAtomicExtensionConfig() - if err != nil { - return nil, err - } - return atomicvm.WrapVM(NewExtensibleEVM(true, extensionCfg)), nil + return atomicvm.WrapVM(&VM{IsPlugin: true}), nil } diff --git a/plugin/evm/gossip/handler.go b/plugin/evm/gossip/handler.go new file mode 100644 index 0000000000..d5cd18c0d9 --- /dev/null +++ b/plugin/evm/gossip/handler.go @@ -0,0 +1,64 @@ +package gossip + +import ( + "context" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" +) + +var _ p2p.Handler = (*txGossipHandler)(nil) + +func NewTxGossipHandler[T gossip.Gossipable]( + log logging.Logger, + marshaller gossip.Marshaller[T], + mempool gossip.Set[T], + metrics gossip.Metrics, + maxMessageSize int, + throttlingPeriod time.Duration, + throttlingLimit int, + validators *p2p.Validators, +) *txGossipHandler { + // push gossip messages can be handled from any peer + handler := gossip.NewHandler( + log, + marshaller, + mempool, + metrics, + maxMessageSize, + ) + + // pull gossip requests are filtered by validators and are throttled + // to prevent spamming + validatorHandler := p2p.NewValidatorHandler( + p2p.NewThrottlerHandler( + handler, + p2p.NewSlidingWindowThrottler(throttlingPeriod, throttlingLimit), + log, + ), + validators, + log, + ) + + return &txGossipHandler{ + appGossipHandler: handler, + appRequestHandler: validatorHandler, + } +} + +type txGossipHandler struct { + appGossipHandler p2p.Handler + appRequestHandler p2p.Handler +} + +func (t *txGossipHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { + t.appGossipHandler.AppGossip(ctx, nodeID, gossipBytes) +} 
+ +func (t *txGossipHandler) AppRequest(ctx context.Context, nodeID ids.NodeID, deadline time.Time, requestBytes []byte) ([]byte, *common.AppError) { + return t.appRequestHandler.AppRequest(ctx, nodeID, deadline, requestBytes) +} diff --git a/plugin/evm/network_handler.go b/plugin/evm/network_handler.go index 048f288797..99c9d0388e 100644 --- a/plugin/evm/network_handler.go +++ b/plugin/evm/network_handler.go @@ -8,10 +8,9 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/plugin/evm/message" syncHandlers "github.com/ava-labs/coreth/sync/handlers" - syncStats "github.com/ava-labs/coreth/sync/handlers/stats" + "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ava-labs/coreth/triedb" "github.com/ava-labs/coreth/warp" warpHandlers "github.com/ava-labs/coreth/warp/handlers" @@ -21,7 +20,7 @@ import ( var _ message.RequestHandler = &networkHandler{} -type LeafHandlers map[message.NodeType]*syncHandlers.LeafsRequestHandler +type LeafHandlers map[message.NodeType]syncHandlers.LeafRequestHandler type networkHandler struct { leafRequestHandlers LeafHandlers @@ -44,18 +43,9 @@ func newNetworkHandler( diskDB ethdb.KeyValueReader, warpBackend warp.Backend, networkCodec codec.Manager, - leafRequesTypeConfigs map[message.NodeType]LeafRequestTypeConfig, -) message.RequestHandler { - syncStats := syncStats.NewHandlerStats(metrics.Enabled) - leafRequestHandlers := make(LeafHandlers) - for _, config := range leafRequesTypeConfigs { - snapshotProvider := provider - if !config.UseSnapshots { - snapshotProvider = nil - } - leafRequestHandler := syncHandlers.NewLeafsRequestHandler(config.TrieDB, config.NodeKeyLen, snapshotProvider, networkCodec, syncStats) - leafRequestHandlers[config.NodeType] = leafRequestHandler - } + leafRequestHandlers LeafHandlers, + syncStats stats.HandlerStats, +) *networkHandler { return &networkHandler{ leafRequestHandlers: leafRequestHandlers, 
blockRequestHandler: syncHandlers.NewBlockRequestHandler(provider, networkCodec, syncStats), diff --git a/plugin/evm/sync/extender.go b/plugin/evm/sync/extender.go new file mode 100644 index 0000000000..def5057c26 --- /dev/null +++ b/plugin/evm/sync/extender.go @@ -0,0 +1,31 @@ +package sync + +import ( + "context" + + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/coreth/plugin/evm/message" + syncclient "github.com/ava-labs/coreth/sync/client" +) + +var _ Extender = (*NoOpExtender)(nil) + +type Extender interface { + Sync(ctx context.Context, client syncclient.LeafClient, verdb *versiondb.Database, syncSummary message.Syncable) error + OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error + OnFinishAfterCommit(summaryHeight uint64) error +} + +type NoOpExtender struct{} + +func (n *NoOpExtender) Sync(ctx context.Context, client syncclient.LeafClient, verdb *versiondb.Database, syncSummary message.Syncable) error { + return nil +} + +func (n *NoOpExtender) OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error { + return nil +} + +func (n *NoOpExtender) OnFinishAfterCommit(summaryHeight uint64) error { + return nil +} diff --git a/plugin/evm/sync/syncervm_client.go b/plugin/evm/sync/syncervm_client.go index b315895127..ac7dfbd826 100644 --- a/plugin/evm/sync/syncervm_client.go +++ b/plugin/evm/sync/syncervm_client.go @@ -31,7 +31,11 @@ import ( // The last 256 block hashes are necessary to support the BLOCKHASH opcode. 
const ParentsToFetch = 256 -var stateSyncSummaryKey = []byte("stateSyncSummary") +var ( + stateSyncSummaryKey = []byte("stateSyncSummary") + + errExtenderAlreadySet = fmt.Errorf("sync extender already set") +) type BlockAcceptor interface { PutLastAcceptedID(ids.ID) error @@ -41,12 +45,6 @@ type EthBlockWrapper interface { GetEthBlock() *types.Block } -type Extender interface { - Sync(ctx context.Context, client syncclient.LeafClient, verdb *versiondb.Database, syncSummary message.Syncable) error - OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error - OnFinishAfterCommit(summaryHeight uint64) error -} - // ClientConfig defines the options and dependencies needed to construct a Client type ClientConfig struct { Enabled bool @@ -68,7 +66,7 @@ type ClientConfig struct { // Extension points SyncableParser message.SyncableParser - ExtraSyncer Extender + SyncExtender Extender Client syncclient.Client @@ -171,7 +169,10 @@ func (client *stateSyncerClient) stateSync(ctx context.Context) error { return err } - return client.ClientConfig.ExtraSyncer.Sync(ctx, client.Client, client.VerDB, client.syncSummary) + if client.SyncExtender != nil { + return client.SyncExtender.Sync(ctx, client.Client, client.VerDB, client.syncSummary) + } + return nil } // acceptSyncSummary returns true if sync will be performed and launches the state sync process @@ -365,8 +366,10 @@ func (client *stateSyncerClient) finishSync() error { return err } - if err := client.ExtraSyncer.OnFinishBeforeCommit(client.LastAcceptedHeight, client.syncSummary); err != nil { - return err + if client.SyncExtender != nil { + if err := client.SyncExtender.OnFinishBeforeCommit(client.LastAcceptedHeight, client.syncSummary); err != nil { + return err + } } if err := client.commitVMMarkers(); err != nil { @@ -377,7 +380,11 @@ func (client *stateSyncerClient) finishSync() error { return err } - return client.ExtraSyncer.OnFinishAfterCommit(block.NumberU64()) + if client.SyncExtender != nil 
{ + return client.SyncExtender.OnFinishAfterCommit(block.NumberU64()) + } + + return nil } // commitVMMarkers updates the following markers in the VM's database diff --git a/plugin/evm/sync/syncervm_server.go b/plugin/evm/sync/syncervm_server.go index 215e3a19b9..39de12f562 100644 --- a/plugin/evm/sync/syncervm_server.go +++ b/plugin/evm/sync/syncervm_server.go @@ -4,22 +4,26 @@ package sync import ( "context" + "fmt" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/log" ) +var errProviderNotSet = fmt.Errorf("provider not set") + type SummaryProvider interface { - StateSummaryAtHeight(height uint64) (block.StateSummary, error) + StateSummaryAtBlock(ethBlock *types.Block) (block.StateSummary, error) } type stateSyncServer struct { - chain *core.BlockChain - provider SummaryProvider + chain *core.BlockChain + provider SummaryProvider syncableInterval uint64 } @@ -31,8 +35,8 @@ type Server interface { func SyncServer(chain *core.BlockChain, provider SummaryProvider, syncableInterval uint64) Server { return &stateSyncServer{ chain: chain, - provider: provider, syncableInterval: syncableInterval, + provider: provider, } } @@ -44,7 +48,7 @@ func (server *stateSyncServer) GetLastStateSummary(context.Context) (block.State lastHeight := server.chain.LastAcceptedBlock().NumberU64() lastSyncSummaryNumber := lastHeight - lastHeight%server.syncableInterval - summary, err := server.provider.StateSummaryAtHeight(lastSyncSummaryNumber) + summary, err := server.stateSummaryAtHeight(lastSyncSummaryNumber) if err != nil { log.Debug("could not get latest state summary", "err", err) return nil, database.ErrNotFound @@ -64,7 +68,7 @@ func (server *stateSyncServer) GetStateSummary(_ context.Context, height uint64) return nil, database.ErrNotFound } - summary, err := server.provider.StateSummaryAtHeight(summaryBlock.NumberU64()) 
+ summary, err := server.stateSummaryAtHeight(summaryBlock.NumberU64()) if err != nil { log.Debug("could not get state summary", "height", height, "err", err) return nil, database.ErrNotFound @@ -73,3 +77,18 @@ func (server *stateSyncServer) GetStateSummary(_ context.Context, height uint64) log.Debug("Serving syncable block at requested height", "height", height, "summary", summary) return summary, nil } + +func (server *stateSyncServer) stateSummaryAtHeight(height uint64) (block.StateSummary, error) { + blk := server.chain.GetBlockByNumber(height) + if blk == nil { + return nil, fmt.Errorf("block not found for height (%d)", height) + } + + if !server.chain.HasState(blk.Root()) { + return nil, fmt.Errorf("block root does not exist for height (%d), root (%s)", height, blk.Root()) + } + if server.provider == nil { + return nil, errProviderNotSet + } + return server.provider.StateSummaryAtBlock(blk) +} diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 9f1bebfa48..9e11afd15c 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -39,6 +39,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" "github.com/ava-labs/coreth/plugin/evm/database" vmsync "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/predicate" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" @@ -281,7 +282,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s require = require.New(t) importAmount = 2000000 * units.Avax // 2M avax alloc = map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, + testutils.TestShortIDAddrs[0]: importAmount, } ) // override serverAtomicTrie's commitInterval so the call to [serverAtomicTrie.Index] @@ -317,7 +318,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s serverVM.ctx.AVAXAssetID, importAmount/2, 
serverVM.ctx.XChainID, - testShortIDAddrs[0], + testutils.TestShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}, ) diff --git a/plugin/evm/testutils/utils.go b/plugin/evm/testutils/utils.go new file mode 100644 index 0000000000..7a94fe6c02 --- /dev/null +++ b/plugin/evm/testutils/utils.go @@ -0,0 +1,35 @@ +package testutils + +import ( + "math/big" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/cb58" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/coreth/params" + + "github.com/ethereum/go-ethereum/common" +) + +var ( + TestKeys []*secp256k1.PrivateKey + TestEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] + TestShortIDAddrs []ids.ShortID + InitialBaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) +) + +func init() { + var b []byte + + for _, key := range []string{ + "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", + "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", + "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", + } { + b, _ = cb58.Decode(key) + pk, _ := secp256k1.ToPrivateKey(b) + TestKeys = append(TestKeys, pk) + TestEthAddrs = append(TestEthAddrs, pk.EthAddress()) + TestShortIDAddrs = append(TestShortIDAddrs, pk.Address()) + } +} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 9690ebc70c..ad75c5e0c7 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -17,19 +17,12 @@ import ( "sync" "time" - "github.com/ava-labs/avalanchego/cache/metercacher" - "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/network/p2p/acp118" - "github.com/ava-labs/avalanchego/network/p2p/gossip" - "github.com/ava-labs/avalanchego/upgrade" - avalanchegoConstants "github.com/ava-labs/avalanchego/utils/constants" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" - 
"github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/eth" @@ -40,21 +33,21 @@ import ( "github.com/ava-labs/coreth/node" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/peer" - "github.com/ava-labs/coreth/plugin/evm/atomic" - atomicstate "github.com/ava-labs/coreth/plugin/evm/atomic/state" - atomicsync "github.com/ava-labs/coreth/plugin/evm/atomic/sync" - atomictxpool "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" + "github.com/ava-labs/coreth/plugin/evm/atomic/extension" "github.com/ava-labs/coreth/plugin/evm/config" - "github.com/ava-labs/coreth/plugin/evm/extension" + "github.com/ava-labs/coreth/plugin/evm/gossip" "github.com/ava-labs/coreth/plugin/evm/message" vmsync "github.com/ava-labs/coreth/plugin/evm/sync" warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp" "github.com/ava-labs/coreth/rpc" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/client/stats" + "github.com/ava-labs/coreth/sync/handlers" + handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ava-labs/coreth/triedb" "github.com/ava-labs/coreth/triedb/hashdb" "github.com/ava-labs/coreth/utils" + "github.com/ava-labs/coreth/vmerrs" "github.com/ava-labs/coreth/warp" // Force-load tracer engine to trigger registration @@ -74,33 +67,26 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" - avalancheRPC "github.com/gorilla/rpc/v2" - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/cache/metercacher" avalanchedatabase "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/acp118" + 
avalanchegossip "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/chain" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" avalancheUtils "github.com/ava-labs/avalanchego/utils" - avalancheJSON "github.com/ava-labs/avalanchego/utils/json" ) var ( @@ -108,18 +94,15 @@ var ( _ block.BuildBlockWithContextChainVM = &VM{} _ block.StateSyncableVM = &VM{} _ statesyncclient.EthBlockParser = &VM{} - _ secp256k1fx.VM = &VM{} _ vmsync.BlockAcceptor = &VM{} + _ extension.InnerVM = &VM{} ) const ( // Max time from current time allowed for blocks, before they're considered future blocks // and fail verification maxFutureBlockTime = 10 * time.Second - maxUTXOsToFetch = 1024 - defaultMempoolSize = 4096 - secpCacheSize = 1024 decidedCacheSize = 10 * units.MiB missingCacheSize = 50 unverifiedCacheSize = 5 * units.MiB @@ -131,25 +114,16 @@ const ( sdkMetricsPrefix = "sdk" chainStateMetricsPrefix = "chain_state" - targetAtomicTxsSize = 40 * units.KiB - // gossip constants - pushGossipDiscardedElements = 16_384 - txGossipTargetMessageSize = 20 * units.KiB - maxValidatorSetStaleness = time.Minute - txGossipThrottlingPeriod = 10 * time.Second - txGossipThrottlingLimit = 2 - txGossipPollSize = 1 + maxValidatorSetStaleness = time.Minute ) // Define the API 
endpoints for the VM const ( - avaxEndpoint = "/avax" - adminEndpoint = "/admin" - ethRPCEndpoint = "/rpc" - ethWSEndpoint = "/ws" - ethTxGossipNamespace = "eth_tx_gossip" - atomicTxGossipNamespace = "atomic_tx_gossip" + adminEndpoint = "/admin" + ethRPCEndpoint = "/rpc" + ethWSEndpoint = "/ws" + ethTxGossipNamespace = "eth_tx_gossip" ) var ( @@ -162,16 +136,18 @@ var ( ) var ( - errEmptyBlock = errors.New("empty block") - errUnsupportedFXs = errors.New("unsupported feature extensions") - errInvalidBlock = errors.New("invalid block") - errInvalidNonce = errors.New("invalid nonce") - errUnclesUnsupported = errors.New("uncles unsupported") - errRejectedParent = errors.New("rejected parent") - errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") - errNilExtDataGasUsedApricotPhase4 = errors.New("nil extDataGasUsed is invalid after apricotPhase4") - errNilBlockGasCostApricotPhase4 = errors.New("nil blockGasCost is invalid after apricotPhase4") - errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") + errUnsupportedFXs = errors.New("unsupported feature extensions") + errInvalidBlock = errors.New("invalid block") + errInvalidNonce = errors.New("invalid nonce") + errUnclesUnsupported = errors.New("uncles unsupported") + errRejectedParent = errors.New("rejected parent") + errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") + errNilBlockGasCostApricotPhase4 = errors.New("nil blockGasCost is invalid after apricotPhase4") + errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") + errEthAlreadyInitialized = errors.New("ethereum already initialized") + errConsensusCallbacksAlreadySet = errors.New("consensus callbacks already set") + errVMAlreadyInitialized = errors.New("vm already initialized") + errExtensionConfigAlreadySet = errors.New("extension config already set") ) var originalStderr *os.File @@ -220,7 +196,8 @@ type VM struct { ethConfig 
ethconfig.Config // Extension Points - extensionConfig extension.ExtensionConfig + extensionConfig *extension.Config + blockManager *blockManager // pointers to eth constructs eth *eth.Ethereum @@ -249,27 +226,13 @@ type VM struct { toEngine chan<- commonEng.Message - syntacticBlockValidator BlockValidator - - // [atomicTxRepository] maintains two indexes on accepted atomic txs. - // - txID to accepted atomic tx - // - block height to list of atomic txs accepted on block at that height - atomicTxRepository *atomicstate.AtomicTxRepository - // [atomicBackend] abstracts verification and processing of atomic transactions - atomicBackend *atomicstate.AtomicBackend - builder *blockBuilder - baseCodec codec.Registry - clock mockable.Clock - mempool *atomictxpool.Mempool + clock mockable.Clock shutdownChan chan struct{} shutdownWg sync.WaitGroup - fx secp256k1fx.Fx - secpCache secp256k1.RecoverCache - // Continuous Profiler profiler profiler.ContinuousProfiler @@ -289,48 +252,30 @@ type VM struct { vmsync.Server vmsync.Client - leafRequestTypeConfigs map[message.NodeType]LeafRequestTypeConfig - // Avalanche Warp Messaging backend // Used to serve BLS signatures of warp messages over RPC warpBackend warp.Backend // Initialize only sets these if nil so they can be overridden in tests - p2pSender commonEng.AppSender - ethTxGossipHandler p2p.Handler - ethTxPushGossiper avalancheUtils.Atomic[*gossip.PushGossiper[*GossipEthTx]] - ethTxPullGossiper gossip.Gossiper - atomicTxGossipHandler p2p.Handler - atomicTxPushGossiper *gossip.PushGossiper[*atomic.GossipAtomicTx] - atomicTxPullGossiper gossip.Gossiper + p2pSender commonEng.AppSender + ethTxGossipHandler p2p.Handler + ethTxPushGossiper avalancheUtils.Atomic[*avalanchegossip.PushGossiper[*GossipEthTx]] + ethTxPullGossiper avalanchegossip.Gossiper chainAlias string // RPC handlers (should be stopped before closing chaindb) rpcHandlers []interface{ Stop() } } -func NewExtensibleEVM(isPlugin bool, extensionConfig 
extension.ExtensionConfig) *VM { - return &VM{IsPlugin: isPlugin, extensionConfig: extensionConfig} -} - -// CodecRegistry implements the secp256k1fx interface -func (vm *VM) CodecRegistry() codec.Registry { return vm.baseCodec } - -// Clock implements the secp256k1fx interface -func (vm *VM) Clock() *mockable.Clock { return &vm.clock } - -// Logger implements the secp256k1fx interface -func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } - -/* - ****************************************************************************** - ********************************* Snowman API ******************************** - ****************************************************************************** - */ - -// implements SnowmanPlusPlusVM interface -func (vm *VM) GetActivationTime() time.Time { - return utils.Uint64ToTime(vm.chainConfig.ApricotPhase4BlockTimestamp) +func (vm *VM) SetExtensionConfig(config *extension.Config) error { + if vm.ctx != nil { + return errVMAlreadyInitialized + } + if vm.extensionConfig != nil { + return errExtensionConfigAlreadySet + } + vm.extensionConfig = config + return nil } // Initialize implements the snowman.ChainVM interface @@ -415,27 +360,7 @@ func (vm *VM) Initialize( return err } - var extDataHashes map[common.Hash]common.Hash - var chainID *big.Int - // Set the chain config for mainnet/fuji chain IDs - switch chainCtx.NetworkID { - case avalanchegoConstants.MainnetID: - chainID = params.AvalancheMainnetChainID - extDataHashes = mainnetExtDataHashes - case avalanchegoConstants.FujiID: - chainID = params.AvalancheFujiChainID - extDataHashes = fujiExtDataHashes - case avalanchegoConstants.LocalID: - chainID = params.AvalancheLocalChainID - default: - chainID = g.Config.ChainID - } - - // if the chainCtx.NetworkUpgrades is not empty, set the chain config - // normally it should not be empty, but some tests may not set it - if chainCtx.NetworkUpgrades != (upgrade.Config{}) { - g.Config = params.GetChainConfig(chainCtx.NetworkUpgrades, 
new(big.Int).Set(chainID)) - } + g.Config.NetworkUpgrades = params.GetNetworkUpgrades(chainCtx.NetworkUpgrades) // If the Durango is activated, activate the Warp Precompile at the same time if g.Config.DurangoBlockTimestamp != nil { @@ -448,12 +373,6 @@ func (vm *VM) Initialize( g.Config.AvalancheContext = params.AvalancheContext{ SnowCtx: chainCtx, } - vm.syntacticBlockValidator = NewBlockValidator(extDataHashes) - - // Free the memory of the extDataHash map that is not used (i.e. if mainnet - // config, free fuji) - fujiExtDataHashes = nil - mainnetExtDataHashes = nil vm.chainID = g.Config.ChainID @@ -462,8 +381,8 @@ func (vm *VM) Initialize( vm.ethConfig = ethconfig.NewDefaultConfig() vm.ethConfig.Genesis = g vm.ethConfig.NetworkId = vm.chainID.Uint64() - vm.genesisHash = vm.ethConfig.Genesis.ToBlock().Hash() // must create genesis hash before [vm.readLastAccepted] - lastAcceptedHash, lastAcceptedHeight, err := vm.readLastAccepted() + vm.genesisHash = vm.ethConfig.Genesis.ToBlock().Hash() // must create genesis hash before [vm.ReadLastAccepted] + lastAcceptedHash, lastAcceptedHeight, err := vm.ReadLastAccepted() if err != nil { return err } @@ -501,7 +420,6 @@ func (vm *VM) Initialize( vm.ethConfig.SnapshotDelayInit = vm.stateSyncEnabled(lastAcceptedHeight) vm.ethConfig.SnapshotWait = vm.config.SnapshotWait vm.ethConfig.SnapshotVerify = vm.config.SnapshotVerify - vm.ethConfig.HistoricalProofQueryWindow = vm.config.HistoricalProofQueryWindow vm.ethConfig.OfflinePruning = vm.config.OfflinePruning vm.ethConfig.OfflinePruningBloomFilterSize = vm.config.OfflinePruningBloomFilterSize vm.ethConfig.OfflinePruningDataDirectory = vm.config.OfflinePruningDataDirectory @@ -521,22 +439,11 @@ func (vm *VM) Initialize( vm.chainConfig = g.Config vm.networkID = vm.ethConfig.NetworkId - vm.secpCache = secp256k1.RecoverCache{ - LRU: cache.LRU[ids.ID, *secp256k1.PublicKey]{ - Size: secpCacheSize, - }, - } if err := vm.chainConfig.Verify(); err != nil { return fmt.Errorf("failed to 
verify chain config: %w", err) } - // TODO: read size from settings - vm.mempool, err = atomictxpool.NewMempool(chainCtx, vm.sdkMetrics, defaultMempoolSize, vm.verifyTxAtTip) - if err != nil { - return fmt.Errorf("failed to initialize mempool: %w", err) - } - // initialize peer network if vm.p2pSender == nil { vm.p2pSender = appSender @@ -584,51 +491,14 @@ func (vm *VM) Initialize( if err := vm.initializeChain(lastAcceptedHash); err != nil { return err } - // initialize bonus blocks on mainnet - var ( - bonusBlockHeights map[uint64]ids.ID - ) - if vm.ctx.NetworkID == avalanchegoConstants.MainnetID { - bonusBlockHeights, err = readMainnetBonusBlocks() - if err != nil { - return fmt.Errorf("failed to read mainnet bonus blocks: %w", err) - } - } - - // initialize atomic repository - vm.atomicTxRepository, err = atomicstate.NewAtomicTxRepository(vm.versiondb, atomic.Codec, lastAcceptedHeight) - if err != nil { - return fmt.Errorf("failed to create atomic repository: %w", err) - } - vm.atomicBackend, err = atomicstate.NewAtomicBackend( - vm.versiondb, vm.ctx.SharedMemory, bonusBlockHeights, - vm.atomicTxRepository, lastAcceptedHeight, lastAcceptedHash, - vm.config.CommitInterval, - ) - if err != nil { - return fmt.Errorf("failed to create atomic backend: %w", err) - } go vm.ctx.Log.RecoverAndPanic(vm.startContinuousProfiler) - // so [vm.baseCodec] is a dummy codec use to fulfill the secp256k1fx VM - // interface. The fx will register all of its types, which can be safely - // ignored by the VM's codec. 
- vm.baseCodec = linearcodec.NewDefault() - - if err := vm.fx.Initialize(vm); err != nil { - return err - } - // Add p2p warp message warpHandler warpHandler := acp118.NewCachedHandler(meteredCache, vm.warpBackend, vm.ctx.WarpSigner) vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler) - vm.setAppRequestHandlers() - - atomicProvider := atomicsync.NewAtomicProvider(vm.blockChain, vm.atomicBackend.AtomicTrie()) - vm.Server = vmsync.SyncServer(vm.blockChain, atomicProvider, vm.config.StateSyncCommitInterval) - return vm.initializeStateSyncClient(lastAcceptedHeight) + return vm.initializeStateSync(lastAcceptedHeight) } func (vm *VM) initializeMetrics() error { @@ -656,7 +526,6 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { if err != nil { return err } - callbacks := vm.createConsensusCallbacks() vm.eth, err = eth.New( node, &vm.ethConfig, @@ -664,7 +533,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { vm.chaindb, eth.Settings{MaxBlocksPerRequest: vm.config.MaxBlocksPerRequest}, lastAcceptedHash, - dummy.NewFakerWithClock(callbacks, &vm.clock), + dummy.NewFakerWithClock(vm.extensionConfig.ConsensusCallbacks, &vm.clock), &vm.clock, ) if err != nil { @@ -684,10 +553,53 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { return vm.initChainState(vm.blockChain.LastAcceptedBlock()) } -// initializeStateSyncClient initializes the client for performing state sync. +// initializeStateSync initializes the client for performing state sync. // If state sync is disabled, this function will wipe any ongoing summary from // disk to ensure that we do not continue syncing from an invalid snapshot. -func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { +func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { + // Create standalone EVM TrieDB (read only) for serving leafs requests. 
+ // We create a standalone TrieDB here, so that it has a standalone cache from the one + // used by the node when processing blocks. + evmTrieDB := triedb.NewDatabase( + vm.chaindb, + &triedb.Config{ + HashDB: &hashdb.Config{ + CleanCacheSize: vm.config.StateSyncServerTrieCache * units.MiB, + }, + }, + ) + var leafConfigs []*extension.LeafRequestConfig + syncStats := handlerstats.NewHandlerStats(metrics.Enabled) + // register default leaf request handler for state trie + leafConfigs = append(leafConfigs, &extension.LeafRequestConfig{ + LeafType: message.StateTrieNode, + MetricName: "sync_state_trie_leaves", + Handler: handlers.NewLeafsRequestHandler(evmTrieDB, + message.StateTrieKeyLength, + vm.blockChain, vm.extensionConfig.NetworkCodec, + syncStats, + ), + }) + leafConfigs = append(leafConfigs, vm.extensionConfig.SyncLeafType) + + leafHandlers := make(LeafHandlers, len(leafConfigs)) + for _, leafConfig := range leafConfigs { + if _, exists := leafHandlers[leafConfig.LeafType]; exists { + return fmt.Errorf("duplicate leaf type %s", leafConfig.LeafType) + } + leafHandlers[leafConfig.LeafType] = leafConfig.Handler + } + networkHandler := newNetworkHandler( + vm.blockChain, + vm.chaindb, + vm.warpBackend, + vm.extensionConfig.NetworkCodec, + leafHandlers, + syncStats, + ) + vm.Network.SetRequestHandler(networkHandler) + + vm.Server = vmsync.SyncServer(vm.blockChain, vm.extensionConfig.SyncSummaryProvider, vm.config.StateSyncCommitInterval) stateSyncEnabled := vm.stateSyncEnabled(lastAcceptedHeight) // parse nodeIDs from state sync IDs in vm config var stateSyncIDs []ids.NodeID @@ -703,16 +615,14 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } - // Get leaf metrics from config - leafMetricsNames := make(map[message.NodeType]string, len(vm.leafRequestTypeConfigs)) - for _, nodeType := range vm.leafRequestTypeConfigs { - leafMetricsNames[nodeType.NodeType] = nodeType.MetricName + // Initialize the state sync client + leafMetricsNames 
:= make(map[message.NodeType]string, len(leafConfigs)) + for _, leafConfig := range leafConfigs { + leafMetricsNames[leafConfig.LeafType] = leafConfig.MetricName } - vm.Client = vmsync.NewClient(&vmsync.ClientConfig{ - Chain: vm.eth, - State: vm.State, - ExtraSyncer: atomicsync.NewAtomicSyncExtender(vm.atomicBackend, vm.atomicBackend.AtomicTrie(), vm.config.StateSyncRequestSize), + Chain: vm.eth, + State: vm.State, Client: statesyncclient.NewClient( &statesyncclient.ClientConfig{ NetworkClient: vm.client, @@ -732,7 +642,8 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { MetadataDB: vm.metadataDB, ToEngine: vm.toEngine, Acceptor: vm, - SyncableParser: atomicsync.NewAtomicSyncSummaryParser(), + SyncableParser: vm.extensionConfig.SyncableParser, + SyncExtender: vm.extensionConfig.SyncExtender, }) // If StateSync is disabled, clear any ongoing summary so that we will not attempt to resume @@ -745,7 +656,8 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { - block, err := vm.newBlock(lastAcceptedBlock) + vm.blockManager = newBlockManager(vm, vm.extensionConfig.BlockExtension) + block, err := vm.blockManager.newBlock(lastAcceptedBlock) if err != nil { return fmt.Errorf("failed to create block wrapper for the last accepted block: %w", err) } @@ -777,232 +689,6 @@ func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { return vm.ctx.Metrics.Register(chainStateMetricsPrefix, chainStateRegisterer) } -func (vm *VM) createConsensusCallbacks() dummy.ConsensusCallbacks { - return dummy.ConsensusCallbacks{ - OnFinalizeAndAssemble: vm.onFinalizeAndAssemble, - OnExtraStateChange: vm.onExtraStateChange, - } -} - -func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { - for { - tx, exists := vm.mempool.NextTx() - if !exists { - break - } - // Take a 
snapshot of [state] before calling verifyTx so that if the transaction fails verification - // we can revert to [snapshot]. - // Note: snapshot is taken inside the loop because you cannot revert to the same snapshot more than - // once. - snapshot := state.Snapshot() - rules := vm.chainConfig.Rules(header.Number, header.Time) - if err := vm.verifyTx(tx, header.ParentHash, header.BaseFee, state, rules); err != nil { - // Discard the transaction from the mempool on failed verification. - log.Debug("discarding tx from mempool on failed verification", "txID", tx.ID(), "err", err) - vm.mempool.DiscardCurrentTx(tx.ID()) - state.RevertToSnapshot(snapshot) - continue - } - - atomicTxBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, tx) - if err != nil { - // Discard the transaction from the mempool and error if the transaction - // cannot be marshalled. This should never happen. - log.Debug("discarding tx due to unmarshal err", "txID", tx.ID(), "err", err) - vm.mempool.DiscardCurrentTx(tx.ID()) - return nil, nil, nil, fmt.Errorf("failed to marshal atomic transaction %s due to %w", tx.ID(), err) - } - var contribution, gasUsed *big.Int - if rules.IsApricotPhase4 { - contribution, gasUsed, err = tx.BlockFeeContribution(rules.IsApricotPhase5, vm.ctx.AVAXAssetID, header.BaseFee) - if err != nil { - return nil, nil, nil, err - } - } - return atomicTxBytes, contribution, gasUsed, nil - } - - if len(txs) == 0 { - // this could happen due to the async logic of geth tx pool - return nil, nil, nil, errEmptyBlock - } - - return nil, nil, nil, nil -} - -// assumes that we are in at least Apricot Phase 5. 
-func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { - var ( - batchAtomicTxs []*atomic.Tx - batchAtomicUTXOs set.Set[ids.ID] - batchContribution *big.Int = new(big.Int).Set(common.Big0) - batchGasUsed *big.Int = new(big.Int).Set(common.Big0) - rules = vm.chainConfig.Rules(header.Number, header.Time) - size int - ) - - for { - tx, exists := vm.mempool.NextTx() - if !exists { - break - } - - // Ensure that adding [tx] to the block will not exceed the block size soft limit. - txSize := len(tx.SignedBytes()) - if size+txSize > targetAtomicTxsSize { - vm.mempool.CancelCurrentTx(tx.ID()) - break - } - - var ( - txGasUsed, txContribution *big.Int - err error - ) - - // Note: we do not need to check if we are in at least ApricotPhase4 here because - // we assume that this function will only be called when the block is in at least - // ApricotPhase5. - txContribution, txGasUsed, err = tx.BlockFeeContribution(true, vm.ctx.AVAXAssetID, header.BaseFee) - if err != nil { - return nil, nil, nil, err - } - // ensure [gasUsed] + [batchGasUsed] doesnt exceed the [atomicGasLimit] - if totalGasUsed := new(big.Int).Add(batchGasUsed, txGasUsed); totalGasUsed.Cmp(params.AtomicGasLimit) > 0 { - // Send [tx] back to the mempool's tx heap. - vm.mempool.CancelCurrentTx(tx.ID()) - break - } - - if batchAtomicUTXOs.Overlaps(tx.InputUTXOs()) { - // Discard the transaction from the mempool since it will fail verification - // after this block has been accepted. - // Note: if the proposed block is not accepted, the transaction may still be - // valid, but we discard it early here based on the assumption that the proposed - // block will most likely be accepted. - // Discard the transaction from the mempool on failed verification. 
- log.Debug("discarding tx due to overlapping input utxos", "txID", tx.ID()) - vm.mempool.DiscardCurrentTx(tx.ID()) - continue - } - - snapshot := state.Snapshot() - if err := vm.verifyTx(tx, header.ParentHash, header.BaseFee, state, rules); err != nil { - // Discard the transaction from the mempool and reset the state to [snapshot] - // if it fails verification here. - // Note: prior to this point, we have not modified [state] so there is no need to - // revert to a snapshot if we discard the transaction prior to this point. - log.Debug("discarding tx from mempool due to failed verification", "txID", tx.ID(), "err", err) - vm.mempool.DiscardCurrentTx(tx.ID()) - state.RevertToSnapshot(snapshot) - continue - } - - batchAtomicTxs = append(batchAtomicTxs, tx) - batchAtomicUTXOs.Union(tx.InputUTXOs()) - // Add the [txGasUsed] to the [batchGasUsed] when the [tx] has passed verification - batchGasUsed.Add(batchGasUsed, txGasUsed) - batchContribution.Add(batchContribution, txContribution) - size += txSize - } - - // If there is a non-zero number of transactions, marshal them and return the byte slice - // for the block's extra data along with the contribution and gas used. - if len(batchAtomicTxs) > 0 { - atomicTxBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, batchAtomicTxs) - if err != nil { - // If we fail to marshal the batch of atomic transactions for any reason, - // discard the entire set of current transactions. - log.Debug("discarding txs due to error marshaling atomic transactions", "err", err) - vm.mempool.DiscardCurrentTxs() - return nil, nil, nil, fmt.Errorf("failed to marshal batch of atomic transactions due to %w", err) - } - return atomicTxBytes, batchContribution, batchGasUsed, nil - } - - // If there are no regular transactions and there were also no atomic transactions to be included, - // then the block is empty and should be considered invalid. 
- if len(txs) == 0 { - // this could happen due to the async logic of geth tx pool - return nil, nil, nil, errEmptyBlock - } - - // If there are no atomic transactions, but there is a non-zero number of regular transactions, then - // we return a nil slice with no contribution from the atomic transactions and a nil error. - return nil, nil, nil, nil -} - -func (vm *VM) onFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { - if !vm.chainConfig.IsApricotPhase5(header.Time) { - return vm.preBatchOnFinalizeAndAssemble(header, state, txs) - } - return vm.postBatchOnFinalizeAndAssemble(header, state, txs) -} - -func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big.Int, *big.Int, error) { - var ( - batchContribution *big.Int = big.NewInt(0) - batchGasUsed *big.Int = big.NewInt(0) - header = block.Header() - rules = vm.chainConfig.Rules(header.Number, header.Time) - ) - - txs, err := atomic.ExtractAtomicTxs(block.ExtData(), rules.IsApricotPhase5, atomic.Codec) - if err != nil { - return nil, nil, err - } - - // If [atomicBackend] is nil, the VM is still initializing and is reprocessing accepted blocks. - if vm.atomicBackend != nil { - if vm.atomicBackend.IsBonus(block.NumberU64(), block.Hash()) { - log.Info("skipping atomic tx verification on bonus block", "block", block.Hash()) - } else { - // Verify [txs] do not conflict with themselves or ancestor blocks. - if err := vm.verifyTxs(txs, block.ParentHash(), block.BaseFee(), block.NumberU64(), rules); err != nil { - return nil, nil, err - } - } - // Update the atomic backend with [txs] from this block. - // - // Note: The atomic trie canonically contains the duplicate operations - // from any bonus blocks. - _, err := vm.atomicBackend.InsertTxs(block.Hash(), block.NumberU64(), block.ParentHash(), txs) - if err != nil { - return nil, nil, err - } - } - - // If there are no transactions, we can return early. 
- if len(txs) == 0 { - return nil, nil, nil - } - - for _, tx := range txs { - if err := tx.UnsignedAtomicTx.EVMStateTransfer(vm.ctx, state); err != nil { - return nil, nil, err - } - // If ApricotPhase4 is enabled, calculate the block fee contribution - if rules.IsApricotPhase4 { - contribution, gasUsed, err := tx.BlockFeeContribution(rules.IsApricotPhase5, vm.ctx.AVAXAssetID, block.BaseFee()) - if err != nil { - return nil, nil, err - } - - batchContribution.Add(batchContribution, contribution) - batchGasUsed.Add(batchGasUsed, gasUsed) - } - - // If ApricotPhase5 is enabled, enforce that the atomic gas used does not exceed the - // atomic gas limit. - if rules.IsApricotPhase5 { - // Ensure that [tx] does not push [block] above the atomic gas limit. - if batchGasUsed.Cmp(params.AtomicGasLimit) == 1 { - return nil, nil, fmt.Errorf("atomic gas used (%d) by block (%s), exceeds atomic gas limit (%d)", batchGasUsed, block.Hash().Hex(), params.AtomicGasLimit) - } - } - } - return batchContribution, batchGasUsed, nil -} - func (vm *VM) SetState(_ context.Context, state snow.State) error { switch state { case snow.StateSyncing: @@ -1030,8 +716,7 @@ func (vm *VM) onBootstrapStarted() error { // Ensure snapshots are initialized before bootstrapping (i.e., if state sync is skipped). // Note calling this function has no effect if snapshots are already initialized. vm.blockChain.InitializeSnapshots() - - return vm.fx.Bootstrapping() + return nil } // onNormalOperationsStarted marks this VM as bootstrapped @@ -1040,9 +725,6 @@ func (vm *VM) onNormalOperationsStarted() error { return nil } vm.bootstrapped.Set(true) - if err := vm.fx.Bootstrapped(); err != nil { - return err - } // Initialize goroutines related to block building // once we enter normal operation as there is no need to handle mempool gossip before this point. 
return vm.initBlockBuilding() @@ -1055,7 +737,7 @@ func (vm *VM) initBlockBuilding() error { ethTxGossipMarshaller := GossipEthTxMarshaller{} ethTxGossipClient := vm.Network.NewClient(p2p.TxGossipHandlerID, p2p.WithValidatorSampling(vm.p2pValidators)) - ethTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, ethTxGossipNamespace) + ethTxGossipMetrics, err := avalanchegossip.NewMetrics(vm.sdkMetrics, ethTxGossipNamespace) if err != nil { return fmt.Errorf("failed to initialize eth tx gossip metrics: %w", err) } @@ -1068,27 +750,19 @@ func (vm *VM) initBlockBuilding() error { ethTxPool.Subscribe(ctx) vm.shutdownWg.Done() }() - - atomicTxGossipMarshaller := atomic.GossipAtomicTxMarshaller{} - atomicTxGossipClient := vm.Network.NewClient(p2p.AtomicTxGossipHandlerID, p2p.WithValidatorSampling(vm.p2pValidators)) - atomicTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, atomicTxGossipNamespace) - if err != nil { - return fmt.Errorf("failed to initialize atomic tx gossip metrics: %w", err) - } - - pushGossipParams := gossip.BranchingFactor{ + pushGossipParams := avalanchegossip.BranchingFactor{ StakePercentage: vm.config.PushGossipPercentStake, Validators: vm.config.PushGossipNumValidators, Peers: vm.config.PushGossipNumPeers, } - pushRegossipParams := gossip.BranchingFactor{ + pushRegossipParams := avalanchegossip.BranchingFactor{ Validators: vm.config.PushRegossipNumValidators, Peers: vm.config.PushRegossipNumPeers, } ethTxPushGossiper := vm.ethTxPushGossiper.Get() if ethTxPushGossiper == nil { - ethTxPushGossiper, err = gossip.NewPushGossiper[*GossipEthTx]( + ethTxPushGossiper, err = avalanchegossip.NewPushGossiper[*GossipEthTx]( ethTxGossipMarshaller, ethTxPool, vm.p2pValidators, @@ -1096,8 +770,8 @@ func (vm *VM) initBlockBuilding() error { ethTxGossipMetrics, pushGossipParams, pushRegossipParams, - pushGossipDiscardedElements, - txGossipTargetMessageSize, + config.PushGossipDiscardedElements, + config.TxGossipTargetMessageSize, 
vm.config.RegossipFrequency.Duration, ) if err != nil { @@ -1106,37 +780,19 @@ func (vm *VM) initBlockBuilding() error { vm.ethTxPushGossiper.Set(ethTxPushGossiper) } - if vm.atomicTxPushGossiper == nil { - vm.atomicTxPushGossiper, err = gossip.NewPushGossiper[*atomic.GossipAtomicTx]( - atomicTxGossipMarshaller, - vm.mempool, - vm.p2pValidators, - atomicTxGossipClient, - atomicTxGossipMetrics, - pushGossipParams, - pushRegossipParams, - pushGossipDiscardedElements, - txGossipTargetMessageSize, - vm.config.RegossipFrequency.Duration, - ) - if err != nil { - return fmt.Errorf("failed to initialize atomic tx push gossiper: %w", err) - } - } - // NOTE: gossip network must be initialized first otherwise ETH tx gossip will not work. - vm.builder = vm.NewBlockBuilder(vm.toEngine) + vm.builder = vm.NewBlockBuilder(vm.toEngine, vm.extensionConfig.ExtraMempool) vm.builder.awaitSubmittedTxs() if vm.ethTxGossipHandler == nil { - vm.ethTxGossipHandler = newTxGossipHandler[*GossipEthTx]( + vm.ethTxGossipHandler = gossip.NewTxGossipHandler[*GossipEthTx]( vm.ctx.Log, ethTxGossipMarshaller, ethTxPool, ethTxGossipMetrics, - txGossipTargetMessageSize, - txGossipThrottlingPeriod, - txGossipThrottlingLimit, + config.TxGossipTargetMessageSize, + config.TxGossipThrottlingPeriod, + config.TxGossipThrottlingLimit, vm.p2pValidators, ) } @@ -1145,34 +801,17 @@ func (vm *VM) initBlockBuilding() error { return fmt.Errorf("failed to add eth tx gossip handler: %w", err) } - if vm.atomicTxGossipHandler == nil { - vm.atomicTxGossipHandler = newTxGossipHandler[*atomic.GossipAtomicTx]( - vm.ctx.Log, - atomicTxGossipMarshaller, - vm.mempool, - atomicTxGossipMetrics, - txGossipTargetMessageSize, - txGossipThrottlingPeriod, - txGossipThrottlingLimit, - vm.p2pValidators, - ) - } - - if err := vm.Network.AddHandler(p2p.AtomicTxGossipHandlerID, vm.atomicTxGossipHandler); err != nil { - return fmt.Errorf("failed to add atomic tx gossip handler: %w", err) - } - if vm.ethTxPullGossiper == nil { - 
ethTxPullGossiper := gossip.NewPullGossiper[*GossipEthTx]( + ethTxPullGossiper := avalanchegossip.NewPullGossiper[*GossipEthTx]( vm.ctx.Log, ethTxGossipMarshaller, ethTxPool, ethTxGossipClient, ethTxGossipMetrics, - txGossipPollSize, + config.TxGossipPollSize, ) - vm.ethTxPullGossiper = gossip.ValidatorGossiper{ + vm.ethTxPullGossiper = avalanchegossip.ValidatorGossiper{ Gossiper: ethTxPullGossiper, NodeID: vm.ctx.NodeID, Validators: vm.p2pValidators, @@ -1181,38 +820,11 @@ func (vm *VM) initBlockBuilding() error { vm.shutdownWg.Add(2) go func() { - gossip.Every(ctx, vm.ctx.Log, ethTxPushGossiper, vm.config.PushGossipFrequency.Duration) + avalanchegossip.Every(ctx, vm.ctx.Log, ethTxPushGossiper, vm.config.PushGossipFrequency.Duration) vm.shutdownWg.Done() }() go func() { - gossip.Every(ctx, vm.ctx.Log, vm.ethTxPullGossiper, vm.config.PullGossipFrequency.Duration) - vm.shutdownWg.Done() - }() - - if vm.atomicTxPullGossiper == nil { - atomicTxPullGossiper := gossip.NewPullGossiper[*atomic.GossipAtomicTx]( - vm.ctx.Log, - atomicTxGossipMarshaller, - vm.mempool, - atomicTxGossipClient, - atomicTxGossipMetrics, - txGossipPollSize, - ) - - vm.atomicTxPullGossiper = &gossip.ValidatorGossiper{ - Gossiper: atomicTxPullGossiper, - NodeID: vm.ctx.NodeID, - Validators: vm.p2pValidators, - } - } - - vm.shutdownWg.Add(2) - go func() { - gossip.Every(ctx, vm.ctx.Log, vm.atomicTxPushGossiper, vm.config.PushGossipFrequency.Duration) - vm.shutdownWg.Done() - }() - go func() { - gossip.Every(ctx, vm.ctx.Log, vm.atomicTxPullGossiper, vm.config.PullGossipFrequency.Duration) + avalanchegossip.Every(ctx, vm.ctx.Log, vm.ethTxPullGossiper, vm.config.PullGossipFrequency.Duration) vm.shutdownWg.Done() }() @@ -1221,52 +833,7 @@ func (vm *VM) initBlockBuilding() error { // setAppRequestHandlers sets the request handlers for the VM to serve state sync // requests. -func (vm *VM) setAppRequestHandlers() error { - // Create standalone EVM TrieDB (read only) for serving leafs requests. 
- // We create a standalone TrieDB here, so that it has a standalone cache from the one - // used by the node when processing blocks. - evmTrieDB := triedb.NewDatabase( - vm.chaindb, - &triedb.Config{ - HashDB: &hashdb.Config{ - CleanCacheSize: vm.config.StateSyncServerTrieCache * units.MiB, - }, - }, - ) - if err := vm.RegisterLeafRequestHandler(message.StateTrieNode, "sync_state_trie_leaves", evmTrieDB, message.StateTrieKeyLength, true); err != nil { - return fmt.Errorf("failed to register leaf request handler for state trie: %w", err) - } - // Register atomic trieDB for serving atomic leafs requests. - if err := vm.RegisterLeafRequestHandler(atomicsync.AtomicTrieNode, "sync_atomic_trie_leaves", vm.atomicBackend.AtomicTrie().TrieDB(), atomicstate.AtomicTrieKeyLength, false); err != nil { - return fmt.Errorf("failed to register leaf request handler for atomic trie: %w", err) - } - - networkHandler := newNetworkHandler( - vm.blockChain, - vm.chaindb, - vm.warpBackend, - vm.extensionConfig.NetworkCodec, - vm.leafRequestTypeConfigs, - ) - vm.Network.SetRequestHandler(networkHandler) - return nil -} - -func (vm *VM) RegisterLeafRequestHandler(nodeType message.NodeType, metricName string, trieDB *triedb.Database, trieKeyLen int, useSnapshot bool) error { - if vm.leafRequestTypeConfigs == nil { - vm.leafRequestTypeConfigs = make(map[message.NodeType]LeafRequestTypeConfig) - } - if _, ok := vm.leafRequestTypeConfigs[nodeType]; ok { - return fmt.Errorf("leaf request handler for node type %d already registered", nodeType) - } - handlerConfig := LeafRequestTypeConfig{ - NodeType: nodeType, - TrieDB: trieDB, - UseSnapshots: useSnapshot, - NodeKeyLen: trieKeyLen, - MetricName: metricName, - } - vm.leafRequestTypeConfigs[nodeType] = handlerConfig +func (vm *VM) setAppRequestHandlers(leafConfigs []*extension.LeafRequestConfig, syncStats handlerstats.HandlerStats) error { return nil } @@ -1311,19 +878,17 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, 
proposerVMBlockCtx *blo block, err := vm.miner.GenerateBlock(predicateCtx) vm.builder.handleGenerateBlock() if err != nil { - vm.mempool.CancelCurrentTxs() - return nil, err + return nil, fmt.Errorf("%w: %w", vmerrs.ErrGenerateBlockFailed, err) } // Note: the status of block is set by ChainState - blk, err := vm.newBlock(block) + blk, err := vm.blockManager.newBlock(block) if err != nil { log.Debug("discarding txs due to error making new block", "err", err) - vm.mempool.DiscardCurrentTxs() - return nil, err + return nil, fmt.Errorf("%w: %w", vmerrs.ErrMakeNewBlockFailed, err) } - // Verify is called on a non-wrapped block here, such that this + // Verify is called on a non-wr apped block here, such that this // does not add [blk] to the processing blocks map in ChainState. // // TODO cache verification since Verify() will be called by the @@ -1336,14 +901,10 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo // to the blk state root in the triedb when we are going to call verify // again from the consensus engine with writes enabled. if err := blk.verify(predicateCtx, false /*=writes*/); err != nil { - vm.mempool.CancelCurrentTxs() - return nil, fmt.Errorf("block failed verification due to: %w", err) + return nil, fmt.Errorf("%w: %w", vmerrs.ErrBlockVerificationFailed, err) } log.Debug(fmt.Sprintf("Built block %s", blk.ID())) - // Marks the current transactions from the mempool as being successfully issued - // into a block. 
- vm.mempool.IssueCurrentTxs() return blk, nil } @@ -1355,7 +916,7 @@ func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { } // Note: the status of block is set by ChainState - block, err := vm.newBlock(ethBlock) + block, err := vm.blockManager.newBlock(ethBlock) if err != nil { return nil, err } @@ -1386,7 +947,7 @@ func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { return nil, avalanchedatabase.ErrNotFound } // Note: the status of block is set by ChainState - return vm.newBlock(ethBlock) + return vm.blockManager.newBlock(ethBlock) } // GetAcceptedBlock attempts to retrieve block [blkID] from the VM. This method @@ -1450,17 +1011,6 @@ func (vm *VM) Version(context.Context) (string, error) { return Version, nil } -// NewHandler returns a new Handler for a service where: -// - The handler's functionality is defined by [service] -// [service] should be a gorilla RPC service (see https://www.gorillatoolkit.org/pkg/rpc/v2) -// - The name of the service is [name] -func newHandler(name string, service interface{}) (http.Handler, error) { - server := avalancheRPC.NewServer() - server.RegisterCodec(avalancheJSON.NewCodec(), "application/json") - server.RegisterCodec(avalancheJSON.NewCodec(), "application/json;charset=UTF-8") - return server, server.RegisterService(service, name) -} - // CreateHandlers makes new http handlers that can handle API calls func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { handler := rpc.NewServer(vm.config.APIMaxDuration.Duration) @@ -1474,15 +1024,9 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { } apis := make(map[string]http.Handler) - avaxAPI, err := newHandler("avax", &AvaxAPI{vm}) - if err != nil { - return nil, fmt.Errorf("failed to register service for AVAX API due to %w", err) - } - enabledAPIs = append(enabledAPIs, "avax") - apis[avaxEndpoint] = avaxAPI if vm.config.AdminAPIEnabled { - adminAPI, err := 
newHandler("admin", newAdminService(vm, os.ExpandEnv(fmt.Sprintf("%s_coreth_performance_%s", vm.config.AdminAPIDir, vm.chainAlias)))) + adminAPI, err := utils.NewHandler("admin", newAdminService(vm, os.ExpandEnv(fmt.Sprintf("%s_coreth_performance_%s", vm.config.AdminAPIDir, vm.chainAlias)))) if err != nil { return nil, fmt.Errorf("failed to register service for admin API due to %w", err) } @@ -1536,192 +1080,9 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er } /* - ****************************************************************************** *********************************** Helpers ********************************** - ****************************************************************************** */ -// getAtomicTx returns the requested transaction, status, and height. -// If the status is Unknown, then the returned transaction will be nil. -func (vm *VM) getAtomicTx(txID ids.ID) (*atomic.Tx, atomic.Status, uint64, error) { - if tx, height, err := vm.atomicTxRepository.GetByTxID(txID); err == nil { - return tx, atomic.Accepted, height, nil - } else if err != avalanchedatabase.ErrNotFound { - return nil, atomic.Unknown, 0, err - } - tx, dropped, found := vm.mempool.GetTx(txID) - switch { - case found && dropped: - return tx, atomic.Dropped, 0, nil - case found: - return tx, atomic.Processing, 0, nil - default: - return nil, atomic.Unknown, 0, nil - } -} - -// ParseAddress takes in an address and produces the ID of the chain it's for -// the ID of the address -func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { - chainIDAlias, hrp, addrBytes, err := address.Parse(addrStr) - if err != nil { - return ids.ID{}, ids.ShortID{}, err - } - - chainID, err := vm.ctx.BCLookup.Lookup(chainIDAlias) - if err != nil { - return ids.ID{}, ids.ShortID{}, err - } - - expectedHRP := avalanchegoConstants.GetHRP(vm.ctx.NetworkID) - if hrp != expectedHRP { - return ids.ID{}, ids.ShortID{}, fmt.Errorf("expected hrp %q but got %q", 
- expectedHRP, hrp) - } - - addr, err := ids.ToShortID(addrBytes) - if err != nil { - return ids.ID{}, ids.ShortID{}, err - } - return chainID, addr, nil -} - -// verifyTxAtTip verifies that [tx] is valid to be issued on top of the currently preferred block -func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { - if txByteLen := len(tx.SignedBytes()); txByteLen > targetAtomicTxsSize { - return fmt.Errorf("tx size (%d) exceeds total atomic txs size target (%d)", txByteLen, targetAtomicTxsSize) - } - gasUsed, err := tx.GasUsed(true) - if err != nil { - return err - } - if new(big.Int).SetUint64(gasUsed).Cmp(params.AtomicGasLimit) > 0 { - return fmt.Errorf("tx gas usage (%d) exceeds atomic gas limit (%d)", gasUsed, params.AtomicGasLimit.Uint64()) - } - - // Note: we fetch the current block and then the state at that block instead of the current state directly - // since we need the header of the current block below. - preferredBlock := vm.blockChain.CurrentBlock() - preferredState, err := vm.blockChain.StateAt(preferredBlock.Root) - if err != nil { - return fmt.Errorf("failed to retrieve block state at tip while verifying atomic tx: %w", err) - } - rules := vm.currentRules() - parentHeader := preferredBlock - var nextBaseFee *big.Int - timestamp := uint64(vm.clock.Time().Unix()) - if vm.chainConfig.IsApricotPhase3(timestamp) { - _, nextBaseFee, err = dummy.EstimateNextBaseFee(vm.chainConfig, parentHeader, timestamp) - if err != nil { - // Return extremely detailed error since CalcBaseFee should never encounter an issue here - return fmt.Errorf("failed to calculate base fee with parent timestamp (%d), parent ExtraData: (0x%x), and current timestamp (%d): %w", parentHeader.Time, parentHeader.Extra, timestamp, err) - } - } - - // We don’t need to revert the state here in case verifyTx errors, because - // [preferredState] is thrown away either way. 
- return vm.verifyTx(tx, parentHeader.Hash(), nextBaseFee, preferredState, rules) -} - -// verifyTx verifies that [tx] is valid to be issued into a block with parent block [parentHash] -// and validated at [state] using [rules] as the current rule set. -// Note: verifyTx may modify [state]. If [state] needs to be properly maintained, the caller is responsible -// for reverting to the correct snapshot after calling this function. If this function is called with a -// throwaway state, then this is not necessary. -func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { - parentIntf, err := vm.GetBlockInternal(context.TODO(), ids.ID(parentHash)) - if err != nil { - return fmt.Errorf("failed to get parent block: %w", err) - } - parent, ok := parentIntf.(*Block) - if !ok { - return fmt.Errorf("parent block %s had unexpected type %T", parentIntf.ID(), parentIntf) - } - atomicBackend := &atomic.VerifierBackend{ - Ctx: vm.ctx, - Fx: &vm.fx, - Rules: rules, - Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm, - SecpCache: &vm.secpCache, - } - if err := tx.UnsignedAtomicTx.SemanticVerify(atomicBackend, tx, parent, baseFee); err != nil { - return err - } - return tx.UnsignedAtomicTx.EVMStateTransfer(vm.ctx, state) -} - -// verifyTxs verifies that [txs] are valid to be issued into a block with parent block [parentHash] -// using [rules] as the current rule set. -func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules params.Rules) error { - // Ensure that the parent was verified and inserted correctly. - if !vm.blockChain.HasBlock(parentHash, height-1) { - return errRejectedParent - } - - ancestorID := ids.ID(parentHash) - // If the ancestor is unknown, then the parent failed verification when - // it was called. - // If the ancestor is rejected, then this block shouldn't be inserted - // into the canonical chain because the parent will be missing. 
- ancestorInf, err := vm.GetBlockInternal(context.TODO(), ancestorID) - if err != nil { - return errRejectedParent - } - ancestor, ok := ancestorInf.(*Block) - if !ok { - return fmt.Errorf("expected parent block %s, to be *Block but is %T", ancestor.ID(), ancestorInf) - } - - // Ensure each tx in [txs] doesn't conflict with any other atomic tx in - // a processing ancestor block. - inputs := set.Set[ids.ID]{} - atomicBackend := &atomic.VerifierBackend{ - Ctx: vm.ctx, - Fx: &vm.fx, - Rules: rules, - Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm, - SecpCache: &vm.secpCache, - } - for _, atomicTx := range txs { - utx := atomicTx.UnsignedAtomicTx - if err := utx.SemanticVerify(atomicBackend, atomicTx, ancestor, baseFee); err != nil { - return fmt.Errorf("invalid block due to failed semanatic verify: %w at height %d", err, height) - } - txInputs := utx.InputUTXOs() - if inputs.Overlaps(txInputs) { - return atomic.ErrConflictingAtomicInputs - } - inputs.Union(txInputs) - } - return nil -} - -// GetAtomicUTXOs returns the utxos that at least one of the provided addresses is -// referenced in. -func (vm *VM) GetAtomicUTXOs( - chainID ids.ID, - addrs set.Set[ids.ShortID], - startAddr ids.ShortID, - startUTXOID ids.ID, - limit int, -) ([]*avax.UTXO, ids.ShortID, ids.ID, error) { - if limit <= 0 || limit > maxUTXOsToFetch { - limit = maxUTXOsToFetch - } - - return avax.GetAtomicUTXOs( - vm.ctx.SharedMemory, - atomic.Codec, - chainID, - addrs, - startAddr, - startUTXOID, - limit, - ) -} - // currentRules returns the chain rules for the current block. 
func (vm *VM) currentRules() params.Rules { header := vm.eth.APIBackend.CurrentHeader() @@ -1766,11 +1127,11 @@ func (vm *VM) startContinuousProfiler() { <-vm.shutdownChan } -// readLastAccepted reads the last accepted hash from [acceptedBlockDB] and returns the +// ReadLastAccepted reads the last accepted hash from [acceptedBlockDB] and returns the // last accepted block hash and height by reading directly from [vm.chaindb] instead of relying // on [chain]. // Note: assumes [vm.chaindb] and [vm.genesisHash] have been initialized. -func (vm *VM) readLastAccepted() (common.Hash, uint64, error) { +func (vm *VM) ReadLastAccepted() (common.Hash, uint64, error) { // Attempt to load last accepted block to determine if it is necessary to // initialize state with the genesis block. lastAcceptedBytes, lastAcceptedErr := vm.acceptedBlockDB.Get(lastAcceptedKey) @@ -1839,58 +1200,39 @@ func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool { return lastAcceptedHeight == 0 } -func (vm *VM) newImportTx( - chainID ids.ID, // chain to import from - to common.Address, // Address of recipient - baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Keys to import the funds -) (*atomic.Tx, error) { - kc := secp256k1fx.NewKeychain() - for _, key := range keys { - kc.Add(key) - } +func (vm *VM) PutLastAcceptedID(ID ids.ID) error { + return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:]) +} - atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1) - if err != nil { - return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) - } +/* + *********************************** ExtensibleVM functions ********************************** + // All these methods assumes that VM is already initialized +*/ - return atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) +func (vm *VM) Blockchain() *core.BlockChain { + return vm.blockChain } -// newExportTx returns a new 
ExportTx -func (vm *VM) newExportTx( - assetID ids.ID, // AssetID of the tokens to export - amount uint64, // Amount of tokens to export - chainID ids.ID, // Chain to send the UTXOs to - to ids.ShortID, // Address of chain recipient - baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens -) (*atomic.Tx, error) { - state, err := vm.blockChain.State() - if err != nil { - return nil, err - } +func (vm *VM) Config() *config.Config { + return &vm.config +} - // Create the transaction - tx, err := atomic.NewExportTx( - vm.ctx, // Context - vm.currentRules(), // VM rules - state, - assetID, // AssetID - amount, // Amount - chainID, // ID of the chain to send the funds to - to, // Address - baseFee, - keys, // Private keys - ) +func (vm *VM) GetBlockExtended(ctx context.Context, blkID ids.ID) (extension.ExtendedBlock, error) { + blk, err := vm.GetBlock(ctx, blkID) if err != nil { return nil, err } + return blk.(*Block), nil +} - return tx, nil +func (vm *VM) MetricRegistry() *prometheus.Registry { + return vm.sdkMetrics } -func (vm *VM) PutLastAcceptedID(ID ids.ID) error { - return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:]) +func (vm *VM) Validators() *p2p.Validators { + return vm.p2pValidators +} + +func (vm *VM) VersionDB() *versiondb.Database { + return vm.versiondb } diff --git a/sync/client/mock_client.go b/sync/client/mock_client.go index 038bdf73bf..aaa1af1a2b 100644 --- a/sync/client/mock_client.go +++ b/sync/client/mock_client.go @@ -25,7 +25,7 @@ var ( // TODO replace with gomock library type MockClient struct { codec codec.Manager - leafsHandler *handlers.LeafsRequestHandler + leafsHandler handlers.LeafRequestHandler leavesReceived int32 codesHandler *handlers.CodeRequestHandler codeReceived int32 @@ -44,7 +44,7 @@ type MockClient struct { func NewMockClient( codec codec.Manager, - leafHandler *handlers.LeafsRequestHandler, + leafHandler handlers.LeafRequestHandler, codesHandler 
*handlers.CodeRequestHandler, blocksHandler *handlers.BlockRequestHandler, ) *MockClient { diff --git a/sync/client/stats/stats.go b/sync/client/stats/stats.go index f8c7d2e3e9..d50a6f6aa2 100644 --- a/sync/client/stats/stats.go +++ b/sync/client/stats/stats.go @@ -81,7 +81,7 @@ type clientSyncerStats struct { } // NewClientSyncerStats returns stats for the client syncer -func NewClientSyncerStats(leafMetricNames map[message.NodeType]string) ClientSyncerStats { +func NewClientSyncerStats(leafMetricNames map[message.NodeType]string) *clientSyncerStats { leafMetrics := make(map[message.NodeType]MessageMetric, len(leafMetricNames)) for nodeType, name := range leafMetricNames { leafMetrics[nodeType] = NewMessageMetric(name) diff --git a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go index a3d6453416..3d9f0d8363 100644 --- a/sync/handlers/leafs_request.go +++ b/sync/handlers/leafs_request.go @@ -25,6 +25,8 @@ import ( "github.com/ethereum/go-ethereum/log" ) +var _ LeafRequestHandler = (*leafsRequestHandler)(nil) + const ( // Maximum number of leaves to return in a message.LeafsResponse // This parameter overrides any other Limit specified @@ -38,9 +40,13 @@ const ( segmentLen = 64 // divide data from snapshot to segments of this size ) -// LeafsRequestHandler is a peer.RequestHandler for types.LeafsRequest +type LeafRequestHandler interface { + OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) +} + +// leafsRequestHandler is a peer.RequestHandler for types.LeafsRequest // serving requested trie data -type LeafsRequestHandler struct { +type leafsRequestHandler struct { trieDB *triedb.Database snapshotProvider SnapshotProvider codec codec.Manager @@ -49,8 +55,8 @@ type LeafsRequestHandler struct { trieKeyLength int } -func NewLeafsRequestHandler(trieDB *triedb.Database, trieKeyLength int, snapshotProvider SnapshotProvider, codec codec.Manager, syncerStats 
stats.LeafsRequestHandlerStats) *LeafsRequestHandler { - return &LeafsRequestHandler{ +func NewLeafsRequestHandler(trieDB *triedb.Database, trieKeyLength int, snapshotProvider SnapshotProvider, codec codec.Manager, syncerStats stats.LeafsRequestHandlerStats) *leafsRequestHandler { + return &leafsRequestHandler{ trieDB: trieDB, snapshotProvider: snapshotProvider, codec: codec, @@ -72,7 +78,7 @@ func NewLeafsRequestHandler(trieDB *triedb.Database, trieKeyLength int, snapshot // Never returns errors // Returns nothing if NodeType is invalid or requested trie root is not found // Assumes ctx is active -func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { +func (lrh *leafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, leafsRequest message.LeafsRequest) ([]byte, error) { startTime := time.Now() lrh.stats.IncLeafsRequest() diff --git a/sync/handlers/stats/stats.go b/sync/handlers/stats/stats.go index 9dd04c4ea0..cff8185b42 100644 --- a/sync/handlers/stats/stats.go +++ b/sync/handlers/stats/stats.go @@ -166,6 +166,8 @@ func (h *handlerStats) IncSnapshotReadSuccess() { h.snapshotReadSuccess.Inc(1 func (h *handlerStats) IncSnapshotSegmentValid() { h.snapshotSegmentValid.Inc(1) } func (h *handlerStats) IncSnapshotSegmentInvalid() { h.snapshotSegmentInvalid.Inc(1) } +// NewHandlerStats returns a new HandlerStats instance to track state sync handler metrics. +// Calling this multiple times will return same registered metrics. 
func NewHandlerStats(enabled bool) HandlerStats { if !enabled { return NewNoopHandlerStats() diff --git a/utils/handler.go b/utils/handler.go new file mode 100644 index 0000000000..8a8832c428 --- /dev/null +++ b/utils/handler.go @@ -0,0 +1,19 @@ +package utils + +import ( + "net/http" + + "github.com/ava-labs/avalanchego/utils/json" + gorillarpc "github.com/gorilla/rpc/v2" +) + +// NewHandler returns a new Handler for a service where: +// - The handler's functionality is defined by [service] +// [service] should be a gorilla RPC service (see https://www.gorillatoolkit.org/pkg/rpc/v2) +// - The name of the service is [name] +func NewHandler(name string, service interface{}) (http.Handler, error) { + server := gorillarpc.NewServer() + server.RegisterCodec(json.NewCodec(), "application/json") + server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") + return server, server.RegisterService(service, name) +} diff --git a/vmerrs/vmerrs.go b/vmerrs/vmerrs.go index 4a7afcfd1f..1518b2fb7d 100644 --- a/vmerrs/vmerrs.go +++ b/vmerrs/vmerrs.go @@ -47,4 +47,7 @@ var ( ErrInvalidCode = errors.New("invalid code: must not begin with 0xef") ErrNonceUintOverflow = errors.New("nonce uint64 overflow") ErrAddrProhibited = errors.New("prohibited address cannot be sender or created contract address") + ErrGenerateBlockFailed = errors.New("failed to generate block") + ErrBlockVerificationFailed = errors.New("failed to verify block") + ErrMakeNewBlockFailed = errors.New("failed to make new block") ) From 52d78cd8728f4c65d754a5b9498932c7ee5e941d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 4 Feb 2025 16:06:28 +0300 Subject: [PATCH 62/91] use visitors --- consensus/dummy/consensus.go | 8 +- core/blockchain_test.go | 13 +- eth/gasprice/gasprice_test.go | 4 +- plugin/evm/atomic/atomictest/tx.go | 7 +- plugin/evm/atomic/export_tx.go | 102 +- plugin/evm/atomic/import_tx.go | 126 +- plugin/evm/atomic/state/atomic_trie.go | 8 +- plugin/evm/atomic/tx.go | 33 +- 
plugin/evm/atomic/vm/api.go | 4 +- plugin/evm/atomic/vm/block_extension.go | 69 +- plugin/evm/atomic/vm/export_tx_test.go | 137 +- .../vm}/gossiper_atomic_gossiping_test.go | 6 +- plugin/evm/{ => atomic/vm}/import_tx_test.go | 174 +- .../vm}/mempool_atomic_gossiping_test.go | 30 +- plugin/evm/atomic/vm/syncervm_test.go | 106 + plugin/evm/atomic/vm/tx_gossip_test.go | 318 +++ plugin/evm/atomic/vm/tx_semantic_verifier.go | 239 ++ plugin/evm/{ => atomic/vm}/tx_test.go | 30 +- plugin/evm/atomic/vm/vm.go | 110 +- plugin/evm/atomic/vm/vm_test.go | 1650 +++++++++++ plugin/evm/block.go | 5 - plugin/evm/block_manager.go | 14 +- plugin/evm/{atomic => }/extension/config.go | 37 +- plugin/evm/extension/no_op_block_extension.go | 33 + .../message/block_sync_summary_provider.go | 16 + plugin/evm/sync/extender.go | 31 - plugin/evm/sync/syncervm_client.go | 6 + plugin/evm/syncervm_test.go | 673 +---- plugin/evm/testutils/genesis.go | 122 + plugin/evm/testutils/test_syncervm.go | 667 +++++ plugin/evm/testutils/test_vm.go | 52 + plugin/evm/tx_gossip_test.go | 309 +- plugin/evm/vm.go | 89 +- plugin/evm/vm_test.go | 2517 +++-------------- plugin/evm/vm_warp_test.go | 48 +- plugin/{evm => }/factory.go | 7 +- plugin/main.go | 2 +- 37 files changed, 4035 insertions(+), 3767 deletions(-) rename plugin/evm/{ => atomic/vm}/gossiper_atomic_gossiping_test.go (97%) rename plugin/evm/{ => atomic/vm}/import_tx_test.go (88%) rename plugin/evm/{ => atomic/vm}/mempool_atomic_gossiping_test.go (74%) create mode 100644 plugin/evm/atomic/vm/syncervm_test.go create mode 100644 plugin/evm/atomic/vm/tx_gossip_test.go create mode 100644 plugin/evm/atomic/vm/tx_semantic_verifier.go rename plugin/evm/{ => atomic/vm}/tx_test.go (91%) create mode 100644 plugin/evm/atomic/vm/vm_test.go rename plugin/evm/{atomic => }/extension/config.go (76%) create mode 100644 plugin/evm/extension/no_op_block_extension.go create mode 100644 plugin/evm/message/block_sync_summary_provider.go delete mode 100644 
plugin/evm/sync/extender.go create mode 100644 plugin/evm/testutils/genesis.go create mode 100644 plugin/evm/testutils/test_syncervm.go create mode 100644 plugin/evm/testutils/test_vm.go rename plugin/{evm => }/factory.go (80%) diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index adc7d0ab81..1452335a95 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -48,7 +48,7 @@ type ( } DummyEngine struct { - cb *ConsensusCallbacks + cb ConsensusCallbacks clock *mockable.Clock consensusMode Mode } @@ -67,21 +67,21 @@ func NewFaker() *DummyEngine { } } -func NewFakerWithClock(cb *ConsensusCallbacks, clock *mockable.Clock) *DummyEngine { +func NewFakerWithClock(cb ConsensusCallbacks, clock *mockable.Clock) *DummyEngine { return &DummyEngine{ cb: cb, clock: clock, } } -func NewFakerWithCallbacks(cb *ConsensusCallbacks) *DummyEngine { +func NewFakerWithCallbacks(cb ConsensusCallbacks) *DummyEngine { return &DummyEngine{ cb: cb, clock: &mockable.Clock{}, } } -func NewFakerWithMode(cb *ConsensusCallbacks, mode Mode) *DummyEngine { +func NewFakerWithMode(cb ConsensusCallbacks, mode Mode) *DummyEngine { return &DummyEngine{ cb: cb, clock: &mockable.Clock{}, diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 601cb12f02..fe998a9a1d 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -544,7 +544,7 @@ func TestCanonicalHashMarker(t *testing.T) { } func testCanonicalHashMarker(t *testing.T, scheme string) { - var cases = []struct { + cases := []struct { forkA int forkB int }{ @@ -713,6 +713,7 @@ func TestCreateThenDeletePreByzantium(t *testing.T) { testCreateThenDelete(t, &config) } + func TestCreateThenDeletePostByzantium(t *testing.T) { testCreateThenDelete(t, params.TestChainConfig) } @@ -737,7 +738,8 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { byte(vm.PUSH1), 0x1, byte(vm.SSTORE), // Get the runtime-code on the stack - byte(vm.PUSH32)} + byte(vm.PUSH32), + } initCode 
= append(initCode, code...) initCode = append(initCode, []byte{ byte(vm.PUSH1), 0x0, // offset @@ -779,8 +781,8 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { }) // Import the canonical chain chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{ - //Debug: true, - //Tracer: logger.NewJSONLogger(nil, os.Stdout), + // Debug: true, + // Tracer: logger.NewJSONLogger(nil, os.Stdout), }, common.Hash{}, false) if err != nil { t.Fatalf("failed to create tester chain: %v", err) @@ -939,7 +941,8 @@ func TestTransientStorageReset(t *testing.T) { byte(vm.TSTORE), // Get the runtime-code on the stack - byte(vm.PUSH32)} + byte(vm.PUSH32), + } initCode = append(initCode, code...) initCode = append(initCode, []byte{ byte(vm.PUSH1), 0x0, // offset diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 19ea33878d..8c1281fdb4 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -93,7 +93,7 @@ func (b *testBackend) teardown() { } func newTestBackendFakerEngine(t *testing.T, config *params.ChainConfig, numBlocks int, extDataGasUsage *big.Int, genBlocks func(i int, b *core.BlockGen)) *testBackend { - var gspec = &core.Genesis{ + gspec := &core.Genesis{ Config: config, Alloc: types.GenesisAlloc{addr: {Balance: bal}}, } @@ -120,7 +120,7 @@ func newTestBackendFakerEngine(t *testing.T, config *params.ChainConfig, numBloc // newTestBackend creates a test backend. OBS: don't forget to invoke tearDown // after use, otherwise the blockchain instance will mem-leak via goroutines. 
func newTestBackend(t *testing.T, config *params.ChainConfig, numBlocks int, extDataGasUsage *big.Int, genBlocks func(i int, b *core.BlockGen)) *testBackend { - var gspec = &core.Genesis{ + gspec := &core.Genesis{ Config: config, Alloc: types.GenesisAlloc{addr: {Balance: bal}}, } diff --git a/plugin/evm/atomic/atomictest/tx.go b/plugin/evm/atomic/atomictest/tx.go index cba4303b43..1118e5e199 100644 --- a/plugin/evm/atomic/atomictest/tx.go +++ b/plugin/evm/atomic/atomictest/tx.go @@ -4,7 +4,6 @@ package atomictest import ( - "math/big" "math/rand" "github.com/ava-labs/avalanchego/codec" @@ -49,7 +48,7 @@ type TestUnsignedTx struct { UnsignedBytesV []byte SignedBytesV []byte InputUTXOsV set.Set[ids.ID] - SemanticVerifyV error + VisitV error EVMStateTransferV error } @@ -85,8 +84,8 @@ func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } // SemanticVerify implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) SemanticVerify(backend *atomic.VerifierBackend, stx *atomic.Tx, parent atomic.AtomicBlockContext, baseFee *big.Int) error { - return t.SemanticVerifyV +func (t *TestUnsignedTx) Visit(v atomic.Visitor) error { + return t.VisitV } // EVMStateTransfer implements the UnsignedAtomicTx interface diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index e63152d273..f61d998cb8 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -29,15 +29,15 @@ import ( ) var ( - _ UnsignedAtomicTx = &UnsignedExportTx{} - _ secp256k1fx.UnsignedTx = &UnsignedExportTx{} - ErrExportNonAVAXInputBanff = errors.New("export input cannot contain non-AVAX in Banff") - ErrExportNonAVAXOutputBanff = errors.New("export output cannot contain non-AVAX in Banff") - ErrNoExportOutputs = errors.New("tx has no export outputs") - errPublicKeySignatureMismatch = errors.New("signature doesn't match public key") - errOverflowExport = 
errors.New("overflow when computing export amount + txFee") - errInsufficientFunds = errors.New("insufficient funds") - errInvalidNonce = errors.New("invalid nonce") + _ UnsignedAtomicTx = &UnsignedExportTx{} + _ secp256k1fx.UnsignedTx = &UnsignedExportTx{} + ErrExportNonAVAXInputBanff = errors.New("export input cannot contain non-AVAX in Banff") + ErrExportNonAVAXOutputBanff = errors.New("export output cannot contain non-AVAX in Banff") + ErrNoExportOutputs = errors.New("tx has no export outputs") + errOverflowExport = errors.New("overflow when computing export amount + txFee") + errInsufficientFunds = errors.New("insufficient funds") + errInvalidNonce = errors.New("invalid nonce") + errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") ) // UnsignedExportTx is an unsigned ExportTx @@ -178,75 +178,7 @@ func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { return math.Sub(input, spent) } -// SemanticVerify this transaction is valid. -func (utx *UnsignedExportTx) SemanticVerify( - backend *VerifierBackend, - stx *Tx, - parent AtomicBlockContext, - baseFee *big.Int, -) error { - ctx := backend.Ctx - rules := backend.Rules - if err := utx.Verify(ctx, rules); err != nil { - return err - } - - // Check the transaction consumes and produces the right amounts - fc := avax.NewFlowChecker() - switch { - // Apply dynamic fees to export transactions as of Apricot Phase 3 - case rules.IsApricotPhase3: - gasUsed, err := stx.GasUsed(rules.IsApricotPhase5) - if err != nil { - return err - } - txFee, err := CalculateDynamicFee(gasUsed, baseFee) - if err != nil { - return err - } - fc.Produce(ctx.AVAXAssetID, txFee) - // Apply fees to export transactions before Apricot Phase 3 - default: - fc.Produce(ctx.AVAXAssetID, params.AvalancheAtomicTxFee) - } - for _, out := range utx.ExportedOutputs { - fc.Produce(out.AssetID(), out.Output().Amount()) - } - for _, in := range utx.Ins { - fc.Consume(in.AssetID, in.Amount) - } - - if err := 
fc.Verify(); err != nil { - return fmt.Errorf("export tx flow check failed due to: %w", err) - } - - if len(utx.Ins) != len(stx.Creds) { - return fmt.Errorf("export tx contained mismatched number of inputs/credentials (%d vs. %d)", len(utx.Ins), len(stx.Creds)) - } - - for i, input := range utx.Ins { - cred, ok := stx.Creds[i].(*secp256k1fx.Credential) - if !ok { - return fmt.Errorf("expected *secp256k1fx.Credential but got %T", cred) - } - if err := cred.Verify(); err != nil { - return err - } - - if len(cred.Sigs) != 1 { - return fmt.Errorf("expected one signature for EVM Input Credential, but found: %d", len(cred.Sigs)) - } - pubKey, err := backend.SecpCache.RecoverPublicKey(utx.Bytes(), cred.Sigs[0][:]) - if err != nil { - return err - } - if input.Address != pubKey.EthAddress() { - return errPublicKeySignatureMismatch - } - } - - return nil -} +func (utx *UnsignedExportTx) Visit(v Visitor) error { return v.ExportTx(utx) } // AtomicOps returns the atomic operations for this transaction. 
func (utx *UnsignedExportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { @@ -315,7 +247,7 @@ func NewExportTx( // consume non-AVAX if assetID != ctx.AVAXAssetID { - ins, signers, err = GetSpendableFunds(ctx, state, keys, assetID, amount) + ins, signers, err = getSpendableFunds(ctx, state, keys, assetID, amount) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) } @@ -343,14 +275,14 @@ func NewExportTx( return nil, err } - avaxIns, avaxSigners, err = GetSpendableAVAXWithFee(ctx, state, keys, avaxNeeded, cost, baseFee) + avaxIns, avaxSigners, err = getSpendableAVAXWithFee(ctx, state, keys, avaxNeeded, cost, baseFee) default: var newAvaxNeeded uint64 newAvaxNeeded, err = math.Add64(avaxNeeded, params.AvalancheAtomicTxFee) if err != nil { return nil, errOverflowExport } - avaxIns, avaxSigners, err = GetSpendableFunds(ctx, state, keys, ctx.AVAXAssetID, newAvaxNeeded) + avaxIns, avaxSigners, err = getSpendableFunds(ctx, state, keys, ctx.AVAXAssetID, newAvaxNeeded) } if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/signers: %w", err) @@ -411,12 +343,12 @@ func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state StateDB) return nil } -// GetSpendableFunds returns a list of EVMInputs and keys (in corresponding +// getSpendableFunds returns a list of EVMInputs and keys (in corresponding // order) to total [amount] of [assetID] owned by [keys]. // Note: we return [][]*secp256k1.PrivateKey even though each input // corresponds to a single key, so that the signers can be passed in to // [tx.Sign] which supports multiple keys on a single input. 
-func GetSpendableFunds( +func getSpendableFunds( ctx *snow.Context, state StateDB, keys []*secp256k1.PrivateKey, @@ -465,7 +397,7 @@ func GetSpendableFunds( return inputs, signers, nil } -// GetSpendableAVAXWithFee returns a list of EVMInputs and keys (in corresponding +// getSpendableAVAXWithFee returns a list of EVMInputs and keys (in corresponding // order) to total [amount] + [fee] of [AVAX] owned by [keys]. // This function accounts for the added cost of the additional inputs needed to // create the transaction and makes sure to skip any keys with a balance that is @@ -473,7 +405,7 @@ func GetSpendableFunds( // Note: we return [][]*secp256k1.PrivateKey even though each input // corresponds to a single key, so that the signers can be passed in to // [tx.Sign] which supports multiple keys on a single input. -func GetSpendableAVAXWithFee( +func getSpendableAVAXWithFee( ctx *snow.Context, state StateDB, keys []*secp256k1.PrivateKey, diff --git a/plugin/evm/atomic/import_tx.go b/plugin/evm/atomic/import_tx.go index 8dc0b9e68c..62c71fc39d 100644 --- a/plugin/evm/atomic/import_tx.go +++ b/plugin/evm/atomic/import_tx.go @@ -33,16 +33,13 @@ var ( ErrImportNonAVAXInputBanff = errors.New("import input cannot contain non-AVAX in Banff") ErrImportNonAVAXOutputBanff = errors.New("import output cannot contain non-AVAX in Banff") ErrNoImportInputs = errors.New("tx has no imported inputs") - ErrConflictingAtomicInputs = errors.New("invalid block due to conflicting atomic inputs") ErrWrongChainID = errors.New("tx has wrong chain ID") ErrNoEVMOutputs = errors.New("tx has no EVM outputs") ErrInputsNotSortedUnique = errors.New("inputs not sorted and unique") ErrOutputsNotSortedUnique = errors.New("outputs not sorted and unique") ErrOutputsNotSorted = errors.New("tx outputs not sorted") - ErrAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") errInsufficientFundsForFee = 
errors.New("insufficient AVAX funds to pay transaction fee") - ErrRejectedParent = errors.New("rejected parent") ) // UnsignedImportTx is an unsigned ImportTx @@ -185,93 +182,6 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { return math.Sub(input, spent) } -// SemanticVerify this transaction is valid. -func (utx *UnsignedImportTx) SemanticVerify( - backend *VerifierBackend, - stx *Tx, - parent AtomicBlockContext, - baseFee *big.Int, -) error { - ctx := backend.Ctx - rules := backend.Rules - if err := utx.Verify(ctx, rules); err != nil { - return err - } - - // Check the transaction consumes and produces the right amounts - fc := avax.NewFlowChecker() - switch { - // Apply dynamic fees to import transactions as of Apricot Phase 3 - case rules.IsApricotPhase3: - gasUsed, err := stx.GasUsed(rules.IsApricotPhase5) - if err != nil { - return err - } - txFee, err := CalculateDynamicFee(gasUsed, baseFee) - if err != nil { - return err - } - fc.Produce(ctx.AVAXAssetID, txFee) - - // Apply fees to import transactions as of Apricot Phase 2 - case rules.IsApricotPhase2: - fc.Produce(ctx.AVAXAssetID, params.AvalancheAtomicTxFee) - } - for _, out := range utx.Outs { - fc.Produce(out.AssetID, out.Amount) - } - for _, in := range utx.ImportedInputs { - fc.Consume(in.AssetID(), in.Input().Amount()) - } - - if err := fc.Verify(); err != nil { - return fmt.Errorf("import tx flow check failed due to: %w", err) - } - - if len(stx.Creds) != len(utx.ImportedInputs) { - return fmt.Errorf("import tx contained mismatched number of inputs/credentials (%d vs. 
%d)", len(utx.ImportedInputs), len(stx.Creds)) - } - - if !backend.Bootstrapped { - // Allow for force committing during bootstrapping - return nil - } - - utxoIDs := make([][]byte, len(utx.ImportedInputs)) - for i, in := range utx.ImportedInputs { - inputID := in.UTXOID.InputID() - utxoIDs[i] = inputID[:] - } - // allUTXOBytes is guaranteed to be the same length as utxoIDs - allUTXOBytes, err := ctx.SharedMemory.Get(utx.SourceChain, utxoIDs) - if err != nil { - return fmt.Errorf("failed to fetch import UTXOs from %s due to: %w", utx.SourceChain, err) - } - - for i, in := range utx.ImportedInputs { - utxoBytes := allUTXOBytes[i] - - utxo := &avax.UTXO{} - if _, err := Codec.Unmarshal(utxoBytes, utxo); err != nil { - return fmt.Errorf("failed to unmarshal UTXO: %w", err) - } - - cred := stx.Creds[i] - - utxoAssetID := utxo.AssetID() - inAssetID := in.AssetID() - if utxoAssetID != inAssetID { - return ErrAssetIDMismatch - } - - if err := backend.Fx.VerifyTransfer(utx, in.In, cred, utxo.Out); err != nil { - return fmt.Errorf("import tx transfer failed verification: %w", err) - } - } - - return conflicts(backend, utx.InputUTXOs(), parent) -} - // AtomicOps returns imported inputs spent on this transaction // We spend imported UTXOs here rather than in semanticVerify because // we don't want to remove an imported UTXO in semanticVerify @@ -437,38 +347,4 @@ func (utx *UnsignedImportTx) EVMStateTransfer(ctx *snow.Context, state StateDB) return nil } -// conflicts returns an error if [inputs] conflicts with any of the atomic inputs contained in [ancestor] -// or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is -// accepted, then nil will be returned immediately. -// If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned. 
-func conflicts(backend *VerifierBackend, inputs set.Set[ids.ID], ancestor AtomicBlockContext) error { - fetcher := backend.BlockFetcher - lastAcceptedBlock := fetcher.LastAcceptedBlockInternal() - lastAcceptedHeight := lastAcceptedBlock.Height() - for ancestor.Height() > lastAcceptedHeight { - // If any of the atomic transactions in the ancestor conflict with [inputs] - // return an error. - for _, atomicTx := range ancestor.AtomicTxs() { - if inputs.Overlaps(atomicTx.InputUTXOs()) { - return ErrConflictingAtomicInputs - } - } - - // Move up the chain. - nextAncestorID := ancestor.Parent() - // If the ancestor is unknown, then the parent failed - // verification when it was called. - // If the ancestor is rejected, then this block shouldn't be - // inserted into the canonical chain because the parent is - // will be missing. - // If the ancestor is processing, then the block may have - // been verified. - nextAncestor, err := fetcher.GetAtomicBlock(context.TODO(), nextAncestorID) - if err != nil { - return ErrRejectedParent - } - ancestor = nextAncestor - } - - return nil -} +func (utx *UnsignedImportTx) Visit(v Visitor) error { return v.ImportTx(utx) } diff --git a/plugin/evm/atomic/state/atomic_trie.go b/plugin/evm/atomic/state/atomic_trie.go index 08c93f0b91..a196ad3e08 100644 --- a/plugin/evm/atomic/state/atomic_trie.go +++ b/plugin/evm/atomic/state/atomic_trie.go @@ -136,8 +136,8 @@ func (a *AtomicTrie) OpenTrie(root common.Hash) (*trie.Trie, error) { return trie.New(trie.TrieID(root), a.trieDB) } -// commit calls commit on the underlying trieDB and updates metadata pointers. -func (a *AtomicTrie) commit(height uint64, root common.Hash) error { +// Commit calls Commit on the underlying trieDB and updates metadata pointers. 
+func (a *AtomicTrie) Commit(height uint64, root common.Hash) error { if err := a.trieDB.Commit(root, false); err != nil { return err } @@ -270,7 +270,7 @@ func (a *AtomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { // Because we do not accept the trie at every height, we may need to // populate roots at prior commit heights that were skipped. for nextCommitHeight := a.lastCommittedHeight + a.commitInterval; nextCommitHeight < height; nextCommitHeight += a.commitInterval { - if err := a.commit(nextCommitHeight, a.lastAcceptedRoot); err != nil { + if err := a.Commit(nextCommitHeight, a.lastAcceptedRoot); err != nil { return false, err } hasCommitted = true @@ -284,7 +284,7 @@ func (a *AtomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { // Commit this root if we have reached the [commitInterval]. if height%a.commitInterval == 0 { - if err := a.commit(height, root); err != nil { + if err := a.Commit(height, root); err != nil { return false, err } hasCommitted = true diff --git a/plugin/evm/atomic/tx.go b/plugin/evm/atomic/tx.go index 75823d6ad2..0400529865 100644 --- a/plugin/evm/atomic/tx.go +++ b/plugin/evm/atomic/tx.go @@ -5,7 +5,6 @@ package atomic import ( "bytes" - "context" "errors" "fmt" "math/big" @@ -20,13 +19,11 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -117,6 +114,12 @@ func (in *EVMInput) Verify() error { return nil } +// Allow vm to execute custom logic against the underlying transaction types. 
+type Visitor interface { + ImportTx(*UnsignedImportTx) error + ExportTx(*UnsignedExportTx) error +} + // UnsignedTx is an unsigned transaction type UnsignedTx interface { Initialize(unsignedBytes, signedBytes []byte) @@ -127,25 +130,6 @@ type UnsignedTx interface { SignedBytes() []byte } -type VerifierBackend struct { - Ctx *snow.Context - Fx fx.Fx - Rules params.Rules - Bootstrapped bool - BlockFetcher BlockFetcher - SecpCache *secp256k1.RecoverCache -} - -type BlockFetcher interface { - LastAcceptedBlockInternal() snowman.Block - GetAtomicBlock(context.Context, ids.ID) (AtomicBlockContext, error) -} - -type AtomicBlockContext interface { - AtomicTxs() []*Tx - snowman.Block -} - type StateDB interface { AddBalance(common.Address, *uint256.Int) AddBalanceMultiCoin(common.Address, common.Hash, *big.Int) @@ -168,9 +152,8 @@ type UnsignedAtomicTx interface { InputUTXOs() set.Set[ids.ID] // Verify attempts to verify that the transaction is well formed Verify(ctx *snow.Context, rules params.Rules) error - // Attempts to verify this transaction with the provided state. - // SemanticVerify this transaction is valid. - SemanticVerify(backend *VerifierBackend, stx *Tx, parent AtomicBlockContext, baseFee *big.Int) error + // Allow vm to execute custom logic against the underlying transaction types. + Visit(v Visitor) error // AtomicOps returns the blockchainID and set of atomic requests that // must be applied to shared memory for this transaction to be accepted. // The set of atomic requests must be returned in a consistent order. diff --git a/plugin/evm/atomic/vm/api.go b/plugin/evm/atomic/vm/api.go index f2ade6c31f..a9cb96ea9d 100644 --- a/plugin/evm/atomic/vm/api.go +++ b/plugin/evm/atomic/vm/api.go @@ -172,7 +172,7 @@ func (service *AvaxAPI) GetAtomicTxStatus(r *http.Request, args *api.JSONTxID, r // Since chain state updates run asynchronously with VM block acceptance, // avoid returning [Accepted] until the chain state reaches the block // containing the atomic tx. 
- lastAccepted := service.vm.Blockchain().LastAcceptedBlock() + lastAccepted := service.vm.Ethereum().BlockChain().LastAcceptedBlock() if height > lastAccepted.NumberU64() { reply.Status = atomic.Processing return nil @@ -219,7 +219,7 @@ func (service *AvaxAPI) GetAtomicTx(r *http.Request, args *api.GetTxArgs, reply // Since chain state updates run asynchronously with VM block acceptance, // avoid returning [Accepted] until the chain state reaches the block // containing the atomic tx. - lastAccepted := service.vm.Blockchain().LastAcceptedBlock() + lastAccepted := service.vm.Ethereum().BlockChain().LastAcceptedBlock() if height > lastAccepted.NumberU64() { return nil } diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go index 18079d2949..291bdbefe9 100644 --- a/plugin/evm/atomic/vm/block_extension.go +++ b/plugin/evm/atomic/vm/block_extension.go @@ -17,15 +17,14 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/plugin/evm/atomic/extension" + "github.com/ava-labs/coreth/plugin/evm/extension" ) -var _ extension.BlockExtension = (*blockExtension)(nil) +var _ extension.BlockManagerExtension = (*blockExtension)(nil) var ( errNilExtDataGasUsedApricotPhase4 = errors.New("nil extDataGasUsed is invalid after apricotPhase4") errNilEthBlock = errors.New("nil ethBlock") - errNilExtraData = errors.New("nil extra data") errMissingUTXOs = errors.New("missing UTXOs") errEmptyBlock = errors.New("empty block") ) @@ -47,17 +46,7 @@ func newBlockExtension( } } -func (be *blockExtension) InitializeExtraData(ethBlock *types.Block, chainConfig *params.ChainConfig) (interface{}, error) { - isApricotPhase5 := chainConfig.IsApricotPhase5(ethBlock.Time()) - atomicTxs, err := atomic.ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, atomic.Codec) - if err != nil { - return nil, err - } - - return atomicTxs, nil -} - -func (be 
*blockExtension) SyntacticVerify(b extension.ExtendedBlock, rules params.Rules) error { +func (be *blockExtension) SyntacticVerify(b extension.VMBlock, rules params.Rules) error { ethBlock := b.GetEthBlock() if ethBlock == nil { return errNilEthBlock @@ -102,7 +91,7 @@ func (be *blockExtension) SyntacticVerify(b extension.ExtendedBlock, rules param // Block must not be empty txs := ethBlock.Transactions() - atomicTxs, err := getAtomicFromExtra(b) + atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) if err != nil { return err } @@ -153,8 +142,8 @@ func (be *blockExtension) SyntacticVerify(b extension.ExtendedBlock, rules param return nil } -func (be *blockExtension) Accept(b extension.ExtendedBlock, acceptedBatch database.Batch) error { - atomicTxs, err := getAtomicFromExtra(b) +func (be *blockExtension) Accept(b extension.VMBlock, acceptedBatch database.Batch) error { + atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) if err != nil { return err } @@ -174,8 +163,8 @@ func (be *blockExtension) Accept(b extension.ExtendedBlock, acceptedBatch databa return atomicState.Accept(acceptedBatch) } -func (be *blockExtension) Reject(b extension.ExtendedBlock) error { - atomicTxs, err := getAtomicFromExtra(b) +func (be *blockExtension) Reject(b extension.VMBlock) error { + atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) if err != nil { return err } @@ -194,21 +183,7 @@ func (be *blockExtension) Reject(b extension.ExtendedBlock) error { return atomicState.Reject() } -func getAtomicFromExtra(b extension.ExtendedBlock) ([]*atomic.Tx, error) { - extraData := b.GetExtraData() - if extraData == nil { - return nil, errNilExtraData - } - - atomicTxs, ok := extraData.([]*atomic.Tx) - if !ok { - return nil, fmt.Errorf("expected extra data to be of type []*atomic.Tx but got %T", extraData) - } - - return atomicTxs, nil -} - -func (be *blockExtension) Cleanup(b 
extension.ExtendedBlock) { +func (be *blockExtension) Cleanup(b extension.VMBlock) { if atomicState, err := be.vm.atomicBackend.GetVerifiedAtomicState(b.GetEthBlock().Hash()); err == nil { atomicState.Reject() } @@ -216,7 +191,7 @@ func (be *blockExtension) Cleanup(b extension.ExtendedBlock) { // verifyUTXOsPresent returns an error if any of the atomic transactions name UTXOs that // are not present in shared memory. -func (be *blockExtension) verifyUTXOsPresent(b extension.ExtendedBlock, atomicTxs []*atomic.Tx) error { +func (be *blockExtension) verifyUTXOsPresent(b extension.VMBlock, atomicTxs []*atomic.Tx) error { blockHash := common.Hash(b.ID()) if be.vm.atomicBackend.IsBonus(b.Height(), blockHash) { log.Info("skipping atomic tx verification on bonus block", "block", blockHash) @@ -237,24 +212,16 @@ func (be *blockExtension) verifyUTXOsPresent(b extension.ExtendedBlock, atomicTx return nil } -var _ atomic.AtomicBlockContext = (*atomicBlock)(nil) - -type atomicBlock struct { - extension.ExtendedBlock - atomicTxs []*atomic.Tx -} - -func wrapAtomicBlock(b extension.ExtendedBlock) (*atomicBlock, error) { - txs, err := getAtomicFromExtra(b) +func extractAtomicTxsFromBlock(b extension.VMBlock, chainConfig *params.ChainConfig) ([]*atomic.Tx, error) { + ethBlock := b.GetEthBlock() + if ethBlock == nil { + return nil, errNilEthBlock + } + isApricotPhase5 := chainConfig.IsApricotPhase5(ethBlock.Time()) + atomicTxs, err := atomic.ExtractAtomicTxs(ethBlock.ExtData(), isApricotPhase5, atomic.Codec) if err != nil { return nil, err } - return &atomicBlock{ - ExtendedBlock: b, - atomicTxs: txs, - }, nil -} -func (ab *atomicBlock) AtomicTxs() []*atomic.Tx { - return ab.atomicTxs + return atomicTxs, nil } diff --git a/plugin/evm/atomic/vm/export_tx_test.go b/plugin/evm/atomic/vm/export_tx_test.go index 22514dba09..15cb08bd05 100644 --- a/plugin/evm/atomic/vm/export_tx_test.go +++ b/plugin/evm/atomic/vm/export_tx_test.go @@ -25,6 +25,19 @@ import ( "github.com/holiman/uint256" ) 
+var nonExistentID = ids.ID{'F'} + +var ( + apricotRulesPhase0 = params.Rules{} + apricotRulesPhase1 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true}} + apricotRulesPhase2 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true}} + apricotRulesPhase3 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true}} + apricotRulesPhase4 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true}} + apricotRulesPhase5 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true}} + apricotRulesPhase6 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true, IsApricotPhasePre6: true, IsApricotPhase6: true, IsApricotPhasePost6: true}} + banffRules = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true, IsApricotPhasePre6: true, IsApricotPhase6: true, IsApricotPhasePost6: true, IsBanff: true}} +) + // createExportTxOptions adds funds to shared memory, imports them, and returns a list of export transactions // that attempt to send the funds to each of the test keys (list of length 3). 
func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { @@ -88,12 +101,12 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, // Use the funds to create 3 conflicting export transactions sending the funds to each of the test addresses exportTxs := make([]*atomic.Tx, 0, 3) - state, err := vm.blockChain.State() + state, err := vm.Ethereum().BlockChain().State() if err != nil { t.Fatal(err) } - for _, addr := range testShortIDAddrs { - exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + for _, addr := range testutils.TestShortIDAddrs { + exportTx, err := atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, testutils.InitialBaseFee, testutils.TestKeys[0:1]) if err != nil { t.Fatal(err) } @@ -325,7 +338,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) @@ -406,7 +419,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { Ins: test.tx, } - stateDB, err := vm.blockChain.State() + stateDB, err := vm.Ethereum().BlockChain().State() if err != nil { t.Fatal(err) } @@ -442,7 +455,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { } func TestExportTxSemanticVerify(t *testing.T) { - _, vm, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") + _, vm, _, _, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { 
@@ -450,7 +463,7 @@ func TestExportTxSemanticVerify(t *testing.T) { } }() - parent := vm.LastAcceptedBlockInternal().(*Block) + parent := vm.LastAcceptedVMBlock() key := testutils.TestKeys[0] addr := key.Address() @@ -914,10 +927,11 @@ func TestExportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - backend := &atomic.VerifierBackend{ + backend := &VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: test.rules, + ChainConfig: vm.Ethereum().BlockChain().Config(), Bootstrapped: vm.bootstrapped.Get(), BlockFetcher: vm, SecpCache: &vm.secpCache, @@ -927,7 +941,12 @@ func TestExportTxSemanticVerify(t *testing.T) { tx := test.tx exportTx := tx.UnsignedAtomicTx - err := exportTx.SemanticVerify(backend, tx, parent, test.baseFee) + err := exportTx.Visit(&semanticVerifier{ + backend: backend, + atx: tx, + parent: parent, + baseFee: test.baseFee, + }) if test.shouldErr && err == nil { t.Fatalf("should have errored but returned valid") } @@ -939,7 +958,7 @@ func TestExportTxSemanticVerify(t *testing.T) { } func TestExportTxAccept(t *testing.T) { - _, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") + _, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) @@ -1013,7 +1032,7 @@ func TestExportTxAccept(t *testing.T) { t.Fatal(err) } - commitBatch, err := vm.versiondb.CommitBatch() + commitBatch, err := vm.VersionDB().CommitBatch() if err != nil { t.Fatalf("Failed to create commit batch for VM due to %s", err) } @@ -1116,7 +1135,7 @@ func TestExportTxVerify(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[0]}, + Addrs: []ids.ShortID{testutils.TestShortIDAddrs[0]}, }, }, }, @@ -1127,7 +1146,7 @@ func TestExportTxVerify(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[1]}, + Addrs: 
[]ids.ShortID{testutils.TestShortIDAddrs[1]}, }, }, }, @@ -1320,7 +1339,7 @@ func TestExportTxVerify(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[0]}, + Addrs: []ids.ShortID{testutils.TestShortIDAddrs[0]}, }, }, }, @@ -1359,7 +1378,7 @@ func TestExportTxVerify(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[0]}, + Addrs: []ids.ShortID{testutils.TestShortIDAddrs[0]}, }, }, }, @@ -1418,7 +1437,7 @@ func TestExportTxGasCost(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[0]}, + Addrs: []ids.ShortID{testutils.TestShortIDAddrs[0]}, }, }, }, @@ -1450,7 +1469,7 @@ func TestExportTxGasCost(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[0]}, + Addrs: []ids.ShortID{testutils.TestShortIDAddrs[0]}, }, }, }, @@ -1483,7 +1502,7 @@ func TestExportTxGasCost(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[0]}, + Addrs: []ids.ShortID{testutils.TestShortIDAddrs[0]}, }, }, }, @@ -1515,7 +1534,7 @@ func TestExportTxGasCost(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[0]}, + Addrs: []ids.ShortID{testutils.TestShortIDAddrs[0]}, }, }, }, @@ -1559,7 +1578,7 @@ func TestExportTxGasCost(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[0]}, + Addrs: []ids.ShortID{testutils.TestShortIDAddrs[0]}, }, }, }, @@ -1603,7 +1622,7 @@ func TestExportTxGasCost(t *testing.T) { OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, - Addrs: []ids.ShortID{testShortIDAddrs[0]}, + Addrs: []ids.ShortID{testutils.TestShortIDAddrs[0]}, }, }, }, @@ -1654,42 +1673,42 @@ func 
TestNewExportTx(t *testing.T) { }{ { name: "apricot phase 0", - genesis: genesisJSONApricotPhase0, + genesis: testutils.GenesisJSONApricotPhase0, rules: apricotRulesPhase0, bal: 44000000, expectedBurnedAVAX: 1000000, }, { name: "apricot phase 1", - genesis: genesisJSONApricotPhase1, + genesis: testutils.GenesisJSONApricotPhase1, rules: apricotRulesPhase1, bal: 44000000, expectedBurnedAVAX: 1000000, }, { name: "apricot phase 2", - genesis: genesisJSONApricotPhase2, + genesis: testutils.GenesisJSONApricotPhase2, rules: apricotRulesPhase2, bal: 43000000, expectedBurnedAVAX: 1000000, }, { name: "apricot phase 3", - genesis: genesisJSONApricotPhase3, + genesis: testutils.GenesisJSONApricotPhase3, rules: apricotRulesPhase3, bal: 44446500, expectedBurnedAVAX: 276750, }, { name: "apricot phase 4", - genesis: genesisJSONApricotPhase4, + genesis: testutils.GenesisJSONApricotPhase4, rules: apricotRulesPhase4, bal: 44446500, expectedBurnedAVAX: 276750, }, { name: "apricot phase 5", - genesis: genesisJSONApricotPhase5, + genesis: testutils.GenesisJSONApricotPhase5, rules: apricotRulesPhase5, bal: 39946500, expectedBurnedAVAX: 2526750, @@ -1697,7 +1716,7 @@ func TestNewExportTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, test.genesis, "", "") + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, test.genesis, "", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { @@ -1705,7 +1724,7 @@ func TestNewExportTx(t *testing.T) { } }() - parent := vm.LastAcceptedBlockInternal().(*Block) + parent := vm.LastAcceptedVMBlock() importAmount := uint64(50000000) utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} @@ -1765,31 +1784,37 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - parent = vm.LastAcceptedBlockInternal().(*Block) + parent = vm.LastAcceptedVMBlock() exportAmount := uint64(5000000) - state, err := vm.blockChain.State() + state, err := 
vm.Ethereum().BlockChain().State() if err != nil { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testutils.TestShortIDAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } exportTx := tx.UnsignedAtomicTx - backend := &atomic.VerifierBackend{ + backend := &VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, - Rules: vm.currentRules(), + Rules: vm.CurrentRules(), + ChainConfig: vm.Ethereum().BlockChain().Config(), Bootstrapped: vm.bootstrapped.Get(), BlockFetcher: vm, SecpCache: &vm.secpCache, } - if err := exportTx.SemanticVerify(backend, tx, parent, parent.ethBlock.BaseFee()); err != nil { + if err := exportTx.Visit(&semanticVerifier{ + backend: backend, + atx: tx, + parent: parent, + baseFee: parent.GetEthBlock().BaseFee(), + }); err != nil { t.Fatal("newExportTx created an invalid transaction", err) } @@ -1801,7 +1826,7 @@ func TestNewExportTx(t *testing.T) { t.Fatalf("burned wrong amount of AVAX - expected %d burned %d", test.expectedBurnedAVAX, burnedAVAX) } - commitBatch, err := vm.versiondb.CommitBatch() + commitBatch, err := vm.VersionDB().CommitBatch() if err != nil { t.Fatalf("Failed to create commit batch for VM due to %s", err) } @@ -1814,7 +1839,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - sdb, err := vm.blockChain.State() + sdb, err := vm.Ethereum().BlockChain().State() if err != nil { t.Fatal(err) } @@ -1841,28 +1866,28 @@ func TestNewExportTxMulticoin(t *testing.T) { }{ { name: "apricot phase 0", - genesis: genesisJSONApricotPhase0, + genesis: testutils.GenesisJSONApricotPhase0, rules: apricotRulesPhase0, bal: 49000000, balmc: 25000000, }, { name: "apricot phase 1", - genesis: genesisJSONApricotPhase1, + 
genesis: testutils.GenesisJSONApricotPhase1, rules: apricotRulesPhase1, bal: 49000000, balmc: 25000000, }, { name: "apricot phase 2", - genesis: genesisJSONApricotPhase2, + genesis: testutils.GenesisJSONApricotPhase2, rules: apricotRulesPhase2, bal: 48000000, balmc: 25000000, }, { name: "apricot phase 3", - genesis: genesisJSONApricotPhase3, + genesis: testutils.GenesisJSONApricotPhase3, rules: apricotRulesPhase3, bal: 48947900, balmc: 25000000, @@ -1870,7 +1895,7 @@ func TestNewExportTxMulticoin(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, test.genesis, "", "") + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, test.genesis, "", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { @@ -1878,7 +1903,7 @@ func TestNewExportTxMulticoin(t *testing.T) { } }() - parent := vm.LastAcceptedBlockInternal().(*Block) + parent := vm.LastAcceptedVMBlock() importAmount := uint64(50000000) utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} @@ -1968,40 +1993,46 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - parent = vm.LastAcceptedBlockInternal().(*Block) + parent = vm.LastAcceptedVMBlock() exportAmount := uint64(5000000) - testutils.TestKeys0Addr := testutils.TestKeys[0].EthAddress() - exportId, err := ids.ToShortID(testutils.TestKeys0Addr[:]) + testKeys0Addr := testutils.TestKeys[0].EthAddress() + exportId, err := ids.ToShortID(testKeys0Addr[:]) if err != nil { t.Fatal(err) } - state, err := vm.blockChain.State() + state, err := vm.Ethereum().BlockChain().State() if err != nil { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, vm.currentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + tx, err = atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, testutils.InitialBaseFee, 
[]*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } exportTx := tx.UnsignedAtomicTx - backend := &atomic.VerifierBackend{ + backend := &VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, - Rules: vm.currentRules(), + Rules: vm.CurrentRules(), + ChainConfig: vm.Ethereum().BlockChain().Config(), Bootstrapped: vm.bootstrapped.Get(), BlockFetcher: vm, SecpCache: &vm.secpCache, } - if err := exportTx.SemanticVerify(backend, tx, parent, parent.ethBlock.BaseFee()); err != nil { + if err := exportTx.Visit(&semanticVerifier{ + backend: backend, + atx: tx, + parent: parent, + baseFee: parent.GetEthBlock().BaseFee(), + }); err != nil { t.Fatal("newExportTx created an invalid transaction", err) } - commitBatch, err := vm.versiondb.CommitBatch() + commitBatch, err := vm.VersionDB().CommitBatch() if err != nil { t.Fatalf("Failed to create commit batch for VM due to %s", err) } @@ -2014,7 +2045,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - stdb, err := vm.blockChain.State() + stdb, err := vm.Ethereum().BlockChain().State() if err != nil { t.Fatal(err) } diff --git a/plugin/evm/gossiper_atomic_gossiping_test.go b/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go similarity index 97% rename from plugin/evm/gossiper_atomic_gossiping_test.go rename to plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go index e2c97167f8..636a6ccca8 100644 --- a/plugin/evm/gossiper_atomic_gossiping_test.go +++ b/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go @@ -1,7 +1,7 @@ // (c) 2019-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package vm import ( "context" @@ -27,7 +27,7 @@ import ( func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { assert := assert.New(t) - _, vm, _, sharedMemory, sender := GenesisVM(t, true, "", "", "") + _, vm, _, sharedMemory, sender := GenesisAtomicVM(t, true, "", "", "") defer func() { assert.NoError(vm.Shutdown(context.Background())) }() @@ -118,7 +118,7 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { assert := assert.New(t) - _, vm, _, sharedMemory, sender := GenesisVM(t, true, "", "", "") + _, vm, _, sharedMemory, sender := GenesisAtomicVM(t, true, "", "", "") defer func() { assert.NoError(vm.Shutdown(context.Background())) }() diff --git a/plugin/evm/import_tx_test.go b/plugin/evm/atomic/vm/import_tx_test.go similarity index 88% rename from plugin/evm/import_tx_test.go rename to plugin/evm/atomic/vm/import_tx_test.go index da8a5c9db6..31db861f5f 100644 --- a/plugin/evm/import_tx_test.go +++ b/plugin/evm/atomic/vm/import_tx_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package vm import ( "math/big" @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" @@ -33,7 +34,7 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.M Amt: uint64(50000000), OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, + Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, }, }, } @@ -48,15 +49,15 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *avalancheatomic.M Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testKeys[0].Address().Bytes(), + testutils.TestKeys[0].Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) } importTxs := make([]*atomic.Tx, 0, 3) - for _, ethAddr := range testEthAddrs { - importTx, err := vm.newImportTx(vm.ctx.XChainID, ethAddr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + for _, ethAddr := range testutils.TestEthAddrs { + importTx, err := vm.newImportTx(vm.ctx.XChainID, ethAddr, testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } @@ -105,12 +106,12 @@ func TestImportTxVerify(t *testing.T) { }, Outs: []atomic.EVMOutput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: importAmount - params.AvalancheAtomicTxFee, AssetID: ctx.AVAXAssetID, }, { - Address: testEthAddrs[1], + Address: testutils.TestEthAddrs[1], Amount: importAmount, AssetID: ctx.AVAXAssetID, }, @@ -315,7 +316,7 @@ func TestImportTxVerify(t *testing.T) { tx := *importTx tx.Outs = []atomic.EVMOutput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 0, AssetID: ctx.AVAXAssetID, }, @@ -430,12 +431,12 @@ func TestNewImportTx(t *testing.T) { // and checks that it has the correct fee for the base fee that has been used 
createNewImportAVAXTx := func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) + _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, importAmount, testutils.TestShortIDAddrs[0]) if err != nil { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } @@ -445,14 +446,14 @@ func TestNewImportTx(t *testing.T) { if err != nil { t.Fatal(err) } - rules := vm.currentRules() + rules := vm.CurrentRules() switch { case rules.IsApricotPhase3: actualCost, err := importTx.GasUsed(rules.IsApricotPhase5) if err != nil { t.Fatal(err) } - actualFee, err = atomic.CalculateDynamicFee(actualCost, initialBaseFee) + actualFee, err = atomic.CalculateDynamicFee(actualCost, testutils.InitialBaseFee) if err != nil { t.Fatal(err) } @@ -469,7 +470,11 @@ func TestNewImportTx(t *testing.T) { return tx } checkState := func(t *testing.T, vm *VM) { - txs := vm.LastAcceptedBlockInternal().(*Block).atomicTxs + blk := vm.LastAcceptedVMBlock() + txs, err := extractAtomicTxsFromBlock(blk, vm.Ethereum().BlockChain().Config()) + if err != nil { + t.Fatal(err) + } if len(txs) != 1 { t.Fatalf("Expected one import tx to be in the last accepted block, but found %d", len(txs)) } @@ -482,7 +487,7 @@ func TestNewImportTx(t *testing.T) { // Ensure that the UTXO has been removed from shared memory within Accept addrSet := set.Set[ids.ShortID]{} - addrSet.Add(testShortIDAddrs[0]) + addrSet.Add(testutils.TestShortIDAddrs[0]) utxos, _, _, err := vm.GetAtomicUTXOs(vm.ctx.XChainID, addrSet, ids.ShortEmpty, ids.Empty, -1) if err != nil { t.Fatal(err) @@ -492,14 +497,14 @@ func TestNewImportTx(t 
*testing.T) { } // Ensure that the call to EVMStateTransfer correctly updates the balance of [addr] - sdb, err := vm.blockChain.State() + sdb, err := vm.Ethereum().BlockChain().State() if err != nil { t.Fatal(err) } expectedRemainingBalance := new(uint256.Int).Mul( uint256.NewInt(importAmount-actualAVAXBurned), atomic.X2CRate) - addr := testKeys[0].EthAddress() + addr := testutils.TestKeys[0].EthAddress() if actualBalance := sdb.GetBalance(addr); actualBalance.Cmp(expectedRemainingBalance) != 0 { t.Fatalf("address remaining balance %s equal %s not %s", addr.String(), actualBalance, expectedRemainingBalance) } @@ -508,22 +513,22 @@ func TestNewImportTx(t *testing.T) { "apricot phase 0": { setup: createNewImportAVAXTx, checkState: checkState, - genesisJSON: genesisJSONApricotPhase0, + genesisJSON: testutils.GenesisJSONApricotPhase0, }, "apricot phase 1": { setup: createNewImportAVAXTx, checkState: checkState, - genesisJSON: genesisJSONApricotPhase1, + genesisJSON: testutils.GenesisJSONApricotPhase1, }, "apricot phase 2": { setup: createNewImportAVAXTx, checkState: checkState, - genesisJSON: genesisJSONApricotPhase2, + genesisJSON: testutils.GenesisJSONApricotPhase2, }, "apricot phase 3": { setup: createNewImportAVAXTx, checkState: checkState, - genesisJSON: genesisJSONApricotPhase3, + genesisJSON: testutils.GenesisJSONApricotPhase3, }, } @@ -567,12 +572,12 @@ func TestImportTxGasCost(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, }}, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 30750, BaseFee: big.NewInt(25 * params.GWei), @@ -591,12 +596,12 @@ func TestImportTxGasCost(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, }}, }, - Keys: 
[][]*secp256k1.PrivateKey{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 1, BaseFee: big.NewInt(1), @@ -615,12 +620,12 @@ func TestImportTxGasCost(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, }}, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}, ExpectedGasUsed: 11230, ExpectedFee: 1, BaseFee: big.NewInt(1), @@ -651,13 +656,13 @@ func TestImportTxGasCost(t *testing.T) { }, Outs: []atomic.EVMOutput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: importAmount, AssetID: antAssetID, }, }, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}, {testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}, {testutils.TestKeys[0]}}, ExpectedGasUsed: 2318, ExpectedFee: 57950, BaseFee: big.NewInt(25 * params.GWei), @@ -687,18 +692,18 @@ func TestImportTxGasCost(t *testing.T) { }, Outs: []atomic.EVMOutput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, }, { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: importAmount, AssetID: antAssetID, }, }, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}, {testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}, {testutils.TestKeys[0]}}, ExpectedGasUsed: 2378, ExpectedFee: 59450, BaseFee: big.NewInt(25 * params.GWei), @@ -717,12 +722,12 @@ func TestImportTxGasCost(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: importAmount, AssetID: avaxAssetID, }}, }, - Keys: [][]*secp256k1.PrivateKey{{testKeys[0], testKeys[1]}}, + Keys: [][]*secp256k1.PrivateKey{{testutils.TestKeys[0], testutils.TestKeys[1]}}, ExpectedGasUsed: 2234, ExpectedFee: 55850, BaseFee: big.NewInt(25 * params.GWei), 
@@ -816,23 +821,23 @@ func TestImportTxGasCost(t *testing.T) { }, Outs: []atomic.EVMOutput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: importAmount * 10, AssetID: avaxAssetID, }, }, }, Keys: [][]*secp256k1.PrivateKey{ - {testKeys[0]}, - {testKeys[0]}, - {testKeys[0]}, - {testKeys[0]}, - {testKeys[0]}, - {testKeys[0]}, - {testKeys[0]}, - {testKeys[0]}, - {testKeys[0]}, - {testKeys[0]}, + {testutils.TestKeys[0]}, + {testutils.TestKeys[0]}, + {testutils.TestKeys[0]}, + {testutils.TestKeys[0]}, + {testutils.TestKeys[0]}, + {testutils.TestKeys[0]}, + {testutils.TestKeys[0]}, + {testutils.TestKeys[0]}, + {testutils.TestKeys[0]}, + {testutils.TestKeys[0]}, }, ExpectedGasUsed: 11022, ExpectedFee: 275550, @@ -887,12 +892,12 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -916,12 +921,12 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -937,7 +942,7 @@ func TestImportTxSemanticVerify(t *testing.T) { Key: inputID[:], Value: []byte("hey there"), Traits: [][]byte{ - testShortIDAddrs[0].Bytes(), + testutils.TestShortIDAddrs[0].Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -956,12 +961,12 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: 
testutils.TestEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -972,7 +977,7 @@ func TestImportTxSemanticVerify(t *testing.T) { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() expectedAssetID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, expectedAssetID, 1, testShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, expectedAssetID, 1, testutils.TestShortIDAddrs[0]) if err != nil { t.Fatal(err) } @@ -990,22 +995,22 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { t.Fatal(err) } return tx }, - semanticVerifyErr: atomic.ErrAssetIDMismatch.Error(), + semanticVerifyErr: errAssetIDMismatch.Error(), }, "insufficient AVAX funds": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testutils.TestShortIDAddrs[0]) if err != nil { t.Fatal(err) } @@ -1023,12 +1028,12 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 2, // Produce more output than is consumed by the transaction AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, 
[][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1039,7 +1044,7 @@ func TestImportTxSemanticVerify(t *testing.T) { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() assetID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testutils.TestShortIDAddrs[0]) if err != nil { t.Fatal(err) } @@ -1057,12 +1062,12 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 2, // Produce more output than is consumed by the transaction AssetID: assetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1072,7 +1077,7 @@ func TestImportTxSemanticVerify(t *testing.T) { "no signatures": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testutils.TestShortIDAddrs[0]) if err != nil { t.Fatal(err) } @@ -1090,7 +1095,7 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, @@ -1105,7 +1110,7 @@ func TestImportTxSemanticVerify(t *testing.T) { "incorrect signature": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, 
vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testutils.TestShortIDAddrs[0]) if err != nil { t.Fatal(err) } @@ -1123,13 +1128,13 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} // Sign the transaction with the incorrect key - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[1]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[1]}}); err != nil { t.Fatal(err) } return tx @@ -1139,7 +1144,7 @@ func TestImportTxSemanticVerify(t *testing.T) { "non-unique EVM Outputs": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 2, testShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 2, testutils.TestShortIDAddrs[0]) if err != nil { t.Fatal(err) } @@ -1158,23 +1163,23 @@ func TestImportTxSemanticVerify(t *testing.T) { }}, Outs: []atomic.EVMOutput{ { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }, { - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }, }, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { t.Fatal(err) } return tx }, - genesisJSON: genesisJSONApricotPhase3, + genesisJSON: testutils.GenesisJSONApricotPhase3, semanticVerifyErr: atomic.ErrOutputsNotSortedUnique.Error(), }, } @@ -1192,7 +1197,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { "AVAX UTXO": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) 
*atomic.Tx { txID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testutils.TestShortIDAddrs[0]) if err != nil { t.Fatal(err) } @@ -1210,25 +1215,25 @@ func TestImportTxEVMStateTransfer(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { t.Fatal(err) } return tx }, checkState: func(t *testing.T, vm *VM) { - lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) + lastAcceptedBlock := vm.LastAcceptedVMBlock() - sdb, err := vm.blockChain.StateAt(lastAcceptedBlock.ethBlock.Root()) + sdb, err := vm.Ethereum().BlockChain().StateAt(lastAcceptedBlock.GetEthBlock().Root()) if err != nil { t.Fatal(err) } - avaxBalance := sdb.GetBalance(testEthAddrs[0]) + avaxBalance := sdb.GetBalance(testutils.TestEthAddrs[0]) if avaxBalance.Cmp(atomic.X2CRate) != 0 { t.Fatalf("Expected AVAX balance to be %d, found balance: %d", *atomic.X2CRate, avaxBalance) } @@ -1237,7 +1242,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { "non-AVAX UTXO": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testutils.TestShortIDAddrs[0]) if err != nil { t.Fatal(err) } @@ -1255,29 +1260,28 @@ func TestImportTxEVMStateTransfer(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testEthAddrs[0], + Address: testutils.TestEthAddrs[0], Amount: 1, AssetID: assetID, }}, }} - if err := tx.Sign(atomic.Codec, 
[][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { t.Fatal(err) } return tx }, checkState: func(t *testing.T, vm *VM) { - lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) - - sdb, err := vm.blockChain.StateAt(lastAcceptedBlock.ethBlock.Root()) + lastAcceptedBlock := vm.LastAcceptedVMBlock() + sdb, err := vm.Ethereum().BlockChain().StateAt(lastAcceptedBlock.GetEthBlock().Root()) if err != nil { t.Fatal(err) } - assetBalance := sdb.GetBalanceMultiCoin(testEthAddrs[0], common.Hash(assetID)) + assetBalance := sdb.GetBalanceMultiCoin(testutils.TestEthAddrs[0], common.Hash(assetID)) if assetBalance.Cmp(common.Big1) != 0 { t.Fatalf("Expected asset balance to be %d, found balance: %d", common.Big1, assetBalance) } - avaxBalance := sdb.GetBalance(testEthAddrs[0]) + avaxBalance := sdb.GetBalance(testutils.TestEthAddrs[0]) if avaxBalance.Cmp(common.U2560) != 0 { t.Fatalf("Expected AVAX balance to be 0, found balance: %d", avaxBalance) } diff --git a/plugin/evm/mempool_atomic_gossiping_test.go b/plugin/evm/atomic/vm/mempool_atomic_gossiping_test.go similarity index 74% rename from plugin/evm/mempool_atomic_gossiping_test.go rename to plugin/evm/atomic/vm/mempool_atomic_gossiping_test.go index ae0be940e0..9a073c276b 100644 --- a/plugin/evm/mempool_atomic_gossiping_test.go +++ b/plugin/evm/atomic/vm/mempool_atomic_gossiping_test.go @@ -1,7 +1,7 @@ // (c) 2019-2021, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package vm import ( "context" @@ -13,6 +13,8 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/extension" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" @@ -29,7 +31,7 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { assert := assert.New(t) // we use AP3 genesis here to not trip any block fees - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase3, "", "") + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase3, "", "") defer func() { err := vm.Shutdown(context.Background()) assert.NoError(err) @@ -72,10 +74,12 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { blk, err := vm.BuildBlock(context.Background()) assert.NoError(err, "could not build block out of mempool") - evmBlk, ok := blk.(*chain.BlockWrapper).Block.(*Block) + evmBlk, ok := blk.(*chain.BlockWrapper).Block.(extension.VMBlock) assert.True(ok, "unknown block type") - assert.Equal(txID, evmBlk.atomicTxs[0].ID(), "block does not include expected transaction") + atomicTxs, err := extractAtomicTxsFromBlock(evmBlk, vm.Ethereum().BlockChain().Config()) + assert.NoError(err) + assert.Equal(txID, atomicTxs[0].ID(), "block does not include expected transaction") has = mempool.Has(txID) assert.True(has, "tx should stay in mempool until block is accepted") @@ -97,7 +101,8 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { func TestMempoolMaxMempoolSizeHandling(t *testing.T) { assert := assert.New(t) - mempool, err := atomictxpool.NewMempool(&snow.Context{}, prometheus.NewRegistry(), 1, nil) + mempool := atomictxpool.Mempool{} + err := mempool.Initialize(&snow.Context{}, prometheus.NewRegistry(), 1, nil) assert.NoError(err) // create 
candidate tx (we will drop before validation) tx := atomictest.GenerateTestImportTx() @@ -121,25 +126,26 @@ func TestMempoolPriorityDrop(t *testing.T) { // we use AP3 genesis here to not trip any block fees importAmount := uint64(50000000) - _, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase3, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - testShortIDAddrs[1]: importAmount, + _, vm, _, _, _ := GenesisVMWithUTXOs(t, true, testutils.GenesisJSONApricotPhase3, "", "", map[ids.ShortID]uint64{ + testutils.TestShortIDAddrs[0]: importAmount, + testutils.TestShortIDAddrs[1]: importAmount, }) defer func() { err := vm.Shutdown(context.Background()) assert.NoError(err) }() - mempool, err := atomictxpool.NewMempool(vm.ctx, prometheus.NewRegistry(), 1, vm.verifyTxAtTip) + mempool := atomictxpool.Mempool{} + err := mempool.Initialize(vm.ctx, prometheus.NewRegistry(), 1, vm.verifyTxAtTip) assert.NoError(err) - tx1, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx1, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) if err != nil { t.Fatal(err) } assert.NoError(mempool.AddRemoteTx(tx1)) assert.True(mempool.Has(tx1.ID())) - tx2, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[1]}) + tx2, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[1], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[1]}) if err != nil { t.Fatal(err) } @@ -147,7 +153,7 @@ func TestMempoolPriorityDrop(t *testing.T) { assert.True(mempool.Has(tx1.ID())) assert.False(mempool.Has(tx2.ID())) - tx3, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[1], new(big.Int).Mul(initialBaseFee, big.NewInt(2)), []*secp256k1.PrivateKey{testKeys[1]}) + tx3, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[1], 
new(big.Int).Mul(testutils.InitialBaseFee, big.NewInt(2)), []*secp256k1.PrivateKey{testutils.TestKeys[1]}) if err != nil { t.Fatal(err) } diff --git a/plugin/evm/atomic/vm/syncervm_test.go b/plugin/evm/atomic/vm/syncervm_test.go new file mode 100644 index 0000000000..ec03ecbd11 --- /dev/null +++ b/plugin/evm/atomic/vm/syncervm_test.go @@ -0,0 +1,106 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package vm + +import ( + "testing" + + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" + "github.com/ava-labs/coreth/plugin/evm/extension" + "github.com/ava-labs/coreth/plugin/evm/testutils" + "github.com/ava-labs/coreth/predicate" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestAtomicSyncerVM(t *testing.T) { + importAmount := 2000000 * units.Avax // 2M avax + for _, test := range testutils.SyncerVMTests { + includedAtomicTxs := make([]*atomic.Tx, 0) + + t.Run(test.Name, func(t *testing.T) { + genFn := func(i int, vm extension.InnerVM, gen *core.BlockGen) { + atomicVM, ok := vm.(*VM) + require.True(t, ok) + b, err := predicate.NewResults().Bytes() + if err != nil { + t.Fatal(err) + } + gen.AppendExtra(b) + switch i { + case 0: + // spend the UTXOs from shared memory + importTx, err := atomicVM.newImportTx(atomicVM.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, testutils.TestKeys[0:1]) + require.NoError(t, err) + require.NoError(t, atomicVM.mempool.AddLocalTx(importTx)) + includedAtomicTxs = append(includedAtomicTxs, importTx) + case 1: + // export some of the imported UTXOs to test exportTx is properly synced + state, err := vm.Ethereum().BlockChain().State() + if err != nil { + 
t.Fatal(err) + } + exportTx, err := atomic.NewExportTx( + atomicVM.ctx, + atomicVM.CurrentRules(), + state, + atomicVM.ctx.AVAXAssetID, + importAmount/2, + atomicVM.ctx.XChainID, + testutils.TestShortIDAddrs[0], + testutils.InitialBaseFee, + testutils.TestKeys[0:1], + ) + require.NoError(t, err) + require.NoError(t, atomicVM.mempool.AddLocalTx(exportTx)) + includedAtomicTxs = append(includedAtomicTxs, exportTx) + default: // Generate simple transfer transactions. + pk := testutils.TestKeys[0].ToECDSA() + tx := types.NewTransaction(gen.TxNonce(testutils.TestEthAddrs[0]), testutils.TestEthAddrs[1], common.Big1, params.TxGas, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.Ethereum().BlockChain().Config().ChainID), pk) + require.NoError(t, err) + gen.AddTx(signedTx) + } + } + newVMFn := func() (extension.InnerVM, dummy.ConsensusCallbacks) { + vm := newAtomicTestVM() + return vm, vm.createConsensusCallbacks() + } + + afterInit := func(t *testing.T, params testutils.SyncTestParams, vm extension.InnerVM) { + atomicVM, ok := vm.(*VM) + require.True(t, ok) + serverAtomicTrie := atomicVM.atomicBackend.AtomicTrie() + require.NoError(t, serverAtomicTrie.Commit(params.SyncableInterval, serverAtomicTrie.LastAcceptedRoot())) + require.NoError(t, atomicVM.VersionDB().Commit()) + } + + testSetup := &testutils.SyncTestSetup{ + NewVM: newVMFn, + GenFn: genFn, + AfterInit: afterInit, + ExtraSyncerVMTest: func(t *testing.T, syncerVMSetup testutils.SyncerVMSetup) { + // check atomic memory was synced properly + syncerVM := syncerVMSetup.VM + atomicVM, ok := syncerVM.(*VM) + require.True(t, ok) + syncerSharedMemories := atomictest.NewSharedMemories(syncerVMSetup.AtomicMemory, atomicVM.ctx.ChainID, atomicVM.ctx.XChainID) + + for _, tx := range includedAtomicTxs { + atomicOps, err := atomictest.ConvertToAtomicOps(tx) + require.NoError(t, err) + syncerSharedMemories.AssertOpsApplied(t, atomicOps) + } + }, + } + test.TestFunc(t, testSetup) + }) + 
} +} diff --git a/plugin/evm/atomic/vm/tx_gossip_test.go b/plugin/evm/atomic/vm/tx_gossip_test.go new file mode 100644 index 0000000000..0fcfca3963 --- /dev/null +++ b/plugin/evm/atomic/vm/tx_gossip_test.go @@ -0,0 +1,318 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package vm + +import ( + "context" + "encoding/binary" + "sync" + "testing" + "time" + + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" + "github.com/ava-labs/avalanchego/snow/validators" + agoUtils "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/testutils" + "github.com/ava-labs/coreth/utils" +) + +func TestAtomicTxGossip(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := utils.TestSnowContext() + snowCtx.AVAXAssetID = ids.GenerateTestID() + validatorState := utils.NewTestValidatorState() + snowCtx.ValidatorState = validatorState + memory := avalancheatomic.NewMemory(memdb.New()) + snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) + + pk, err := 
secp256k1.NewPrivateKey() + require.NoError(err) + address := pk.EthAddress() + genesis := testutils.NewPrefundedGenesis(100_000_000_000_000_000, address) + genesisBytes, err := genesis.MarshalJSON() + require.NoError(err) + + responseSender := &enginetest.SenderStub{ + SentAppResponse: make(chan []byte, 1), + } + vm := newAtomicTestVM() + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + genesisBytes, + nil, + nil, + make(chan common.Message), + nil, + responseSender, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + defer func() { + require.NoError(vm.Shutdown(ctx)) + }() + + // sender for the peer requesting gossip from [vm] + peerSender := &enginetest.SenderStub{ + SentAppRequest: make(chan []byte, 1), + } + network, err := p2p.NewNetwork(logging.NoLog{}, peerSender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(p2p.AtomicTxGossipHandlerID) + + // we only accept gossip requests from validators + requestingNodeID := ids.GenerateTestNodeID() + require.NoError(vm.Connected(ctx, requestingNodeID, nil)) + validatorState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return 0, nil + } + validatorState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + requestingNodeID: { + NodeID: requestingNodeID, + Weight: 1, + }, + }, nil + } + + // Ask the VM for any new transactions. We should get nothing at first. 
+ emptyBloomFilter, err := gossip.NewBloomFilter( + prometheus.NewRegistry(), + "", + config.TxGossipBloomMinTargetElements, + config.TxGossipBloomTargetFalsePositiveRate, + config.TxGossipBloomResetFalsePositiveRate, + ) + require.NoError(err) + emptyBloomFilterBytes, _ := emptyBloomFilter.Marshal() + request := &sdk.PullGossipRequest{ + Filter: emptyBloomFilterBytes, + Salt: agoUtils.RandomBytes(32), + } + + requestBytes, err := proto.Marshal(request) + require.NoError(err) + + wg := &sync.WaitGroup{} + wg.Add(1) + onResponse := func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { + require.NoError(err) + + response := &sdk.PullGossipResponse{} + require.NoError(proto.Unmarshal(responseBytes, response)) + require.Empty(response.Gossip) + wg.Done() + } + require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) + require.NoError(vm.AppRequest(ctx, requestingNodeID, 1, time.Time{}, <-peerSender.SentAppRequest)) + require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 1, <-responseSender.SentAppResponse)) + wg.Wait() + + // Issue a tx to the VM + utxo, err := addUTXO( + memory, + snowCtx, + ids.GenerateTestID(), + 0, + snowCtx.AVAXAssetID, + 100_000_000_000, + pk.Address(), + ) + require.NoError(err) + tx, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, testutils.InitialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + require.NoError(err) + require.NoError(vm.mempool.AddLocalTx(tx)) + + // wait so we aren't throttled by the vm + time.Sleep(5 * time.Second) + + // Ask the VM for new transactions. We should get the newly issued tx. 
+ wg.Add(1) + + marshaller := atomic.GossipAtomicTxMarshaller{} + onResponse = func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { + require.NoError(err) + + response := &sdk.PullGossipResponse{} + require.NoError(proto.Unmarshal(responseBytes, response)) + require.Len(response.Gossip, 1) + + gotTx, err := marshaller.UnmarshalGossip(response.Gossip[0]) + require.NoError(err) + require.Equal(tx.ID(), gotTx.GossipID()) + + wg.Done() + } + require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) + require.NoError(vm.AppRequest(ctx, requestingNodeID, 3, time.Time{}, <-peerSender.SentAppRequest)) + require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 3, <-responseSender.SentAppResponse)) + wg.Wait() +} + +// Tests that a tx is gossiped when it is issued +func TestAtomicTxPushGossipOutbound(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := utils.TestSnowContext() + snowCtx.AVAXAssetID = ids.GenerateTestID() + validatorState := utils.NewTestValidatorState() + snowCtx.ValidatorState = validatorState + memory := avalancheatomic.NewMemory(memdb.New()) + snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) + + pk, err := secp256k1.NewPrivateKey() + require.NoError(err) + address := pk.EthAddress() + genesis := testutils.NewPrefundedGenesis(100_000_000_000_000_000, address) + genesisBytes, err := genesis.MarshalJSON() + require.NoError(err) + + sender := &enginetest.SenderStub{ + SentAppGossip: make(chan []byte, 1), + } + vm := newAtomicTestVM() + vm.atomicTxPullGossiper = gossip.NoOpGossiper{} + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + genesisBytes, + nil, + nil, + make(chan common.Message), + nil, + sender, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + defer func() { + require.NoError(vm.Shutdown(ctx)) + }() + + // Issue a tx to the VM + utxo, err := addUTXO( + memory, + snowCtx, + ids.GenerateTestID(), + 0, + 
snowCtx.AVAXAssetID, + 100_000_000_000, + pk.Address(), + ) + require.NoError(err) + tx, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, testutils.InitialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + require.NoError(err) + require.NoError(vm.mempool.AddLocalTx(tx)) + vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) + + gossipedBytes := <-sender.SentAppGossip + require.Equal(byte(p2p.AtomicTxGossipHandlerID), gossipedBytes[0]) + + outboundGossipMsg := &sdk.PushGossip{} + require.NoError(proto.Unmarshal(gossipedBytes[1:], outboundGossipMsg)) + require.Len(outboundGossipMsg.Gossip, 1) + + marshaller := atomic.GossipAtomicTxMarshaller{} + gossipedTx, err := marshaller.UnmarshalGossip(outboundGossipMsg.Gossip[0]) + require.NoError(err) + require.Equal(tx.ID(), gossipedTx.Tx.ID()) +} + +// Tests that a tx is gossiped when it is issued +func TestAtomicTxPushGossipInbound(t *testing.T) { + require := require.New(t) + ctx := context.Background() + snowCtx := utils.TestSnowContext() + snowCtx.AVAXAssetID = ids.GenerateTestID() + validatorState := utils.NewTestValidatorState() + snowCtx.ValidatorState = validatorState + memory := avalancheatomic.NewMemory(memdb.New()) + snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) + + pk, err := secp256k1.NewPrivateKey() + require.NoError(err) + address := pk.EthAddress() + genesis := testutils.NewPrefundedGenesis(100_000_000_000_000_000, address) + genesisBytes, err := genesis.MarshalJSON() + require.NoError(err) + + sender := &enginetest.Sender{} + vm := newAtomicTestVM() + vm.atomicTxPullGossiper = gossip.NoOpGossiper{} + + require.NoError(vm.Initialize( + ctx, + snowCtx, + memdb.New(), + genesisBytes, + nil, + nil, + make(chan common.Message), + nil, + sender, + )) + require.NoError(vm.SetState(ctx, snow.NormalOp)) + + defer func() { + require.NoError(vm.Shutdown(ctx)) + }() + + // issue a tx to the vm + utxo, err := addUTXO( + memory, + snowCtx, + 
ids.GenerateTestID(), + 0, + snowCtx.AVAXAssetID, + 100_000_000_000, + pk.Address(), + ) + require.NoError(err) + tx, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, testutils.InitialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + require.NoError(err) + require.NoError(vm.mempool.AddLocalTx(tx)) + + marshaller := atomic.GossipAtomicTxMarshaller{} + gossipedTx := &atomic.GossipAtomicTx{ + Tx: tx, + } + gossipBytes, err := marshaller.MarshalGossip(gossipedTx) + require.NoError(err) + + inboundGossip := &sdk.PushGossip{ + Gossip: [][]byte{gossipBytes}, + } + inboundGossipBytes, err := proto.Marshal(inboundGossip) + require.NoError(err) + + inboundGossipMsg := append(binary.AppendUvarint(nil, p2p.AtomicTxGossipHandlerID), inboundGossipBytes...) + + require.NoError(vm.AppGossip(ctx, ids.EmptyNodeID, inboundGossipMsg)) + require.True(vm.mempool.Has(tx.ID())) +} diff --git a/plugin/evm/atomic/vm/tx_semantic_verifier.go b/plugin/evm/atomic/vm/tx_semantic_verifier.go new file mode 100644 index 0000000000..9ea68c937f --- /dev/null +++ b/plugin/evm/atomic/vm/tx_semantic_verifier.go @@ -0,0 +1,239 @@ +package vm + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/extension" +) + +var _ atomic.Visitor = (*semanticVerifier)(nil) + +var ( + errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + errConflictingAtomicInputs = errors.New("invalid block due to conflicting atomic inputs") + errRejectedParent = 
errors.New("rejected parent") + errPublicKeySignatureMismatch = errors.New("signature doesn't match public key") +) + +type BlockFetcher interface { + LastAcceptedVMBlock() extension.VMBlock + GetVMBlock(context.Context, ids.ID) (extension.VMBlock, error) +} + +type VerifierBackend struct { + Ctx *snow.Context + Fx fx.Fx + Rules params.Rules + ChainConfig *params.ChainConfig + Bootstrapped bool + BlockFetcher BlockFetcher + SecpCache *secp256k1.RecoverCache +} + +type semanticVerifier struct { + backend *VerifierBackend + atx *atomic.Tx + parent extension.VMBlock + baseFee *big.Int +} + +// SemanticVerify this transaction is valid. +func (s *semanticVerifier) ImportTx(utx *atomic.UnsignedImportTx) error { + backend := s.backend + ctx := backend.Ctx + rules := backend.Rules + stx := s.atx + if err := utx.Verify(ctx, rules); err != nil { + return err + } + + // Check the transaction consumes and produces the right amounts + fc := avax.NewFlowChecker() + switch { + // Apply dynamic fees to import transactions as of Apricot Phase 3 + case rules.IsApricotPhase3: + gasUsed, err := stx.GasUsed(rules.IsApricotPhase5) + if err != nil { + return err + } + txFee, err := atomic.CalculateDynamicFee(gasUsed, s.baseFee) + if err != nil { + return err + } + fc.Produce(ctx.AVAXAssetID, txFee) + + // Apply fees to import transactions as of Apricot Phase 2 + case rules.IsApricotPhase2: + fc.Produce(ctx.AVAXAssetID, params.AvalancheAtomicTxFee) + } + for _, out := range utx.Outs { + fc.Produce(out.AssetID, out.Amount) + } + for _, in := range utx.ImportedInputs { + fc.Consume(in.AssetID(), in.Input().Amount()) + } + + if err := fc.Verify(); err != nil { + return fmt.Errorf("import tx flow check failed due to: %w", err) + } + + if len(stx.Creds) != len(utx.ImportedInputs) { + return fmt.Errorf("import tx contained mismatched number of inputs/credentials (%d vs. 
%d)", len(utx.ImportedInputs), len(stx.Creds)) + } + + if !backend.Bootstrapped { + // Allow for force committing during bootstrapping + return nil + } + + utxoIDs := make([][]byte, len(utx.ImportedInputs)) + for i, in := range utx.ImportedInputs { + inputID := in.UTXOID.InputID() + utxoIDs[i] = inputID[:] + } + // allUTXOBytes is guaranteed to be the same length as utxoIDs + allUTXOBytes, err := ctx.SharedMemory.Get(utx.SourceChain, utxoIDs) + if err != nil { + return fmt.Errorf("failed to fetch import UTXOs from %s due to: %w", utx.SourceChain, err) + } + + for i, in := range utx.ImportedInputs { + utxoBytes := allUTXOBytes[i] + + utxo := &avax.UTXO{} + if _, err := atomic.Codec.Unmarshal(utxoBytes, utxo); err != nil { + return fmt.Errorf("failed to unmarshal UTXO: %w", err) + } + + cred := stx.Creds[i] + + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if utxoAssetID != inAssetID { + return errAssetIDMismatch + } + + if err := backend.Fx.VerifyTransfer(utx, in.In, cred, utxo.Out); err != nil { + return fmt.Errorf("import tx transfer failed verification: %w", err) + } + } + + return conflicts(backend, utx.InputUTXOs(), s.parent) +} + +// conflicts returns an error if [inputs] conflicts with any of the atomic inputs contained in [ancestor] +// or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is +// accepted, then nil will be returned immediately. +// If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned. 
+func conflicts(backend *VerifierBackend, inputs set.Set[ids.ID], ancestor extension.VMBlock) error { + lastAcceptedBlock := backend.BlockFetcher.LastAcceptedVMBlock() + lastAcceptedHeight := lastAcceptedBlock.Height() + for ancestor.Height() > lastAcceptedHeight { + atomicTxs, err := extractAtomicTxsFromBlock(ancestor, backend.ChainConfig) + if err != nil { + return err + } + // If any of the atomic transactions in the ancestor conflict with [inputs] + // return an error. + for _, atomicTx := range atomicTxs { + if inputs.Overlaps(atomicTx.InputUTXOs()) { + return errConflictingAtomicInputs + } + } + + // Move up the chain. + nextAncestorID := ancestor.Parent() + // If the ancestor is unknown, then the parent failed + // verification when it was called. + // If the ancestor is rejected, then this block shouldn't be + // inserted into the canonical chain because the parent is + // will be missing. + // If the ancestor is processing, then the block may have + // been verified. + nextAncestor, err := backend.BlockFetcher.GetVMBlock(context.TODO(), nextAncestorID) + if err != nil { + return errRejectedParent + } + ancestor = nextAncestor + } + + return nil +} + +// SemanticVerify this transaction is valid. 
+func (s *semanticVerifier) ExportTx(utx *atomic.UnsignedExportTx) error { + ctx := s.backend.Ctx + rules := s.backend.Rules + stx := s.atx + if err := utx.Verify(ctx, rules); err != nil { + return err + } + + // Check the transaction consumes and produces the right amounts + fc := avax.NewFlowChecker() + switch { + // Apply dynamic fees to export transactions as of Apricot Phase 3 + case rules.IsApricotPhase3: + gasUsed, err := stx.GasUsed(rules.IsApricotPhase5) + if err != nil { + return err + } + txFee, err := atomic.CalculateDynamicFee(gasUsed, s.baseFee) + if err != nil { + return err + } + fc.Produce(ctx.AVAXAssetID, txFee) + // Apply fees to export transactions before Apricot Phase 3 + default: + fc.Produce(ctx.AVAXAssetID, params.AvalancheAtomicTxFee) + } + for _, out := range utx.ExportedOutputs { + fc.Produce(out.AssetID(), out.Output().Amount()) + } + for _, in := range utx.Ins { + fc.Consume(in.AssetID, in.Amount) + } + + if err := fc.Verify(); err != nil { + return fmt.Errorf("export tx flow check failed due to: %w", err) + } + + if len(utx.Ins) != len(stx.Creds) { + return fmt.Errorf("export tx contained mismatched number of inputs/credentials (%d vs. 
%d)", len(utx.Ins), len(stx.Creds)) + } + + for i, input := range utx.Ins { + cred, ok := stx.Creds[i].(*secp256k1fx.Credential) + if !ok { + return fmt.Errorf("expected *secp256k1fx.Credential but got %T", cred) + } + if err := cred.Verify(); err != nil { + return err + } + + if len(cred.Sigs) != 1 { + return fmt.Errorf("expected one signature for EVM Input Credential, but found: %d", len(cred.Sigs)) + } + pubKey, err := s.backend.SecpCache.RecoverPublicKey(utx.Bytes(), cred.Sigs[0][:]) + if err != nil { + return err + } + if input.Address != pubKey.EthAddress() { + return errPublicKeySignatureMismatch + } + } + + return nil +} diff --git a/plugin/evm/tx_test.go b/plugin/evm/atomic/vm/tx_test.go similarity index 91% rename from plugin/evm/tx_test.go rename to plugin/evm/atomic/vm/tx_test.go index 9bef967e68..efffc81447 100644 --- a/plugin/evm/tx_test.go +++ b/plugin/evm/atomic/vm/tx_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package vm import ( "context" @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/testutils" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" @@ -99,12 +100,8 @@ type atomicTxTest struct { } func executeTxTest(t *testing.T, test atomicTxTest) { - genesisJSON := test.genesisJSON - if len(genesisJSON) == 0 { - genesisJSON = genesisJSONApricotPhase0 - } - issuer, vm, _, sharedMemory, _ := GenesisVM(t, !test.bootstrapping, genesisJSON, test.configJSON, test.upgradeJSON) - rules := vm.currentRules() + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, !test.bootstrapping, test.genesisJSON, test.configJSON, test.upgradeJSON) + rules := vm.CurrentRules() tx := test.setup(t, vm, sharedMemory) @@ -112,19 +109,26 @@ func executeTxTest(t *testing.T, test atomicTxTest) { // If ApricotPhase3 is active, use the initial base fee for the atomic transaction switch { case rules.IsApricotPhase3: - baseFee = initialBaseFee + baseFee = testutils.InitialBaseFee } - lastAcceptedBlock := vm.LastAcceptedBlockInternal().(*Block) - backend := &atomic.VerifierBackend{ + lastAcceptedBlock := vm.LastAcceptedVMBlock() + backend := &VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: rules, + ChainConfig: vm.Ethereum().BlockChain().Config(), Bootstrapped: vm.bootstrapped.Get(), BlockFetcher: vm, SecpCache: &vm.secpCache, } - if err := tx.UnsignedAtomicTx.SemanticVerify(backend, tx, lastAcceptedBlock, baseFee); len(test.semanticVerifyErr) == 0 && err != nil { + if err := tx.UnsignedAtomicTx.Visit( + &semanticVerifier{ + backend: backend, + atx: tx, + parent: lastAcceptedBlock, + baseFee: baseFee, + }); len(test.semanticVerifyErr) == 0 && err != nil { t.Fatalf("SemanticVerify failed unexpectedly due to: %s", err) } else if len(test.semanticVerifyErr) != 0 { if err == nil { @@ -138,7 +142,7 @@ func executeTxTest(t *testing.T, test 
atomicTxTest) { } // Retrieve dummy state to test that EVMStateTransfer works correctly - sdb, err := vm.blockChain.StateAt(lastAcceptedBlock.ethBlock.Root()) + sdb, err := vm.Ethereum().BlockChain().StateAt(lastAcceptedBlock.GetEthBlock().Root()) if err != nil { t.Fatal(err) } @@ -159,7 +163,7 @@ func executeTxTest(t *testing.T, test atomicTxTest) { // If this test simulates processing txs during bootstrapping (where some verification is skipped), // initialize the block building goroutines normally initialized in SetState(snow.NormalOps). // This ensures that the VM can build a block correctly during the test. - if err := vm.initBlockBuilding(); err != nil { + if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { t.Fatal(err) } } diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index b06bf4c598..ad345f1521 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -34,11 +34,11 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/atomic" - "github.com/ava-labs/coreth/plugin/evm/atomic/extension" atomicstate "github.com/ava-labs/coreth/plugin/evm/atomic/state" atomicsync "github.com/ava-labs/coreth/plugin/evm/atomic/sync" "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/plugin/evm/gossip" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/utils" @@ -186,18 +186,18 @@ func (vm *VM) Initialize( } // initialize atomic repository - lastAcceptedHash, lastAcceptedHeight, err := vm.InnerVM.ReadLastAccepted() + lastAcceptedHash, lastAcceptedHeight, err := innerVM.ReadLastAccepted() if err != nil { return fmt.Errorf("failed to read last accepted block: %w", err) } - vm.atomicTxRepository, err = atomicstate.NewAtomicTxRepository(vm.InnerVM.VersionDB(), codec, lastAcceptedHeight) + 
vm.atomicTxRepository, err = atomicstate.NewAtomicTxRepository(innerVM.VersionDB(), codec, lastAcceptedHeight) if err != nil { return fmt.Errorf("failed to create atomic repository: %w", err) } vm.atomicBackend, err = atomicstate.NewAtomicBackend( vm.ctx.SharedMemory, bonusBlockHeights, vm.atomicTxRepository, lastAcceptedHeight, lastAcceptedHash, - vm.InnerVM.Config().CommitInterval, + innerVM.Config().CommitInterval, ) if err != nil { return fmt.Errorf("failed to create atomic backend: %w", err) @@ -205,7 +205,7 @@ func (vm *VM) Initialize( // Atomic backend is available now, we can initialize structs that depend on it syncProvider.Initialize(vm.atomicBackend.AtomicTrie()) - syncExtender.Initialize(vm.atomicBackend, vm.atomicBackend.AtomicTrie(), vm.InnerVM.Config().StateSyncRequestSize) + syncExtender.Initialize(vm.atomicBackend, vm.atomicBackend.AtomicTrie(), innerVM.Config().StateSyncRequestSize) leafHandler.Initialize(vm.atomicBackend.AtomicTrie().TrieDB(), atomicstate.AtomicTrieKeyLength, codec) vm.secpCache = secp256k1.RecoverCache{ LRU: cache.LRU[ids.ID, *secp256k1.PublicKey]{ @@ -262,36 +262,36 @@ func (vm *VM) onNormalOperationsStarted() error { ctx, cancel := context.WithCancel(context.TODO()) vm.cancel = cancel - + innerVM := vm.InnerVM atomicTxGossipMarshaller := atomic.GossipAtomicTxMarshaller{} - atomicTxGossipClient := vm.InnerVM.NewClient(p2p.AtomicTxGossipHandlerID, p2p.WithValidatorSampling(vm.InnerVM.Validators())) - atomicTxGossipMetrics, err := avalanchegossip.NewMetrics(vm.InnerVM.MetricRegistry(), atomicTxGossipNamespace) + atomicTxGossipClient := innerVM.NewClient(p2p.AtomicTxGossipHandlerID, p2p.WithValidatorSampling(innerVM.Validators())) + atomicTxGossipMetrics, err := avalanchegossip.NewMetrics(innerVM.MetricRegistry(), atomicTxGossipNamespace) if err != nil { return fmt.Errorf("failed to initialize atomic tx gossip metrics: %w", err) } pushGossipParams := avalanchegossip.BranchingFactor{ - StakePercentage: 
vm.InnerVM.Config().PushGossipPercentStake, - Validators: vm.InnerVM.Config().PushGossipNumValidators, - Peers: vm.InnerVM.Config().PushGossipNumPeers, + StakePercentage: innerVM.Config().PushGossipPercentStake, + Validators: innerVM.Config().PushGossipNumValidators, + Peers: innerVM.Config().PushGossipNumPeers, } pushRegossipParams := avalanchegossip.BranchingFactor{ - Validators: vm.InnerVM.Config().PushRegossipNumValidators, - Peers: vm.InnerVM.Config().PushRegossipNumPeers, + Validators: innerVM.Config().PushRegossipNumValidators, + Peers: innerVM.Config().PushRegossipNumPeers, } if vm.atomicTxPushGossiper == nil { vm.atomicTxPushGossiper, err = avalanchegossip.NewPushGossiper[*atomic.GossipAtomicTx]( atomicTxGossipMarshaller, vm.mempool, - vm.InnerVM.Validators(), + innerVM.Validators(), atomicTxGossipClient, atomicTxGossipMetrics, pushGossipParams, pushRegossipParams, config.PushGossipDiscardedElements, config.TxGossipTargetMessageSize, - vm.InnerVM.Config().RegossipFrequency.Duration, + innerVM.Config().RegossipFrequency.Duration, ) if err != nil { return fmt.Errorf("failed to initialize atomic tx push gossiper: %w", err) @@ -307,11 +307,11 @@ func (vm *VM) onNormalOperationsStarted() error { config.TxGossipTargetMessageSize, config.TxGossipThrottlingPeriod, config.TxGossipThrottlingLimit, - vm.InnerVM.Validators(), + innerVM.Validators(), ) } - if err := vm.InnerVM.AddHandler(p2p.AtomicTxGossipHandlerID, vm.atomicTxGossipHandler); err != nil { + if err := innerVM.AddHandler(p2p.AtomicTxGossipHandlerID, vm.atomicTxGossipHandler); err != nil { return fmt.Errorf("failed to add atomic tx gossip handler: %w", err) } @@ -328,17 +328,17 @@ func (vm *VM) onNormalOperationsStarted() error { vm.atomicTxPullGossiper = &avalanchegossip.ValidatorGossiper{ Gossiper: atomicTxPullGossiper, NodeID: vm.ctx.NodeID, - Validators: vm.InnerVM.Validators(), + Validators: innerVM.Validators(), } } vm.shutdownWg.Add(2) go func() { - avalanchegossip.Every(ctx, vm.ctx.Log, 
vm.atomicTxPushGossiper, vm.InnerVM.Config().PushGossipFrequency.Duration) + avalanchegossip.Every(ctx, vm.ctx.Log, vm.atomicTxPushGossiper, innerVM.Config().PushGossipFrequency.Duration) vm.shutdownWg.Done() }() go func() { - avalanchegossip.Every(ctx, vm.ctx.Log, vm.atomicTxPullGossiper, vm.InnerVM.Config().PullGossipFrequency.Duration) + avalanchegossip.Every(ctx, vm.ctx.Log, vm.atomicTxPullGossiper, innerVM.Config().PullGossipFrequency.Duration) vm.shutdownWg.Done() }() return nil @@ -384,16 +384,17 @@ func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { if new(big.Int).SetUint64(gasUsed).Cmp(params.AtomicGasLimit) > 0 { return fmt.Errorf("tx gas usage (%d) exceeds atomic gas limit (%d)", gasUsed, params.AtomicGasLimit.Uint64()) } - + innerVM := vm.InnerVM + blockchain := innerVM.Ethereum().BlockChain() // Note: we fetch the current block and then the state at that block instead of the current state directly // since we need the header of the current block below. - preferredBlock := vm.InnerVM.Blockchain().CurrentBlock() - preferredState, err := vm.InnerVM.Blockchain().StateAt(preferredBlock.Root) + preferredBlock := blockchain.CurrentBlock() + preferredState, err := blockchain.StateAt(preferredBlock.Root) if err != nil { return fmt.Errorf("failed to retrieve block state at tip while verifying atomic tx: %w", err) } - chainConfig := vm.InnerVM.Blockchain().Config() - rules := vm.InnerVM.Blockchain().Config().Rules(preferredBlock.Number, preferredBlock.Time) + chainConfig := blockchain.Config() + rules := blockchain.Config().Rules(preferredBlock.Number, preferredBlock.Time) parentHeader := preferredBlock var nextBaseFee *big.Int timestamp := uint64(vm.clock.Time().Unix()) @@ -414,39 +415,36 @@ func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { // for reverting to the correct snapshot after calling this function. If this function is called with a // throwaway state, then this is not necessary. 
func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { - parent, err := vm.GetAtomicBlock(context.TODO(), ids.ID(parentHash)) + parent, err := vm.InnerVM.GetVMBlock(context.TODO(), ids.ID(parentHash)) if err != nil { return fmt.Errorf("failed to get parent block: %w", err) } - atomicBackend := &atomic.VerifierBackend{ + atomicBackend := &VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: rules, + ChainConfig: vm.InnerVM.Ethereum().BlockChain().Config(), Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm, + BlockFetcher: vm.InnerVM, SecpCache: &vm.secpCache, } - if err := tx.UnsignedAtomicTx.SemanticVerify(atomicBackend, tx, parent, baseFee); err != nil { + if err := tx.UnsignedAtomicTx.Visit(&semanticVerifier{ + backend: atomicBackend, + atx: tx, + parent: parent, + baseFee: baseFee, + }); err != nil { return err } return tx.UnsignedAtomicTx.EVMStateTransfer(vm.ctx, state) } -func (vm *VM) GetAtomicBlock(ctx context.Context, id ids.ID) (atomic.AtomicBlockContext, error) { - extendedBlock, err := vm.InnerVM.GetBlockExtended(ctx, id) - if err != nil { - return nil, err - } - - return wrapAtomicBlock(extendedBlock) -} - // verifyTxs verifies that [txs] are valid to be issued into a block with parent block [parentHash] // using [rules] as the current rule set. func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.Int, height uint64, rules params.Rules) error { // Ensure that the parent was verified and inserted correctly. - if !vm.InnerVM.Blockchain().HasBlock(parentHash, height-1) { - return atomic.ErrRejectedParent + if !vm.InnerVM.Ethereum().BlockChain().HasBlock(parentHash, height-1) { + return errRejectedParent } ancestorID := ids.ID(parentHash) @@ -454,30 +452,36 @@ func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.I // it was called. 
// If the ancestor is rejected, then this block shouldn't be inserted // into the canonical chain because the parent will be missing. - ancestor, err := vm.GetAtomicBlock(context.TODO(), ancestorID) + ancestor, err := vm.GetVMBlock(context.TODO(), ancestorID) if err != nil { - return atomic.ErrRejectedParent + return errRejectedParent } // Ensure each tx in [txs] doesn't conflict with any other atomic tx in // a processing ancestor block. inputs := set.Set[ids.ID]{} - atomicBackend := &atomic.VerifierBackend{ + atomicBackend := &VerifierBackend{ Ctx: vm.ctx, Fx: &vm.fx, Rules: rules, + ChainConfig: vm.InnerVM.Ethereum().BlockChain().Config(), Bootstrapped: vm.bootstrapped.Get(), BlockFetcher: vm, SecpCache: &vm.secpCache, } + for _, atomicTx := range txs { utx := atomicTx.UnsignedAtomicTx - if err := utx.SemanticVerify(atomicBackend, atomicTx, ancestor, baseFee); err != nil { + if err := utx.Visit(&semanticVerifier{ + backend: atomicBackend, + atx: atomicTx, + parent: ancestor, + }); err != nil { return fmt.Errorf("invalid block due to failed semanatic verify: %w at height %d", err, height) } txInputs := utx.InputUTXOs() if inputs.Overlaps(txInputs) { - return atomic.ErrConflictingAtomicInputs + return errConflictingAtomicInputs } inputs.Union(txInputs) } @@ -493,8 +497,8 @@ func (vm *VM) Clock() *mockable.Clock { return &vm.clock } // Logger implements the secp256k1fx interface func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } -func (vm *VM) createConsensusCallbacks() *dummy.ConsensusCallbacks { - return &dummy.ConsensusCallbacks{ +func (vm *VM) createConsensusCallbacks() dummy.ConsensusCallbacks { + return dummy.ConsensusCallbacks{ OnFinalizeAndAssemble: vm.onFinalizeAndAssemble, OnExtraStateChange: vm.onExtraStateChange, } @@ -511,7 +515,7 @@ func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.S // Note: snapshot is taken inside the loop because you cannot revert to the same snapshot more than // once. 
snapshot := state.Snapshot() - rules := vm.InnerVM.Blockchain().Config().Rules(header.Number, header.Time) + rules := vm.InnerVM.Ethereum().BlockChain().Config().Rules(header.Number, header.Time) if err := vm.verifyTx(tx, header.ParentHash, header.BaseFee, state, rules); err != nil { // Discard the transaction from the mempool on failed verification. log.Debug("discarding tx from mempool on failed verification", "txID", tx.ID(), "err", err) @@ -553,7 +557,7 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. batchAtomicUTXOs set.Set[ids.ID] batchContribution *big.Int = new(big.Int).Set(common.Big0) batchGasUsed *big.Int = new(big.Int).Set(common.Big0) - rules = vm.InnerVM.Blockchain().Config().Rules(header.Number, header.Time) + rules = vm.InnerVM.Ethereum().BlockChain().Config().Rules(header.Number, header.Time) size int ) @@ -648,7 +652,7 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. } func (vm *VM) onFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { - if !vm.InnerVM.Blockchain().Config().IsApricotPhase5(header.Time) { + if !vm.InnerVM.Ethereum().BlockChain().Config().IsApricotPhase5(header.Time) { return vm.preBatchOnFinalizeAndAssemble(header, state, txs) } return vm.postBatchOnFinalizeAndAssemble(header, state, txs) @@ -659,7 +663,7 @@ func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big batchContribution *big.Int = big.NewInt(0) batchGasUsed *big.Int = big.NewInt(0) header = block.Header() - rules = vm.InnerVM.Blockchain().Config().Rules(header.Number, header.Time) + rules = vm.InnerVM.Ethereum().BlockChain().Config().Rules(header.Number, header.Time) ) txs, err := atomic.ExtractAtomicTxs(block.ExtData(), rules.IsApricotPhase5, atomic.Codec) @@ -778,9 +782,7 @@ func (vm *VM) newImportTx( return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) } - currentHeader := 
vm.InnerVM.Blockchain().CurrentHeader() - currentRules := vm.InnerVM.Blockchain().Config().Rules(currentHeader.Number, currentHeader.Time) - return atomic.NewImportTx(vm.ctx, currentRules, vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) + return atomic.NewImportTx(vm.ctx, vm.InnerVM.CurrentRules(), vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) } func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { diff --git a/plugin/evm/atomic/vm/vm_test.go b/plugin/evm/atomic/vm/vm_test.go new file mode 100644 index 0000000000..54987cef0b --- /dev/null +++ b/plugin/evm/atomic/vm/vm_test.go @@ -0,0 +1,1650 @@ +package vm + +import ( + "context" + "crypto/rand" + "errors" + "math/big" + "strings" + "testing" + "time" + + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/chain" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + accountKeystore "github.com/ava-labs/coreth/accounts/keystore" + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm" + "github.com/ava-labs/coreth/plugin/evm/atomic" + "github.com/ava-labs/coreth/plugin/evm/atomic/txpool" + "github.com/ava-labs/coreth/plugin/evm/extension" + "github.com/ava-labs/coreth/plugin/evm/testutils" + "github.com/ava-labs/coreth/trie" + + "github.com/ethereum/go-ethereum/rlp" + + 
"github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newAtomicTestVM() *VM { + return WrapVM(&evm.VM{}) +} + +func GenesisAtomicVM(t *testing.T, + finishBootstrapping bool, + genesisJSON string, + configJSON string, + upgradeJSON string, +) ( + chan commonEng.Message, + *VM, + database.Database, + *avalancheatomic.Memory, + *enginetest.Sender, +) { + vm := WrapVM(&evm.VM{}) + ch, db, m, sender, _ := testutils.SetupVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON, vm) + return ch, vm, db, m, sender +} + +func addUTXO(sharedMemory *avalancheatomic.Memory, ctx *snow.Context, txID ids.ID, index uint32, assetID ids.ID, amount uint64, addr ids.ShortID) (*avax.UTXO, error) { + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: index, + }, + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + } + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) + if err != nil { + return nil, err + } + + xChainSharedMemory := sharedMemory.NewSharedMemory(ctx.XChainID) + inputID := utxo.InputID() + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + addr.Bytes(), + }, + }}}}); err != nil { + return nil, err + } + + return utxo, nil +} + +// GenesisVMWithUTXOs creates a GenesisVM and generates UTXOs in the X-Chain Shared Memory containing AVAX based on the [utxos] map +// Generates UTXOIDs by using a hash of the address in the [utxos] map such that the UTXOs will be generated deterministically. 
+// If [testutils.GenesisJSON] is empty, defaults to using [testutils.GenesisJSONLatest] +func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *avalancheatomic.Memory, *enginetest.Sender) { + issuer, vm, db, sharedMemory, sender := GenesisAtomicVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON) + for addr, avaxAmount := range utxos { + txID, err := ids.ToID(hashing.ComputeHash256(addr.Bytes())) + if err != nil { + t.Fatalf("Failed to generate txID from addr: %s", err) + } + if _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, avaxAmount, addr); err != nil { + t.Fatalf("Failed to add UTXO to shared memory: %s", err) + } + } + + return issuer, vm, db, sharedMemory, sender +} + +func TestImportMissingUTXOs(t *testing.T) { + // make a VM with a shared memory that has an importable UTXO to build a block + importAmount := uint64(50000000) + issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, testutils.GenesisJSONApricotPhase2, "", "", map[ids.ShortID]uint64{ + testutils.TestShortIDAddrs[0]: importAmount, + }) + defer func() { + err := vm.Shutdown(context.Background()) + require.NoError(t, err) + }() + + importTx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, testutils.TestKeys[0:1]) + require.NoError(t, err) + err = vm.mempool.AddLocalTx(importTx) + require.NoError(t, err) + <-issuer + blk, err := vm.BuildBlock(context.Background()) + require.NoError(t, err) + + // make another VM which is missing the UTXO in shared memory + _, vm2, _, _, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase2, "", "") + defer func() { + err := vm2.Shutdown(context.Background()) + require.NoError(t, err) + }() + + vm2Blk, err := vm2.ParseBlock(context.Background(), blk.Bytes()) + require.NoError(t, err) + err = vm2Blk.Verify(context.Background()) + 
require.ErrorIs(t, err, errMissingUTXOs) + + // This should not result in a bad block since the missing UTXO should + // prevent InsertBlockManual from being called. + badBlocks, _ := vm2.Ethereum().BlockChain().BadBlocks() + require.Len(t, badBlocks, 0) +} + +// Simple test to ensure we can issue an import transaction followed by an export transaction +// and they will be indexed correctly when accepted. +func TestIssueAtomicTxs(t *testing.T) { + importAmount := uint64(50000000) + issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, testutils.GenesisJSONApricotPhase2, "", "", map[ids.ShortID]uint64{ + testutils.TestShortIDAddrs[0]: importAmount, + }) + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + importTx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, testutils.TestKeys[0:1]) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(importTx); err != nil { + t.Fatal(err) + } + + <-issuer + + blk, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + t.Fatal(err) + } + + if err := blk.Accept(context.Background()); err != nil { + t.Fatal(err) + } + + if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { + t.Fatal(err) + } else if lastAcceptedID != blk.ID() { + t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) + } + vm.Ethereum().BlockChain().DrainAcceptorQueue() + + state, err := vm.Ethereum().BlockChain().State() + if err != nil { + t.Fatal(err) + } + + exportTx, err := atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testutils.TestShortIDAddrs[0], testutils.InitialBaseFee, 
testutils.TestKeys[0:1]) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(exportTx); err != nil { + t.Fatal(err) + } + + <-issuer + + blk2, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := blk2.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := blk2.Accept(context.Background()); err != nil { + t.Fatal(err) + } + + if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { + t.Fatal(err) + } else if lastAcceptedID != blk2.ID() { + t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) + } + + // Check that both atomic transactions were indexed as expected. + indexedImportTx, status, height, err := vm.getAtomicTx(importTx.ID()) + assert.NoError(t, err) + assert.Equal(t, atomic.Accepted, status) + assert.Equal(t, uint64(1), height, "expected height of indexed import tx to be 1") + assert.Equal(t, indexedImportTx.ID(), importTx.ID(), "expected ID of indexed import tx to match original txID") + + indexedExportTx, status, height, err := vm.getAtomicTx(exportTx.ID()) + assert.NoError(t, err) + assert.Equal(t, atomic.Accepted, status) + assert.Equal(t, uint64(2), height, "expected height of indexed export tx to be 2") + assert.Equal(t, indexedExportTx.ID(), exportTx.ID(), "expected ID of indexed import tx to match original txID") +} + +func testConflictingImportTxs(t *testing.T, genesis string) { + importAmount := uint64(10000000) + issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesis, "", "", map[ids.ShortID]uint64{ + testutils.TestShortIDAddrs[0]: importAmount, + testutils.TestShortIDAddrs[1]: importAmount, + testutils.TestShortIDAddrs[2]: importAmount, + }) + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + importTxs := make([]*atomic.Tx, 0, 3) + conflictTxs := make([]*atomic.Tx, 0, 3) + for i, key := range testutils.TestKeys { + importTx, 
err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[i], testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) + if err != nil { + t.Fatal(err) + } + importTxs = append(importTxs, importTx) + + conflictAddr := testutils.TestEthAddrs[(i+1)%len(testutils.TestEthAddrs)] + conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictAddr, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) + if err != nil { + t.Fatal(err) + } + conflictTxs = append(conflictTxs, conflictTx) + } + + expectedParentBlkID, err := vm.LastAccepted(context.Background()) + if err != nil { + t.Fatal(err) + } + for _, tx := range importTxs[:2] { + if err := vm.mempool.AddLocalTx(tx); err != nil { + t.Fatal(err) + } + + <-issuer + + vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) + blk, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if parentID := blk.Parent(); parentID != expectedParentBlkID { + t.Fatalf("Expected parent to have blockID %s, but found %s", expectedParentBlkID, parentID) + } + + expectedParentBlkID = blk.ID() + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + t.Fatal(err) + } + } + + // Check that for each conflict tx (whose conflict is in the chain ancestry) + // the VM returns an error when it attempts to issue the conflict into the mempool + // and when it attempts to build a block with the conflict force added to the mempool. 
+ for i, tx := range conflictTxs[:2] { + if err := vm.mempool.AddLocalTx(tx); err == nil { + t.Fatal("Expected issueTx to fail due to conflicting transaction") + } + // Force issue transaction directly to the mempool + if err := vm.mempool.ForceAddTx(tx); err != nil { + t.Fatal(err) + } + <-issuer + + vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) + _, err = vm.BuildBlock(context.Background()) + // The new block is verified in BuildBlock, so + // BuildBlock should fail due to an attempt to + // double spend an atomic UTXO. + if err == nil { + t.Fatalf("Block verification should have failed in BuildBlock %d due to double spending atomic UTXO", i) + } + } + + // Generate one more valid block so that we can copy the header to create an invalid block + // with modified extra data. This new block will be invalid for more than one reason (invalid merkle root) + // so we check to make sure that the expected error is returned from block verification. + if err := vm.mempool.AddLocalTx(importTxs[2]); err != nil { + t.Fatal(err) + } + <-issuer + vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) + + validBlock, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := validBlock.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + validEthBlock := validBlock.(*chain.BlockWrapper).Block.(extension.VMBlock).GetEthBlock() + + rules := vm.CurrentRules() + var extraData []byte + switch { + case rules.IsApricotPhase5: + extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{conflictTxs[1]}) + default: + extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, conflictTxs[1]) + } + if err != nil { + t.Fatal(err) + } + + conflictingAtomicTxBlock := types.NewBlockWithExtData( + types.CopyHeader(validEthBlock.Header()), + nil, + nil, + nil, + new(trie.Trie), + extraData, + true, + ) + + blockBytes, err := rlp.EncodeToBytes(conflictingAtomicTxBlock) + if err != nil { + t.Fatal(err) + } + + parsedBlock, err := 
vm.ParseBlock(context.Background(), blockBytes) + if err != nil { + t.Fatal(err) + } + + if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) { + t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err) + } + + if !rules.IsApricotPhase5 { + return + } + + extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{importTxs[2], conflictTxs[2]}) + if err != nil { + t.Fatal(err) + } + + header := types.CopyHeader(validEthBlock.Header()) + header.ExtDataGasUsed.Mul(common.Big2, header.ExtDataGasUsed) + + internalConflictBlock := types.NewBlockWithExtData( + header, + nil, + nil, + nil, + new(trie.Trie), + extraData, + true, + ) + + blockBytes, err = rlp.EncodeToBytes(internalConflictBlock) + if err != nil { + t.Fatal(err) + } + + parsedBlock, err = vm.ParseBlock(context.Background(), blockBytes) + if err != nil { + t.Fatal(err) + } + + if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) { + t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err) + } +} + +func TestReissueAtomicTxHigherGasPrice(t *testing.T) { + kc := secp256k1fx.NewKeychain(testutils.TestKeys...) 
+ + for name, issueTxs := range map[string]func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, discarded []*atomic.Tx){ + "single UTXO override": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { + utxo, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testutils.TestShortIDAddrs[0]) + if err != nil { + t.Fatal(err) + } + tx1, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo}) + if err != nil { + t.Fatal(err) + } + tx2, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(common.Big2, testutils.InitialBaseFee), kc, []*avax.UTXO{utxo}) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(tx1); err != nil { + t.Fatal(err) + } + if err := vm.mempool.AddLocalTx(tx2); err != nil { + t.Fatal(err) + } + + return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} + }, + "one of two UTXOs overrides": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { + utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testutils.TestShortIDAddrs[0]) + if err != nil { + t.Fatal(err) + } + utxo2, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testutils.TestShortIDAddrs[0]) + if err != nil { + t.Fatal(err) + } + tx1, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo1, utxo2}) + if err != nil { + t.Fatal(err) + } + tx2, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(common.Big2, testutils.InitialBaseFee), kc, 
[]*avax.UTXO{utxo1}) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(tx1); err != nil { + t.Fatal(err) + } + if err := vm.mempool.AddLocalTx(tx2); err != nil { + t.Fatal(err) + } + + return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} + }, + "hola": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { + utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testutils.TestShortIDAddrs[0]) + if err != nil { + t.Fatal(err) + } + utxo2, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testutils.TestShortIDAddrs[0]) + if err != nil { + t.Fatal(err) + } + + importTx1, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo1}) + if err != nil { + t.Fatal(err) + } + + importTx2, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(big.NewInt(3), testutils.InitialBaseFee), kc, []*avax.UTXO{utxo2}) + if err != nil { + t.Fatal(err) + } + + reissuanceTx1, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(big.NewInt(2), testutils.InitialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) + if err != nil { + t.Fatal(err) + } + if err := vm.mempool.AddLocalTx(importTx1); err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(importTx2); err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, txpool.ErrConflictingAtomicTx) { + t.Fatalf("Expected to fail with err: %s, but found err: %s", txpool.ErrConflictingAtomicTx, err) + } + + assert.True(t, vm.mempool.Has(importTx1.ID())) + assert.True(t, vm.mempool.Has(importTx2.ID())) + assert.False(t, vm.mempool.Has(reissuanceTx1.ID())) + + reissuanceTx2, err := 
atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(big.NewInt(4), testutils.InitialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) + if err != nil { + t.Fatal(err) + } + if err := vm.mempool.AddLocalTx(reissuanceTx2); err != nil { + t.Fatal(err) + } + + return []*atomic.Tx{reissuanceTx2}, []*atomic.Tx{importTx1, importTx2} + }, + } { + t.Run(name, func(t *testing.T) { + _, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase5, "", "") + issuedTxs, evictedTxs := issueTxs(t, vm, sharedMemory) + + for i, tx := range issuedTxs { + _, issued := vm.mempool.GetPendingTx(tx.ID()) + assert.True(t, issued, "expected issued tx at index %d to be issued", i) + } + + for i, tx := range evictedTxs { + _, discarded, _ := vm.mempool.GetTx(tx.ID()) + assert.True(t, discarded, "expected discarded tx at index %d to be discarded", i) + } + }) + } +} + +func TestConflictingImportTxsAcrossBlocks(t *testing.T) { + for name, genesis := range map[string]string{ + "apricotPhase1": testutils.GenesisJSONApricotPhase1, + "apricotPhase2": testutils.GenesisJSONApricotPhase2, + "apricotPhase3": testutils.GenesisJSONApricotPhase3, + "apricotPhase4": testutils.GenesisJSONApricotPhase4, + "apricotPhase5": testutils.GenesisJSONApricotPhase5, + } { + genesis := genesis + t.Run(name, func(t *testing.T) { + testConflictingImportTxs(t, genesis) + }) + } +} + +func TestConflictingTransitiveAncestryWithGap(t *testing.T) { + key, err := accountKeystore.NewKey(rand.Reader) + if err != nil { + t.Fatal(err) + } + + key0 := testutils.TestKeys[0] + addr0 := key0.Address() + + key1 := testutils.TestKeys[1] + addr1 := key1.Address() + + importAmount := uint64(1000000000) + + issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, testutils.GenesisJSONApricotPhase0, "", "", + map[ids.ShortID]uint64{ + addr0: importAmount, + addr1: importAmount, + }) + + defer func() { + if err := vm.Shutdown(context.Background()); err != 
nil { + t.Fatal(err) + } + }() + + newHeadChan := make(chan core.ChainHeadEvent, 1) + vm.Ethereum().BlockChain().SubscribeChainHeadEvent(newHeadChan) + + importTx0A, err := vm.newImportTx(vm.ctx.XChainID, key.Address, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key0}) + if err != nil { + t.Fatal(err) + } + // Create a conflicting transaction + importTx0B, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[2], testutils.InitialBaseFee, []*secp256k1.PrivateKey{key0}) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(importTx0A); err != nil { + t.Fatalf("Failed to issue importTx0A: %s", err) + } + + <-issuer + + blk0, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatalf("Failed to build block with import transaction: %s", err) + } + + if err := blk0.Verify(context.Background()); err != nil { + t.Fatalf("Block failed verification: %s", err) + } + + if err := vm.SetPreference(context.Background(), blk0.ID()); err != nil { + t.Fatal(err) + } + + newHead := <-newHeadChan + if newHead.Block.Hash() != common.Hash(blk0.ID()) { + t.Fatalf("Expected new block to match") + } + + tx := types.NewTransaction(0, key.Address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.Ethereum().BlockChain().Config().ChainID), key.PrivateKey) + if err != nil { + t.Fatal(err) + } + + // Add the remote transactions, build the block, and set VM1's preference for block A + errs := vm.Ethereum().TxPool().AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) + } + } + + <-issuer + + blk1, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatalf("Failed to build blk1: %s", err) + } + + if err := blk1.Verify(context.Background()); err != nil { + t.Fatalf("blk1 failed verification due to %s", err) + } + + if err := 
vm.SetPreference(context.Background(), blk1.ID()); err != nil { + t.Fatal(err) + } + + importTx1, err := vm.newImportTx(vm.ctx.XChainID, key.Address, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key1}) + if err != nil { + t.Fatalf("Failed to issue importTx1 due to: %s", err) + } + + if err := vm.mempool.AddLocalTx(importTx1); err != nil { + t.Fatal(err) + } + + <-issuer + + blk2, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatalf("Failed to build block with import transaction: %s", err) + } + + if err := blk2.Verify(context.Background()); err != nil { + t.Fatalf("Block failed verification: %s", err) + } + + if err := vm.SetPreference(context.Background(), blk2.ID()); err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(importTx0B); err == nil { + t.Fatalf("Should not have been able to issue import tx with conflict") + } + // Force issue transaction directly into the mempool + if err := vm.mempool.ForceAddTx(importTx0B); err != nil { + t.Fatal(err) + } + <-issuer + + _, err = vm.BuildBlock(context.Background()) + if err == nil { + t.Fatal("Shouldn't have been able to build an invalid block") + } +} + +func TestBonusBlocksTxs(t *testing.T) { + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + importAmount := uint64(10000000) + utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} + + utxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: importAmount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, + }, + }, + } + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) + if err != nil { + t.Fatal(err) + } + + xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) + inputID := utxo.InputID() + if err := 
xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + testutils.TestKeys[0].Address().Bytes(), + }, + }}}}); err != nil { + t.Fatal(err) + } + + importTx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(importTx); err != nil { + t.Fatal(err) + } + + <-issuer + + blk, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + // Make [blk] a bonus block. + vm.atomicBackend.AddBonusBlock(blk.Height(), blk.ID()) + + // Remove the UTXOs from shared memory, so that non-bonus blocks will fail verification + if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil { + t.Fatal(err) + } + + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + t.Fatal(err) + } + + if err := blk.Accept(context.Background()); err != nil { + t.Fatal(err) + } + + lastAcceptedID, err := vm.LastAccepted(context.Background()) + if err != nil { + t.Fatal(err) + } + if lastAcceptedID != blk.ID() { + t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) + } +} + +// Builds [blkA] with a virtuous import transaction and [blkB] with a separate import transaction +// that does not conflict. Accepts [blkB] and rejects [blkA], then asserts that the virtuous atomic +// transaction in [blkA] is correctly re-issued into the atomic transaction mempool. 
+func TestReissueAtomicTx(t *testing.T) { + issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, testutils.GenesisJSONApricotPhase1, "", "", map[ids.ShortID]uint64{ + testutils.TestShortIDAddrs[0]: 10000000, + testutils.TestShortIDAddrs[1]: 10000000, + }) + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + genesisBlkID, err := vm.LastAccepted(context.Background()) + if err != nil { + t.Fatal(err) + } + + importTx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(importTx); err != nil { + t.Fatal(err) + } + + <-issuer + + blkA, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := blkA.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := vm.SetPreference(context.Background(), blkA.ID()); err != nil { + t.Fatal(err) + } + + // SetPreference to parent before rejecting (will rollback state to genesis + // so that atomic transaction can be reissued, otherwise current block will + // conflict with UTXO to be reissued) + if err := vm.SetPreference(context.Background(), genesisBlkID); err != nil { + t.Fatal(err) + } + + // Rejecting [blkA] should cause [importTx] to be re-issued into the mempool. + if err := blkA.Reject(context.Background()); err != nil { + t.Fatal(err) + } + + // Sleep for a minimum of two seconds to ensure that [blkB] will have a different timestamp + // than [blkA] so that the block will be unique. This is necessary since we have marked [blkA] + // as Rejected. 
+ time.Sleep(2 * time.Second) + <-issuer + blkB, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if blkB.Height() != blkA.Height() { + t.Fatalf("Expected blkB (%d) to have the same height as blkA (%d)", blkB.Height(), blkA.Height()) + } + + if err := blkB.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := vm.SetPreference(context.Background(), blkB.ID()); err != nil { + t.Fatal(err) + } + + if err := blkB.Accept(context.Background()); err != nil { + t.Fatal(err) + } + + if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { + t.Fatal(err) + } else if lastAcceptedID != blkB.ID() { + t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blkB.ID(), lastAcceptedID) + } + + // Check that [importTx] has been indexed correctly after [blkB] is accepted. + _, height, err := vm.atomicTxRepository.GetByTxID(importTx.ID()) + if err != nil { + t.Fatal(err) + } else if height != blkB.Height() { + t.Fatalf("Expected indexed height of import tx to be %d, but found %d", blkB.Height(), height) + } +} + +func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase1, "", "") + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + exportTxs := createExportTxOptions(t, vm, issuer, sharedMemory) + exportTx1, exportTx2 := exportTxs[0], exportTxs[1] + + if err := vm.mempool.AddLocalTx(exportTx1); err != nil { + t.Fatal(err) + } + <-issuer + exportBlk1, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := exportBlk1.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := vm.SetPreference(context.Background(), exportBlk1.ID()); err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(exportTx2); err == nil { + t.Fatal("Should have failed to issue due to an 
invalid export tx") + } + + if err := vm.mempool.AddRemoteTx(exportTx2); err == nil { + t.Fatal("Should have failed to add because conflicting") + } + + // Manually add transaction to mempool to bypass validation + if err := vm.mempool.ForceAddTx(exportTx2); err != nil { + t.Fatal(err) + } + <-issuer + + _, err = vm.BuildBlock(context.Background()) + if err == nil { + t.Fatal("BuildBlock should have returned an error due to invalid export transaction") + } +} + +// This is a regression test to ensure that if two consecutive atomic transactions fail verification +// in onFinalizeAndAssemble it will not cause a panic due to calling RevertToSnapshot(revID) on the +// same revision ID twice. +func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase1, "", "") + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) + vm.Ethereum().TxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) + + // Create three conflicting import transactions + importTxs := createImportTxOptions(t, vm, sharedMemory) + + // Issue the first import transaction, build, and accept the block. 
+ if err := vm.mempool.AddLocalTx(importTxs[0]); err != nil { + t.Fatal(err) + } + + <-issuer + + blk, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + t.Fatal(err) + } + + if err := blk.Accept(context.Background()); err != nil { + t.Fatal(err) + } + + newHead := <-newTxPoolHeadChan + if newHead.Head.Hash() != common.Hash(blk.ID()) { + t.Fatalf("Expected new block to match") + } + + // Add the two conflicting transactions directly to the mempool, so that two consecutive transactions + // will fail verification when build block is called. + vm.mempool.AddRemoteTx(importTxs[1]) + vm.mempool.AddRemoteTx(importTxs[2]) + + if _, err := vm.BuildBlock(context.Background()); err == nil { + t.Fatal("Expected build block to fail due to empty block") + } +} + +func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { + importAmount := uint64(10000000) + issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, testutils.GenesisJSONApricotPhase5, "", "", map[ids.ShortID]uint64{ + testutils.TestShortIDAddrs[0]: importAmount, + testutils.TestShortIDAddrs[1]: importAmount, + testutils.TestShortIDAddrs[2]: importAmount, + }) + conflictKey, err := accountKeystore.NewKey(rand.Reader) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + // Create a conflict set for each pair of transactions + conflictSets := make([]set.Set[ids.ID], len(testutils.TestKeys)) + for index, key := range testutils.TestKeys { + importTx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[index], testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) + if err != nil { + t.Fatal(err) + } + if err := vm.mempool.AddLocalTx(importTx); err != nil { + t.Fatal(err) + } + conflictSets[index].Add(importTx.ID()) + conflictTx, err := 
vm.newImportTx(vm.ctx.XChainID, conflictKey.Address, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) + if err != nil { + t.Fatal(err) + } + if err := vm.mempool.AddLocalTx(conflictTx); err == nil { + t.Fatal("should conflict with the utxoSet in the mempool") + } + // force add the tx + vm.mempool.ForceAddTx(conflictTx) + conflictSets[index].Add(conflictTx.ID()) + } + <-issuer + // Note: this only checks the path through OnFinalizeAndAssemble, we should make sure to add a test + // that verifies blocks received from the network will also fail verification + blk, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + wrappedBlk, ok := blk.(*chain.BlockWrapper).Block.(extension.VMBlock) + require.True(t, ok, "expected block to be a VMBlock") + atomicTxs, err := extractAtomicTxsFromBlock(wrappedBlk, vm.Ethereum().BlockChain().Config()) + require.NoError(t, err) + assert.True(t, len(atomicTxs) == len(testutils.TestKeys), "Conflict transactions should be out of the batch") + atomicTxIDs := set.Set[ids.ID]{} + for _, tx := range atomicTxs { + atomicTxIDs.Add(tx.ID()) + } + + // Check that removing the txIDs actually included in the block from each conflict set + // leaves one item remaining for each conflict set ie. only one tx from each conflict set + // has been included in the block. 
+ for _, conflictSet := range conflictSets { + conflictSet.Difference(atomicTxIDs) + assert.Equal(t, 1, conflictSet.Len()) + } + + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + if err := blk.Accept(context.Background()); err != nil { + t.Fatal(err) + } +} + +func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { + importAmount := uint64(10000000) + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase5, "", "") + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + kc := secp256k1fx.NewKeychain() + kc.Add(testutils.TestKeys[0]) + txID, err := ids.ToID(hashing.ComputeHash256(testutils.TestShortIDAddrs[0][:])) + assert.NoError(t, err) + + mempoolTxs := 200 + for i := 0; i < mempoolTxs; i++ { + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, uint32(i), vm.ctx.AVAXAssetID, importAmount, testutils.TestShortIDAddrs[0]) + assert.NoError(t, err) + + importTx, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo}) + if err != nil { + t.Fatal(err) + } + if err := vm.mempool.AddLocalTx(importTx); err != nil { + t.Fatal(err) + } + } + + <-issuer + blk, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + wrappedBlk, ok := blk.(*chain.BlockWrapper).Block.(extension.VMBlock) + require.True(t, ok, "expected block to be a VMBlock") + atomicTxs, err := extractAtomicTxsFromBlock(wrappedBlk, vm.Ethereum().BlockChain().Config()) + require.NoError(t, err) + // Need to ensure that not all of the transactions in the mempool are included in the block. + // This ensures that we hit the atomic gas limit while building the block before we hit the + // upper limit on the size of the codec for marshalling the atomic transactions. 
+ if len(atomicTxs) >= mempoolTxs { + t.Fatalf("Expected number of atomic transactions included in the block (%d) to be less than the number of transactions added to the mempool (%d)", len(atomicTxs), mempoolTxs) + } +} + +func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { + importAmount := uint64(10000000) + // We create two VMs one in ApriotPhase4 and one in ApricotPhase5, so that we can construct a block + // containing a large enough atomic transaction that it will exceed the atomic gas limit in + // ApricotPhase5. + issuer, vm1, _, sharedMemory1, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase4, "", "") + _, vm2, _, sharedMemory2, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase5, "", "") + + defer func() { + if err := vm1.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + if err := vm2.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + kc := secp256k1fx.NewKeychain() + kc.Add(testutils.TestKeys[0]) + txID, err := ids.ToID(hashing.ComputeHash256(testutils.TestShortIDAddrs[0][:])) + assert.NoError(t, err) + + // Add enough UTXOs, such that the created import transaction will attempt to consume more gas than allowed + // in ApricotPhase5. + for i := 0; i < 100; i++ { + _, err := addUTXO(sharedMemory1, vm1.ctx, txID, uint32(i), vm1.ctx.AVAXAssetID, importAmount, testutils.TestShortIDAddrs[0]) + assert.NoError(t, err) + + _, err = addUTXO(sharedMemory2, vm2.ctx, txID, uint32(i), vm2.ctx.AVAXAssetID, importAmount, testutils.TestShortIDAddrs[0]) + assert.NoError(t, err) + } + + // Double the initial base fee used when estimating the cost of this transaction to ensure that when it is + // used in ApricotPhase5 it still pays a sufficient fee with the fixed fee per atomic transaction. 
+ importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(common.Big2, testutils.InitialBaseFee), []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + if err != nil { + t.Fatal(err) + } + if err := vm1.mempool.ForceAddTx(importTx); err != nil { + t.Fatal(err) + } + + <-issuer + blk1, err := vm1.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := blk1.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + wrappedBlk, ok := blk1.(*chain.BlockWrapper).Block.(extension.VMBlock) + require.True(t, ok, "expected block to be a VMBlock") + validEthBlock := wrappedBlk.GetEthBlock() + extraData, err := atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{importTx}) + if err != nil { + t.Fatal(err) + } + + // Construct the new block with the extra data in the new format (slice of atomic transactions). + ethBlk2 := types.NewBlockWithExtData( + types.CopyHeader(validEthBlock.Header()), + nil, + nil, + nil, + new(trie.Trie), + extraData, + true, + ) + + state, err := vm2.Ethereum().BlockChain().State() + if err != nil { + t.Fatal(err) + } + + // Hack: test [onExtraStateChange] directly to ensure it catches the atomic gas limit error correctly. + if _, _, err := vm2.onExtraStateChange(ethBlk2, state); err == nil || !strings.Contains(err.Error(), "exceeds atomic gas limit") { + t.Fatalf("Expected block to fail verification due to exceeded atomic gas limit, but found error: %v", err) + } +} + +// Regression test to ensure that a VM that is not able to parse a block that +// contains no transactions. 
+func TestEmptyBlock(t *testing.T) { + importAmount := uint64(1000000000) + issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, testutils.GenesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ + testutils.TestShortIDAddrs[0]: importAmount, + }) + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + importTx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(importTx); err != nil { + t.Fatal(err) + } + + <-issuer + + blk, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatalf("Failed to build block with import transaction: %s", err) + } + + // Create empty block from blkA + wrappedBlk, ok := blk.(*chain.BlockWrapper).Block.(extension.VMBlock) + require.True(t, ok, "expected block to be a VMBlock") + ethBlock := wrappedBlk.GetEthBlock() + + emptyEthBlock := types.NewBlockWithExtData( + types.CopyHeader(ethBlock.Header()), + nil, + nil, + nil, + new(trie.Trie), + nil, + false, + ) + + if len(emptyEthBlock.ExtData()) != 0 || emptyEthBlock.Header().ExtDataHash != (common.Hash{}) { + t.Fatalf("emptyEthBlock should not have any extra data") + } + + emptyBlock, err := vm.NewVMBlock(emptyEthBlock) + if err != nil { + t.Fatal(err) + } + + if _, err := vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { + t.Fatalf("VM should have failed with errEmptyBlock but got %s", err.Error()) + } + if err := emptyBlock.Verify(context.Background()); !errors.Is(err, errEmptyBlock) { + t.Fatalf("block should have failed verification with errEmptyBlock but got %s", err.Error()) + } +} + +// Regression test to ensure we can build blocks if we are starting with the +// Apricot Phase 5 ruleset in genesis. 
+func TestBuildApricotPhase5Block(t *testing.T) { + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase5, "", "") + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) + vm.Ethereum().TxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) + + key := testutils.TestKeys[0].ToECDSA() + address := testutils.TestEthAddrs[0] + + importAmount := uint64(1000000000) + utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} + + utxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: importAmount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, + }, + }, + } + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) + if err != nil { + t.Fatal(err) + } + + xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) + inputID := utxo.InputID() + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + testutils.TestKeys[0].Address().Bytes(), + }, + }}}}); err != nil { + t.Fatal(err) + } + + importTx, err := vm.newImportTx(vm.ctx.XChainID, address, testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(importTx); err != nil { + t.Fatal(err) + } + + <-issuer + + blk, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + t.Fatal(err) + } + + if err := blk.Accept(context.Background()); err != nil { + t.Fatal(err) + } + + wrappedBlk, ok := 
blk.(*chain.BlockWrapper).Block.(extension.VMBlock) + require.True(t, ok, "expected block to be a VMBlock") + ethBlk := wrappedBlk.GetEthBlock() + if eBlockGasCost := ethBlk.BlockGasCost(); eBlockGasCost == nil || eBlockGasCost.Cmp(common.Big0) != 0 { + t.Fatalf("expected blockGasCost to be 0 but got %d", eBlockGasCost) + } + if eExtDataGasUsed := ethBlk.ExtDataGasUsed(); eExtDataGasUsed == nil || eExtDataGasUsed.Cmp(big.NewInt(11230)) != 0 { + t.Fatalf("expected extDataGasUsed to be 11230 but got %d", eExtDataGasUsed) + } + minRequiredTip, err := dummy.MinRequiredTip(vm.Ethereum().BlockChain().Config(), ethBlk.Header()) + if err != nil { + t.Fatal(err) + } + if minRequiredTip == nil || minRequiredTip.Cmp(common.Big0) != 0 { + t.Fatalf("expected minRequiredTip to be 0 but got %d", minRequiredTip) + } + + newHead := <-newTxPoolHeadChan + if newHead.Head.Hash() != common.Hash(blk.ID()) { + t.Fatalf("Expected new block to match") + } + + txs := make([]*types.Transaction, 10) + for i := 0; i < 10; i++ { + tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice*3), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.CurrentRules().ChainID), key) + if err != nil { + t.Fatal(err) + } + txs[i] = signedTx + } + errs := vm.Ethereum().TxPool().Add(txs, false, false) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } + } + + <-issuer + + blk, err = vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := blk.Accept(context.Background()); err != nil { + t.Fatal(err) + } + + wrappedBlk, ok = blk.(*chain.BlockWrapper).Block.(extension.VMBlock) + require.True(t, ok, "expected block to be a VMBlock") + ethBlk = wrappedBlk.GetEthBlock() + if ethBlk.BlockGasCost() == nil || ethBlk.BlockGasCost().Cmp(big.NewInt(100)) < 0 { + t.Fatalf("expected blockGasCost to 
be at least 100 but got %d", ethBlk.BlockGasCost()) + } + if ethBlk.ExtDataGasUsed() == nil || ethBlk.ExtDataGasUsed().Cmp(common.Big0) != 0 { + t.Fatalf("expected extDataGasUsed to be 0 but got %d", ethBlk.ExtDataGasUsed()) + } + minRequiredTip, err = dummy.MinRequiredTip(vm.Ethereum().BlockChain().Config(), ethBlk.Header()) + if err != nil { + t.Fatal(err) + } + if minRequiredTip == nil || minRequiredTip.Cmp(big.NewInt(0.05*params.GWei)) < 0 { + t.Fatalf("expected minRequiredTip to be at least 0.05 gwei but got %d", minRequiredTip) + } + + lastAcceptedID, err := vm.LastAccepted(context.Background()) + if err != nil { + t.Fatal(err) + } + if lastAcceptedID != blk.ID() { + t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) + } + + // Confirm all txs are present + ethBlkTxs := vm.Ethereum().BlockChain().GetBlockByNumber(2).Transactions() + for i, tx := range txs { + if len(ethBlkTxs) <= i { + t.Fatalf("missing transactions expected: %d but found: %d", len(txs), len(ethBlkTxs)) + } + if ethBlkTxs[i].Hash() != tx.Hash() { + t.Fatalf("expected tx at index %d to have hash: %x but has: %x", i, txs[i].Hash(), tx.Hash()) + } + } +} + +// Regression test to ensure we can build blocks if we are starting with the +// Apricot Phase 4 ruleset in genesis. 
+func TestBuildApricotPhase4Block(t *testing.T) { + issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase4, "", "") + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) + vm.Ethereum().TxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) + + key := testutils.TestKeys[0].ToECDSA() + address := testutils.TestEthAddrs[0] + + importAmount := uint64(1000000000) + utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} + + utxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: importAmount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, + }, + }, + } + utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) + if err != nil { + t.Fatal(err) + } + + xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) + inputID := utxo.InputID() + if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + testutils.TestKeys[0].Address().Bytes(), + }, + }}}}); err != nil { + t.Fatal(err) + } + + importTx, err := vm.newImportTx(vm.ctx.XChainID, address, testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + if err != nil { + t.Fatal(err) + } + + if err := vm.mempool.AddLocalTx(importTx); err != nil { + t.Fatal(err) + } + + <-issuer + + blk, err := vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + t.Fatal(err) + } + + if err := blk.Accept(context.Background()); err != nil { + t.Fatal(err) + } + + wrappedBlk, ok := 
blk.(*chain.BlockWrapper).Block.(extension.VMBlock) + require.True(t, ok, "expected block to be a VMBlock") + ethBlk := wrappedBlk.GetEthBlock() + if eBlockGasCost := ethBlk.BlockGasCost(); eBlockGasCost == nil || eBlockGasCost.Cmp(common.Big0) != 0 { + t.Fatalf("expected blockGasCost to be 0 but got %d", eBlockGasCost) + } + if eExtDataGasUsed := ethBlk.ExtDataGasUsed(); eExtDataGasUsed == nil || eExtDataGasUsed.Cmp(big.NewInt(1230)) != 0 { + t.Fatalf("expected extDataGasUsed to be 1000 but got %d", eExtDataGasUsed) + } + minRequiredTip, err := dummy.MinRequiredTip(vm.Ethereum().BlockChain().Config(), ethBlk.Header()) + if err != nil { + t.Fatal(err) + } + if minRequiredTip == nil || minRequiredTip.Cmp(common.Big0) != 0 { + t.Fatalf("expected minRequiredTip to be 0 but got %d", minRequiredTip) + } + + newHead := <-newTxPoolHeadChan + if newHead.Head.Hash() != common.Hash(blk.ID()) { + t.Fatalf("Expected new block to match") + } + + txs := make([]*types.Transaction, 10) + for i := 0; i < 5; i++ { + tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.CurrentRules().ChainID), key) + if err != nil { + t.Fatal(err) + } + txs[i] = signedTx + } + for i := 5; i < 10; i++ { + tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.ApricotPhase1MinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.CurrentRules().ChainID), key) + if err != nil { + t.Fatal(err) + } + txs[i] = signedTx + } + errs := vm.Ethereum().TxPool().AddRemotesSync(txs) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } + } + + <-issuer + + blk, err = vm.BuildBlock(context.Background()) + if err != nil { + t.Fatal(err) + } + + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + + if err := blk.Accept(context.Background()); err != nil { + 
t.Fatal(err) + } + + wrappedBlk, ok = blk.(*chain.BlockWrapper).Block.(extension.VMBlock) + require.True(t, ok, "expected block to be a VMBlock") + ethBlk = wrappedBlk.GetEthBlock() + if ethBlk.BlockGasCost() == nil || ethBlk.BlockGasCost().Cmp(big.NewInt(100)) < 0 { + t.Fatalf("expected blockGasCost to be at least 100 but got %d", ethBlk.BlockGasCost()) + } + if ethBlk.ExtDataGasUsed() == nil || ethBlk.ExtDataGasUsed().Cmp(common.Big0) != 0 { + t.Fatalf("expected extDataGasUsed to be 0 but got %d", ethBlk.ExtDataGasUsed()) + } + minRequiredTip, err = dummy.MinRequiredTip(vm.Ethereum().BlockChain().Config(), ethBlk.Header()) + if err != nil { + t.Fatal(err) + } + if minRequiredTip == nil || minRequiredTip.Cmp(big.NewInt(0.05*params.GWei)) < 0 { + t.Fatalf("expected minRequiredTip to be at least 0.05 gwei but got %d", minRequiredTip) + } + + lastAcceptedID, err := vm.LastAccepted(context.Background()) + if err != nil { + t.Fatal(err) + } + if lastAcceptedID != blk.ID() { + t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) + } + + // Confirm all txs are present + ethBlkTxs := vm.Ethereum().BlockChain().GetBlockByNumber(2).Transactions() + for i, tx := range txs { + if len(ethBlkTxs) <= i { + t.Fatalf("missing transactions expected: %d but found: %d", len(txs), len(ethBlkTxs)) + } + if ethBlkTxs[i].Hash() != tx.Hash() { + t.Fatalf("expected tx at index %d to have hash: %x but has: %x", i, txs[i].Hash(), tx.Hash()) + } + } +} + +func TestBuildInvalidBlockHead(t *testing.T) { + issuer, vm, _, _, _ := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") + + defer func() { + if err := vm.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + key0 := testutils.TestKeys[0] + addr0 := key0.Address() + + // Create the transaction + utx := &atomic.UnsignedImportTx{ + NetworkID: vm.ctx.NetworkID, + BlockchainID: vm.ctx.ChainID, + Outs: []atomic.EVMOutput{{ + Address: 
common.Address(addr0), + Amount: 1 * units.Avax, + AssetID: vm.ctx.AVAXAssetID, + }}, + ImportedInputs: []*avax.TransferableInput{ + { + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + In: &secp256k1fx.TransferInput{ + Amt: 1 * units.Avax, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }, + }, + SourceChain: vm.ctx.XChainID, + } + tx := &atomic.Tx{UnsignedAtomicTx: utx} + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key0}}); err != nil { + t.Fatal(err) + } + + currentBlock := vm.Ethereum().BlockChain().CurrentBlock() + + // Verify that the transaction fails verification when attempting to issue + // it into the atomic mempool. + if err := vm.mempool.AddLocalTx(tx); err == nil { + t.Fatal("Should have failed to issue invalid transaction") + } + // Force issue the transaction directly to the mempool + if err := vm.mempool.ForceAddTx(tx); err != nil { + t.Fatal(err) + } + + <-issuer + + if _, err := vm.BuildBlock(context.Background()); err == nil { + t.Fatalf("Unexpectedly created a block") + } + + newCurrentBlock := vm.Ethereum().BlockChain().CurrentBlock() + + if currentBlock.Hash() != newCurrentBlock.Hash() { + t.Fatal("current block changed") + } +} diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 98ecac21e5..4d9be76143 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -35,7 +35,6 @@ type Block struct { id ids.ID ethBlock *types.Block blockManager *blockManager - extraData any } // ID implements the snowman.Block interface @@ -288,7 +287,3 @@ func (b *Block) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID( func (b *Block) GetEthBlock() *types.Block { return b.ethBlock } - -func (b *Block) GetExtraData() interface{} { - return b.extraData -} diff --git a/plugin/evm/block_manager.go b/plugin/evm/block_manager.go index f8909a6107..95a09d1915 100644 --- a/plugin/evm/block_manager.go +++ b/plugin/evm/block_manager.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/coreth/constants" 
"github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic/extension" + "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/trie" ) @@ -25,11 +25,11 @@ var ( ) type blockManager struct { - blockExtension extension.BlockExtension + blockExtension extension.BlockManagerExtension vm *VM } -func newBlockManager(vm *VM, blockExtension extension.BlockExtension) *blockManager { +func newBlockManager(vm *VM, blockExtension extension.BlockManagerExtension) *blockManager { return &blockManager{ blockExtension: blockExtension, vm: vm, @@ -38,15 +38,10 @@ func newBlockManager(vm *VM, blockExtension extension.BlockExtension) *blockMana // newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface func (bm *blockManager) newBlock(ethBlock *types.Block) (*Block, error) { - extraData, err := bm.blockExtension.InitializeExtraData(ethBlock, bm.vm.chainConfig) - if err != nil { - return nil, fmt.Errorf("failed to initialize block extension: %w", err) - } return &Block{ id: ids.ID(ethBlock.Hash()), ethBlock: ethBlock, blockManager: bm, - extraData: extraData, }, nil } @@ -208,8 +203,7 @@ func (bm *blockManager) SyntacticVerify(b *Block, rules params.Rules) error { if !cancun && ethHeader.ParentBeaconRoot != nil { return fmt.Errorf("invalid parentBeaconRoot: have %x, expected nil", *ethHeader.ParentBeaconRoot) } - // TODO: decide what to do after Cancun - // currently we are enforcing it to be empty hash + if cancun { switch { case ethHeader.ParentBeaconRoot == nil: diff --git a/plugin/evm/atomic/extension/config.go b/plugin/evm/extension/config.go similarity index 76% rename from plugin/evm/atomic/extension/config.go rename to plugin/evm/extension/config.go index 06b018d916..e8e8b910ba 100644 --- a/plugin/evm/atomic/extension/config.go +++ b/plugin/evm/extension/config.go @@ -10,37 +10,40 @@ import ( "github.com/ava-labs/avalanchego/network/p2p" 
"github.com/ava-labs/avalanchego/snow/consensus/snowman" avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ethereum/go-ethereum/common" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/plugin/evm/sync" "github.com/ava-labs/coreth/sync/handlers" - "github.com/prometheus/client_golang/prometheus" - - "github.com/ethereum/go-ethereum/common" ) -// TODO: move this file out from atomic pkg - type ExtensibleVM interface { + SetLastAcceptedBlock(lastAcceptedBlock snowman.Block) error + GetVMBlock(context.Context, ids.ID) (VMBlock, error) + NewVMBlock(*types.Block) (VMBlock, error) + LastAcceptedVMBlock() VMBlock // NewClient returns a client to send messages with for the given protocol NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client // AddHandler registers a server handler for an application protocol AddHandler(protocol uint64, handler p2p.Handler) error - GetBlockExtended(ctx context.Context, blkID ids.ID) (ExtendedBlock, error) LastAcceptedBlockInternal() snowman.Block Validators() *p2p.Validators SetExtensionConfig(config *Config) error - Blockchain() *core.BlockChain + Ethereum() *eth.Ethereum Config() *config.Config MetricRegistry() *prometheus.Registry ReadLastAccepted() (common.Hash, uint64, error) + CurrentRules() params.Rules VersionDB() *versiondb.Database + SyncerClient() sync.Client } type InnerVM interface { @@ -51,18 +54,16 @@ type InnerVM interface { block.StateSyncableVM } -type ExtendedBlock interface { +type VMBlock interface { snowman.Block - GetExtraData() interface{} GetEthBlock() *types.Block } -type BlockExtension interface { 
- InitializeExtraData(ethBlock *types.Block, chainConfig *params.ChainConfig) (any, error) - SyntacticVerify(b ExtendedBlock, rules params.Rules) error - Accept(b ExtendedBlock, acceptedBatch database.Batch) error - Reject(b ExtendedBlock) error - Cleanup(b ExtendedBlock) +type BlockManagerExtension interface { + SyntacticVerify(b VMBlock, rules params.Rules) error + Accept(b VMBlock, acceptedBatch database.Batch) error + Reject(b VMBlock) error + Cleanup(b VMBlock) } type BuilderMempool interface { @@ -78,11 +79,11 @@ type LeafRequestConfig struct { type Config struct { NetworkCodec codec.Manager - ConsensusCallbacks *dummy.ConsensusCallbacks + ConsensusCallbacks dummy.ConsensusCallbacks SyncSummaryProvider sync.SummaryProvider SyncExtender sync.Extender SyncableParser message.SyncableParser - BlockExtension BlockExtension + BlockExtension BlockManagerExtension SyncLeafType *LeafRequestConfig ExtraMempool BuilderMempool } diff --git a/plugin/evm/extension/no_op_block_extension.go b/plugin/evm/extension/no_op_block_extension.go new file mode 100644 index 0000000000..0859466c19 --- /dev/null +++ b/plugin/evm/extension/no_op_block_extension.go @@ -0,0 +1,33 @@ +package extension + +import ( + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" +) + +var _ BlockManagerExtension = (*noOpBlockExtension)(nil) + +type noOpBlockExtension struct{} + +func NewNoOpBlockExtension() *noOpBlockExtension { + return &noOpBlockExtension{} +} + +func (noOpBlockExtension) InitializeExtraData(ethBlock *types.Block, chainConfig *params.ChainConfig) (interface{}, error) { + return nil, nil +} + +func (noOpBlockExtension) SyntacticVerify(b VMBlock, rules params.Rules) error { + return nil +} + +func (noOpBlockExtension) Accept(b VMBlock, acceptedBatch database.Batch) error { + return nil +} + +func (noOpBlockExtension) Reject(b VMBlock) error { + return nil +} + +func (noOpBlockExtension) Cleanup(b VMBlock) {} diff 
--git a/plugin/evm/message/block_sync_summary_provider.go b/plugin/evm/message/block_sync_summary_provider.go new file mode 100644 index 0000000000..2ca892b287 --- /dev/null +++ b/plugin/evm/message/block_sync_summary_provider.go @@ -0,0 +1,16 @@ +// (c) 2021-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. +package message + +import ( + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + "github.com/ava-labs/coreth/core/types" +) + +type BlockSyncSummaryProvider struct{} + +// StateSummaryAtBlock returns the block state summary at [block] if valid. +func (a *BlockSyncSummaryProvider) StateSummaryAtBlock(blk *types.Block) (block.StateSummary, error) { + return NewBlockSyncSummary(blk.Hash(), blk.NumberU64(), blk.Root()) +} diff --git a/plugin/evm/sync/extender.go b/plugin/evm/sync/extender.go deleted file mode 100644 index def5057c26..0000000000 --- a/plugin/evm/sync/extender.go +++ /dev/null @@ -1,31 +0,0 @@ -package sync - -import ( - "context" - - "github.com/ava-labs/avalanchego/database/versiondb" - "github.com/ava-labs/coreth/plugin/evm/message" - syncclient "github.com/ava-labs/coreth/sync/client" -) - -var _ Extender = (*NoOpExtender)(nil) - -type Extender interface { - Sync(ctx context.Context, client syncclient.LeafClient, verdb *versiondb.Database, syncSummary message.Syncable) error - OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error - OnFinishAfterCommit(summaryHeight uint64) error -} - -type NoOpExtender struct{} - -func (n *NoOpExtender) Sync(ctx context.Context, client syncclient.LeafClient, verdb *versiondb.Database, syncSummary message.Syncable) error { - return nil -} - -func (n *NoOpExtender) OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error { - return nil -} - -func (n *NoOpExtender) OnFinishAfterCommit(summaryHeight uint64) error { - return nil -} diff --git a/plugin/evm/sync/syncervm_client.go 
b/plugin/evm/sync/syncervm_client.go index ac7dfbd826..1657fbbefe 100644 --- a/plugin/evm/sync/syncervm_client.go +++ b/plugin/evm/sync/syncervm_client.go @@ -45,6 +45,12 @@ type EthBlockWrapper interface { GetEthBlock() *types.Block } +type Extender interface { + Sync(ctx context.Context, client syncclient.LeafClient, verdb *versiondb.Database, syncSummary message.Syncable) error + OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error + OnFinishAfterCommit(summaryHeight uint64) error +} + // ClientConfig defines the options and dependencies needed to construct a Client type ClientConfig struct { Enabled bool diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 9e11afd15c..1a60921249 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -4,674 +4,45 @@ package evm import ( - "context" - "fmt" - "math/big" - "math/rand" - "sync" "testing" - "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" - avalanchedatabase "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/enginetest" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/units" - - "github.com/ava-labs/coreth/accounts/keystore" "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic" - 
"github.com/ava-labs/coreth/plugin/evm/atomic/atomictest" - "github.com/ava-labs/coreth/plugin/evm/database" - vmsync "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/predicate" - statesyncclient "github.com/ava-labs/coreth/sync/client" - "github.com/ava-labs/coreth/sync/statesync" - "github.com/ava-labs/coreth/trie" - "github.com/ava-labs/coreth/triedb" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/require" ) -func TestSkipStateSync(t *testing.T) { - rand.Seed(1) - test := syncTest{ - syncableInterval: 256, - stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync - syncMode: block.StateSyncSkipped, - } - vmSetup := createSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch) - - testSyncerVM(t, vmSetup, test) -} - -func TestStateSyncFromScratch(t *testing.T) { - rand.Seed(1) - test := syncTest{ - syncableInterval: 256, - stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - syncMode: block.StateSyncStatic, - } - vmSetup := createSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch) - - testSyncerVM(t, vmSetup, test) -} - -func TestStateSyncFromScratchExceedParent(t *testing.T) { - rand.Seed(1) - numToGen := vmsync.ParentsToFetch + uint64(32) - test := syncTest{ - syncableInterval: numToGen, - stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - syncMode: block.StateSyncStatic, - } - vmSetup := createSyncServerAndClientVMs(t, test, int(numToGen)) - - testSyncerVM(t, vmSetup, test) -} - -func TestStateSyncToggleEnabledToDisabled(t *testing.T) { - rand.Seed(1) - // Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice. 
- metrics.Enabled = false - defer func() { - metrics.Enabled = true - }() - - var lock sync.Mutex - reqCount := 0 - test := syncTest{ - syncableInterval: 256, - stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - syncMode: block.StateSyncStatic, - responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { - lock.Lock() - defer lock.Unlock() - - reqCount++ - // Fail all requests after number 50 to interrupt the sync - if reqCount > 50 { - if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout); err != nil { - panic(err) - } - if err := syncerVM.Client.Shutdown(); err != nil { - panic(err) +func TestEVMSyncerVM(t *testing.T) { + for _, test := range testutils.SyncerVMTests { + t.Run(test.Name, func(t *testing.T) { + genFn := func(i int, vm extension.InnerVM, gen *core.BlockGen) { + b, err := predicate.NewResults().Bytes() + if err != nil { + t.Fatal(err) } - } else { - syncerVM.AppResponse(context.Background(), nodeID, requestID, response) - } - }, - expectedErr: context.Canceled, - } - vmSetup := createSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch) - - // Perform sync resulting in early termination. 
- testSyncerVM(t, vmSetup, test) - - test.syncMode = block.StateSyncStatic - test.responseIntercept = nil - test.expectedErr = nil - - syncDisabledVM := newDefaultTestVM() - appSender := &enginetest.Sender{T: t} - appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } - appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { - nodeID, hasItem := nodeSet.Pop() - if !hasItem { - t.Fatal("expected nodeSet to contain at least 1 nodeID") - } - go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) - return nil - } - // Disable metrics to prevent duplicate registerer - stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}` - if err := syncDisabledVM.Initialize( - context.Background(), - vmSetup.syncerVM.ctx, - vmSetup.syncerDB, - []byte(genesisJSONLatest), - nil, - []byte(stateSyncDisabledConfigJSON), - vmSetup.syncerVM.toEngine, - []*commonEng.Fx{}, - appSender, - ); err != nil { - t.Fatal(err) - } + gen.AppendExtra(b) - defer func() { - if err := syncDisabledVM.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - if height := syncDisabledVM.LastAcceptedBlockInternal().Height(); height != 0 { - t.Fatalf("Unexpected last accepted height: %d", height) - } - - enabled, err := syncDisabledVM.StateSyncEnabled(context.Background()) - assert.NoError(t, err) - assert.False(t, enabled, "sync should be disabled") - - // Process the first 10 blocks from the serverVM - for i := uint64(1); i < 10; i++ { - ethBlock := vmSetup.serverVM.blockChain.GetBlockByNumber(i) - if ethBlock == nil { - t.Fatalf("VM Server did not have a block available at height %d", i) - } - b, err := rlp.EncodeToBytes(ethBlock) - if err != nil { - t.Fatal(err) - } - blk, err := syncDisabledVM.ParseBlock(context.Background(), b) - if err != nil { - t.Fatal(err) - } - if err := blk.Verify(context.Background()); err != nil { - 
t.Fatal(err) - } - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } - } - // Verify the snapshot disk layer matches the last block root - lastRoot := syncDisabledVM.blockChain.CurrentBlock().Root - if err := syncDisabledVM.blockChain.Snapshots().Verify(lastRoot); err != nil { - t.Fatal(err) - } - syncDisabledVM.blockChain.DrainAcceptorQueue() - - // Create a new VM from the same database with state sync enabled. - syncReEnabledVM := newDefaultTestVM() - // Enable state sync in configJSON - configJSON := fmt.Sprintf( - `{"state-sync-enabled":true, "state-sync-min-blocks":%d}`, - test.stateSyncMinBlocks, - ) - if err := syncReEnabledVM.Initialize( - context.Background(), - vmSetup.syncerVM.ctx, - vmSetup.syncerDB, - []byte(genesisJSONLatest), - nil, - []byte(configJSON), - vmSetup.syncerVM.toEngine, - []*commonEng.Fx{}, - appSender, - ); err != nil { - t.Fatal(err) - } - - // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] - vmSetup.serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { - if test.responseIntercept == nil { - go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response) - } else { - go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response) - } - - return nil - } - - // connect peer to [syncerVM] - assert.NoError(t, syncReEnabledVM.Connected( - context.Background(), - vmSetup.serverVM.ctx.NodeID, - statesyncclient.StateSyncVersion, - )) - - enabled, err = syncReEnabledVM.StateSyncEnabled(context.Background()) - assert.NoError(t, err) - assert.True(t, enabled, "sync should be enabled") - - vmSetup.syncerVM = syncReEnabledVM - testSyncerVM(t, vmSetup, test) -} - -func TestVMShutdownWhileSyncing(t *testing.T) { - var ( - lock sync.Mutex - vmSetup *syncVMSetup - ) - reqCount := 0 - test := syncTest{ - syncableInterval: 256, - stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - 
syncMode: block.StateSyncStatic, - responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { - lock.Lock() - defer lock.Unlock() - - reqCount++ - // Shutdown the VM after 50 requests to interrupt the sync - if reqCount == 50 { - // Note this verifies the VM shutdown does not time out while syncing. - require.NoError(t, vmSetup.shutdownOnceSyncerVM.Shutdown(context.Background())) - } else if reqCount < 50 { - err := syncerVM.AppResponse(context.Background(), nodeID, requestID, response) + tx := types.NewTransaction(gen.TxNonce(testutils.TestEthAddrs[0]), testutils.TestEthAddrs[1], common.Big1, params.TxGas, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.Ethereum().BlockChain().Config().ChainID), testutils.TestKeys[0].ToECDSA()) require.NoError(t, err) + gen.AddTx(signedTx) } - }, - expectedErr: context.Canceled, - } - vmSetup = createSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch) - // Perform sync resulting in early termination. - testSyncerVM(t, vmSetup, test) -} - -func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *syncVMSetup { - var ( - require = require.New(t) - importAmount = 2000000 * units.Avax // 2M avax - alloc = map[ids.ShortID]uint64{ - testutils.TestShortIDAddrs[0]: importAmount, - } - ) - // override serverAtomicTrie's commitInterval so the call to [serverAtomicTrie.Index] - // creates a commit at the height [syncableInterval]. This is necessary to support - // fetching a state summary. 
- config := fmt.Sprintf(`{"commit-interval": %d, "state-sync-commit-interval": %d}`, test.syncableInterval, test.syncableInterval) - _, serverVM, _, serverAtomicMemory, serverAppSender := GenesisVMWithUTXOs( - t, true, "", config, "", alloc, - ) - t.Cleanup(func() { - log.Info("Shutting down server VM") - require.NoError(serverVM.Shutdown(context.Background())) - }) - var ( - importTx, exportTx *atomic.Tx - err error - ) - generateAndAcceptBlocks(t, serverVM, numBlocks, func(i int, gen *core.BlockGen) { - b, err := predicate.NewResults().Bytes() - if err != nil { - t.Fatal(err) - } - gen.AppendExtra(b) - switch i { - case 0: - // spend the UTXOs from shared memory - importTx, err = serverVM.newImportTx(serverVM.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - require.NoError(err) - require.NoError(serverVM.mempool.AddLocalTx(importTx)) - case 1: - // export some of the imported UTXOs to test exportTx is properly synced - exportTx, err = serverVM.newExportTx( - serverVM.ctx.AVAXAssetID, - importAmount/2, - serverVM.ctx.XChainID, - testutils.TestShortIDAddrs[0], - initialBaseFee, - []*secp256k1.PrivateKey{testKeys[0]}, - ) - require.NoError(err) - require.NoError(serverVM.mempool.AddLocalTx(exportTx)) - default: // Generate simple transfer transactions. 
- pk := testKeys[0].ToECDSA() - tx := types.NewTransaction(gen.TxNonce(testEthAddrs[0]), testEthAddrs[1], common.Big1, params.TxGas, initialBaseFee, nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), pk) - require.NoError(err) - gen.AddTx(signedTx) - } - }, nil) - - serverAtomicTrie := serverVM.atomicBackend.AtomicTrie() - require.True(serverAtomicTrie.AcceptTrie(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot())) - require.NoError(serverVM.versiondb.Commit()) - - serverSharedMemories := atomictest.NewSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID) - importOps, err := atomictest.ConvertToAtomicOps(importTx) - require.NoError(err) - exportOps, err := atomictest.ConvertToAtomicOps(exportTx) - require.NoError(err) - serverSharedMemories.AssertOpsApplied(t, importOps) - serverSharedMemories.AssertOpsApplied(t, exportOps) - - // make some accounts - trieDB := triedb.NewDatabase(serverVM.chaindb, nil) - root, accounts := statesync.FillAccountsWithOverlappingStorage(t, trieDB, types.EmptyRootHash, 1000, 16) - - // patch serverVM's lastAcceptedBlock to have the new root - // and update the vm's state so the trie with accounts will - // be returned by StateSyncGetLastSummary - lastAccepted := serverVM.blockChain.LastAcceptedBlock() - patchedBlock := patchBlock(lastAccepted, root, serverVM.chaindb) - blockBytes, err := rlp.EncodeToBytes(patchedBlock) - require.NoError(err) - internalBlock, err := serverVM.parseBlock(context.Background(), blockBytes) - require.NoError(err) - require.NoError(serverVM.State.SetLastAcceptedBlock(internalBlock)) - - // initialise [syncerVM] with blank genesis state - // we also override [syncerVM]'s commit interval so the atomic trie works correctly. 
- stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d, "commit-interval": %d}`, test.stateSyncMinBlocks, 4, test.syncableInterval) - syncerEngineChan, syncerVM, syncerDB, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs( - t, false, "", stateSyncEnabledJSON, "", alloc, - ) - shutdownOnceSyncerVM := &shutdownOnceVM{VM: syncerVM} - t.Cleanup(func() { - require.NoError(shutdownOnceSyncerVM.Shutdown(context.Background())) - }) - require.NoError(syncerVM.SetState(context.Background(), snow.StateSyncing)) - enabled, err := syncerVM.StateSyncEnabled(context.Background()) - require.NoError(err) - require.True(enabled) - - // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] - serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { - if test.responseIntercept == nil { - go syncerVM.AppResponse(ctx, nodeID, requestID, response) - } else { - go test.responseIntercept(syncerVM, nodeID, requestID, response) - } - - return nil - } - - // connect peer to [syncerVM] - require.NoError( - syncerVM.Connected( - context.Background(), - serverVM.ctx.NodeID, - statesyncclient.StateSyncVersion, - ), - ) - - // override [syncerVM]'s SendAppRequest function to trigger AppRequest on [serverVM] - syncerAppSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { - nodeID, hasItem := nodeSet.Pop() - require.True(hasItem, "expected nodeSet to contain at least 1 nodeID") - err := serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) - require.NoError(err) - return nil - } - - return &syncVMSetup{ - serverVM: serverVM, - serverAppSender: serverAppSender, - includedAtomicTxs: []*atomic.Tx{ - importTx, - exportTx, - }, - fundedAccounts: accounts, - syncerVM: syncerVM, - syncerDB: syncerDB, - syncerEngineChan: syncerEngineChan, - 
syncerAtomicMemory: syncerAtomicMemory, - shutdownOnceSyncerVM: shutdownOnceSyncerVM, - } -} - -// syncVMSetup contains the required set up for a client VM to perform state sync -// off of a server VM. -type syncVMSetup struct { - serverVM *VM - serverAppSender *enginetest.Sender - - includedAtomicTxs []*atomic.Tx - fundedAccounts map[*keystore.Key]*types.StateAccount - - syncerVM *VM - syncerDB avalanchedatabase.Database - syncerEngineChan <-chan commonEng.Message - syncerAtomicMemory *avalancheatomic.Memory - shutdownOnceSyncerVM *shutdownOnceVM -} - -type shutdownOnceVM struct { - *VM - shutdownOnce sync.Once -} - -func (vm *shutdownOnceVM) Shutdown(ctx context.Context) error { - var err error - vm.shutdownOnce.Do(func() { err = vm.VM.Shutdown(ctx) }) - return err -} - -// syncTest contains both the actual VMs as well as the parameters with the expected output. -type syncTest struct { - responseIntercept func(vm *VM, nodeID ids.NodeID, requestID uint32, response []byte) - stateSyncMinBlocks uint64 - syncableInterval uint64 - syncMode block.StateSyncMode - expectedErr error -} - -func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { - t.Helper() - var ( - require = require.New(t) - serverVM = vmSetup.serverVM - includedAtomicTxs = vmSetup.includedAtomicTxs - fundedAccounts = vmSetup.fundedAccounts - syncerVM = vmSetup.syncerVM - syncerEngineChan = vmSetup.syncerEngineChan - syncerAtomicMemory = vmSetup.syncerAtomicMemory - ) - // get last summary and test related methods - summary, err := serverVM.GetLastStateSummary(context.Background()) - require.NoError(err, "error getting state sync last summary") - parsedSummary, err := syncerVM.ParseStateSummary(context.Background(), summary.Bytes()) - require.NoError(err, "error parsing state summary") - retrievedSummary, err := serverVM.GetStateSummary(context.Background(), parsedSummary.Height()) - require.NoError(err, "error getting state sync summary at height") - require.Equal(summary, 
retrievedSummary) - - syncMode, err := parsedSummary.Accept(context.Background()) - require.NoError(err, "error accepting state summary") - require.Equal(test.syncMode, syncMode) - if syncMode == block.StateSyncSkipped { - return - } - - msg := <-syncerEngineChan - require.Equal(commonEng.StateSyncDone, msg) - - // If the test is expected to error, assert the correct error is returned and finish the test. - err = syncerVM.Client.Error() - if test.expectedErr != nil { - require.ErrorIs(err, test.expectedErr) - // Note we re-open the database here to avoid a closed error when the test is for a shutdown VM. - chaindb := database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, syncerVM.db)) - assertSyncPerformedHeights(t, chaindb, map[uint64]struct{}{}) - return - } - require.NoError(err, "state sync failed") - - // set [syncerVM] to bootstrapping and verify the last accepted block has been updated correctly - // and that we can bootstrap and process some blocks. - require.NoError(syncerVM.SetState(context.Background(), snow.Bootstrapping)) - require.Equal(serverVM.LastAcceptedBlock().Height(), syncerVM.LastAcceptedBlock().Height(), "block height mismatch between syncer and server") - require.Equal(serverVM.LastAcceptedBlock().ID(), syncerVM.LastAcceptedBlock().ID(), "blockID mismatch between syncer and server") - require.True(syncerVM.blockChain.HasState(syncerVM.blockChain.LastAcceptedBlock().Root()), "unavailable state for last accepted block") - assertSyncPerformedHeights(t, syncerVM.chaindb, map[uint64]struct{}{retrievedSummary.Height(): {}}) - - lastNumber := syncerVM.blockChain.LastAcceptedBlock().NumberU64() - // check the last block is indexed - lastSyncedBlock := rawdb.ReadBlock(syncerVM.chaindb, rawdb.ReadCanonicalHash(syncerVM.chaindb, lastNumber), lastNumber) - for _, tx := range lastSyncedBlock.Transactions() { - index := rawdb.ReadTxLookupEntry(syncerVM.chaindb, tx.Hash()) - require.NotNilf(index, "Miss transaction indices, number %d hash %s", lastNumber, 
tx.Hash().Hex()) - } - - // tail should be the last block synced - if syncerVM.ethConfig.TransactionHistory != 0 { - tail := lastSyncedBlock.NumberU64() - - core.CheckTxIndices(t, &tail, tail, syncerVM.chaindb, true) - } - - blocksToBuild := 10 - txsPerBlock := 10 - toAddress := testEthAddrs[1] // arbitrary choice - generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) { - b, err := predicate.NewResults().Bytes() - if err != nil { - t.Fatal(err) - } - gen.AppendExtra(b) - i := 0 - for k := range fundedAccounts { - tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, initialBaseFee, nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey) - require.NoError(err) - gen.AddTx(signedTx) - i++ - if i >= txsPerBlock { - break - } - } - }, - func(block *types.Block) { - if syncerVM.ethConfig.TransactionHistory != 0 { - tail := block.NumberU64() - syncerVM.ethConfig.TransactionHistory + 1 - // tail should be the minimum last synced block, since we skipped it to the last block - if tail < lastSyncedBlock.NumberU64() { - tail = lastSyncedBlock.NumberU64() - } - core.CheckTxIndices(t, &tail, block.NumberU64(), syncerVM.chaindb, true) + newVMFn := func() (extension.InnerVM, dummy.ConsensusCallbacks) { + vm := newDefaultTestVM() + return vm, vm.extensionConfig.ConsensusCallbacks } - }, - ) - - // check we can transition to [NormalOp] state and continue to process blocks. 
- require.NoError(syncerVM.SetState(context.Background(), snow.NormalOp)) - require.True(syncerVM.bootstrapped.Get()) - - // check atomic memory was synced properly - syncerSharedMemories := atomictest.NewSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID) - for _, tx := range includedAtomicTxs { - ops, err := atomictest.ConvertToAtomicOps(tx) - require.NoError(err) - syncerSharedMemories.AssertOpsApplied(t, ops) - } - - // Generate blocks after we have entered normal consensus as well - generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) { - b, err := predicate.NewResults().Bytes() - if err != nil { - t.Fatal(err) - } - gen.AppendExtra(b) - i := 0 - for k := range fundedAccounts { - tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, initialBaseFee, nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey) - require.NoError(err) - gen.AddTx(signedTx) - i++ - if i >= txsPerBlock { - break + testSetup := &testutils.SyncTestSetup{ + NewVM: newVMFn, + GenFn: genFn, + ExtraSyncerVMTest: nil, } - } - }, - func(block *types.Block) { - if syncerVM.ethConfig.TransactionHistory != 0 { - tail := block.NumberU64() - syncerVM.ethConfig.TransactionHistory + 1 - // tail should be the minimum last synced block, since we skipped it to the last block - if tail < lastSyncedBlock.NumberU64() { - tail = lastSyncedBlock.NumberU64() - } - core.CheckTxIndices(t, &tail, block.NumberU64(), syncerVM.chaindb, true) - } - }, - ) -} - -// patchBlock returns a copy of [blk] with [root] and updates [db] to -// include the new block as canonical for [blk]'s height. -// This breaks the digestibility of the chain since after this call -// [blk] does not necessarily define a state transition from its parent -// state to the new state root. 
-func patchBlock(blk *types.Block, root common.Hash, db ethdb.Database) *types.Block { - header := blk.Header() - header.Root = root - receipts := rawdb.ReadRawReceipts(db, blk.Hash(), blk.NumberU64()) - newBlk := types.NewBlockWithExtData( - header, blk.Transactions(), blk.Uncles(), receipts, trie.NewStackTrie(nil), blk.ExtData(), true, - ) - rawdb.WriteBlock(db, newBlk) - rawdb.WriteCanonicalHash(db, newBlk.Hash(), newBlk.NumberU64()) - return newBlk -} - -// generateAndAcceptBlocks uses [core.GenerateChain] to generate blocks, then -// calls Verify and Accept on each generated block -// TODO: consider using this helper function in vm_test.go and elsewhere in this package to clean up tests -func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, *core.BlockGen), accepted func(*types.Block)) { - t.Helper() - - // acceptExternalBlock defines a function to parse, verify, and accept a block once it has been - // generated by GenerateChain - acceptExternalBlock := func(block *types.Block) { - bytes, err := rlp.EncodeToBytes(block) - if err != nil { - t.Fatal(err) - } - vmBlock, err := vm.ParseBlock(context.Background(), bytes) - if err != nil { - t.Fatal(err) - } - if err := vmBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := vmBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - if accepted != nil { - accepted(block) - } - } - _, _, err := core.GenerateChain( - vm.chainConfig, - vm.blockChain.LastAcceptedBlock(), - dummy.NewFakerWithCallbacks(vm.createConsensusCallbacks()), - vm.chaindb, - numBlocks, - 10, - func(i int, g *core.BlockGen) { - g.SetOnBlockGenerated(acceptExternalBlock) - g.SetCoinbase(constants.BlackholeAddr) // necessary for syntactic validation of the block - gen(i, g) - }, - ) - if err != nil { - t.Fatal(err) - } - vm.blockChain.DrainAcceptorQueue() -} - -// assertSyncPerformedHeights iterates over all heights the VM has synced to and -// verifies it matches [expected]. 
-func assertSyncPerformedHeights(t *testing.T, db ethdb.Iteratee, expected map[uint64]struct{}) { - it := rawdb.NewSyncPerformedIterator(db) - defer it.Release() - - found := make(map[uint64]struct{}, len(expected)) - for it.Next() { - found[rawdb.UnpackSyncPerformedKey(it.Key())] = struct{}{} + test.TestFunc(t, testSetup) + }) } - require.NoError(t, it.Error()) - require.Equal(t, expected, found) } diff --git a/plugin/evm/testutils/genesis.go b/plugin/evm/testutils/genesis.go new file mode 100644 index 0000000000..31eb777f82 --- /dev/null +++ b/plugin/evm/testutils/genesis.go @@ -0,0 +1,122 @@ +package testutils + +import ( + "encoding/json" + "math/big" + "testing" + + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/snow" + commoneng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/upgrade" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/utils" + "github.com/ethereum/go-ethereum/common" +) + +var ( + GenesisJSONApricotPhase0 = genesisJSON(params.TestLaunchConfig) + GenesisJSONApricotPhase1 = genesisJSON(params.TestApricotPhase1Config) + GenesisJSONApricotPhase2 = genesisJSON(params.TestApricotPhase2Config) + GenesisJSONApricotPhase3 = genesisJSON(params.TestApricotPhase3Config) + GenesisJSONApricotPhase4 = genesisJSON(params.TestApricotPhase4Config) + GenesisJSONApricotPhase5 = genesisJSON(params.TestApricotPhase5Config) + GenesisJSONApricotPhasePre6 = genesisJSON(params.TestApricotPhasePre6Config) + GenesisJSONApricotPhase6 = genesisJSON(params.TestApricotPhase6Config) + GenesisJSONApricotPhasePost6 = genesisJSON(params.TestApricotPhasePost6Config) + GenesisJSONBanff = genesisJSON(params.TestBanffChainConfig) + GenesisJSONCortina 
= genesisJSON(params.TestCortinaChainConfig) + GenesisJSONDurango = genesisJSON(params.TestDurangoChainConfig) + GenesisJSONEtna = genesisJSON(params.TestEtnaChainConfig) + + GenesisJSONLatest = GenesisJSONEtna +) + +// genesisJSON returns the JSON representation of the genesis block +// for the given chain configuration, with pre-funded accounts. +func genesisJSON(cfg *params.ChainConfig) string { + g := new(core.Genesis) + g.Difficulty = big.NewInt(0) + g.GasLimit = 0x5f5e100 + g.Timestamp = uint64(upgrade.InitiallyActiveTime.Unix()) + + // Use chainId: 43111, so that it does not overlap with any Avalanche ChainIDs, which may have their + // config overridden in vm.Initialize. + cpy := *cfg + cpy.ChainID = big.NewInt(43111) + g.Config = &cpy + + allocStr := `{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}}` + json.Unmarshal([]byte(allocStr), &g.Alloc) + // After Durango, an additional account is funded in tests to use + // with warp messages. 
+ if cfg.IsDurango(0) { + addr := common.HexToAddress("0x99b9DEA54C48Dfea6aA9A4Ca4623633EE04ddbB5") + balance := new(big.Int).Mul(big.NewInt(params.Ether), big.NewInt(10)) + g.Alloc[addr] = types.GenesisAccount{Balance: balance} + } + + // Fund the test keys + for _, addr := range TestEthAddrs { + balance := new(big.Int).Mul(big.NewInt(params.Ether), big.NewInt(10)) + g.Alloc[addr] = types.GenesisAccount{Balance: balance} + } + + b, err := json.Marshal(g) + if err != nil { + panic(err) + } + return string(b) +} + +func NewPrefundedGenesis( + balance int, + addresses ...common.Address, +) *core.Genesis { + alloc := types.GenesisAlloc{} + for _, address := range addresses { + alloc[address] = types.GenesisAccount{ + Balance: big.NewInt(int64(balance)), + } + } + + return &core.Genesis{ + Config: params.TestChainConfig, + Difficulty: big.NewInt(0), + Alloc: alloc, + } +} + +// SetupGenesis sets up the genesis +// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] +func SetupGenesis( + t *testing.T, + genesisJSON string, +) (*snow.Context, + database.Database, + []byte, + chan commoneng.Message, + *avalancheatomic.Memory, +) { + genesisBytes := []byte(genesisJSON) + ctx := utils.TestSnowContext() + + baseDB := memdb.New() + + // initialize the atomic memory + atomicMemory := avalancheatomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) + ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID) + + // NB: this lock is intentionally left locked when this function returns. + // The caller of this function is responsible for unlocking. + ctx.Lock.Lock() + + issuer := make(chan commoneng.Message, 1) + prefixedDB := prefixdb.New([]byte{1}, baseDB) + return ctx, prefixedDB, genesisBytes, issuer, atomicMemory +} diff --git a/plugin/evm/testutils/test_syncervm.go b/plugin/evm/testutils/test_syncervm.go new file mode 100644 index 0000000000..b3c804af8c --- /dev/null +++ b/plugin/evm/testutils/test_syncervm.go @@ -0,0 +1,667 @@ +// (c) 2021-2022, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package testutils + +import ( + "context" + "fmt" + "math/big" + "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" + avalanchedatabase "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils/set" + + "github.com/ava-labs/coreth/accounts/keystore" + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/constants" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/plugin/evm/database" + "github.com/ava-labs/coreth/plugin/evm/extension" + vmsync "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ava-labs/coreth/predicate" + statesyncclient "github.com/ava-labs/coreth/sync/client" + "github.com/ava-labs/coreth/sync/statesync" + "github.com/ava-labs/coreth/trie" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +type SyncerVMTest struct { + Name string + TestFunc func( + t *testing.T, + testSetup *SyncTestSetup, + ) +} + +var SyncerVMTests = []SyncerVMTest{ + { + Name: "SkipStateSyncTest", + TestFunc: SkipStateSyncTest, + }, + { + Name: "StateSyncFromScratchTest", + TestFunc: StateSyncFromScratchTest, + }, + { + Name: "StateSyncFromScratchExceedParentTest", + TestFunc: StateSyncFromScratchExceedParentTest, + }, + { + Name: 
"StateSyncToggleEnabledToDisabledTest", + TestFunc: StateSyncToggleEnabledToDisabledTest, + }, + { + Name: "VMShutdownWhileSyncingTest", + TestFunc: VMShutdownWhileSyncingTest, + }, +} + +func SkipStateSyncTest(t *testing.T, testSetup *SyncTestSetup) { + rand.Seed(1) + test := SyncTestParams{ + SyncableInterval: 256, + StateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync + SyncMode: block.StateSyncSkipped, + } + testSyncVMSetup := initSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch, testSetup) + + testSyncerVM(t, testSyncVMSetup, test, testSetup.ExtraSyncerVMTest) +} + +func StateSyncFromScratchTest(t *testing.T, testSetup *SyncTestSetup) { + rand.Seed(1) + test := SyncTestParams{ + SyncableInterval: 256, + StateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync + SyncMode: block.StateSyncStatic, + } + testSyncVMSetup := initSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch, testSetup) + + testSyncerVM(t, testSyncVMSetup, test, testSetup.ExtraSyncerVMTest) +} + +func StateSyncFromScratchExceedParentTest(t *testing.T, testSetup *SyncTestSetup) { + rand.Seed(1) + numToGen := vmsync.ParentsToFetch + uint64(32) + test := SyncTestParams{ + SyncableInterval: numToGen, + StateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync + SyncMode: block.StateSyncStatic, + } + testSyncVMSetup := initSyncServerAndClientVMs(t, test, int(numToGen), testSetup) + + testSyncerVM(t, testSyncVMSetup, test, testSetup.ExtraSyncerVMTest) +} + +func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup) { + rand.Seed(1) + // Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice. 
+ metrics.Enabled = false + defer func() { + metrics.Enabled = true + }() + + var lock sync.Mutex + reqCount := 0 + test := SyncTestParams{ + SyncableInterval: 256, + StateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync + SyncMode: block.StateSyncStatic, + responseIntercept: func(syncerVM extension.InnerVM, nodeID ids.NodeID, requestID uint32, response []byte) { + lock.Lock() + defer lock.Unlock() + + reqCount++ + // Fail all requests after number 50 to interrupt the sync + if reqCount > 50 { + if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout); err != nil { + panic(err) + } + if err := syncerVM.SyncerClient().Shutdown(); err != nil { + panic(err) + } + } else { + syncerVM.AppResponse(context.Background(), nodeID, requestID, response) + } + }, + expectedErr: context.Canceled, + } + testSyncVMSetup := initSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch, testSetup) + + // Perform sync resulting in early termination. 
+ testSyncerVM(t, testSyncVMSetup, test, testSetup.ExtraSyncerVMTest) + + test.SyncMode = block.StateSyncStatic + test.responseIntercept = nil + test.expectedErr = nil + + syncDisabledVM, _ := testSetup.NewVM() + appSender := &enginetest.Sender{T: t} + appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } + appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { + nodeID, hasItem := nodeSet.Pop() + if !hasItem { + t.Fatal("expected nodeSet to contain at least 1 nodeID") + } + go testSyncVMSetup.serverVM.vm.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) + return nil + } + // Disable metrics to prevent duplicate registerer + stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}` + SetupVM(t, false, GenesisJSONLatest, stateSyncDisabledConfigJSON, "", syncDisabledVM) + if err := syncDisabledVM.Initialize( + context.Background(), + testSyncVMSetup.syncerVM.SnowCtx, + testSyncVMSetup.syncerVM.DB, + []byte(GenesisJSONLatest), + nil, + []byte(stateSyncDisabledConfigJSON), + testSyncVMSetup.syncerVM.EngineChan, + []*commonEng.Fx{}, + appSender, + ); err != nil { + t.Fatal(err) + } + + defer func() { + if err := syncDisabledVM.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + }() + + if height := syncDisabledVM.LastAcceptedBlockInternal().Height(); height != 0 { + t.Fatalf("Unexpected last accepted height: %d", height) + } + + enabled, err := syncDisabledVM.StateSyncEnabled(context.Background()) + assert.NoError(t, err) + assert.False(t, enabled, "sync should be disabled") + + // Process the first 10 blocks from the serverVM + for i := uint64(1); i < 10; i++ { + ethBlock := testSyncVMSetup.serverVM.vm.Ethereum().BlockChain().GetBlockByNumber(i) + if ethBlock == nil { + t.Fatalf("VM Server did not have a block available at height %d", i) + } + b, err := rlp.EncodeToBytes(ethBlock) + if err != nil { + t.Fatal(err) 
+ } + blk, err := syncDisabledVM.ParseBlock(context.Background(), b) + if err != nil { + t.Fatal(err) + } + if err := blk.Verify(context.Background()); err != nil { + t.Fatal(err) + } + if err := blk.Accept(context.Background()); err != nil { + t.Fatal(err) + } + } + // Verify the snapshot disk layer matches the last block root + lastRoot := syncDisabledVM.Ethereum().BlockChain().CurrentBlock().Root + if err := syncDisabledVM.Ethereum().BlockChain().Snapshots().Verify(lastRoot); err != nil { + t.Fatal(err) + } + syncDisabledVM.Ethereum().BlockChain().DrainAcceptorQueue() + + // Create a new VM from the same database with state sync enabled. + syncReEnabledVM, _ := testSetup.NewVM() + // Enable state sync in configJSON + configJSON := fmt.Sprintf( + `{"state-sync-enabled":true, "state-sync-min-blocks":%d}`, + test.StateSyncMinBlocks, + ) + if err := syncReEnabledVM.Initialize( + context.Background(), + testSyncVMSetup.syncerVM.SnowCtx, + testSyncVMSetup.syncerVM.DB, + []byte(GenesisJSONLatest), + nil, + []byte(configJSON), + testSyncVMSetup.syncerVM.EngineChan, + []*commonEng.Fx{}, + appSender, + ); err != nil { + t.Fatal(err) + } + + // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] + testSyncVMSetup.serverVM.appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + if test.responseIntercept == nil { + go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response) + } else { + go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response) + } + + return nil + } + + // connect peer to [syncerVM] + assert.NoError(t, syncReEnabledVM.Connected( + context.Background(), + testSyncVMSetup.serverVM.ctx.NodeID, + statesyncclient.StateSyncVersion, + )) + + enabled, err = syncReEnabledVM.StateSyncEnabled(context.Background()) + assert.NoError(t, err) + assert.True(t, enabled, "sync should be enabled") + + testSyncVMSetup.syncerVM.VM = syncReEnabledVM + testSyncerVM(t, 
testSyncVMSetup, test, testSetup.ExtraSyncerVMTest) +} + +func VMShutdownWhileSyncingTest(t *testing.T, testSetup *SyncTestSetup) { + var ( + lock sync.Mutex + testSyncVMSetup *testSyncVMSetup + ) + reqCount := 0 + test := SyncTestParams{ + SyncableInterval: 256, + StateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync + SyncMode: block.StateSyncStatic, + responseIntercept: func(syncerVM extension.InnerVM, nodeID ids.NodeID, requestID uint32, response []byte) { + lock.Lock() + defer lock.Unlock() + + reqCount++ + // Shutdown the VM after 50 requests to interrupt the sync + if reqCount == 50 { + // Note this verifies the VM shutdown does not time out while syncing. + require.NoError(t, testSyncVMSetup.syncerVM.shutdownOnceSyncerVM.Shutdown(context.Background())) + } else if reqCount < 50 { + err := syncerVM.AppResponse(context.Background(), nodeID, requestID, response) + require.NoError(t, err) + } + }, + expectedErr: context.Canceled, + } + testSyncVMSetup = initSyncServerAndClientVMs(t, test, vmsync.ParentsToFetch, testSetup) + // Perform sync resulting in early termination. + testSyncerVM(t, testSyncVMSetup, test, testSetup.ExtraSyncerVMTest) +} + +type SyncTestSetup struct { + NewVM func() (extension.InnerVM, dummy.ConsensusCallbacks) // should not be initialized + AfterInit func(t *testing.T, testParams SyncTestParams, vm extension.InnerVM) + GenFn func(i int, vm extension.InnerVM, gen *core.BlockGen) + ExtraSyncerVMTest func(t *testing.T, syncerVM SyncerVMSetup) +} + +func initSyncServerAndClientVMs(t *testing.T, test SyncTestParams, numBlocks int, testSetup *SyncTestSetup) *testSyncVMSetup { + require := require.New(t) + + // override commitInterval so the call to trie creates a commit at the height [syncableInterval]. + // This is necessary to support fetching a state summary. 
+ config := fmt.Sprintf(`{"commit-interval": %d, "state-sync-commit-interval": %d}`, test.SyncableInterval, test.SyncableInterval) + serverVM, cb := testSetup.NewVM() + _, _, _, serverAppSender, serverCtx := SetupVM(t, true, GenesisJSONLatest, config, "", serverVM) + t.Cleanup(func() { + log.Info("Shutting down server VM") + require.NoError(serverVM.Shutdown(context.Background())) + }) + var err error + if testSetup.AfterInit != nil { + testSetup.AfterInit(t, test, serverVM) + } + generateAndAcceptBlocks(t, serverVM, numBlocks, testSetup.GenFn, nil, cb) + + // make some accounts + root, accounts := statesync.FillAccountsWithOverlappingStorage(t, serverVM.Ethereum().BlockChain().TrieDB(), types.EmptyRootHash, 1000, 16) + + // patch serverVM's lastAcceptedBlock to have the new root + // and update the vm's state so the trie with accounts will + // be returned by StateSyncGetLastSummary + lastAccepted := serverVM.Ethereum().BlockChain().LastAcceptedBlock() + patchedBlock := patchBlock(lastAccepted, root, serverVM.Ethereum().ChainDb()) + blockBytes, err := rlp.EncodeToBytes(patchedBlock) + require.NoError(err) + internalBlock, err := serverVM.ParseBlock(context.Background(), blockBytes) + require.NoError(err) + require.NoError(serverVM.SetLastAcceptedBlock(internalBlock)) + + // initialise [syncerVM] with blank genesis state + // we also override [syncerVM]'s commit interval so the atomic trie works correctly. 
+ stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d, "commit-interval": %d}`, test.StateSyncMinBlocks, 4, test.SyncableInterval) + + syncerVM, syncerCB := testSetup.NewVM() + syncerEngineChan, syncerDB, syncerAtomicMemory, syncerAppSender, chainCtx := SetupVM(t, false, GenesisJSONLatest, stateSyncEnabledJSON, "", syncerVM) + shutdownOnceSyncerVM := &shutdownOnceVM{InnerVM: syncerVM} + t.Cleanup(func() { + require.NoError(shutdownOnceSyncerVM.Shutdown(context.Background())) + }) + require.NoError(syncerVM.SetState(context.Background(), snow.StateSyncing)) + enabled, err := syncerVM.StateSyncEnabled(context.Background()) + require.NoError(err) + require.True(enabled) + + // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] + serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + if test.responseIntercept == nil { + go syncerVM.AppResponse(ctx, nodeID, requestID, response) + } else { + go test.responseIntercept(syncerVM, nodeID, requestID, response) + } + + return nil + } + + // connect peer to [syncerVM] + require.NoError( + syncerVM.Connected( + context.Background(), + chainCtx.NodeID, + statesyncclient.StateSyncVersion, + ), + ) + + // override [syncerVM]'s SendAppRequest function to trigger AppRequest on [serverVM] + syncerAppSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { + nodeID, hasItem := nodeSet.Pop() + require.True(hasItem, "expected nodeSet to contain at least 1 nodeID") + err := serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) + require.NoError(err) + return nil + } + + return &testSyncVMSetup{ + serverVM: serverVMSetup{ + vm: serverVM, + appSender: serverAppSender, + ctx: serverCtx, + }, + fundedAccounts: accounts, + syncerVM: SyncerVMSetup{ + VM: syncerVM, + ConsensusCallbacks: 
syncerCB, + SnowCtx: chainCtx, + DB: syncerDB, + EngineChan: syncerEngineChan, + AtomicMemory: syncerAtomicMemory, + shutdownOnceSyncerVM: shutdownOnceSyncerVM, + }, + } +} + +// testSyncVMSetup contains the required set up for a client VM to perform state sync +// off of a server VM. +type testSyncVMSetup struct { + serverVM serverVMSetup + syncerVM SyncerVMSetup + + fundedAccounts map[*keystore.Key]*types.StateAccount +} + +type serverVMSetup struct { + vm extension.InnerVM + ctx *snow.Context + appSender *enginetest.Sender +} + +type SyncerVMSetup struct { + VM extension.InnerVM + SnowCtx *snow.Context + ConsensusCallbacks dummy.ConsensusCallbacks + DB avalanchedatabase.Database + EngineChan chan commonEng.Message + AtomicMemory *avalancheatomic.Memory + shutdownOnceSyncerVM *shutdownOnceVM +} + +type shutdownOnceVM struct { + extension.InnerVM + shutdownOnce sync.Once +} + +func (vm *shutdownOnceVM) Shutdown(ctx context.Context) error { + var err error + vm.shutdownOnce.Do(func() { err = vm.InnerVM.Shutdown(ctx) }) + return err +} + +// SyncTestParams contains both the actual VMs as well as the parameters with the expected output. 
+type SyncTestParams struct { + responseIntercept func(vm extension.InnerVM, nodeID ids.NodeID, requestID uint32, response []byte) + StateSyncMinBlocks uint64 + SyncableInterval uint64 + SyncMode block.StateSyncMode + expectedErr error +} + +func testSyncerVM(t *testing.T, testSyncVMSetup *testSyncVMSetup, test SyncTestParams, extraSyncerVMTest func(t *testing.T, syncerVMSetup SyncerVMSetup)) { + t.Helper() + var ( + require = require.New(t) + serverVM = testSyncVMSetup.serverVM.vm + fundedAccounts = testSyncVMSetup.fundedAccounts + syncerVM = testSyncVMSetup.syncerVM.VM + syncerEngineChan = testSyncVMSetup.syncerVM.EngineChan + ) + // get last summary and test related methods + summary, err := serverVM.GetLastStateSummary(context.Background()) + require.NoError(err, "error getting state sync last summary") + parsedSummary, err := syncerVM.ParseStateSummary(context.Background(), summary.Bytes()) + require.NoError(err, "error parsing state summary") + retrievedSummary, err := serverVM.GetStateSummary(context.Background(), parsedSummary.Height()) + require.NoError(err, "error getting state sync summary at height") + require.Equal(summary, retrievedSummary) + + syncMode, err := parsedSummary.Accept(context.Background()) + require.NoError(err, "error accepting state summary") + require.Equal(test.SyncMode, syncMode) + if syncMode == block.StateSyncSkipped { + return + } + + msg := <-syncerEngineChan + require.Equal(commonEng.StateSyncDone, msg) + + // If the test is expected to error, assert the correct error is returned and finish the test. + err = syncerVM.SyncerClient().Error() + if test.expectedErr != nil { + require.ErrorIs(err, test.expectedErr) + // Note we re-open the database here to avoid a closed error when the test is for a shutdown VM. + // TODO: this avoids circular dependencies but is not ideal. 
+ ethDBPrefix := []byte("ethdb") + chaindb := database.WrapDatabase(prefixdb.NewNested(ethDBPrefix, testSyncVMSetup.syncerVM.DB)) + assertSyncPerformedHeights(t, chaindb, map[uint64]struct{}{}) + return + } + require.NoError(err, "state sync failed") + + // set [syncerVM] to bootstrapping and verify the last accepted block has been updated correctly + // and that we can bootstrap and process some blocks. + require.NoError(syncerVM.SetState(context.Background(), snow.Bootstrapping)) + require.Equal(serverVM.LastAcceptedVMBlock().Height(), syncerVM.LastAcceptedVMBlock().Height(), "block height mismatch between syncer and server") + require.Equal(serverVM.LastAcceptedVMBlock().ID(), syncerVM.LastAcceptedVMBlock().ID(), "blockID mismatch between syncer and server") + require.True(syncerVM.Ethereum().BlockChain().HasState(syncerVM.Ethereum().BlockChain().LastAcceptedBlock().Root()), "unavailable state for last accepted block") + assertSyncPerformedHeights(t, syncerVM.Ethereum().ChainDb(), map[uint64]struct{}{retrievedSummary.Height(): {}}) + + lastNumber := syncerVM.Ethereum().BlockChain().LastAcceptedBlock().NumberU64() + // check the last block is indexed + lastSyncedBlock := rawdb.ReadBlock(syncerVM.Ethereum().ChainDb(), rawdb.ReadCanonicalHash(syncerVM.Ethereum().ChainDb(), lastNumber), lastNumber) + for _, tx := range lastSyncedBlock.Transactions() { + index := rawdb.ReadTxLookupEntry(syncerVM.Ethereum().ChainDb(), tx.Hash()) + require.NotNilf(index, "Miss transaction indices, number %d hash %s", lastNumber, tx.Hash().Hex()) + } + + // tail should be the last block synced + if syncerVM.Ethereum().BlockChain().CacheConfig().TransactionHistory != 0 { + tail := lastSyncedBlock.NumberU64() + + core.CheckTxIndices(t, &tail, tail, syncerVM.Ethereum().ChainDb(), true) + } + + blocksToBuild := 10 + txsPerBlock := 10 + toAddress := TestEthAddrs[1] // arbitrary choice + generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, vm extension.InnerVM, gen *core.BlockGen) 
{ + b, err := predicate.NewResults().Bytes() + if err != nil { + t.Fatal(err) + } + gen.AppendExtra(b) + i := 0 + for k := range fundedAccounts { + tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.Ethereum().BlockChain().Config().ChainID), k.PrivateKey) + require.NoError(err) + gen.AddTx(signedTx) + i++ + if i >= txsPerBlock { + break + } + } + }, + func(block *types.Block) { + if syncerVM.Ethereum().BlockChain().CacheConfig().TransactionHistory != 0 { + tail := block.NumberU64() - syncerVM.Ethereum().BlockChain().CacheConfig().TransactionHistory + 1 + // tail should be the minimum last synced block, since we skipped it to the last block + if tail < lastSyncedBlock.NumberU64() { + tail = lastSyncedBlock.NumberU64() + } + core.CheckTxIndices(t, &tail, block.NumberU64(), syncerVM.Ethereum().ChainDb(), true) + } + }, + testSyncVMSetup.syncerVM.ConsensusCallbacks, + ) + + // check we can transition to [NormalOp] state and continue to process blocks. 
+ require.NoError(syncerVM.SetState(context.Background(), snow.NormalOp)) + + // Generate blocks after we have entered normal consensus as well + generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, vm extension.InnerVM, gen *core.BlockGen) { + b, err := predicate.NewResults().Bytes() + if err != nil { + t.Fatal(err) + } + gen.AppendExtra(b) + i := 0 + for k := range fundedAccounts { + tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.Ethereum().BlockChain().Config().ChainID), k.PrivateKey) + require.NoError(err) + gen.AddTx(signedTx) + i++ + if i >= txsPerBlock { + break + } + } + }, + func(block *types.Block) { + if syncerVM.Ethereum().BlockChain().CacheConfig().TransactionHistory != 0 { + tail := block.NumberU64() - syncerVM.Ethereum().BlockChain().CacheConfig().TransactionHistory + 1 + // tail should be the minimum last synced block, since we skipped it to the last block + if tail < lastSyncedBlock.NumberU64() { + tail = lastSyncedBlock.NumberU64() + } + core.CheckTxIndices(t, &tail, block.NumberU64(), syncerVM.Ethereum().ChainDb(), true) + } + }, + testSyncVMSetup.syncerVM.ConsensusCallbacks, + ) + + if extraSyncerVMTest != nil { + extraSyncerVMTest(t, testSyncVMSetup.syncerVM) + } +} + +// patchBlock returns a copy of [blk] with [root] and updates [db] to +// include the new block as canonical for [blk]'s height. +// This breaks the digestibility of the chain since after this call +// [blk] does not necessarily define a state transition from its parent +// state to the new state root. 
+func patchBlock(blk *types.Block, root common.Hash, db ethdb.Database) *types.Block { + header := blk.Header() + header.Root = root + receipts := rawdb.ReadRawReceipts(db, blk.Hash(), blk.NumberU64()) + newBlk := types.NewBlockWithExtData( + header, blk.Transactions(), blk.Uncles(), receipts, trie.NewStackTrie(nil), blk.ExtData(), true, + ) + rawdb.WriteBlock(db, newBlk) + rawdb.WriteCanonicalHash(db, newBlk.Hash(), newBlk.NumberU64()) + return newBlk +} + +// generateAndAcceptBlocks uses [core.GenerateChain] to generate blocks, then +// calls Verify and Accept on each generated block +// TODO: consider using this helper function in vm_test.go and elsewhere in this package to clean up tests +func generateAndAcceptBlocks(t *testing.T, vm extension.InnerVM, numBlocks int, gen func(int, extension.InnerVM, *core.BlockGen), accepted func(*types.Block), cb dummy.ConsensusCallbacks) { + t.Helper() + + // acceptExternalBlock defines a function to parse, verify, and accept a block once it has been + // generated by GenerateChain + acceptExternalBlock := func(block *types.Block) { + bytes, err := rlp.EncodeToBytes(block) + if err != nil { + t.Fatal(err) + } + vmBlock, err := vm.ParseBlock(context.Background(), bytes) + if err != nil { + t.Fatal(err) + } + if err := vmBlock.Verify(context.Background()); err != nil { + t.Fatal(err) + } + if err := vmBlock.Accept(context.Background()); err != nil { + t.Fatal(err) + } + + if accepted != nil { + accepted(block) + } + } + _, _, err := core.GenerateChain( + vm.Ethereum().BlockChain().Config(), + vm.Ethereum().BlockChain().LastAcceptedBlock(), + dummy.NewFakerWithCallbacks(cb), + vm.Ethereum().ChainDb(), + numBlocks, + 10, + func(i int, g *core.BlockGen) { + g.SetOnBlockGenerated(acceptExternalBlock) + g.SetCoinbase(constants.BlackholeAddr) // necessary for syntactic validation of the block + gen(i, vm, g) + }, + ) + if err != nil { + t.Fatal(err) + } + vm.Ethereum().BlockChain().DrainAcceptorQueue() +} + +// 
assertSyncPerformedHeights iterates over all heights the VM has synced to and +// verifies it matches [expected]. +func assertSyncPerformedHeights(t *testing.T, db ethdb.Iteratee, expected map[uint64]struct{}) { + it := rawdb.NewSyncPerformedIterator(db) + defer it.Release() + + found := make(map[uint64]struct{}, len(expected)) + for it.Next() { + found[rawdb.UnpackSyncPerformedKey(it.Key())] = struct{}{} + } + require.NoError(t, it.Error()) + require.Equal(t, expected, found) +} diff --git a/plugin/evm/testutils/test_vm.go b/plugin/evm/testutils/test_vm.go new file mode 100644 index 0000000000..915de2dc18 --- /dev/null +++ b/plugin/evm/testutils/test_vm.go @@ -0,0 +1,52 @@ +package testutils + +import ( + "context" + "testing" + + avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/snow" + commoneng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" + "github.com/stretchr/testify/require" +) + +func SetupVM( + t *testing.T, + finishBootstrapping bool, + genesisJSON string, + configJSON string, + upgradeJSON string, + vm commoneng.VM, +) ( + chan commoneng.Message, + database.Database, + *avalancheatomic.Memory, + *enginetest.Sender, + *snow.Context, +) { + ctx, dbManager, genesisBytes, issuer, m := SetupGenesis(t, genesisJSON) + appSender := &enginetest.Sender{T: t} + appSender.CantSendAppGossip = true + appSender.SendAppGossipF = func(context.Context, commoneng.SendConfig, []byte) error { return nil } + err := vm.Initialize( + context.Background(), + ctx, + dbManager, + genesisBytes, + []byte(upgradeJSON), + []byte(configJSON), + issuer, + []*commoneng.Fx{}, + appSender, + ) + require.NoError(t, err, "error initializing GenesisVM") + + if finishBootstrapping { + require.NoError(t, vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(t, vm.SetState(context.Background(), snow.NormalOp)) + } + + 
return issuer, dbManager, m, appSender, ctx +} diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 429aade245..4530c9aaa5 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -11,7 +11,6 @@ import ( "testing" "time" - avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" @@ -28,15 +27,12 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" - "google.golang.org/protobuf/proto" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/atomic" "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/utils" ) @@ -50,7 +46,7 @@ func TestEthTxGossip(t *testing.T) { pk, err := secp256k1.NewPrivateKey() require.NoError(err) address := pk.EthAddress() - genesis := newPrefundedGenesis(100_000_000_000_000_000, address) + genesis := testutils.NewPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -59,10 +55,6 @@ func TestEthTxGossip(t *testing.T) { } vm := newDefaultTestVM() - vm.p2pSender = responseSender - vm.atomicTxGossipHandler = &p2p.NoOpHandler{} - vm.atomicTxPullGossiper = &gossip.NoOpGossiper{} - require.NoError(vm.Initialize( ctx, snowCtx, @@ -72,7 +64,7 @@ func TestEthTxGossip(t *testing.T) { nil, make(chan common.Message), nil, - &enginetest.Sender{}, + responseSender, )) require.NoError(vm.SetState(ctx, snow.NormalOp)) @@ -171,145 +163,6 @@ func TestEthTxGossip(t *testing.T) { wg.Wait() } -func TestAtomicTxGossip(t *testing.T) { - require := require.New(t) - ctx := context.Background() - snowCtx := utils.TestSnowContext() - 
snowCtx.AVAXAssetID = ids.GenerateTestID() - validatorState := utils.NewTestValidatorState() - snowCtx.ValidatorState = validatorState - memory := avalancheatomic.NewMemory(memdb.New()) - snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) - - pk, err := secp256k1.NewPrivateKey() - require.NoError(err) - address := pk.EthAddress() - genesis := newPrefundedGenesis(100_000_000_000_000_000, address) - genesisBytes, err := genesis.MarshalJSON() - require.NoError(err) - - responseSender := &enginetest.SenderStub{ - SentAppResponse: make(chan []byte, 1), - } - vm := newDefaultTestVM() - vm.p2pSender = responseSender - vm.ethTxGossipHandler = &p2p.NoOpHandler{} - vm.ethTxPullGossiper = &gossip.NoOpGossiper{} - - require.NoError(vm.Initialize( - ctx, - snowCtx, - memdb.New(), - genesisBytes, - nil, - nil, - make(chan common.Message), - nil, - &enginetest.Sender{}, - )) - require.NoError(vm.SetState(ctx, snow.NormalOp)) - - defer func() { - require.NoError(vm.Shutdown(ctx)) - }() - - // sender for the peer requesting gossip from [vm] - peerSender := &enginetest.SenderStub{ - SentAppRequest: make(chan []byte, 1), - } - network, err := p2p.NewNetwork(logging.NoLog{}, peerSender, prometheus.NewRegistry(), "") - require.NoError(err) - client := network.NewClient(p2p.AtomicTxGossipHandlerID) - - // we only accept gossip requests from validators - requestingNodeID := ids.GenerateTestNodeID() - require.NoError(vm.Network.Connected(ctx, requestingNodeID, nil)) - validatorState.GetCurrentHeightF = func(context.Context) (uint64, error) { - return 0, nil - } - validatorState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return map[ids.NodeID]*validators.GetValidatorOutput{ - requestingNodeID: { - NodeID: requestingNodeID, - Weight: 1, - }, - }, nil - } - - // Ask the VM for any new transactions. We should get nothing at first. 
- emptyBloomFilter, err := gossip.NewBloomFilter( - prometheus.NewRegistry(), - "", - config.TxGossipBloomMinTargetElements, - config.TxGossipBloomTargetFalsePositiveRate, - config.TxGossipBloomResetFalsePositiveRate, - ) - require.NoError(err) - emptyBloomFilterBytes, _ := emptyBloomFilter.Marshal() - request := &sdk.PullGossipRequest{ - Filter: emptyBloomFilterBytes, - Salt: agoUtils.RandomBytes(32), - } - - requestBytes, err := proto.Marshal(request) - require.NoError(err) - - wg := &sync.WaitGroup{} - wg.Add(1) - onResponse := func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { - require.NoError(err) - - response := &sdk.PullGossipResponse{} - require.NoError(proto.Unmarshal(responseBytes, response)) - require.Empty(response.Gossip) - wg.Done() - } - require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) - require.NoError(vm.AppRequest(ctx, requestingNodeID, 1, time.Time{}, <-peerSender.SentAppRequest)) - require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 1, <-responseSender.SentAppResponse)) - wg.Wait() - - // Issue a tx to the VM - utxo, err := addUTXO( - memory, - snowCtx, - ids.GenerateTestID(), - 0, - snowCtx.AVAXAssetID, - 100_000_000_000, - pk.Address(), - ) - require.NoError(err) - tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) - require.NoError(err) - require.NoError(vm.mempool.AddLocalTx(tx)) - - // wait so we aren't throttled by the vm - time.Sleep(5 * time.Second) - - // Ask the VM for new transactions. We should get the newly issued tx. 
- wg.Add(1) - - marshaller := atomic.GossipAtomicTxMarshaller{} - onResponse = func(_ context.Context, nodeID ids.NodeID, responseBytes []byte, err error) { - require.NoError(err) - - response := &sdk.PullGossipResponse{} - require.NoError(proto.Unmarshal(responseBytes, response)) - require.Len(response.Gossip, 1) - - gotTx, err := marshaller.UnmarshalGossip(response.Gossip[0]) - require.NoError(err) - require.Equal(tx.ID(), gotTx.GossipID()) - - wg.Done() - } - require.NoError(client.AppRequest(ctx, set.Of(vm.ctx.NodeID), requestBytes, onResponse)) - require.NoError(vm.AppRequest(ctx, requestingNodeID, 3, time.Time{}, <-peerSender.SentAppRequest)) - require.NoError(network.AppResponse(ctx, snowCtx.NodeID, 3, <-responseSender.SentAppResponse)) - wg.Wait() -} - // Tests that a tx is gossiped when it is issued func TestEthTxPushGossipOutbound(t *testing.T) { require := require.New(t) @@ -320,14 +173,11 @@ func TestEthTxPushGossipOutbound(t *testing.T) { } vm := newDefaultTestVM() - vm.p2pSender = sender - vm.ethTxPullGossiper = gossip.NoOpGossiper{} - vm.atomicTxPullGossiper = gossip.NoOpGossiper{} pk, err := secp256k1.NewPrivateKey() require.NoError(err) address := pk.EthAddress() - genesis := newPrefundedGenesis(100_000_000_000_000_000, address) + genesis := testutils.NewPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -379,14 +229,12 @@ func TestEthTxPushGossipInbound(t *testing.T) { sender := &enginetest.Sender{} vm := newDefaultTestVM() - vm.p2pSender = sender vm.ethTxPullGossiper = gossip.NoOpGossiper{} - vm.atomicTxPullGossiper = gossip.NoOpGossiper{} pk, err := secp256k1.NewPrivateKey() require.NoError(err) address := pk.EthAddress() - genesis := newPrefundedGenesis(100_000_000_000_000_000, address) + genesis := testutils.NewPrefundedGenesis(100_000_000_000_000_000, address) genesisBytes, err := genesis.MarshalJSON() require.NoError(err) @@ -430,150 +278,3 @@ func 
TestEthTxPushGossipInbound(t *testing.T) { require.True(vm.txPool.Has(signedTx.Hash())) } - -// Tests that a tx is gossiped when it is issued -func TestAtomicTxPushGossipOutbound(t *testing.T) { - require := require.New(t) - ctx := context.Background() - snowCtx := utils.TestSnowContext() - snowCtx.AVAXAssetID = ids.GenerateTestID() - validatorState := utils.NewTestValidatorState() - snowCtx.ValidatorState = validatorState - memory := avalancheatomic.NewMemory(memdb.New()) - snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) - - pk, err := secp256k1.NewPrivateKey() - require.NoError(err) - address := pk.EthAddress() - genesis := newPrefundedGenesis(100_000_000_000_000_000, address) - genesisBytes, err := genesis.MarshalJSON() - require.NoError(err) - - sender := &enginetest.SenderStub{ - SentAppGossip: make(chan []byte, 1), - } - vm := newDefaultTestVM() - vm.p2pSender = sender - vm.ethTxPullGossiper = gossip.NoOpGossiper{} - vm.atomicTxPullGossiper = gossip.NoOpGossiper{} - - require.NoError(vm.Initialize( - ctx, - snowCtx, - memdb.New(), - genesisBytes, - nil, - nil, - make(chan common.Message), - nil, - &enginetest.SenderStub{}, - )) - require.NoError(vm.SetState(ctx, snow.NormalOp)) - - defer func() { - require.NoError(vm.Shutdown(ctx)) - }() - - // Issue a tx to the VM - utxo, err := addUTXO( - memory, - snowCtx, - ids.GenerateTestID(), - 0, - snowCtx.AVAXAssetID, - 100_000_000_000, - pk.Address(), - ) - require.NoError(err) - tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) - require.NoError(err) - require.NoError(vm.mempool.AddLocalTx(tx)) - vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) - - gossipedBytes := <-sender.SentAppGossip - require.Equal(byte(p2p.AtomicTxGossipHandlerID), gossipedBytes[0]) - - outboundGossipMsg := &sdk.PushGossip{} - require.NoError(proto.Unmarshal(gossipedBytes[1:], outboundGossipMsg)) - 
require.Len(outboundGossipMsg.Gossip, 1) - - marshaller := atomic.GossipAtomicTxMarshaller{} - gossipedTx, err := marshaller.UnmarshalGossip(outboundGossipMsg.Gossip[0]) - require.NoError(err) - require.Equal(tx.ID(), gossipedTx.Tx.ID()) -} - -// Tests that a tx is gossiped when it is issued -func TestAtomicTxPushGossipInbound(t *testing.T) { - require := require.New(t) - ctx := context.Background() - snowCtx := utils.TestSnowContext() - snowCtx.AVAXAssetID = ids.GenerateTestID() - validatorState := utils.NewTestValidatorState() - snowCtx.ValidatorState = validatorState - memory := avalancheatomic.NewMemory(memdb.New()) - snowCtx.SharedMemory = memory.NewSharedMemory(snowCtx.ChainID) - - pk, err := secp256k1.NewPrivateKey() - require.NoError(err) - address := pk.EthAddress() - genesis := newPrefundedGenesis(100_000_000_000_000_000, address) - genesisBytes, err := genesis.MarshalJSON() - require.NoError(err) - - sender := &enginetest.Sender{} - vm := newDefaultTestVM() - vm.p2pSender = sender - vm.ethTxPullGossiper = gossip.NoOpGossiper{} - vm.atomicTxPullGossiper = gossip.NoOpGossiper{} - - require.NoError(vm.Initialize( - ctx, - snowCtx, - memdb.New(), - genesisBytes, - nil, - nil, - make(chan common.Message), - nil, - &enginetest.SenderStub{}, - )) - require.NoError(vm.SetState(ctx, snow.NormalOp)) - - defer func() { - require.NoError(vm.Shutdown(ctx)) - }() - - // issue a tx to the vm - utxo, err := addUTXO( - memory, - snowCtx, - ids.GenerateTestID(), - 0, - snowCtx.AVAXAssetID, - 100_000_000_000, - pk.Address(), - ) - require.NoError(err) - tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, initialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) - require.NoError(err) - require.NoError(vm.mempool.AddLocalTx(tx)) - - marshaller := atomic.GossipAtomicTxMarshaller{} - gossipedTx := &atomic.GossipAtomicTx{ - Tx: tx, - } - gossipBytes, err := marshaller.MarshalGossip(gossipedTx) - require.NoError(err) - - 
inboundGossip := &sdk.PushGossip{ - Gossip: [][]byte{gossipBytes}, - } - inboundGossipBytes, err := proto.Marshal(inboundGossip) - require.NoError(err) - - inboundGossipMsg := append(binary.AppendUvarint(nil, p2p.AtomicTxGossipHandlerID), inboundGossipBytes...) - - require.NoError(vm.AppGossip(ctx, ids.EmptyNodeID, inboundGossipMsg)) - require.True(vm.mempool.Has(tx.ID())) -} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index ad75c5e0c7..ee4b85a740 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -33,8 +33,8 @@ import ( "github.com/ava-labs/coreth/node" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/peer" - "github.com/ava-labs/coreth/plugin/evm/atomic/extension" "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/plugin/evm/gossip" "github.com/ava-labs/coreth/plugin/evm/message" vmsync "github.com/ava-labs/coreth/plugin/evm/sync" @@ -140,7 +140,6 @@ var ( errInvalidBlock = errors.New("invalid block") errInvalidNonce = errors.New("invalid nonce") errUnclesUnsupported = errors.New("uncles unsupported") - errRejectedParent = errors.New("rejected parent") errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") errNilBlockGasCostApricotPhase4 = errors.New("nil blockGasCost is invalid after apricotPhase4") errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") @@ -257,7 +256,6 @@ type VM struct { warpBackend warp.Backend // Initialize only sets these if nil so they can be overridden in tests - p2pSender commonEng.AppSender ethTxGossipHandler p2p.Handler ethTxPushGossiper avalancheUtils.Atomic[*avalanchegossip.PushGossiper[*GossipEthTx]] ethTxPullGossiper avalanchegossip.Gossiper @@ -444,13 +442,7 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to verify chain config: %w", err) } - // initialize peer network - if vm.p2pSender == nil { - vm.p2pSender = appSender - } - - // TODO: move all network stuff to 
peer.NewNetwork - p2pNetwork, err := p2p.NewNetwork(vm.ctx.Log, vm.p2pSender, vm.sdkMetrics, "p2p") + p2pNetwork, err := p2p.NewNetwork(vm.ctx.Log, appSender, vm.sdkMetrics, "p2p") if err != nil { return fmt.Errorf("failed to initialize p2p network: %w", err) } @@ -580,12 +572,15 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { syncStats, ), }) - leafConfigs = append(leafConfigs, vm.extensionConfig.SyncLeafType) + + if vm.extensionConfig.SyncLeafType != nil { + leafConfigs = append(leafConfigs, vm.extensionConfig.SyncLeafType) + } leafHandlers := make(LeafHandlers, len(leafConfigs)) for _, leafConfig := range leafConfigs { if _, exists := leafHandlers[leafConfig.LeafType]; exists { - return fmt.Errorf("duplicate leaf type %s", leafConfig.LeafType) + return fmt.Errorf("duplicate leaf type %v", leafConfig.LeafType) } leafHandlers[leafConfig.LeafType] = leafConfig.Handler } @@ -831,12 +826,6 @@ func (vm *VM) initBlockBuilding() error { return nil } -// setAppRequestHandlers sets the request handlers for the VM to serve state sync -// requests. -func (vm *VM) setAppRequestHandlers(leafConfigs []*extension.LeafRequestConfig, syncStats handlerstats.HandlerStats) error { - return nil -} - // Shutdown implements the snowman.ChainVM interface func (vm *VM) Shutdown(context.Context) error { if vm.ctx == nil { @@ -973,15 +962,12 @@ func (vm *VM) GetAcceptedBlock(ctx context.Context, blkID ids.ID) (snowman.Block // SetPreference sets what the current tail of the chain is func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { - // Since each internal handler used by [vm.State] always returns a block - // with non-nil ethBlock value, GetBlockInternal should never return a - // (*Block) with a nil ethBlock value. 
- block, err := vm.GetBlockInternal(ctx, blkID) + block, err := vm.GetVMBlock(ctx, blkID) if err != nil { return fmt.Errorf("failed to set preference to %s: %w", blkID, err) } - return vm.blockChain.SetPreference(block.(*Block).ethBlock) + return vm.blockChain.SetPreference(block.GetEthBlock()) } // VerifyHeightIndex always returns a nil error since the index is maintained by @@ -1083,17 +1069,11 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er *********************************** Helpers ********************************** */ -// currentRules returns the chain rules for the current block. -func (vm *VM) currentRules() params.Rules { - header := vm.eth.APIBackend.CurrentHeader() - return vm.chainConfig.Rules(header.Number, header.Time) -} - // requirePrimaryNetworkSigners returns true if warp messages from the primary // network must be signed by the primary network validators. // This is necessary when the subnet is not validating the primary network. func (vm *VM) requirePrimaryNetworkSigners() bool { - switch c := vm.currentRules().ActivePrecompiles[warpcontract.ContractAddress].(type) { + switch c := vm.CurrentRules().ActivePrecompiles[warpcontract.ContractAddress].(type) { case *warpcontract.Config: return c.RequirePrimaryNetworkSigners default: // includes nil due to non-presence @@ -1209,20 +1189,47 @@ func (vm *VM) PutLastAcceptedID(ID ids.ID) error { // All these methods assumes that VM is already initialized */ -func (vm *VM) Blockchain() *core.BlockChain { - return vm.blockChain +func (vm *VM) GetVMBlock(ctx context.Context, blkID ids.ID) (extension.VMBlock, error) { + // Since each internal handler used by [vm.State] always returns a block + // with non-nil ethBlock value, GetBlockInternal should never return a + // (*Block) with a nil ethBlock value. 
+ blk, err := vm.GetBlockInternal(ctx, blkID) + if err != nil { + return nil, err + } + + return blk.(*Block), nil } -func (vm *VM) Config() *config.Config { - return &vm.config +func (vm *VM) LastAcceptedVMBlock() extension.VMBlock { + lastAcceptedBlock := vm.LastAcceptedBlockInternal() + if lastAcceptedBlock == nil { + return nil + } + return lastAcceptedBlock.(*Block) } -func (vm *VM) GetBlockExtended(ctx context.Context, blkID ids.ID) (extension.ExtendedBlock, error) { - blk, err := vm.GetBlock(ctx, blkID) +func (vm *VM) NewVMBlock(ethBlock *types.Block) (extension.VMBlock, error) { + blk, err := vm.blockManager.newBlock(ethBlock) if err != nil { return nil, err } - return blk.(*Block), nil + + return blk, nil +} + +// CurrentRules returns the chain rules for the current block. +func (vm *VM) CurrentRules() params.Rules { + header := vm.eth.BlockChain().CurrentHeader() + return vm.chainConfig.Rules(header.Number, header.Time) +} + +func (vm *VM) Ethereum() *eth.Ethereum { + return vm.eth +} + +func (vm *VM) Config() *config.Config { + return &vm.config } func (vm *VM) MetricRegistry() *prometheus.Registry { @@ -1236,3 +1243,11 @@ func (vm *VM) Validators() *p2p.Validators { func (vm *VM) VersionDB() *versiondb.Database { return vm.versiondb } + +func (vm *VM) EthChainDB() ethdb.Database { + return vm.chaindb +} + +func (vm *VM) SyncerClient() vmsync.Client { + return vm.Client +} diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 4816591104..3336543543 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -5,7 +5,6 @@ package evm import ( "context" - "crypto/rand" "encoding/json" "errors" "fmt" @@ -18,40 +17,23 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" "github.com/holiman/uint256" "github.com/ava-labs/coreth/constants" - "github.com/ava-labs/coreth/eth/filters" "github.com/ava-labs/coreth/metrics" - "github.com/ava-labs/coreth/plugin/evm/atomic" - 
"github.com/ava-labs/coreth/plugin/evm/atomic/txpool" "github.com/ava-labs/coreth/plugin/evm/config" "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/upgrade" - "github.com/ava-labs/avalanchego/utils/cb58" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/chain" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" @@ -62,177 +44,39 @@ import ( "github.com/ava-labs/coreth/eth" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" - - accountKeystore "github.com/ava-labs/coreth/accounts/keystore" -) - -var ( - testNetworkID uint32 = 10 - nonExistentID = ids.ID{'F'} - testKeys []*secp256k1.PrivateKey - testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] - testShortIDAddrs []ids.ShortID - - genesisJSON = func(cfg *params.ChainConfig) string { - g := new(core.Genesis) - g.Difficulty = big.NewInt(0) - g.GasLimit = 0x5f5e100 - g.Timestamp = uint64(upgrade.InitiallyActiveTime.Unix()) - - // Use 
chainId: 43111, so that it does not overlap with any Avalanche ChainIDs, which may have their - // config overridden in vm.Initialize. - cpy := *cfg - cpy.ChainID = big.NewInt(43111) - g.Config = &cpy - - allocStr := `{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}}` - json.Unmarshal([]byte(allocStr), &g.Alloc) - // After Durango, an additional account is funded in tests to use - // with warp messages. 
- if cfg.IsDurango(0) { - addr := common.HexToAddress("0x99b9DEA54C48Dfea6aA9A4Ca4623633EE04ddbB5") - balance := new(big.Int).Mul(big.NewInt(params.Ether), big.NewInt(10)) - g.Alloc[addr] = types.GenesisAccount{Balance: balance} - } - - b, err := json.Marshal(g) - if err != nil { - panic(err) - } - return string(b) - } - - activateCancun = func(cfg *params.ChainConfig) *params.ChainConfig { - cpy := *cfg - cpy.ShanghaiTime = utils.NewUint64(0) - cpy.CancunTime = utils.NewUint64(0) - return &cpy - } - - genesisJSONApricotPhase0 = genesisJSON(params.TestLaunchConfig) - genesisJSONApricotPhase1 = genesisJSON(params.TestApricotPhase1Config) - genesisJSONApricotPhase2 = genesisJSON(params.TestApricotPhase2Config) - genesisJSONApricotPhase3 = genesisJSON(params.TestApricotPhase3Config) - genesisJSONApricotPhase4 = genesisJSON(params.TestApricotPhase4Config) - genesisJSONApricotPhase5 = genesisJSON(params.TestApricotPhase5Config) - genesisJSONApricotPhasePre6 = genesisJSON(params.TestApricotPhasePre6Config) - genesisJSONApricotPhase6 = genesisJSON(params.TestApricotPhase6Config) - genesisJSONApricotPhasePost6 = genesisJSON(params.TestApricotPhasePost6Config) - genesisJSONBanff = genesisJSON(params.TestBanffChainConfig) - genesisJSONCortina = genesisJSON(params.TestCortinaChainConfig) - genesisJSONDurango = genesisJSON(params.TestDurangoChainConfig) - genesisJSONEtna = genesisJSON(params.TestEtnaChainConfig) - genesisJSONLatest = genesisJSONEtna - - genesisJSONCancun = genesisJSON(activateCancun(params.TestChainConfig)) - - apricotRulesPhase0 = params.Rules{} - apricotRulesPhase1 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true}} - apricotRulesPhase2 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true}} - apricotRulesPhase3 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true}} - apricotRulesPhase4 = params.Rules{AvalancheRules: 
params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true}} - apricotRulesPhase5 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true}} - apricotRulesPhase6 = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true, IsApricotPhasePre6: true, IsApricotPhase6: true, IsApricotPhasePost6: true}} - banffRules = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true, IsApricotPhasePre6: true, IsApricotPhase6: true, IsApricotPhasePost6: true, IsBanff: true}} - // cortinaRules = params.Rules{AvalancheRules: params.AvalancheRules{IsApricotPhase1: true, IsApricotPhase2: true, IsApricotPhase3: true, IsApricotPhase4: true, IsApricotPhase5: true, IsApricotPhasePre6: true, IsApricotPhase6: true, IsApricotPhasePost6: true, IsBanff: true, IsCortina: true}} ) -func init() { - var b []byte - - for _, key := range []string{ - "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", - "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", - "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", - } { - b, _ = cb58.Decode(key) - pk, _ := secp256k1.ToPrivateKey(b) - testKeys = append(testKeys, pk) - testEthAddrs = append(testEthAddrs, pk.EthAddress()) - testShortIDAddrs = append(testShortIDAddrs, pk.Address()) - } -} - -func newPrefundedGenesis( - balance int, - addresses ...common.Address, -) *core.Genesis { - alloc := types.GenesisAlloc{} - for _, address := range addresses { - alloc[address] = types.GenesisAccount{ - Balance: big.NewInt(int64(balance)), - } - } - - return &core.Genesis{ - Config: params.TestChainConfig, - Difficulty: big.NewInt(0), - Alloc: alloc, - } -} - -// BuildGenesisTest returns the 
genesis bytes for Coreth VM to be used in testing -func BuildGenesisTest(t *testing.T, genesisJSON string) []byte { - ss := StaticService{} - - genesis := &core.Genesis{} - if err := json.Unmarshal([]byte(genesisJSON), genesis); err != nil { - t.Fatalf("Problem unmarshaling genesis JSON: %s", err) - } - genesisReply, err := ss.BuildGenesis(nil, genesis) - if err != nil { - t.Fatalf("Failed to create test genesis") - } - genesisBytes, err := formatting.Decode(genesisReply.Encoding, genesisReply.Bytes) +func defaultExtensions() (*extension.Config, error) { + codecManager, err := message.NewCodec(message.BlockSyncSummary{}) if err != nil { - t.Fatalf("Failed to decode genesis bytes: %s", err) - } - return genesisBytes -} - -// setupGenesis sets up the genesis -// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func setupGenesis( - t *testing.T, - genesisJSON string, -) (*snow.Context, - database.Database, - []byte, - chan commonEng.Message, - *avalancheatomic.Memory, -) { - if len(genesisJSON) == 0 { - genesisJSON = genesisJSONLatest + return nil, err } - genesisBytes := BuildGenesisTest(t, genesisJSON) - ctx := utils.TestSnowContext() - - baseDB := memdb.New() - - // initialize the atomic memory - atomicMemory := avalancheatomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) - ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID) - - // NB: this lock is intentionally left locked when this function returns. - // The caller of this function is responsible for unlocking. 
- ctx.Lock.Lock() - - issuer := make(chan commonEng.Message, 1) - prefixedDB := prefixdb.New([]byte{1}, baseDB) - return ctx, prefixedDB, genesisBytes, issuer, atomicMemory + return &extension.Config{ + NetworkCodec: codecManager, + SyncSummaryProvider: &message.BlockSyncSummaryProvider{}, + SyncableParser: &message.BlockSyncSummaryParser{}, + ConsensusCallbacks: dummy.ConsensusCallbacks{ + OnFinalizeAndAssemble: nil, + OnExtraStateChange: nil, + }, + SyncExtender: nil, + BlockExtension: extension.NewNoOpBlockExtension(), + }, nil } // newDefaultTestVM returns a new instance of the VM with default extensions // This should not be called if the VM is being extended func newDefaultTestVM() *VM { - defaultCodec, err := message.NewCodec(message.BlockSyncSummary{}) + vm := &VM{} + exts, err := defaultExtensions() if err != nil { panic(err) } - return NewExtensibleEVM(false, extension.ExtensionConfig{ - NetworkCodec: defaultCodec, - }) + if err := vm.SetExtensionConfig(exts); err != nil { + panic(err) + } + return vm } // GenesisVM creates a VM instance with the genesis test bytes and returns @@ -250,104 +94,10 @@ func GenesisVM(t *testing.T, database.Database, *avalancheatomic.Memory, *enginetest.Sender, -) { - return GenesisVMWithClock(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON, mockable.Clock{}) -} - -// GenesisVMWithClock creates a VM instance as GenesisVM does, but also allows -// setting the vm's time before [Initialize] is called. 
-func GenesisVMWithClock( - t *testing.T, - finishBootstrapping bool, - genesisJSON string, - configJSON string, - upgradeJSON string, - clock mockable.Clock, -) ( - chan commonEng.Message, - *VM, - database.Database, - *avalancheatomic.Memory, - *enginetest.Sender, ) { vm := newDefaultTestVM() - vm.clock = clock - ctx, dbManager, genesisBytes, issuer, m := setupGenesis(t, genesisJSON) - appSender := &enginetest.Sender{T: t} - appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } - err := vm.Initialize( - context.Background(), - ctx, - dbManager, - genesisBytes, - []byte(upgradeJSON), - []byte(configJSON), - issuer, - []*commonEng.Fx{}, - appSender, - ) - require.NoError(t, err, "error initializing GenesisVM") - - if finishBootstrapping { - require.NoError(t, vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(t, vm.SetState(context.Background(), snow.NormalOp)) - } - - return issuer, vm, dbManager, m, appSender -} - -func addUTXO(sharedMemory *avalancheatomic.Memory, ctx *snow.Context, txID ids.ID, index uint32, assetID ids.ID, amount uint64, addr ids.ShortID) (*avax.UTXO, error) { - utxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: index, - }, - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: amount, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{addr}, - }, - }, - } - utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) - if err != nil { - return nil, err - } - - xChainSharedMemory := sharedMemory.NewSharedMemory(ctx.XChainID) - inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ - Key: inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - addr.Bytes(), - }, - }}}}); err != nil { - return nil, err - } - - return utxo, nil -} - -// GenesisVMWithUTXOs creates a 
GenesisVM and generates UTXOs in the X-Chain Shared Memory containing AVAX based on the [utxos] map -// Generates UTXOIDs by using a hash of the address in the [utxos] map such that the UTXOs will be generated deterministically. -// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func GenesisVMWithUTXOs(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string, utxos map[ids.ShortID]uint64) (chan commonEng.Message, *VM, database.Database, *avalancheatomic.Memory, *enginetest.Sender) { - issuer, vm, db, sharedMemory, sender := GenesisVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON) - for addr, avaxAmount := range utxos { - txID, err := ids.ToID(hashing.ComputeHash256(addr.Bytes())) - if err != nil { - t.Fatalf("Failed to generate txID from addr: %s", err) - } - if _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, avaxAmount, addr); err != nil { - t.Fatalf("Failed to add UTXO to shared memory: %s", err) - } - } - - return issuer, vm, db, sharedMemory, sender + ch, dbManager, m, sender, _ := testutils.SetupVM(t, finishBootstrapping, genesisJSON, configJSON, upgradeJSON, vm) + return ch, vm, dbManager, m, sender } func TestVMConfig(t *testing.T) { @@ -370,7 +120,7 @@ func TestVMConfigDefaults(t *testing.T) { vmConfig.SetDefaults(defaultTxPoolConfig) vmConfig.RPCTxFeeCap = txFeeCap vmConfig.EnabledEthAPIs = enabledEthAPIs - require.Equal(t, vmConfig, vm.config, "VM Config should match default with overrides") + require.Equal(t, vmConfig, vm.Config, "VM Config should match default with overrides") require.NoError(t, vm.Shutdown(context.Background())) } @@ -380,7 +130,7 @@ func TestVMNilConfig(t *testing.T) { // VM Config should match defaults if no config is passed in var vmConfig config.Config vmConfig.SetDefaults(defaultTxPoolConfig) - require.Equal(t, vmConfig, vm.config, "VM Config should match default config") + require.Equal(t, vmConfig, vm.Config, "VM Config should match 
default config") require.NoError(t, vm.Shutdown(context.Background())) } @@ -411,47 +161,47 @@ func TestVMUpgrades(t *testing.T) { }{ { name: "Apricot Phase 3", - genesis: genesisJSONApricotPhase3, + genesis: testutils.GenesisJSONApricotPhase3, expectedGasPrice: big.NewInt(0), }, { name: "Apricot Phase 4", - genesis: genesisJSONApricotPhase4, + genesis: testutils.GenesisJSONApricotPhase4, expectedGasPrice: big.NewInt(0), }, { name: "Apricot Phase 5", - genesis: genesisJSONApricotPhase5, + genesis: testutils.GenesisJSONApricotPhase5, expectedGasPrice: big.NewInt(0), }, { name: "Apricot Phase Pre 6", - genesis: genesisJSONApricotPhasePre6, + genesis: testutils.GenesisJSONApricotPhasePre6, expectedGasPrice: big.NewInt(0), }, { name: "Apricot Phase 6", - genesis: genesisJSONApricotPhase6, + genesis: testutils.GenesisJSONApricotPhase6, expectedGasPrice: big.NewInt(0), }, { name: "Apricot Phase Post 6", - genesis: genesisJSONApricotPhasePost6, + genesis: testutils.GenesisJSONApricotPhasePost6, expectedGasPrice: big.NewInt(0), }, { name: "Banff", - genesis: genesisJSONBanff, + genesis: testutils.GenesisJSONBanff, expectedGasPrice: big.NewInt(0), }, { name: "Cortina", - genesis: genesisJSONCortina, + genesis: testutils.GenesisJSONCortina, expectedGasPrice: big.NewInt(0), }, { name: "Durango", - genesis: genesisJSONDurango, + genesis: testutils.GenesisJSONDurango, expectedGasPrice: big.NewInt(0), }, } @@ -509,157 +259,8 @@ func TestVMUpgrades(t *testing.T) { } } -func TestImportMissingUTXOs(t *testing.T) { - // make a VM with a shared memory that has an importable UTXO to build a block - importAmount := uint64(50000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase2, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(t, err) - }() - - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, 
[]*secp256k1.PrivateKey{testKeys[0]}) - require.NoError(t, err) - err = vm.mempool.AddLocalTx(importTx) - require.NoError(t, err) - <-issuer - blk, err := vm.BuildBlock(context.Background()) - require.NoError(t, err) - - // make another VM which is missing the UTXO in shared memory - _, vm2, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase2, "", "") - defer func() { - err := vm2.Shutdown(context.Background()) - require.NoError(t, err) - }() - - vm2Blk, err := vm2.ParseBlock(context.Background(), blk.Bytes()) - require.NoError(t, err) - err = vm2Blk.Verify(context.Background()) - require.ErrorIs(t, err, errMissingUTXOs) - - // This should not result in a bad block since the missing UTXO should - // prevent InsertBlockManual from being called. - badBlocks, _ := vm2.blockChain.BadBlocks() - require.Len(t, badBlocks, 0) -} - -// Simple test to ensure we can issue an import transaction followed by an export transaction -// and they will be indexed correctly when accepted. -func TestIssueAtomicTxs(t *testing.T) { - importAmount := uint64(50000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase2, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer - - blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { - t.Fatal(err) - } - - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { 
- t.Fatal(err) - } else if lastAcceptedID != blk.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) - } - vm.blockChain.DrainAcceptorQueue() - filterAPI := filters.NewFilterAPI(filters.NewFilterSystem(vm.eth.APIBackend, filters.Config{ - Timeout: 5 * time.Minute, - })) - blockHash := common.Hash(blk.ID()) - logs, err := filterAPI.GetLogs(context.Background(), filters.FilterCriteria{ - BlockHash: &blockHash, - }) - if err != nil { - t.Fatal(err) - } - if len(logs) != 0 { - t.Fatalf("Expected log length to be 0, but found %d", len(logs)) - } - if logs == nil { - t.Fatal("Expected logs to be non-nil") - } - - exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(exportTx); err != nil { - t.Fatal(err) - } - - <-issuer - - blk2, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - if err := blk2.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := blk2.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { - t.Fatal(err) - } else if lastAcceptedID != blk2.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) - } - - // Check that both atomic transactions were indexed as expected. 
- indexedImportTx, status, height, err := vm.getAtomicTx(importTx.ID()) - assert.NoError(t, err) - assert.Equal(t, atomic.Accepted, status) - assert.Equal(t, uint64(1), height, "expected height of indexed import tx to be 1") - assert.Equal(t, indexedImportTx.ID(), importTx.ID(), "expected ID of indexed import tx to match original txID") - - indexedExportTx, status, height, err := vm.getAtomicTx(exportTx.ID()) - assert.NoError(t, err) - assert.Equal(t, atomic.Accepted, status) - assert.Equal(t, uint64(2), height, "expected height of indexed export tx to be 2") - assert.Equal(t, indexedExportTx.ID(), exportTx.ID(), "expected ID of indexed import tx to match original txID") -} - func TestBuildEthTxBlock(t *testing.T) { - importAmount := uint64(20000000) - issuer, vm, dbManager, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase2, `{"pruning-enabled":true}`, "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) + issuer, vm, dbManager, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase2, `{"pruning-enabled":true}`, "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { @@ -670,13 +271,16 @@ func TestBuildEthTxBlock(t *testing.T) { newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) + errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } } <-issuer @@ -705,14 +309,14 @@ func 
TestBuildEthTxBlock(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { - tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), testKeys[0].ToECDSA()) + tx := types.NewTransaction(uint64(i), testutils.TestEthAddrs[0], big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } txs[i] = signedTx } - errs := vm.txPool.AddRemotesSync(txs) + errs = vm.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) @@ -774,7 +378,7 @@ func TestBuildEthTxBlock(t *testing.T) { context.Background(), utils.TestSnowContext(), dbManager, - []byte(genesisJSONApricotPhase2), + []byte(testutils.GenesisJSONApricotPhase2), []byte(""), []byte(`{"pruning-enabled":true}`), issuer, @@ -796,397 +400,80 @@ func TestBuildEthTxBlock(t *testing.T) { } } -func testConflictingImportTxs(t *testing.T, genesis string) { - importAmount := uint64(10000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesis, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - testShortIDAddrs[1]: importAmount, - testShortIDAddrs[2]: importAmount, - }) +// Regression test to ensure that after accepting block A +// then calling SetPreference on block B (when it becomes preferred) +// and the head of a longer chain (block D) does not corrupt the +// canonical chain. 
+// +// A +// / \ +// B C +// | +// D +func TestSetPreferenceRace(t *testing.T) { + // Create two VMs which will agree on block A and then + // build the two distinct preferred chains above + issuer1, vm1, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, `{"pruning-enabled":true}`, "") + issuer2, vm2, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, `{"pruning-enabled":true}`, "") defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm1.Shutdown(context.Background()); err != nil { t.Fatal(err) } - }() - importTxs := make([]*atomic.Tx, 0, 3) - conflictTxs := make([]*atomic.Tx, 0, 3) - for i, key := range testKeys { - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[i], initialBaseFee, []*secp256k1.PrivateKey{key}) - if err != nil { + if err := vm2.Shutdown(context.Background()); err != nil { t.Fatal(err) } - importTxs = append(importTxs, importTx) + }() - conflictAddr := testEthAddrs[(i+1)%len(testEthAddrs)] - conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictAddr, initialBaseFee, []*secp256k1.PrivateKey{key}) - if err != nil { - t.Fatal(err) - } - conflictTxs = append(conflictTxs, conflictTx) - } + newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) + vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) + newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) + vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - expectedParentBlkID, err := vm.LastAccepted(context.Background()) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - for _, tx := range importTxs[:2] { - if err := vm.mempool.AddLocalTx(tx); err != nil { - t.Fatal(err) - } - - <-issuer - - vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) - blk, err := 
vm.BuildBlock(context.Background()) + errs := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { if err != nil { - t.Fatal(err) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if parentID := blk.Parent(); parentID != expectedParentBlkID { - t.Fatalf("Expected parent to have blockID %s, but found %s", expectedParentBlkID, parentID) - } - - expectedParentBlkID = blk.ID() - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { - t.Fatal(err) - } - } - - // Check that for each conflict tx (whose conflict is in the chain ancestry) - // the VM returns an error when it attempts to issue the conflict into the mempool - // and when it attempts to build a block with the conflict force added to the mempool. - for i, tx := range conflictTxs[:2] { - if err := vm.mempool.AddLocalTx(tx); err == nil { - t.Fatal("Expected issueTx to fail due to conflicting transaction") - } - // Force issue transaction directly to the mempool - if err := vm.mempool.ForceAddTx(tx); err != nil { - t.Fatal(err) - } - <-issuer - - vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) - _, err = vm.BuildBlock(context.Background()) - // The new block is verified in BuildBlock, so - // BuildBlock should fail due to an attempt to - // double spend an atomic UTXO. - if err == nil { - t.Fatalf("Block verification should have failed in BuildBlock %d due to double spending atomic UTXO", i) + t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - // Generate one more valid block so that we can copy the header to create an invalid block - // with modified extra data. This new block will be invalid for more than one reason (invalid merkle root) - // so we check to make sure that the expected error is returned from block verification. 
- if err := vm.mempool.AddLocalTx(importTxs[2]); err != nil { - t.Fatal(err) - } - <-issuer - vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) + <-issuer1 - validBlock, err := vm.BuildBlock(context.Background()) + vm1BlkA, err := vm1.BuildBlock(context.Background()) if err != nil { - t.Fatal(err) + t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := validBlock.Verify(context.Background()); err != nil { - t.Fatal(err) + if err := vm1BlkA.Verify(context.Background()); err != nil { + t.Fatalf("Block failed verification on VM1: %s", err) } - validEthBlock := validBlock.(*chain.BlockWrapper).Block.(*Block).ethBlock - - rules := vm.currentRules() - var extraData []byte - switch { - case rules.IsApricotPhase5: - extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{conflictTxs[1]}) - default: - extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, conflictTxs[1]) - } - if err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { t.Fatal(err) } - conflictingAtomicTxBlock := types.NewBlockWithExtData( - types.CopyHeader(validEthBlock.Header()), - nil, - nil, - nil, - new(trie.Trie), - extraData, - true, - ) - - blockBytes, err := rlp.EncodeToBytes(conflictingAtomicTxBlock) + vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) if err != nil { - t.Fatal(err) + t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - - parsedBlock, err := vm.ParseBlock(context.Background(), blockBytes) - if err != nil { + if err := vm2BlkA.Verify(context.Background()); err != nil { + t.Fatalf("Block failed verification on VM2: %s", err) + } + if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := parsedBlock.Verify(context.Background()); !errors.Is(err, atomic.ErrConflictingAtomicInputs) { - t.Fatalf("Expected to fail with err: %s, but found err: %s", atomic.ErrConflictingAtomicInputs, err) + if err := 
vm1BlkA.Accept(context.Background()); err != nil { + t.Fatalf("VM1 failed to accept block: %s", err) } - - if !rules.IsApricotPhase5 { - return - } - - extraData, err = atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{importTxs[2], conflictTxs[2]}) - if err != nil { - t.Fatal(err) - } - - header := types.CopyHeader(validEthBlock.Header()) - header.ExtDataGasUsed.Mul(common.Big2, header.ExtDataGasUsed) - - internalConflictBlock := types.NewBlockWithExtData( - header, - nil, - nil, - nil, - new(trie.Trie), - extraData, - true, - ) - - blockBytes, err = rlp.EncodeToBytes(internalConflictBlock) - if err != nil { - t.Fatal(err) - } - - parsedBlock, err = vm.ParseBlock(context.Background(), blockBytes) - if err != nil { - t.Fatal(err) - } - - if err := parsedBlock.Verify(context.Background()); !errors.Is(err, atomic.ErrConflictingAtomicInputs) { - t.Fatalf("Expected to fail with err: %s, but found err: %s", atomic.ErrConflictingAtomicInputs, err) - } -} - -func TestReissueAtomicTxHigherGasPrice(t *testing.T) { - kc := secp256k1fx.NewKeychain(testKeys...) 
- - for name, issueTxs := range map[string]func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, discarded []*atomic.Tx){ - "single UTXO override": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { - utxo, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) - if err != nil { - t.Fatal(err) - } - tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) - if err != nil { - t.Fatal(err) - } - tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(tx1); err != nil { - t.Fatal(err) - } - if err := vm.mempool.AddLocalTx(tx2); err != nil { - t.Fatal(err) - } - - return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} - }, - "one of two UTXOs overrides": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { - utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) - if err != nil { - t.Fatal(err) - } - utxo2, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) - if err != nil { - t.Fatal(err) - } - tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1, utxo2}) - if err != nil { - t.Fatal(err) - } - tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), kc, []*avax.UTXO{utxo1}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(tx1); err != nil { - 
t.Fatal(err) - } - if err := vm.mempool.AddLocalTx(tx2); err != nil { - t.Fatal(err) - } - - return []*atomic.Tx{tx2}, []*atomic.Tx{tx1} - }, - "hola": func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) (issued []*atomic.Tx, evicted []*atomic.Tx) { - utxo1, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) - if err != nil { - t.Fatal(err) - } - utxo2, err := addUTXO(sharedMemory, vm.ctx, ids.GenerateTestID(), 0, vm.ctx.AVAXAssetID, units.Avax, testShortIDAddrs[0]) - if err != nil { - t.Fatal(err) - } - - importTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo1}) - if err != nil { - t.Fatal(err) - } - - importTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(3), initialBaseFee), kc, []*avax.UTXO{utxo2}) - if err != nil { - t.Fatal(err) - } - - reissuanceTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(2), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) - if err != nil { - t.Fatal(err) - } - if err := vm.mempool.AddLocalTx(importTx1); err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx2); err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(reissuanceTx1); !errors.Is(err, txpool.ErrConflictingAtomicTx) { - t.Fatalf("Expected to fail with err: %s, but found err: %s", txpool.ErrConflictingAtomicTx, err) - } - - assert.True(t, vm.mempool.Has(importTx1.ID())) - assert.True(t, vm.mempool.Has(importTx2.ID())) - assert.False(t, vm.mempool.Has(reissuanceTx1.ID())) - - reissuanceTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(big.NewInt(4), initialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) - if err != nil { - 
t.Fatal(err) - } - if err := vm.mempool.AddLocalTx(reissuanceTx2); err != nil { - t.Fatal(err) - } - - return []*atomic.Tx{reissuanceTx2}, []*atomic.Tx{importTx1, importTx2} - }, - } { - t.Run(name, func(t *testing.T) { - _, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") - issuedTxs, evictedTxs := issueTxs(t, vm, sharedMemory) - - for i, tx := range issuedTxs { - _, issued := vm.mempool.GetPendingTx(tx.ID()) - assert.True(t, issued, "expected issued tx at index %d to be issued", i) - } - - for i, tx := range evictedTxs { - _, discarded, _ := vm.mempool.GetTx(tx.ID()) - assert.True(t, discarded, "expected discarded tx at index %d to be discarded", i) - } - }) - } -} - -func TestConflictingImportTxsAcrossBlocks(t *testing.T) { - for name, genesis := range map[string]string{ - "apricotPhase1": genesisJSONApricotPhase1, - "apricotPhase2": genesisJSONApricotPhase2, - "apricotPhase3": genesisJSONApricotPhase3, - "apricotPhase4": genesisJSONApricotPhase4, - "apricotPhase5": genesisJSONApricotPhase5, - } { - genesis := genesis - t.Run(name, func(t *testing.T) { - testConflictingImportTxs(t, genesis) - }) - } -} - -// Regression test to ensure that after accepting block A -// then calling SetPreference on block B (when it becomes preferred) -// and the head of a longer chain (block D) does not corrupt the -// canonical chain. 
-// -// A -// / \ -// B C -// | -// D -func TestSetPreferenceRace(t *testing.T) { - // Create two VMs which will agree on block A and then - // build the two distinct preferred chains above - importAmount := uint64(1000000000) - issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, `{"pruning-enabled":true}`, "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, `{"pruning-enabled":true}`, "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - - defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm2.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) - vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) - newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) - vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm1.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer1 - - vm1BlkA, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } - - if err := vm1BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } - - vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != 
nil { - t.Fatal(err) - } - - if err := vm1BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) + if err := vm2BlkA.Accept(context.Background()); err != nil { + t.Fatalf("VM2 failed to accept block: %s", err) } newHead := <-newTxPoolHeadChan1 @@ -1202,16 +489,14 @@ func TestSetPreferenceRace(t *testing.T) { // and to be split into two separate blocks on VM2 txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { - tx := types.NewTransaction(uint64(i), testEthAddrs[1], big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainID), testKeys[1].ToECDSA()) + tx := types.NewTransaction(uint64(i), testutils.TestEthAddrs[1], big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainID), testutils.TestKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } txs[i] = signedTx } - var errs []error - // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { @@ -1343,311 +628,93 @@ func TestSetPreferenceRace(t *testing.T) { } } -func TestConflictingTransitiveAncestryWithGap(t *testing.T) { - key, err := accountKeystore.NewKey(rand.Reader) - if err != nil { - t.Fatal(err) - } - - key0 := testKeys[0] - addr0 := key0.Address() - - key1 := testKeys[1] - addr1 := key1.Address() - - importAmount := uint64(1000000000) - - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", - map[ids.ShortID]uint64{ - addr0: importAmount, - addr1: importAmount, - }) +// Regression test to ensure that a VM that accepts block A and B +// will not attempt to orphan either when verifying blocks C and D +// from another VM (which have a common ancestor under the finalized +// 
frontier). +// +// A +// / \ +// B C +// +// verifies block B and C, then Accepts block B. Then we test to ensure +// that the VM defends against any attempt to set the preference or to +// accept block C, which should be an orphaned block at this point and +// get rejected. +func TestReorgProtection(t *testing.T) { + issuer1, vm1, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, `{"pruning-enabled":false}`, "") + issuer2, vm2, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, `{"pruning-enabled":false}`, "") defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := vm1.Shutdown(context.Background()); err != nil { + t.Fatal(err) + } + + if err := vm2.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() - newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) + vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) + newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) + vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - importTx0A, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key0}) - if err != nil { - t.Fatal(err) - } - // Create a conflicting transaction - importTx0B, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[2], initialBaseFee, []*secp256k1.PrivateKey{key0}) + key := testutils.TestKeys[0].ToECDSA() + address := testutils.TestEthAddrs[0] + + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - - if err := vm.mempool.AddLocalTx(importTx0A); err != nil { - t.Fatalf("Failed to issue importTx0A: %s", err) + errs := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range 
errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } } - <-issuer + <-issuer1 - blk0, err := vm.BuildBlock(context.Background()) + vm1BlkA, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := blk0.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification: %s", err) + if err := vm1BlkA.Verify(context.Background()); err != nil { + t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm.SetPreference(context.Background(), blk0.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { t.Fatal(err) } - newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk0.ID()) { - t.Fatalf("Expected new block to match") - } - - tx := types.NewTransaction(0, key.Address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), key.PrivateKey) + vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) if err != nil { + t.Fatalf("Unexpected error parsing block from vm2: %s", err) + } + if err := vm2BlkA.Verify(context.Background()); err != nil { + t.Fatalf("Block failed verification on VM2: %s", err) + } + if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { t.Fatal(err) } - // Add the remote transactions, build the block, and set VM1's preference for block A - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) - for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) - } + if err := vm1BlkA.Accept(context.Background()); err != nil { + t.Fatalf("VM1 failed to accept block: %s", err) } - - <-issuer - - blk1, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build blk1: %s", err) + if err := vm2BlkA.Accept(context.Background()); err != nil { + 
t.Fatalf("VM2 failed to accept block: %s", err) } - if err := blk1.Verify(context.Background()); err != nil { - t.Fatalf("blk1 failed verification due to %s", err) - } - - if err := vm.SetPreference(context.Background(), blk1.ID()); err != nil { - t.Fatal(err) - } - - importTx1, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key1}) - if err != nil { - t.Fatalf("Failed to issue importTx1 due to: %s", err) - } - - if err := vm.mempool.AddLocalTx(importTx1); err != nil { - t.Fatal(err) - } - - <-issuer - - blk2, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } - - if err := blk2.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification: %s", err) - } - - if err := vm.SetPreference(context.Background(), blk2.ID()); err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx0B); err == nil { - t.Fatalf("Should not have been able to issue import tx with conflict") - } - // Force issue transaction directly into the mempool - if err := vm.mempool.ForceAddTx(importTx0B); err != nil { - t.Fatal(err) - } - <-issuer - - _, err = vm.BuildBlock(context.Background()) - if err == nil { - t.Fatal("Shouldn't have been able to build an invalid block") - } -} - -func TestBonusBlocksTxs(t *testing.T) { - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - importAmount := uint64(10000000) - utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} - - utxo := &avax.UTXO{ - UTXOID: utxoID, - Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: importAmount, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, - }, - }, - } - utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) - if err != nil 
{ - t.Fatal(err) - } - - xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) - inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ - Key: inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - testKeys[0].Address().Bytes(), - }, - }}}}); err != nil { - t.Fatal(err) - } - - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer - - blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - // Make [blk] a bonus block. - vm.atomicBackend.AddBonusBlock(blk.Height(), blk.ID()) - - // Remove the UTXOs from shared memory, so that non-bonus blocks will fail verification - if err := vm.ctx.SharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.XChainID: {RemoveRequests: [][]byte{inputID[:]}}}); err != nil { - t.Fatal(err) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { - t.Fatal(err) - } - - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - lastAcceptedID, err := vm.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != blk.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) - } -} - -// Regression test to ensure that a VM that accepts block A and B -// will not attempt to orphan either when verifying blocks C and D -// from another VM (which have a common ancestor under the finalized -// frontier). -// -// A -// / \ -// B C -// -// verifies block B and C, then Accepts block B. 
Then we test to ensure -// that the VM defends against any attempt to set the preference or to -// accept block C, which should be an orphaned block at this point and -// get rejected. -func TestReorgProtection(t *testing.T) { - importAmount := uint64(1000000000) - issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, `{"pruning-enabled":false}`, "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, `{"pruning-enabled":false}`, "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - - defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm2.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) - vm1.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan1) - newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) - vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - - key := testKeys[0].ToECDSA() - address := testEthAddrs[0] - - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm1.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer1 - - vm1BlkA, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } - - if err := vm1BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } - - vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on 
VM2: %s", err) - } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } - - if err := vm1BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } - - newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") - } - newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") + newHead := <-newTxPoolHeadChan1 + if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { + t.Fatalf("Expected new block to match") + } + newHead = <-newTxPoolHeadChan2 + if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { + t.Fatalf("Expected new block to match") } // Create list of 10 successive transactions to build block A on vm1 @@ -1662,8 +729,6 @@ func TestReorgProtection(t *testing.T) { txs[i] = signedTx } - var errs []error - // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { @@ -1741,13 +806,8 @@ func TestReorgProtection(t *testing.T) { // / \ // B C func TestNonCanonicalAccept(t *testing.T) { - importAmount := uint64(1000000000) - issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) + issuer1, vm1, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") + issuer2, vm2, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -1764,16 +824,19 @@ func TestNonCanonicalAccept(t 
*testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - key := testKeys[0].ToECDSA() - address := testEthAddrs[0] + key := testutils.TestKeys[0].ToECDSA() + address := testutils.TestEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - - if err := vm1.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) + errs := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } } <-issuer1 @@ -1847,8 +910,6 @@ func TestNonCanonicalAccept(t *testing.T) { txs[i] = signedTx } - var errs []error - // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { @@ -1936,13 +997,8 @@ func TestNonCanonicalAccept(t *testing.T) { // | // D func TestStickyPreference(t *testing.T) { - importAmount := uint64(1000000000) - issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) + issuer1, vm1, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") + issuer2, vm2, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -1959,16 +1015,19 @@ func TestStickyPreference(t *testing.T) { newTxPoolHeadChan2 := make(chan 
core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - key := testKeys[0].ToECDSA() - address := testEthAddrs[0] + key := testutils.TestKeys[0].ToECDSA() + address := testutils.TestEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - - if err := vm1.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) + errs := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } } <-issuer1 @@ -2025,8 +1084,6 @@ func TestStickyPreference(t *testing.T) { txs[i] = signedTx } - var errs []error - // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { @@ -2195,13 +1252,8 @@ func TestStickyPreference(t *testing.T) { // | // D func TestUncleBlock(t *testing.T) { - importAmount := uint64(1000000000) - issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) + issuer1, vm1, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") + issuer2, vm2, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -2217,16 +1269,19 @@ func TestUncleBlock(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) 
vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - key := testKeys[0].ToECDSA() - address := testEthAddrs[0] + key := testutils.TestKeys[0].ToECDSA() + address := testutils.TestEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - - if err := vm1.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) + errs := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } } <-issuer1 @@ -2281,8 +1336,6 @@ func TestUncleBlock(t *testing.T) { txs[i] = signedTx } - var errs []error - errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { @@ -2359,7 +1412,7 @@ func TestUncleBlock(t *testing.T) { blkDEthBlock.ExtData(), false, ) - uncleBlock, err := vm2.newBlock(uncleEthBlock) + uncleBlock, err := vm2.blockManager.newBlock(uncleEthBlock) if err != nil { t.Fatal(err) } @@ -2374,66 +1427,6 @@ func TestUncleBlock(t *testing.T) { } } -// Regression test to ensure that a VM that is not able to parse a block that -// contains no transactions. 
-func TestEmptyBlock(t *testing.T) { - importAmount := uint64(1000000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer - - blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } - - // Create empty block from blkA - ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - - emptyEthBlock := types.NewBlockWithExtData( - types.CopyHeader(ethBlock.Header()), - nil, - nil, - nil, - new(trie.Trie), - nil, - false, - ) - - if len(emptyEthBlock.ExtData()) != 0 || emptyEthBlock.Header().ExtDataHash != (common.Hash{}) { - t.Fatalf("emptyEthBlock should not have any extra data") - } - - emptyBlock, err := vm.newBlock(emptyEthBlock) - if err != nil { - t.Fatal(err) - } - - if _, err := vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { - t.Fatalf("VM should have failed with errEmptyBlock but got %s", err.Error()) - } - if err := emptyBlock.Verify(context.Background()); !errors.Is(err, errEmptyBlock) { - t.Fatalf("block should have failed verification with errEmptyBlock but got %s", err.Error()) - } -} - // Regression test to ensure that a VM that verifies block B, C, then // D (preferring block B) reorgs when C and then D are accepted. 
// @@ -2443,13 +1436,8 @@ func TestEmptyBlock(t *testing.T) { // | // D func TestAcceptReorg(t *testing.T) { - importAmount := uint64(1000000000) - issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - issuer2, vm2, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) + issuer1, vm1, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") + issuer2, vm2, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -2466,16 +1454,19 @@ func TestAcceptReorg(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - key := testKeys[0].ToECDSA() - address := testEthAddrs[0] + key := testutils.TestKeys[0].ToECDSA() + address := testutils.TestEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - - if err := vm1.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) + errs := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } } <-issuer1 @@ -2534,7 +1525,7 @@ func TestAcceptReorg(t *testing.T) { // Add the remote transactions, build the block, and set VM1's preference // for block B - errs := vm1.txPool.AddRemotesSync(txs) + errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add transaction to VM1 at index %d: 
%s", i, err) @@ -2642,10 +1633,7 @@ func TestAcceptReorg(t *testing.T) { } func TestFutureBlock(t *testing.T) { - importAmount := uint64(1000000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) + issuer, vm, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { @@ -2653,15 +1641,17 @@ func TestFutureBlock(t *testing.T) { } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) + errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } } - <-issuer blkA, err := vm.BuildBlock(context.Background()) @@ -2687,655 +1677,22 @@ func TestFutureBlock(t *testing.T) { false, ) - futureBlock, err := vm.newBlock(modifiedBlock) + futureBlock, err := vm.blockManager.newBlock(modifiedBlock) if err != nil { t.Fatal(err) } if err := futureBlock.Verify(context.Background()); err == nil { - t.Fatal("Future block should have failed verification due to block timestamp too far in the future") - } else if !strings.Contains(err.Error(), "block timestamp is too far in the future") { - t.Fatalf("Expected error to be block timestamp too far in the future but found %s", err) - } -} - -// Regression test to ensure we can build blocks if we are starting with the -// Apricot Phase 1 ruleset in genesis. 
-func TestBuildApricotPhase1Block(t *testing.T) { - importAmount := uint64(1000000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase1, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - - key := testKeys[0].ToECDSA() - address := testEthAddrs[0] - - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer - - blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { - t.Fatal(err) - } - - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") - } - - txs := make([]*types.Transaction, 10) - for i := 0; i < 5; i++ { - tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), key) - if err != nil { - t.Fatal(err) - } - txs[i] = signedTx - } - for i := 5; i < 10; i++ { - tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.ApricotPhase1MinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), key) - if err != nil { - t.Fatal(err) - } - txs[i] = signedTx - } - errs := vm.txPool.AddRemotesSync(txs) - for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } 
- } - - <-issuer - - blk, err = vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - lastAcceptedID, err := vm.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != blk.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) - } - - // Confirm all txs are present - ethBlkTxs := vm.blockChain.GetBlockByNumber(2).Transactions() - for i, tx := range txs { - if len(ethBlkTxs) <= i { - t.Fatalf("missing transactions expected: %d but found: %d", len(txs), len(ethBlkTxs)) - } - if ethBlkTxs[i].Hash() != tx.Hash() { - t.Fatalf("expected tx at index %d to have hash: %x but has: %x", i, txs[i].Hash(), tx.Hash()) - } - } -} - -func TestLastAcceptedBlockNumberAllow(t *testing.T) { - importAmount := uint64(1000000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer - - blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM: %s", err) - } - - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { - t.Fatal(err) - } - - blkHeight := blk.Height() - blkHash := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - - 
vm.eth.APIBackend.SetAllowUnfinalizedQueries(true) - - ctx := context.Background() - b, err := vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) - if err != nil { - t.Fatal(err) - } - if b.Hash() != blkHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) - } - - vm.eth.APIBackend.SetAllowUnfinalizedQueries(false) - - _, err = vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) - if !errors.Is(err, eth.ErrUnfinalizedData) { - t.Fatalf("expected ErrUnfinalizedData but got %s", err.Error()) - } - - if err := blk.Accept(context.Background()); err != nil { - t.Fatalf("VM failed to accept block: %s", err) - } - - if b := vm.blockChain.GetBlockByNumber(blkHeight); b.Hash() != blkHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) - } -} - -// Builds [blkA] with a virtuous import transaction and [blkB] with a separate import transaction -// that does not conflict. Accepts [blkB] and rejects [blkA], then asserts that the virtuous atomic -// transaction in [blkA] is correctly re-issued into the atomic transaction mempool. 
-func TestReissueAtomicTx(t *testing.T) { - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase1, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: 10000000, - testShortIDAddrs[1]: 10000000, - }) - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - genesisBlkID, err := vm.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer - - blkA, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - if err := blkA.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm.SetPreference(context.Background(), blkA.ID()); err != nil { - t.Fatal(err) - } - - // SetPreference to parent before rejecting (will rollback state to genesis - // so that atomic transaction can be reissued, otherwise current block will - // conflict with UTXO to be reissued) - if err := vm.SetPreference(context.Background(), genesisBlkID); err != nil { - t.Fatal(err) - } - - // Rejecting [blkA] should cause [importTx] to be re-issued into the mempool. - if err := blkA.Reject(context.Background()); err != nil { - t.Fatal(err) - } - - // Sleep for a minimum of two seconds to ensure that [blkB] will have a different timestamp - // than [blkA] so that the block will be unique. This is necessary since we have marked [blkA] - // as Rejected. 
- time.Sleep(2 * time.Second) - <-issuer - blkB, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - if blkB.Height() != blkA.Height() { - t.Fatalf("Expected blkB (%d) to have the same height as blkA (%d)", blkB.Height(), blkA.Height()) - } - - if err := blkB.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm.SetPreference(context.Background(), blkB.ID()); err != nil { - t.Fatal(err) - } - - if err := blkB.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { - t.Fatal(err) - } else if lastAcceptedID != blkB.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blkB.ID(), lastAcceptedID) - } - - // Check that [importTx] has been indexed correctly after [blkB] is accepted. - _, height, err := vm.atomicTxRepository.GetByTxID(importTx.ID()) - if err != nil { - t.Fatal(err) - } else if height != blkB.Height() { - t.Fatalf("Expected indexed height of import tx to be %d, but found %d", blkB.Height(), height) - } -} - -func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase1, "", "") - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - exportTxs := createExportTxOptions(t, vm, issuer, sharedMemory) - exportTx1, exportTx2 := exportTxs[0], exportTxs[1] - - if err := vm.mempool.AddLocalTx(exportTx1); err != nil { - t.Fatal(err) - } - <-issuer - exportBlk1, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - if err := exportBlk1.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm.SetPreference(context.Background(), exportBlk1.ID()); err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(exportTx2); err == nil { - t.Fatal("Should have failed to issue due to an invalid export tx") 
- } - - if err := vm.mempool.AddRemoteTx(exportTx2); err == nil { - t.Fatal("Should have failed to add because conflicting") - } - - // Manually add transaction to mempool to bypass validation - if err := vm.mempool.ForceAddTx(exportTx2); err != nil { - t.Fatal(err) - } - <-issuer - - _, err = vm.BuildBlock(context.Background()) - if err == nil { - t.Fatal("BuildBlock should have returned an error due to invalid export transaction") - } -} - -func TestBuildInvalidBlockHead(t *testing.T) { - issuer, vm, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - key0 := testKeys[0] - addr0 := key0.Address() - - // Create the transaction - utx := &atomic.UnsignedImportTx{ - NetworkID: vm.ctx.NetworkID, - BlockchainID: vm.ctx.ChainID, - Outs: []atomic.EVMOutput{{ - Address: common.Address(addr0), - Amount: 1 * units.Avax, - AssetID: vm.ctx.AVAXAssetID, - }}, - ImportedInputs: []*avax.TransferableInput{ - { - Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, - In: &secp256k1fx.TransferInput{ - Amt: 1 * units.Avax, - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - }, - }, - }, - SourceChain: vm.ctx.XChainID, - } - tx := &atomic.Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key0}}); err != nil { - t.Fatal(err) - } - - currentBlock := vm.blockChain.CurrentBlock() - - // Verify that the transaction fails verification when attempting to issue - // it into the atomic mempool. 
- if err := vm.mempool.AddLocalTx(tx); err == nil { - t.Fatal("Should have failed to issue invalid transaction") - } - // Force issue the transaction directly to the mempool - if err := vm.mempool.ForceAddTx(tx); err != nil { - t.Fatal(err) - } - - <-issuer - - if _, err := vm.BuildBlock(context.Background()); err == nil { - t.Fatalf("Unexpectedly created a block") - } - - newCurrentBlock := vm.blockChain.CurrentBlock() - - if currentBlock.Hash() != newCurrentBlock.Hash() { - t.Fatal("current block changed") - } -} - -func TestConfigureLogLevel(t *testing.T) { - configTests := []struct { - name string - logConfig string - genesisJSON, upgradeJSON string - expectedErr string - }{ - { - name: "Log level info", - logConfig: `{"log-level": "info"}`, - genesisJSON: genesisJSONApricotPhase2, - upgradeJSON: "", - expectedErr: "", - }, - { - name: "Invalid log level", - logConfig: `{"log-level": "cchain"}`, - genesisJSON: genesisJSONApricotPhase3, - upgradeJSON: "", - expectedErr: "failed to initialize logger due to", - }, - } - for _, test := range configTests { - t.Run(test.name, func(t *testing.T) { - vm := newDefaultTestVM() - ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, test.genesisJSON) - appSender := &enginetest.Sender{T: t} - appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } - err := vm.Initialize( - context.Background(), - ctx, - dbManager, - genesisBytes, - []byte(""), - []byte(test.logConfig), - issuer, - []*commonEng.Fx{}, - appSender, - ) - if len(test.expectedErr) == 0 && err != nil { - t.Fatal(err) - } else if len(test.expectedErr) > 0 { - if err == nil { - t.Fatalf("initialize should have failed due to %s", test.expectedErr) - } else if !strings.Contains(err.Error(), test.expectedErr) { - t.Fatalf("Expected initialize to fail due to %s, but failed with %s", test.expectedErr, err.Error()) - } - } - - // If the VM was not initialized, do not attempt to shut it 
down - if err == nil { - shutdownChan := make(chan error, 1) - shutdownFunc := func() { - err := vm.Shutdown(context.Background()) - shutdownChan <- err - } - go shutdownFunc() - - shutdownTimeout := 250 * time.Millisecond - ticker := time.NewTicker(shutdownTimeout) - defer ticker.Stop() - - select { - case <-ticker.C: - t.Fatalf("VM shutdown took longer than timeout: %v", shutdownTimeout) - case err := <-shutdownChan: - if err != nil { - t.Fatalf("Shutdown errored: %s", err) - } - } - } - }) - } -} - -// Regression test to ensure we can build blocks if we are starting with the -// Apricot Phase 4 ruleset in genesis. -func TestBuildApricotPhase4Block(t *testing.T) { - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase4, "", "") - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - - key := testKeys[0].ToECDSA() - address := testEthAddrs[0] - - importAmount := uint64(1000000000) - utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} - - utxo := &avax.UTXO{ - UTXOID: utxoID, - Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: importAmount, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, - }, - }, - } - utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) - if err != nil { - t.Fatal(err) - } - - xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) - inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ - Key: inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - testKeys[0].Address().Bytes(), - }, - }}}}); err != nil { - t.Fatal(err) - } - - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - if 
err != nil { - t.Fatal(err) - } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer - - blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { - t.Fatal(err) - } - - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - ethBlk := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - if eBlockGasCost := ethBlk.BlockGasCost(); eBlockGasCost == nil || eBlockGasCost.Cmp(common.Big0) != 0 { - t.Fatalf("expected blockGasCost to be 0 but got %d", eBlockGasCost) - } - if eExtDataGasUsed := ethBlk.ExtDataGasUsed(); eExtDataGasUsed == nil || eExtDataGasUsed.Cmp(big.NewInt(1230)) != 0 { - t.Fatalf("expected extDataGasUsed to be 1000 but got %d", eExtDataGasUsed) - } - minRequiredTip, err := dummy.MinRequiredTip(vm.chainConfig, ethBlk.Header()) - if err != nil { - t.Fatal(err) - } - if minRequiredTip == nil || minRequiredTip.Cmp(common.Big0) != 0 { - t.Fatalf("expected minRequiredTip to be 0 but got %d", minRequiredTip) - } - - newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") - } - - txs := make([]*types.Transaction, 10) - for i := 0; i < 5; i++ { - tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), key) - if err != nil { - t.Fatal(err) - } - txs[i] = signedTx - } - for i := 5; i < 10; i++ { - tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.ApricotPhase1MinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), key) - if err != nil { - t.Fatal(err) - } - txs[i] = signedTx - } - errs := vm.txPool.AddRemotesSync(txs) - for i, err := range errs { - if err != 
nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } - } - - <-issuer - - blk, err = vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - ethBlk = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - if ethBlk.BlockGasCost() == nil || ethBlk.BlockGasCost().Cmp(big.NewInt(100)) < 0 { - t.Fatalf("expected blockGasCost to be at least 100 but got %d", ethBlk.BlockGasCost()) - } - if ethBlk.ExtDataGasUsed() == nil || ethBlk.ExtDataGasUsed().Cmp(common.Big0) != 0 { - t.Fatalf("expected extDataGasUsed to be 0 but got %d", ethBlk.ExtDataGasUsed()) - } - minRequiredTip, err = dummy.MinRequiredTip(vm.chainConfig, ethBlk.Header()) - if err != nil { - t.Fatal(err) - } - if minRequiredTip == nil || minRequiredTip.Cmp(big.NewInt(0.05*params.GWei)) < 0 { - t.Fatalf("expected minRequiredTip to be at least 0.05 gwei but got %d", minRequiredTip) - } - - lastAcceptedID, err := vm.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != blk.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) - } - - // Confirm all txs are present - ethBlkTxs := vm.blockChain.GetBlockByNumber(2).Transactions() - for i, tx := range txs { - if len(ethBlkTxs) <= i { - t.Fatalf("missing transactions expected: %d but found: %d", len(txs), len(ethBlkTxs)) - } - if ethBlkTxs[i].Hash() != tx.Hash() { - t.Fatalf("expected tx at index %d to have hash: %x but has: %x", i, txs[i].Hash(), tx.Hash()) - } + t.Fatal("Future block should have failed verification due to block timestamp too far in the future") + } else if !strings.Contains(err.Error(), "block timestamp is too far in the future") { + t.Fatalf("Expected error to be block timestamp too far in the future but found %s", err) } } // Regression test to ensure we 
can build blocks if we are starting with the -// Apricot Phase 5 ruleset in genesis. -func TestBuildApricotPhase5Block(t *testing.T) { - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") - +// Apricot Phase 1 ruleset in genesis. +func TestBuildApricotPhase1Block(t *testing.T) { + issuer, vm, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase1, "", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) @@ -3345,47 +1702,19 @@ func TestBuildApricotPhase5Block(t *testing.T) { newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - key := testKeys[0].ToECDSA() - address := testEthAddrs[0] - - importAmount := uint64(1000000000) - utxoID := avax.UTXOID{TxID: ids.GenerateTestID()} - - utxo := &avax.UTXO{ - UTXOID: utxoID, - Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: importAmount, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{testKeys[0].Address()}, - }, - }, - } - utxoBytes, err := atomic.Codec.Marshal(atomic.CodecVersion, utxo) - if err != nil { - t.Fatal(err) - } - - xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) - inputID := utxo.InputID() - if err := xChainSharedMemory.Apply(map[ids.ID]*avalancheatomic.Requests{vm.ctx.ChainID: {PutRequests: []*avalancheatomic.Element{{ - Key: inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - testKeys[0].Address().Bytes(), - }, - }}}}); err != nil { - t.Fatal(err) - } + key := testutils.TestKeys[0].ToECDSA() + address := testutils.TestEthAddrs[0] - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) if err != nil { t.Fatal(err) } - 
- if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) + errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } } <-issuer @@ -3407,36 +1736,29 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatal(err) } - ethBlk := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - if eBlockGasCost := ethBlk.BlockGasCost(); eBlockGasCost == nil || eBlockGasCost.Cmp(common.Big0) != 0 { - t.Fatalf("expected blockGasCost to be 0 but got %d", eBlockGasCost) - } - if eExtDataGasUsed := ethBlk.ExtDataGasUsed(); eExtDataGasUsed == nil || eExtDataGasUsed.Cmp(big.NewInt(11230)) != 0 { - t.Fatalf("expected extDataGasUsed to be 11230 but got %d", eExtDataGasUsed) - } - minRequiredTip, err := dummy.MinRequiredTip(vm.chainConfig, ethBlk.Header()) - if err != nil { - t.Fatal(err) - } - if minRequiredTip == nil || minRequiredTip.Cmp(common.Big0) != 0 { - t.Fatalf("expected minRequiredTip to be 0 but got %d", minRequiredTip) - } - newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { t.Fatalf("Expected new block to match") } txs := make([]*types.Transaction, 10) - for i := 0; i < 10; i++ { - tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice*3), nil) + for i := 0; i < 5; i++ { + tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), key) if err != nil { t.Fatal(err) } txs[i] = signedTx } - errs := vm.txPool.Add(txs, false, false) + for i := 5; i < 10; i++ { + tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.ApricotPhase1MinGasPrice), nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), key) + if err != nil { + t.Fatal(err) + } + txs[i] = signedTx + } + errs = vm.txPool.AddRemotesSync(txs) 
for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) @@ -3458,21 +1780,6 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatal(err) } - ethBlk = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - if ethBlk.BlockGasCost() == nil || ethBlk.BlockGasCost().Cmp(big.NewInt(100)) < 0 { - t.Fatalf("expected blockGasCost to be at least 100 but got %d", ethBlk.BlockGasCost()) - } - if ethBlk.ExtDataGasUsed() == nil || ethBlk.ExtDataGasUsed().Cmp(common.Big0) != 0 { - t.Fatalf("expected extDataGasUsed to be 0 but got %d", ethBlk.ExtDataGasUsed()) - } - minRequiredTip, err = dummy.MinRequiredTip(vm.chainConfig, ethBlk.Header()) - if err != nil { - t.Fatal(err) - } - if minRequiredTip == nil || minRequiredTip.Cmp(big.NewInt(0.05*params.GWei)) < 0 { - t.Fatalf("expected minRequiredTip to be at least 0.05 gwei but got %d", minRequiredTip) - } - lastAcceptedID, err := vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) @@ -3493,11 +1800,8 @@ func TestBuildApricotPhase5Block(t *testing.T) { } } -// This is a regression test to ensure that if two consecutive atomic transactions fail verification -// in onFinalizeAndAssemble it will not cause a panic due to calling RevertToSnapshot(revID) on the -// same revision ID twice. 
-func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase1, "", "") +func TestLastAcceptedBlockNumberAllow(t *testing.T) { + issuer, vm, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { @@ -3505,242 +1809,136 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { } }() - newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - - // Create three conflicting import transactions - importTxs := createImportTxOptions(t, vm, sharedMemory) - - // Issue the first import transaction, build, and accept the block. - if err := vm.mempool.AddLocalTx(importTxs[0]); err != nil { + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) + if err != nil { t.Fatal(err) } + errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } + } <-issuer blk, err := vm.BuildBlock(context.Background()) if err != nil { - t.Fatal(err) + t.Fatalf("Failed to build block with import transaction: %s", err) } if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) + t.Fatalf("Block failed verification on VM: %s", err) } if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } - - newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") - } - - // Add the two conflicting transactions directly to the mempool, so that two consecutive transactions - // will fail 
verification when build block is called. - vm.mempool.AddRemoteTx(importTxs[1]) - vm.mempool.AddRemoteTx(importTxs[2]) - - if _, err := vm.BuildBlock(context.Background()); err == nil { - t.Fatal("Expected build block to fail due to empty block") - } -} + blkHeight := blk.Height() + blkHash := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() -func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { - importAmount := uint64(10000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase5, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - testShortIDAddrs[1]: importAmount, - testShortIDAddrs[2]: importAmount, - }) - conflictKey, err := accountKeystore.NewKey(rand.Reader) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() + vm.eth.APIBackend.SetAllowUnfinalizedQueries(true) - // Create a conflict set for each pair of transactions - conflictSets := make([]set.Set[ids.ID], len(testKeys)) - for index, key := range testKeys { - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[index], initialBaseFee, []*secp256k1.PrivateKey{key}) - if err != nil { - t.Fatal(err) - } - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) - } - conflictSets[index].Add(importTx.ID()) - conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictKey.Address, initialBaseFee, []*secp256k1.PrivateKey{key}) - if err != nil { - t.Fatal(err) - } - if err := vm.mempool.AddLocalTx(conflictTx); err == nil { - t.Fatal("should conflict with the utxoSet in the mempool") - } - // force add the tx - vm.mempool.ForceAddTx(conflictTx) - conflictSets[index].Add(conflictTx.ID()) - } - <-issuer - // Note: this only checks the path through OnFinalizeAndAssemble, we should make sure to add a test - // that verifies blocks received from the network will also fail verification - blk, err := vm.BuildBlock(context.Background()) + ctx := 
context.Background() + b, err := vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) if err != nil { t.Fatal(err) } - atomicTxs := blk.(*chain.BlockWrapper).Block.(*Block).atomicTxs - assert.True(t, len(atomicTxs) == len(testKeys), "Conflict transactions should be out of the batch") - atomicTxIDs := set.Set[ids.ID]{} - for _, tx := range atomicTxs { - atomicTxIDs.Add(tx.ID()) - } - - // Check that removing the txIDs actually included in the block from each conflict set - // leaves one item remaining for each conflict set ie. only one tx from each conflict set - // has been included in the block. - for _, conflictSet := range conflictSets { - conflictSet.Difference(atomicTxIDs) - assert.Equal(t, 1, conflictSet.Len()) - } - - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) + if b.Hash() != blkHash { + t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) } -} -func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { - importAmount := uint64(10000000) - issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") - - defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - kc := secp256k1fx.NewKeychain() - kc.Add(testKeys[0]) - txID, err := ids.ToID(hashing.ComputeHash256(testShortIDAddrs[0][:])) - assert.NoError(t, err) - - mempoolTxs := 200 - for i := 0; i < mempoolTxs; i++ { - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, uint32(i), vm.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) - assert.NoError(t, err) + vm.eth.APIBackend.SetAllowUnfinalizedQueries(false) - importTx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, kc, []*avax.UTXO{utxo}) - if err != nil { - t.Fatal(err) - } - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) 
- } + _, err = vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) + if !errors.Is(err, eth.ErrUnfinalizedData) { + t.Fatalf("expected ErrUnfinalizedData but got %s", err.Error()) } - <-issuer - blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) + if err := blk.Accept(context.Background()); err != nil { + t.Fatalf("VM failed to accept block: %s", err) } - atomicTxs := blk.(*chain.BlockWrapper).Block.(*Block).atomicTxs - - // Need to ensure that not all of the transactions in the mempool are included in the block. - // This ensures that we hit the atomic gas limit while building the block before we hit the - // upper limit on the size of the codec for marshalling the atomic transactions. - if len(atomicTxs) >= mempoolTxs { - t.Fatalf("Expected number of atomic transactions included in the block (%d) to be less than the number of transactions added to the mempool (%d)", len(atomicTxs), mempoolTxs) + if b := vm.blockChain.GetBlockByNumber(blkHeight); b.Hash() != blkHash { + t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) } } -func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { - importAmount := uint64(10000000) - // We create two VMs one in ApriotPhase4 and one in ApricotPhase5, so that we can construct a block - // containing a large enough atomic transaction that it will exceed the atomic gas limit in - // ApricotPhase5. 
- issuer, vm1, _, sharedMemory1, _ := GenesisVM(t, true, genesisJSONApricotPhase4, "", "") - _, vm2, _, sharedMemory2, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") - - defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - if err := vm2.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - }() - - kc := secp256k1fx.NewKeychain() - kc.Add(testKeys[0]) - txID, err := ids.ToID(hashing.ComputeHash256(testShortIDAddrs[0][:])) - assert.NoError(t, err) - - // Add enough UTXOs, such that the created import transaction will attempt to consume more gas than allowed - // in ApricotPhase5. - for i := 0; i < 100; i++ { - _, err := addUTXO(sharedMemory1, vm1.ctx, txID, uint32(i), vm1.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) - assert.NoError(t, err) - - _, err = addUTXO(sharedMemory2, vm2.ctx, txID, uint32(i), vm2.ctx.AVAXAssetID, importAmount, testShortIDAddrs[0]) - assert.NoError(t, err) - } - - // Double the initial base fee used when estimating the cost of this transaction to ensure that when it is - // used in ApricotPhase5 it still pays a sufficient fee with the fixed fee per atomic transaction. 
- importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), []*secp256k1.PrivateKey{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - if err := vm1.mempool.ForceAddTx(importTx); err != nil { - t.Fatal(err) - } - - <-issuer - blk1, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - if err := blk1.Verify(context.Background()); err != nil { - t.Fatal(err) - } - - validEthBlock := blk1.(*chain.BlockWrapper).Block.(*Block).ethBlock - - extraData, err := atomic.Codec.Marshal(atomic.CodecVersion, []*atomic.Tx{importTx}) - if err != nil { - t.Fatal(err) +func TestConfigureLogLevel(t *testing.T) { + configTests := []struct { + name string + logConfig string + genesisJSON, upgradeJSON string + expectedErr string + }{ + { + name: "Log level info", + logConfig: `{"log-level": "info"}`, + genesisJSON: testutils.GenesisJSONApricotPhase2, + upgradeJSON: "", + expectedErr: "", + }, + { + name: "Invalid log level", + logConfig: `{"log-level": "cchain"}`, + genesisJSON: testutils.GenesisJSONApricotPhase3, + upgradeJSON: "", + expectedErr: "failed to initialize logger due to", + }, } + for _, test := range configTests { + t.Run(test.name, func(t *testing.T) { + vm := newDefaultTestVM() + ctx, dbManager, genesisBytes, issuer, _ := testutils.SetupGenesis(t, test.genesisJSON) + appSender := &enginetest.Sender{T: t} + appSender.CantSendAppGossip = true + appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } + err := vm.Initialize( + context.Background(), + ctx, + dbManager, + genesisBytes, + []byte(""), + []byte(test.logConfig), + issuer, + []*commonEng.Fx{}, + appSender, + ) + if len(test.expectedErr) == 0 && err != nil { + t.Fatal(err) + } else if len(test.expectedErr) > 0 { + if err == nil { + t.Fatalf("initialize should have failed due to %s", test.expectedErr) + } else if !strings.Contains(err.Error(), test.expectedErr) { + t.Fatalf("Expected 
initialize to fail due to %s, but failed with %s", test.expectedErr, err.Error()) + } + } - // Construct the new block with the extra data in the new format (slice of atomic transactions). - ethBlk2 := types.NewBlockWithExtData( - types.CopyHeader(validEthBlock.Header()), - nil, - nil, - nil, - new(trie.Trie), - extraData, - true, - ) + // If the VM was not initialized, do not attempt to shut it down + if err == nil { + shutdownChan := make(chan error, 1) + shutdownFunc := func() { + err := vm.Shutdown(context.Background()) + shutdownChan <- err + } + go shutdownFunc() - state, err := vm2.blockChain.State() - if err != nil { - t.Fatal(err) - } + shutdownTimeout := 250 * time.Millisecond + ticker := time.NewTicker(shutdownTimeout) + defer ticker.Stop() - // Hack: test [onExtraStateChange] directly to ensure it catches the atomic gas limit error correctly. - if _, _, err := vm2.onExtraStateChange(ethBlk2, state); err == nil || !strings.Contains(err.Error(), "exceeds atomic gas limit") { - t.Fatalf("Expected block to fail verification due to exceeded atomic gas limit, but found error: %v", err) + select { + case <-ticker.C: + t.Fatalf("VM shutdown took longer than timeout: %v", shutdownTimeout) + case err := <-shutdownChan: + if err != nil { + t.Fatalf("Shutdown errored: %s", err) + } + } + } + }) } } @@ -3749,17 +1947,22 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { metrics.Enabled = false defer func() { metrics.Enabled = true }() - importAmount := uint64(50000000) - issuer, vm, dbManager, _, appSender := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase1, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) + issuer, vm, dbManager, _, appSender := GenesisVM(t, true, testutils.GenesisJSONApricotPhase1, "", "") defer func() { require.NoError(t, vm.Shutdown(context.Background())) }() // Since rewinding is permitted for last accepted height of 0, we must // accept one block to test the SkipUpgradeCheck functionality. 
- importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) - require.NoError(t, err) - require.NoError(t, vm.mempool.AddLocalTx(importTx)) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) + if err != nil { + t.Fatal(err) + } + errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } + } <-issuer blk, err := vm.BuildBlock(context.Background()) @@ -3772,7 +1975,7 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { // use the block's timestamp instead of 0 since rewind to genesis // is hardcoded to be allowed in core/genesis.go. genesisWithUpgrade := &core.Genesis{} - require.NoError(t, json.Unmarshal([]byte(genesisJSONApricotPhase1), genesisWithUpgrade)) + require.NoError(t, json.Unmarshal([]byte(testutils.GenesisJSONApricotPhase1), genesisWithUpgrade)) genesisWithUpgrade.Config.ApricotPhase2BlockTimestamp = utils.TimeToNewUint64(blk.Timestamp()) genesisWithUpgradeBytes, err := json.Marshal(genesisWithUpgrade) require.NoError(t, err) @@ -3798,39 +2001,39 @@ func TestParentBeaconRootBlock(t *testing.T) { }{ { name: "non-empty parent beacon root in Durango", - genesisJSON: genesisJSONDurango, + genesisJSON: testutils.GenesisJSONDurango, beaconRoot: &common.Hash{0x01}, expectedError: true, // err string wont work because it will also fail with blob gas is non-empty (zeroed) }, { name: "empty parent beacon root in Durango", - genesisJSON: genesisJSONDurango, + genesisJSON: testutils.GenesisJSONDurango, beaconRoot: &common.Hash{}, expectedError: true, }, { name: "nil parent beacon root in Durango", - genesisJSON: genesisJSONDurango, + genesisJSON: testutils.GenesisJSONDurango, beaconRoot: nil, expectedError: 
false, }, { name: "non-empty parent beacon root in E-Upgrade (Cancun)", - genesisJSON: genesisJSONEtna, + genesisJSON: testutils.GenesisJSONEtna, beaconRoot: &common.Hash{0x01}, expectedError: true, errString: "expected empty hash", }, { name: "empty parent beacon root in E-Upgrade (Cancun)", - genesisJSON: genesisJSONEtna, + genesisJSON: testutils.GenesisJSONEtna, beaconRoot: &common.Hash{}, expectedError: false, }, { name: "nil parent beacon root in E-Upgrade (Cancun)", - genesisJSON: genesisJSONEtna, + genesisJSON: testutils.GenesisJSONEtna, beaconRoot: nil, expectedError: true, errString: "header is missing parentBeaconRoot", @@ -3839,10 +2042,7 @@ func TestParentBeaconRootBlock(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - importAmount := uint64(1000000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, test.genesisJSON, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) + issuer, vm, _, _, _ := GenesisVM(t, true, test.genesisJSON, "", "") defer func() { if err := vm.Shutdown(context.Background()); err != nil { @@ -3850,13 +2050,16 @@ func TestParentBeaconRootBlock(t *testing.T) { } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - - if err := vm.mempool.AddLocalTx(importTx); err != nil { - t.Fatal(err) + errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + for i, err := range errs { + if err != nil { + t.Fatalf("Failed to add tx at index %d: %s", i, err) + } } <-issuer @@ -3880,7 +2083,7 @@ func TestParentBeaconRootBlock(t *testing.T) { false, ) - parentBeaconBlock, err := vm.newBlock(parentBeaconEthBlock) + parentBeaconBlock, err := 
vm.blockManager.newBlock(parentBeaconEthBlock) if err != nil { t.Fatal(err) } @@ -3910,7 +2113,7 @@ func TestNoBlobsAllowed(t *testing.T) { require := require.New(t) gspec := new(core.Genesis) - err := json.Unmarshal([]byte(genesisJSONCancun), gspec) + err := json.Unmarshal([]byte(testutils.GenesisJSONEtna), gspec) require.NoError(err) // Make one block with a single blob tx @@ -3924,11 +2127,11 @@ func TestNoBlobsAllowed(t *testing.T) { GasTipCap: uint256.NewInt(1), GasFeeCap: uint256.MustFromBig(fee), Gas: params.TxGas, - To: testEthAddrs[0], + To: testutils.TestEthAddrs[0], BlobFeeCap: uint256.NewInt(1), BlobHashes: []common.Hash{{1}}, // This blob is expected to cause verification to fail Value: new(uint256.Int), - }), signer, testKeys[0].ToECDSA()) + }), signer, testutils.TestKeys[0].ToECDSA()) require.NoError(err) b.AddTx(tx) } @@ -3937,11 +2140,11 @@ func TestNoBlobsAllowed(t *testing.T) { require.NoError(err) // Create a VM with the genesis (will use header verification) - _, vm, _, _, _ := GenesisVM(t, true, genesisJSONCancun, "", "") + _, vm, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONEtna, "", "") defer func() { require.NoError(vm.Shutdown(ctx)) }() // Verification should fail - vmBlock, err := vm.newBlock(blocks[0]) + vmBlock, err := vm.blockManager.newBlock(blocks[0]) require.NoError(err) _, err = vm.ParseBlock(ctx, vmBlock.Bytes()) require.ErrorContains(err, "blobs not enabled on avalanche networks") diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index e69927a303..377615398e 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -32,6 +32,7 @@ import ( "github.com/ava-labs/coreth/eth/tracers" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/precompile/contract" warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp" "github.com/ava-labs/coreth/predicate" @@ -63,6 +64,10 @@ 
const ( signersPrimary ) +const ( + testNetworkID uint32 = 10 +) + var networkCodec codec.Manager func init() { @@ -75,7 +80,7 @@ func init() { func TestSendWarpMessage(t *testing.T) { require := require.New(t) - issuer, vm, _, _, _ := GenesisVM(t, true, genesisJSONDurango, "", "") + issuer, vm, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONDurango, "", "") defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -90,7 +95,7 @@ func TestSendWarpMessage(t *testing.T) { warpSendMessageInput, err := warpcontract.PackSendWarpMessage(payloadData) require.NoError(err) addressedPayload, err := payload.NewAddressedCall( - testEthAddrs[0].Bytes(), + testutils.TestEthAddrs[0].Bytes(), payloadData, ) require.NoError(err) @@ -103,7 +108,7 @@ func TestSendWarpMessage(t *testing.T) { // Submit a transaction to trigger sending a warp message tx0 := types.NewTransaction(uint64(0), warpcontract.ContractAddress, big.NewInt(1), 100_000, big.NewInt(params.LaunchMinGasPrice), warpSendMessageInput) - signedTx0, err := types.SignTx(tx0, types.LatestSignerForChainID(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) + signedTx0, err := types.SignTx(tx0, types.LatestSignerForChainID(vm.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) require.NoError(err) errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) @@ -124,7 +129,7 @@ func TestSendWarpMessage(t *testing.T) { require.Len(receipts[0].Logs, 1) expectedTopics := []common.Hash{ warpcontract.WarpABI.Events["SendWarpMessage"].ID, - common.BytesToHash(testEthAddrs[0].Bytes()), + common.BytesToHash(testutils.TestEthAddrs[0].Bytes()), common.Hash(expectedUnsignedMessage.ID()), } require.Equal(expectedTopics, receipts[0].Logs[0].Topics) @@ -265,7 +270,7 @@ func TestValidateInvalidWarpBlockHash(t *testing.T) { func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.UnsignedMessage, validSignature bool, txPayload []byte) { require := require.New(t) - issuer, vm, _, _, _ := GenesisVM(t, true, 
genesisJSONDurango, "", "") + issuer, vm, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONDurango, "", "") defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -337,10 +342,10 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned createTx, err := types.SignTx( types.NewContractCreation(0, common.Big0, 7_000_000, big.NewInt(225*params.GWei), common.Hex2Bytes(exampleWarpBin)), types.LatestSignerForChainID(vm.chainConfig.ChainID), - testKeys[0].ToECDSA(), + testutils.TestKeys[0].ToECDSA(), ) require.NoError(err) - exampleWarpAddress := crypto.CreateAddress(testEthAddrs[0], 0) + exampleWarpAddress := crypto.CreateAddress(testutils.TestEthAddrs[0], 0) tx, err := types.SignTx( predicate.NewPredicateTx( @@ -357,7 +362,7 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned signedMessage.Bytes(), ), types.LatestSignerForChainID(vm.chainConfig.ChainID), - testKeys[0].ToECDSA(), + testutils.TestKeys[0].ToECDSA(), ) require.NoError(err) errs := vm.txPool.AddRemotesSync([]*types.Transaction{createTx, tx}) @@ -414,7 +419,7 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned func TestReceiveWarpMessage(t *testing.T) { require := require.New(t) - issuer, vm, _, _, _ := GenesisVM(t, true, genesisJSONDurango, "", "") + issuer, vm, _, _, _ := GenesisVM(t, true, testutils.GenesisJSONDurango, "", "") defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -517,7 +522,7 @@ func testReceiveWarpMessage( require := require.New(t) payloadData := avagoUtils.RandomBytes(100) addressedPayload, err := payload.NewAddressedCall( - testEthAddrs[0].Bytes(), + testutils.TestEthAddrs[0].Bytes(), payloadData, ) require.NoError(err) @@ -624,7 +629,7 @@ func testReceiveWarpMessage( getVerifiedWarpMessageTx, err := types.SignTx( predicate.NewPredicateTx( vm.chainConfig.ChainID, - vm.txPool.Nonce(testEthAddrs[0]), + vm.txPool.Nonce(testutils.TestEthAddrs[0]), 
&warpcontract.Module.Address, 1_000_000, big.NewInt(225*params.GWei), @@ -636,7 +641,7 @@ func testReceiveWarpMessage( signedMessage.Bytes(), ), types.LatestSignerForChainID(vm.chainConfig.ChainID), - testKeys[0].ToECDSA(), + testutils.TestKeys[0].ToECDSA(), ) require.NoError(err) errs := vm.txPool.AddRemotesSync([]*types.Transaction{getVerifiedWarpMessageTx}) @@ -701,7 +706,7 @@ func testReceiveWarpMessage( expectedOutput, err := warpcontract.PackGetVerifiedWarpMessageOutput(warpcontract.GetVerifiedWarpMessageOutput{ Message: warpcontract.WarpMessage{ SourceChainID: common.Hash(sourceChainID), - OriginSenderAddress: testEthAddrs[0], + OriginSenderAddress: testutils.TestEthAddrs[0], Payload: payloadData, }, Valid: true, @@ -726,7 +731,7 @@ func testReceiveWarpMessage( } func TestMessageSignatureRequestsToVM(t *testing.T) { - _, vm, _, _, appSender := GenesisVM(t, true, genesisJSONDurango, "", "") + _, vm, _, _, appSender := GenesisVM(t, true, testutils.GenesisJSONDurango, "", "") defer func() { err := vm.Shutdown(context.Background()) @@ -788,7 +793,7 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { } func TestBlockSignatureRequestsToVM(t *testing.T) { - _, vm, _, _, appSender := GenesisVM(t, true, genesisJSONDurango, "", "") + _, vm, _, _, appSender := GenesisVM(t, true, testutils.GenesisJSONDurango, "", "") defer func() { err := vm.Shutdown(context.Background()) @@ -846,10 +851,7 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { } func TestClearWarpDB(t *testing.T) { - ctx, db, genesisBytes, issuer, _ := setupGenesis(t, genesisJSONLatest) - vm := newDefaultTestVM() - err := vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) - require.NoError(t, err) + issuer, vm, db, _, _ := GenesisVM(t, false, testutils.GenesisJSONLatest, "", "") // use multiple messages to test that all messages get cleared payloads := [][]byte{[]byte("test1"), []byte("test2"), []byte("test3"), 
[]byte("test4"), []byte("test5")} @@ -872,8 +874,8 @@ func TestClearWarpDB(t *testing.T) { // Restart VM with the same database default should not prune the warp db vm = newDefaultTestVM() // we need new context since the previous one has registered metrics. - ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) - err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) + ctx := utils.TestSnowContext() + err := vm.Initialize(context.Background(), ctx, db, []byte(testutils.GenesisJSONLatest), []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) require.NoError(t, err) // check messages are still present @@ -888,8 +890,8 @@ func TestClearWarpDB(t *testing.T) { // restart the VM with pruning enabled vm = newDefaultTestVM() config := `{"prune-warp-db-enabled": true}` - ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) - err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte(config), issuer, []*commonEng.Fx{}, &enginetest.Sender{}) + ctx = utils.TestSnowContext() + err = vm.Initialize(context.Background(), ctx, db, []byte(testutils.GenesisJSONLatest), []byte{}, []byte(config), issuer, []*commonEng.Fx{}, &enginetest.Sender{}) require.NoError(t, err) it := vm.warpDB.NewIterator() diff --git a/plugin/evm/factory.go b/plugin/factory.go similarity index 80% rename from plugin/evm/factory.go rename to plugin/factory.go index f42d8f8b41..a5e7907dfe 100644 --- a/plugin/evm/factory.go +++ b/plugin/factory.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package evm +package main import ( "github.com/ava-labs/avalanchego/ids" @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/coreth/plugin/evm" atomicvm "github.com/ava-labs/coreth/plugin/evm/atomic/vm" ) @@ -22,9 +23,9 @@ var ( type Factory struct{} func (*Factory) New(logging.Logger) (interface{}, error) { - return atomicvm.WrapVM(&VM{}), nil + return atomicvm.WrapVM(&evm.VM{}), nil } func NewPluginVM() (block.ChainVM, error) { - return atomicvm.WrapVM(&VM{IsPlugin: true}), nil + return atomicvm.WrapVM(&evm.VM{IsPlugin: true}), nil } diff --git a/plugin/main.go b/plugin/main.go index 6289e95446..2bf84f4798 100644 --- a/plugin/main.go +++ b/plugin/main.go @@ -29,7 +29,7 @@ func main() { fmt.Printf("failed to set fd limit correctly due to: %s", err) os.Exit(1) } - vm, err := evm.NewPluginVM() + vm, err := NewPluginVM() if err != nil { fmt.Printf("couldn't create evm plugin: %s", err) os.Exit(1) From c61e190d323ab3a3ca11780cba601825bb7f8195 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 6 Feb 2025 02:26:36 +0300 Subject: [PATCH 63/91] fix tests --- consensus/dummy/consensus.go | 4 +- core/test_blockchain.go | 2 +- eth/gasprice/gasprice_test.go | 2 +- plugin/evm/atomic/export_tx.go | 1 - plugin/evm/atomic/txpool/mempool.go | 6 + plugin/evm/atomic/vm/block_extension.go | 19 ++-- plugin/evm/atomic/vm/export_tx_test.go | 105 ++++++++++-------- .../vm/gossiper_atomic_gossiping_test.go | 5 +- plugin/evm/atomic/vm/import_tx_test.go | 87 +++++++++------ plugin/evm/atomic/vm/syncervm_test.go | 30 ++++- plugin/evm/atomic/vm/tx_semantic_verifier.go | 40 +++---- plugin/evm/atomic/vm/tx_test.go | 29 +++-- plugin/evm/atomic/vm/vm.go | 70 ++++++------ plugin/evm/atomic/vm/vm_test.go | 10 +- plugin/evm/block.go | 31 ++++-- plugin/evm/block_builder.go | 12 +- plugin/evm/block_manager.go | 5 +- plugin/evm/extension/config.go | 104 +++++++++++++---- plugin/evm/extension/no_op_block_extension.go | 
33 ------ plugin/evm/sync/syncervm_client.go | 17 +-- plugin/evm/testutils/genesis.go | 9 +- plugin/evm/testutils/test_syncervm.go | 102 ++++++++++------- plugin/evm/testutils/utils.go | 13 ++- plugin/evm/vm.go | 40 +++++-- plugin/evm/vm_test.go | 72 ++++++------ 25 files changed, 486 insertions(+), 362 deletions(-) delete mode 100644 plugin/evm/extension/no_op_block_extension.go diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go index 1452335a95..47baf40ece 100644 --- a/consensus/dummy/consensus.go +++ b/consensus/dummy/consensus.go @@ -40,7 +40,7 @@ type Mode struct { type ( OnFinalizeAndAssembleCallbackType = func(header *types.Header, state *state.StateDB, txs []*types.Transaction) (extraData []byte, blockFeeContribution *big.Int, extDataGasUsed *big.Int, err error) - OnExtraStateChangeType = func(block *types.Block, statedb *state.StateDB) (blockFeeContribution *big.Int, extDataGasUsed *big.Int, err error) + OnExtraStateChangeType = func(block *types.Block, statedb *state.StateDB, config *params.ChainConfig) (blockFeeContribution *big.Int, extDataGasUsed *big.Int, err error) ConsensusCallbacks struct { OnFinalizeAndAssemble OnFinalizeAndAssembleCallbackType @@ -387,7 +387,7 @@ func (eng *DummyEngine) Finalize(chain consensus.ChainHeaderReader, block *types err error ) if eng.cb.OnExtraStateChange != nil { - contribution, extDataGasUsed, err = eng.cb.OnExtraStateChange(block, state) + contribution, extDataGasUsed, err = eng.cb.OnExtraStateChange(block, state, chain.Config()) if err != nil { return err } diff --git a/core/test_blockchain.go b/core/test_blockchain.go index 9c22ff0338..b992435969 100644 --- a/core/test_blockchain.go +++ b/core/test_blockchain.go @@ -24,7 +24,7 @@ import ( ) var TestCallbacks = dummy.ConsensusCallbacks{ - OnExtraStateChange: func(block *types.Block, sdb *state.StateDB) (*big.Int, *big.Int, error) { + OnExtraStateChange: func(block *types.Block, sdb *state.StateDB, _ *params.ChainConfig) (*big.Int, *big.Int, 
error) { sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64())) return nil, nil, nil }, diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 8c1281fdb4..02d5294cf7 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -129,7 +129,7 @@ func newTestBackend(t *testing.T, config *params.ChainConfig, numBlocks int, ext OnFinalizeAndAssemble: func(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { return nil, common.Big0, extDataGasUsage, nil }, - OnExtraStateChange: func(block *types.Block, state *state.StateDB) (*big.Int, *big.Int, error) { + OnExtraStateChange: func(block *types.Block, state *state.StateDB, _ *params.ChainConfig) (*big.Int, *big.Int, error) { return common.Big0, extDataGasUsage, nil }, }) diff --git a/plugin/evm/atomic/export_tx.go b/plugin/evm/atomic/export_tx.go index f61d998cb8..9e9362d872 100644 --- a/plugin/evm/atomic/export_tx.go +++ b/plugin/evm/atomic/export_tx.go @@ -37,7 +37,6 @@ var ( errOverflowExport = errors.New("overflow when computing export amount + txFee") errInsufficientFunds = errors.New("insufficient funds") errInvalidNonce = errors.New("invalid nonce") - errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") ) // UnsignedExportTx is an unsigned ExportTx diff --git a/plugin/evm/atomic/txpool/mempool.go b/plugin/evm/atomic/txpool/mempool.go index 253591d3ec..7839dceb08 100644 --- a/plugin/evm/atomic/txpool/mempool.go +++ b/plugin/evm/atomic/txpool/mempool.go @@ -110,6 +110,12 @@ func (m *Mempool) Initialize(ctx *snow.Context, registerer prometheus.Registerer return nil } +// PendingLen returns the number of pending transactions in the mempool +// it implements the BuilderMempool interface +func (m *Mempool) PendingLen() int { + return m.Len() +} + // Len returns the number of transactions in the mempool func (m *Mempool) Len() 
int { m.lock.RLock() diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go index 291bdbefe9..107e988d06 100644 --- a/plugin/evm/atomic/vm/block_extension.go +++ b/plugin/evm/atomic/vm/block_extension.go @@ -134,15 +134,18 @@ func (be *blockExtension) SyntacticVerify(b extension.VMBlock, rules params.Rule } } - // if bootstrapped, verify UTXOs named in atomic txs are present in shared memory - if be.vm.bootstrapped.Get() { - return be.verifyUTXOsPresent(b, atomicTxs) - } - return nil } -func (be *blockExtension) Accept(b extension.VMBlock, acceptedBatch database.Batch) error { +func (be *blockExtension) SemanticVerify(b extension.VMBlock) error { + atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) + if err != nil { + return err + } + return be.verifyUTXOsPresent(b, atomicTxs) +} + +func (be *blockExtension) OnAccept(b extension.VMBlock, acceptedBatch database.Batch) error { atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) if err != nil { return err @@ -163,7 +166,7 @@ func (be *blockExtension) Accept(b extension.VMBlock, acceptedBatch database.Bat return atomicState.Accept(acceptedBatch) } -func (be *blockExtension) Reject(b extension.VMBlock) error { +func (be *blockExtension) OnReject(b extension.VMBlock) error { atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) if err != nil { return err @@ -183,7 +186,7 @@ func (be *blockExtension) Reject(b extension.VMBlock) error { return atomicState.Reject() } -func (be *blockExtension) Cleanup(b extension.VMBlock) { +func (be *blockExtension) OnCleanup(b extension.VMBlock) { if atomicState, err := be.vm.atomicBackend.GetVerifiedAtomicState(b.GetEthBlock().Hash()); err == nil { atomicState.Reject() } diff --git a/plugin/evm/atomic/vm/export_tx_test.go b/plugin/evm/atomic/vm/export_tx_test.go index 15cb08bd05..686ae985ed 100644 --- a/plugin/evm/atomic/vm/export_tx_test.go +++ 
b/plugin/evm/atomic/vm/export_tx_test.go @@ -23,6 +23,7 @@ import ( "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" + "github.com/stretchr/testify/require" ) var nonExistentID = ids.ID{'F'} @@ -41,6 +42,9 @@ var ( // createExportTxOptions adds funds to shared memory, imports them, and returns a list of export transactions // that attempt to send the funds to each of the test keys (list of length 3). func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, sharedMemory *avalancheatomic.Memory) []*atomic.Tx { + key, err := secp256k1.NewPrivateKey() + require.NoError(t, err) + ethAddr := key.EthAddress() // Add a UTXO to shared memory utxo := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: ids.GenerateTestID()}, @@ -49,7 +53,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, Amt: uint64(50000000), OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, + Addrs: []ids.ShortID{key.Address()}, }, }, } @@ -64,14 +68,14 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testutils.TestKeys[0].Address().Bytes(), + key.Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) } // Import the funds - importTx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, ethAddr, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -106,7 +110,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, t.Fatal(err) } for _, addr := range testutils.TestShortIDAddrs { - exportTx, err := atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, testutils.InitialBaseFee, testutils.TestKeys[0:1]) + 
exportTx, err := atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -117,7 +121,8 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, } func TestExportTxEVMStateTransfer(t *testing.T) { - key := testutils.TestKeys[0] + key, err := secp256k1.NewPrivateKey() + require.NoError(t, err) addr := key.Address() ethAddr := key.EthAddress() @@ -387,7 +392,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, ethAddr, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -927,14 +932,14 @@ func TestExportTxSemanticVerify(t *testing.T) { t.Fatal(err) } - backend := &VerifierBackend{ - Ctx: vm.ctx, - Fx: &vm.fx, - Rules: test.rules, - ChainConfig: vm.Ethereum().BlockChain().Config(), - Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm, - SecpCache: &vm.secpCache, + backend := &verifierBackend{ + ctx: vm.ctx, + fx: &vm.fx, + rules: test.rules, + chainConfig: vm.Ethereum().BlockChain().Config(), + bootstrapped: vm.IsBootstrapped(), + blockFetcher: vm, + secpCache: &vm.secpCache, } t.Run(test.name, func(t *testing.T) { @@ -1664,6 +1669,9 @@ func TestExportTxGasCost(t *testing.T) { } func TestNewExportTx(t *testing.T) { + key, err := secp256k1.NewPrivateKey() + require.NoError(t, err) + ethAddress := key.PublicKey().EthAddress() tests := []struct { name string genesis string @@ -1735,7 +1743,7 @@ func TestNewExportTx(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, + Addrs: []ids.ShortID{key.Address()}, }, }, } @@ -1750,13 +1758,13 @@ func TestNewExportTx(t 
*testing.T) { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testutils.TestKeys[0].Address().Bytes(), + key.Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, ethAddress, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -1792,21 +1800,21 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testutils.TestShortIDAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + tx, err = atomic.NewExportTx(vm.ctx, test.rules, state, vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, key.Address(), testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } exportTx := tx.UnsignedAtomicTx - backend := &VerifierBackend{ - Ctx: vm.ctx, - Fx: &vm.fx, - Rules: vm.CurrentRules(), - ChainConfig: vm.Ethereum().BlockChain().Config(), - Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm, - SecpCache: &vm.secpCache, + backend := &verifierBackend{ + ctx: vm.ctx, + fx: &vm.fx, + rules: vm.CurrentRules(), + chainConfig: vm.Ethereum().BlockChain().Config(), + bootstrapped: vm.IsBootstrapped(), + blockFetcher: vm, + secpCache: &vm.secpCache, } if err := exportTx.Visit(&semanticVerifier{ @@ -1848,15 +1856,17 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - addr := testutils.TestKeys[0].EthAddress() - if sdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { - t.Fatalf("address balance %s equal %s not %s", addr.String(), sdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) + if sdb.GetBalance(ethAddress).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { + t.Fatalf("address balance %s equal %s not %s", ethAddress.String(), sdb.GetBalance(ethAddress), 
new(big.Int).SetUint64(test.bal*units.Avax)) } }) } } func TestNewExportTxMulticoin(t *testing.T) { + key, err := secp256k1.NewPrivateKey() + require.NoError(t, err) + ethAddress := key.PublicKey().EthAddress() tests := []struct { name string genesis string @@ -1914,7 +1924,7 @@ func TestNewExportTxMulticoin(t *testing.T) { Amt: importAmount, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, + Addrs: []ids.ShortID{key.Address()}, }, }, } @@ -1935,7 +1945,7 @@ func TestNewExportTxMulticoin(t *testing.T) { Amt: importAmount2, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, - Addrs: []ids.ShortID{testutils.TestKeys[0].Address()}, + Addrs: []ids.ShortID{key.Address()}, }, }, } @@ -1951,21 +1961,21 @@ func TestNewExportTxMulticoin(t *testing.T) { Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ - testutils.TestKeys[0].Address().Bytes(), + key.Address().Bytes(), }, }, { Key: inputID2[:], Value: utxoBytes2, Traits: [][]byte{ - testutils.TestKeys[0].Address().Bytes(), + key.Address().Bytes(), }, }, }}}); err != nil { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, ethAddress, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -2007,20 +2017,20 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + tx, err = atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } exportTx := tx.UnsignedAtomicTx - backend := &VerifierBackend{ - Ctx: vm.ctx, - Fx: &vm.fx, - Rules: vm.CurrentRules(), - 
ChainConfig: vm.Ethereum().BlockChain().Config(), - Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm, - SecpCache: &vm.secpCache, + backend := &verifierBackend{ + ctx: vm.ctx, + fx: &vm.fx, + rules: vm.CurrentRules(), + chainConfig: vm.Ethereum().BlockChain().Config(), + bootstrapped: vm.IsBootstrapped(), + blockFetcher: vm, + secpCache: &vm.secpCache, } if err := exportTx.Visit(&semanticVerifier{ @@ -2054,12 +2064,11 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - addr := testutils.TestKeys[0].EthAddress() - if stdb.GetBalance(addr).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { - t.Fatalf("address balance %s equal %s not %s", addr.String(), stdb.GetBalance(addr), new(big.Int).SetUint64(test.bal*units.Avax)) + if stdb.GetBalance(ethAddress).Cmp(uint256.NewInt(test.bal*units.Avax)) != 0 { + t.Fatalf("address balance %s equal %s not %s", ethAddress.String(), stdb.GetBalance(ethAddress), new(big.Int).SetUint64(test.bal*units.Avax)) } - if stdb.GetBalanceMultiCoin(addr, common.BytesToHash(tid[:])).Cmp(new(big.Int).SetUint64(test.balmc)) != 0 { - t.Fatalf("address balance multicoin %s equal %s not %s", addr.String(), stdb.GetBalanceMultiCoin(addr, common.BytesToHash(tid[:])), new(big.Int).SetUint64(test.balmc)) + if stdb.GetBalanceMultiCoin(ethAddress, common.BytesToHash(tid[:])).Cmp(new(big.Int).SetUint64(test.balmc)) != 0 { + t.Fatalf("address balance multicoin %s equal %s not %s", ethAddress.String(), stdb.GetBalanceMultiCoin(ethAddress, common.BytesToHash(tid[:])), new(big.Int).SetUint64(test.balmc)) } }) } diff --git a/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go b/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go index 636a6ccca8..88a670b2c4 100644 --- a/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go +++ b/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go @@ -20,6 +20,7 @@ import ( commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/coreth/plugin/evm/atomic" + 
"github.com/ava-labs/coreth/plugin/evm/testutils" ) // show that a txID discovered from gossip is requested to the same node only if @@ -27,7 +28,7 @@ import ( func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { assert := assert.New(t) - _, vm, _, sharedMemory, sender := GenesisAtomicVM(t, true, "", "", "") + _, vm, _, sharedMemory, sender := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { assert.NoError(vm.Shutdown(context.Background())) }() @@ -118,7 +119,7 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { assert := assert.New(t) - _, vm, _, sharedMemory, sender := GenesisAtomicVM(t, true, "", "", "") + _, vm, _, sharedMemory, sender := GenesisAtomicVM(t, true, testutils.GenesisJSONApricotPhase0, "", "") defer func() { assert.NoError(vm.Shutdown(context.Background())) }() diff --git a/plugin/evm/atomic/vm/import_tx_test.go b/plugin/evm/atomic/vm/import_tx_test.go index 31db861f5f..8a3bc2a51d 100644 --- a/plugin/evm/atomic/vm/import_tx_test.go +++ b/plugin/evm/atomic/vm/import_tx_test.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" + "github.com/stretchr/testify/require" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" @@ -427,16 +428,19 @@ func TestImportTxVerify(t *testing.T) { func TestNewImportTx(t *testing.T) { importAmount := uint64(5000000) + key, err := secp256k1.NewPrivateKey() + require.NoError(t, err) + ethAddress := key.EthAddress() // createNewImportAVAXTx adds a UTXO to shared memory and then constructs a new import transaction // and checks that it has the correct fee for the base fee that has been used createNewImportAVAXTx := func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, 
vm.ctx.AVAXAssetID, importAmount, testutils.TestShortIDAddrs[0]) + _, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, importAmount, key.Address()) if err != nil { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, []*secp256k1.PrivateKey{testutils.TestKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, ethAddress, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -487,7 +491,7 @@ func TestNewImportTx(t *testing.T) { // Ensure that the UTXO has been removed from shared memory within Accept addrSet := set.Set[ids.ShortID]{} - addrSet.Add(testutils.TestShortIDAddrs[0]) + addrSet.Add(key.Address()) utxos, _, _, err := vm.GetAtomicUTXOs(vm.ctx.XChainID, addrSet, ids.ShortEmpty, ids.Empty, -1) if err != nil { t.Fatal(err) @@ -504,9 +508,8 @@ func TestNewImportTx(t *testing.T) { expectedRemainingBalance := new(uint256.Int).Mul( uint256.NewInt(importAmount-actualAVAXBurned), atomic.X2CRate) - addr := testutils.TestKeys[0].EthAddress() - if actualBalance := sdb.GetBalance(addr); actualBalance.Cmp(expectedRemainingBalance) != 0 { - t.Fatalf("address remaining balance %s equal %s not %s", addr.String(), actualBalance, expectedRemainingBalance) + if actualBalance := sdb.GetBalance(ethAddress); actualBalance.Cmp(expectedRemainingBalance) != 0 { + t.Fatalf("address remaining balance %s equal %s not %s", ethAddress.String(), actualBalance, expectedRemainingBalance) } } tests2 := map[string]atomicTxTest{ @@ -874,6 +877,9 @@ func TestImportTxGasCost(t *testing.T) { } func TestImportTxSemanticVerify(t *testing.T) { + key, err := secp256k1.NewPrivateKey() + require.NoError(t, err) + ethAddress := key.PublicKey().EthAddress() tests := map[string]atomicTxTest{ "UTXO not present during bootstrapping": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { @@ -892,16 +898,17 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, 
Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx }, + genesisJSON: testutils.GenesisJSONApricotPhase0, bootstrapping: true, }, "UTXO not present": { @@ -921,16 +928,17 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx }, + genesisJSON: testutils.GenesisJSONApricotPhase0, semanticVerifyErr: "failed to fetch import UTXOs from", }, "garbage UTXO": { @@ -942,7 +950,7 @@ func TestImportTxSemanticVerify(t *testing.T) { Key: inputID[:], Value: []byte("hey there"), Traits: [][]byte{ - testutils.TestShortIDAddrs[0].Bytes(), + key.Address().Bytes(), }, }}}}); err != nil { t.Fatal(err) @@ -961,23 +969,24 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx }, + genesisJSON: testutils.GenesisJSONApricotPhase0, semanticVerifyErr: "failed to unmarshal UTXO", }, "UTXO AssetID mismatch": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() expectedAssetID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, expectedAssetID, 
1, testutils.TestShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, expectedAssetID, 1, key.Address()) if err != nil { t.Fatal(err) } @@ -995,22 +1004,23 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx }, + genesisJSON: testutils.GenesisJSONApricotPhase0, semanticVerifyErr: errAssetIDMismatch.Error(), }, "insufficient AVAX funds": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, testutils.TestShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, key.Address()) if err != nil { t.Fatal(err) } @@ -1028,16 +1038,17 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 2, // Produce more output than is consumed by the transaction AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx }, + genesisJSON: testutils.GenesisJSONApricotPhase0, semanticVerifyErr: "import tx flow check failed due to", }, "insufficient non-AVAX funds": { @@ -1062,16 +1073,17 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 2, // Produce more output than is consumed by the transaction AssetID: assetID, }}, }} - if err := tx.Sign(atomic.Codec, 
[][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx }, + genesisJSON: testutils.GenesisJSONApricotPhase0, semanticVerifyErr: "import tx flow check failed due to", }, "no signatures": { @@ -1095,7 +1107,7 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, @@ -1105,6 +1117,7 @@ func TestImportTxSemanticVerify(t *testing.T) { } return tx }, + genesisJSON: testutils.GenesisJSONApricotPhase0, semanticVerifyErr: "import tx contained mismatched number of inputs/credentials", }, "incorrect signature": { @@ -1128,7 +1141,7 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, @@ -1139,6 +1152,7 @@ func TestImportTxSemanticVerify(t *testing.T) { } return tx }, + genesisJSON: testutils.GenesisJSONApricotPhase0, semanticVerifyErr: "import tx transfer failed verification", }, "non-unique EVM Outputs": { @@ -1163,12 +1177,12 @@ func TestImportTxSemanticVerify(t *testing.T) { }}, Outs: []atomic.EVMOutput{ { - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: vm.ctx.AVAXAssetID, }, { - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: vm.ctx.AVAXAssetID, }, @@ -1193,11 +1207,14 @@ func TestImportTxSemanticVerify(t *testing.T) { func TestImportTxEVMStateTransfer(t *testing.T) { assetID := ids.GenerateTestID() + key, err := secp256k1.NewPrivateKey() + require.NoError(t, err) + ethAddress := key.EthAddress() tests := map[string]atomicTxTest{ "AVAX UTXO": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, 
vm.ctx.AVAXAssetID, 1, testutils.TestShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, vm.ctx.AVAXAssetID, 1, key.Address()) if err != nil { t.Fatal(err) } @@ -1215,12 +1232,12 @@ func TestImportTxEVMStateTransfer(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx @@ -1233,16 +1250,17 @@ func TestImportTxEVMStateTransfer(t *testing.T) { t.Fatal(err) } - avaxBalance := sdb.GetBalance(testutils.TestEthAddrs[0]) + avaxBalance := sdb.GetBalance(ethAddress) if avaxBalance.Cmp(atomic.X2CRate) != 0 { t.Fatalf("Expected AVAX balance to be %d, found balance: %d", *atomic.X2CRate, avaxBalance) } }, + genesisJSON: testutils.GenesisJSONApricotPhase0, }, "non-AVAX UTXO": { setup: func(t *testing.T, vm *VM, sharedMemory *avalancheatomic.Memory) *atomic.Tx { txID := ids.GenerateTestID() - utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, testutils.TestShortIDAddrs[0]) + utxo, err := addUTXO(sharedMemory, vm.ctx, txID, 0, assetID, 1, key.Address()) if err != nil { t.Fatal(err) } @@ -1260,12 +1278,12 @@ func TestImportTxEVMStateTransfer(t *testing.T) { }, }}, Outs: []atomic.EVMOutput{{ - Address: testutils.TestEthAddrs[0], + Address: ethAddress, Amount: 1, AssetID: assetID, }}, }} - if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{testutils.TestKeys[0]}}); err != nil { + if err := tx.Sign(atomic.Codec, [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx @@ -1277,15 +1295,16 @@ func TestImportTxEVMStateTransfer(t *testing.T) { t.Fatal(err) } - assetBalance := sdb.GetBalanceMultiCoin(testutils.TestEthAddrs[0], common.Hash(assetID)) + assetBalance := sdb.GetBalanceMultiCoin(ethAddress, common.Hash(assetID)) if 
assetBalance.Cmp(common.Big1) != 0 { t.Fatalf("Expected asset balance to be %d, found balance: %d", common.Big1, assetBalance) } - avaxBalance := sdb.GetBalance(testutils.TestEthAddrs[0]) + avaxBalance := sdb.GetBalance(ethAddress) if avaxBalance.Cmp(common.U2560) != 0 { t.Fatalf("Expected AVAX balance to be 0, found balance: %d", avaxBalance) } }, + genesisJSON: testutils.GenesisJSONApricotPhase0, }, } diff --git a/plugin/evm/atomic/vm/syncervm_test.go b/plugin/evm/atomic/vm/syncervm_test.go index ec03ecbd11..d0e30d43ca 100644 --- a/plugin/evm/atomic/vm/syncervm_test.go +++ b/plugin/evm/atomic/vm/syncervm_test.go @@ -6,6 +6,8 @@ package vm import ( "testing" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" @@ -74,19 +76,35 @@ func TestAtomicSyncerVM(t *testing.T) { return vm, vm.createConsensusCallbacks() } - afterInit := func(t *testing.T, params testutils.SyncTestParams, vm extension.InnerVM) { - atomicVM, ok := vm.(*VM) + afterInit := func(t *testing.T, params testutils.SyncTestParams, vmSetup testutils.VMSetup, isServer bool) { + atomicVM, ok := vmSetup.VM.(*VM) require.True(t, ok) - serverAtomicTrie := atomicVM.atomicBackend.AtomicTrie() - require.NoError(t, serverAtomicTrie.Commit(params.SyncableInterval, serverAtomicTrie.LastAcceptedRoot())) - require.NoError(t, atomicVM.VersionDB().Commit()) + + alloc := map[ids.ShortID]uint64{ + testutils.TestShortIDAddrs[0]: importAmount, + } + + for addr, avaxAmount := range alloc { + txID, err := ids.ToID(hashing.ComputeHash256(addr.Bytes())) + if err != nil { + t.Fatalf("Failed to generate txID from addr: %s", err) + } + if _, err := addUTXO(vmSetup.AtomicMemory, vmSetup.SnowCtx, txID, 0, vmSetup.SnowCtx.AVAXAssetID, avaxAmount, addr); err != nil { + t.Fatalf("Failed to add UTXO to shared memory: %s", err) + } + } + if isServer { + serverAtomicTrie := 
atomicVM.atomicBackend.AtomicTrie() + require.NoError(t, serverAtomicTrie.Commit(params.SyncableInterval, serverAtomicTrie.LastAcceptedRoot())) + require.NoError(t, atomicVM.VersionDB().Commit()) + } } testSetup := &testutils.SyncTestSetup{ NewVM: newVMFn, GenFn: genFn, AfterInit: afterInit, - ExtraSyncerVMTest: func(t *testing.T, syncerVMSetup testutils.SyncerVMSetup) { + ExtraSyncerVMTest: func(t *testing.T, syncerVMSetup testutils.VMSetup) { // check atomic memory was synced properly syncerVM := syncerVMSetup.VM atomicVM, ok := syncerVM.(*VM) diff --git a/plugin/evm/atomic/vm/tx_semantic_verifier.go b/plugin/evm/atomic/vm/tx_semantic_verifier.go index 9ea68c937f..196190fc8c 100644 --- a/plugin/evm/atomic/vm/tx_semantic_verifier.go +++ b/plugin/evm/atomic/vm/tx_semantic_verifier.go @@ -32,18 +32,18 @@ type BlockFetcher interface { GetVMBlock(context.Context, ids.ID) (extension.VMBlock, error) } -type VerifierBackend struct { - Ctx *snow.Context - Fx fx.Fx - Rules params.Rules - ChainConfig *params.ChainConfig - Bootstrapped bool - BlockFetcher BlockFetcher - SecpCache *secp256k1.RecoverCache +type verifierBackend struct { + ctx *snow.Context + fx fx.Fx + rules params.Rules + chainConfig *params.ChainConfig + bootstrapped bool + blockFetcher BlockFetcher + secpCache *secp256k1.RecoverCache } type semanticVerifier struct { - backend *VerifierBackend + backend *verifierBackend atx *atomic.Tx parent extension.VMBlock baseFee *big.Int @@ -52,8 +52,8 @@ type semanticVerifier struct { // SemanticVerify this transaction is valid. func (s *semanticVerifier) ImportTx(utx *atomic.UnsignedImportTx) error { backend := s.backend - ctx := backend.Ctx - rules := backend.Rules + ctx := backend.ctx + rules := backend.rules stx := s.atx if err := utx.Verify(ctx, rules); err != nil { return err @@ -93,7 +93,7 @@ func (s *semanticVerifier) ImportTx(utx *atomic.UnsignedImportTx) error { return fmt.Errorf("import tx contained mismatched number of inputs/credentials (%d vs. 
%d)", len(utx.ImportedInputs), len(stx.Creds)) } - if !backend.Bootstrapped { + if !backend.bootstrapped { // Allow for force committing during bootstrapping return nil } @@ -125,7 +125,7 @@ func (s *semanticVerifier) ImportTx(utx *atomic.UnsignedImportTx) error { return errAssetIDMismatch } - if err := backend.Fx.VerifyTransfer(utx, in.In, cred, utxo.Out); err != nil { + if err := backend.fx.VerifyTransfer(utx, in.In, cred, utxo.Out); err != nil { return fmt.Errorf("import tx transfer failed verification: %w", err) } } @@ -137,11 +137,11 @@ func (s *semanticVerifier) ImportTx(utx *atomic.UnsignedImportTx) error { // or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is // accepted, then nil will be returned immediately. // If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned. -func conflicts(backend *VerifierBackend, inputs set.Set[ids.ID], ancestor extension.VMBlock) error { - lastAcceptedBlock := backend.BlockFetcher.LastAcceptedVMBlock() +func conflicts(backend *verifierBackend, inputs set.Set[ids.ID], ancestor extension.VMBlock) error { + lastAcceptedBlock := backend.blockFetcher.LastAcceptedVMBlock() lastAcceptedHeight := lastAcceptedBlock.Height() for ancestor.Height() > lastAcceptedHeight { - atomicTxs, err := extractAtomicTxsFromBlock(ancestor, backend.ChainConfig) + atomicTxs, err := extractAtomicTxsFromBlock(ancestor, backend.chainConfig) if err != nil { return err } @@ -162,7 +162,7 @@ func conflicts(backend *VerifierBackend, inputs set.Set[ids.ID], ancestor extens // will be missing. // If the ancestor is processing, then the block may have // been verified. 
- nextAncestor, err := backend.BlockFetcher.GetVMBlock(context.TODO(), nextAncestorID) + nextAncestor, err := backend.blockFetcher.GetVMBlock(context.TODO(), nextAncestorID) if err != nil { return errRejectedParent } @@ -174,8 +174,8 @@ func conflicts(backend *VerifierBackend, inputs set.Set[ids.ID], ancestor extens // SemanticVerify this transaction is valid. func (s *semanticVerifier) ExportTx(utx *atomic.UnsignedExportTx) error { - ctx := s.backend.Ctx - rules := s.backend.Rules + ctx := s.backend.ctx + rules := s.backend.rules stx := s.atx if err := utx.Verify(ctx, rules); err != nil { return err @@ -226,7 +226,7 @@ func (s *semanticVerifier) ExportTx(utx *atomic.UnsignedExportTx) error { if len(cred.Sigs) != 1 { return fmt.Errorf("expected one signature for EVM Input Credential, but found: %d", len(cred.Sigs)) } - pubKey, err := s.backend.SecpCache.RecoverPublicKey(utx.Bytes(), cred.Sigs[0][:]) + pubKey, err := s.backend.secpCache.RecoverPublicKey(utx.Bytes(), cred.Sigs[0][:]) if err != nil { return err } diff --git a/plugin/evm/atomic/vm/tx_test.go b/plugin/evm/atomic/vm/tx_test.go index efffc81447..c5fdcb832e 100644 --- a/plugin/evm/atomic/vm/tx_test.go +++ b/plugin/evm/atomic/vm/tx_test.go @@ -92,7 +92,6 @@ type atomicTxTest struct { // Whether or not the VM should be considered to still be bootstrapping bootstrapping bool // genesisJSON to use for the VM genesis (also defines the rule set that will be used in verification) - // If this is left empty, [genesisJSONApricotPhase0], will be used genesisJSON string // passed directly into GenesisVM @@ -113,14 +112,14 @@ func executeTxTest(t *testing.T, test atomicTxTest) { } lastAcceptedBlock := vm.LastAcceptedVMBlock() - backend := &VerifierBackend{ - Ctx: vm.ctx, - Fx: &vm.fx, - Rules: rules, - ChainConfig: vm.Ethereum().BlockChain().Config(), - Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm, - SecpCache: &vm.secpCache, + backend := &verifierBackend{ + ctx: vm.ctx, + fx: &vm.fx, + rules: rules, + 
chainConfig: vm.Ethereum().BlockChain().Config(), + bootstrapped: vm.IsBootstrapped(), + blockFetcher: vm, + secpCache: &vm.secpCache, } if err := tx.UnsignedAtomicTx.Visit( &semanticVerifier{ @@ -160,12 +159,12 @@ func executeTxTest(t *testing.T, test atomicTxTest) { } if test.bootstrapping { - // If this test simulates processing txs during bootstrapping (where some verification is skipped), - // initialize the block building goroutines normally initialized in SetState(snow.NormalOps). - // This ensures that the VM can build a block correctly during the test. - if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } + // If the test is in bootstrapping mode, we return early as we don't expect the transaction to be accepted + return + } + + if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { + t.Fatal(err) } if err := vm.mempool.AddLocalTx(tx); err != nil { diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index ad345f1521..77c35293f2 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -19,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - avalancheutils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" @@ -74,8 +73,6 @@ type VM struct { fx secp256k1fx.Fx ctx *snow.Context - bootstrapped *avalancheutils.Atomic[bool] - // [atomicTxRepository] maintains two indexes on accepted atomic txs. 
// - txID to accepted atomic tx // - block height to list of atomic txs accepted on block at that height @@ -126,7 +123,7 @@ func (vm *VM) Initialize( fujiExtDataHashes = nil mainnetExtDataHashes = nil - codec, err := message.NewCodec(atomicsync.AtomicSyncSummary{}) + networkCodec, err := message.NewCodec(atomicsync.AtomicSyncSummary{}) if err != nil { return fmt.Errorf("failed to create codec manager: %w", err) } @@ -144,20 +141,21 @@ func (vm *VM) Initialize( vm.mempool = &txpool.Mempool{} extensionConfig := &extension.Config{ - NetworkCodec: codec, + NetworkCodec: networkCodec, ConsensusCallbacks: vm.createConsensusCallbacks(), BlockExtension: blockExtension, SyncableParser: atomicsync.NewAtomicSyncSummaryParser(), SyncExtender: syncExtender, SyncSummaryProvider: syncProvider, - SyncLeafType: atomicLeafTypeConfig, + ExtraSyncLeafConfig: atomicLeafTypeConfig, ExtraMempool: vm.mempool, + Clock: &vm.clock, } if err := innerVM.SetExtensionConfig(extensionConfig); err != nil { return fmt.Errorf("failed to set extension config: %w", err) } - innerVM.Initialize( + if err := innerVM.Initialize( ctx, chainCtx, db, @@ -167,7 +165,9 @@ func (vm *VM) Initialize( toEngine, fxs, appSender, - ) + ); err != nil { + return fmt.Errorf("failed to initialize inner VM: %w", err) + } err = vm.mempool.Initialize(chainCtx, innerVM.MetricRegistry(), defaultMempoolSize, vm.verifyTxAtTip) if err != nil { @@ -190,7 +190,7 @@ func (vm *VM) Initialize( if err != nil { return fmt.Errorf("failed to read last accepted block: %w", err) } - vm.atomicTxRepository, err = atomicstate.NewAtomicTxRepository(innerVM.VersionDB(), codec, lastAcceptedHeight) + vm.atomicTxRepository, err = atomicstate.NewAtomicTxRepository(innerVM.VersionDB(), atomic.Codec, lastAcceptedHeight) if err != nil { return fmt.Errorf("failed to create atomic repository: %w", err) } @@ -206,7 +206,7 @@ func (vm *VM) Initialize( // Atomic backend is available now, we can initialize structs that depend on it 
syncProvider.Initialize(vm.atomicBackend.AtomicTrie()) syncExtender.Initialize(vm.atomicBackend, vm.atomicBackend.AtomicTrie(), innerVM.Config().StateSyncRequestSize) - leafHandler.Initialize(vm.atomicBackend.AtomicTrie().TrieDB(), atomicstate.AtomicTrieKeyLength, codec) + leafHandler.Initialize(vm.atomicBackend.AtomicTrie().TrieDB(), atomicstate.AtomicTrieKeyLength, networkCodec) vm.secpCache = secp256k1.RecoverCache{ LRU: cache.LRU[ids.ID, *secp256k1.PublicKey]{ Size: secpCacheSize, @@ -226,8 +226,6 @@ func (vm *VM) Initialize( func (vm *VM) SetState(ctx context.Context, state snow.State) error { switch state { - case snow.StateSyncing: - vm.bootstrapped.Set(false) case snow.Bootstrapping: if err := vm.onBootstrapStarted(); err != nil { return err @@ -236,26 +234,19 @@ func (vm *VM) SetState(ctx context.Context, state snow.State) error { if err := vm.onNormalOperationsStarted(); err != nil { return err } - default: - return snow.ErrUnknownState } return vm.InnerVM.SetState(ctx, state) } -// onBootstrapStarted marks this VM as bootstrapping func (vm *VM) onBootstrapStarted() error { - vm.bootstrapped.Set(false) - return vm.fx.Bootstrapping() } -// onNormalOperationsStarted marks this VM as bootstrapped func (vm *VM) onNormalOperationsStarted() error { - if vm.bootstrapped.Get() { + if vm.IsBootstrapped() { return nil } - vm.bootstrapped.Set(true) if err := vm.fx.Bootstrapped(); err != nil { return err } @@ -419,14 +410,14 @@ func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, if err != nil { return fmt.Errorf("failed to get parent block: %w", err) } - atomicBackend := &VerifierBackend{ - Ctx: vm.ctx, - Fx: &vm.fx, - Rules: rules, - ChainConfig: vm.InnerVM.Ethereum().BlockChain().Config(), - Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm.InnerVM, - SecpCache: &vm.secpCache, + atomicBackend := &verifierBackend{ + ctx: vm.ctx, + fx: &vm.fx, + rules: rules, + chainConfig: vm.InnerVM.Ethereum().BlockChain().Config(), + bootstrapped: 
vm.IsBootstrapped(), + blockFetcher: vm.InnerVM, + secpCache: &vm.secpCache, } if err := tx.UnsignedAtomicTx.Visit(&semanticVerifier{ backend: atomicBackend, @@ -460,14 +451,14 @@ func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.I // Ensure each tx in [txs] doesn't conflict with any other atomic tx in // a processing ancestor block. inputs := set.Set[ids.ID]{} - atomicBackend := &VerifierBackend{ - Ctx: vm.ctx, - Fx: &vm.fx, - Rules: rules, - ChainConfig: vm.InnerVM.Ethereum().BlockChain().Config(), - Bootstrapped: vm.bootstrapped.Get(), - BlockFetcher: vm, - SecpCache: &vm.secpCache, + atomicBackend := &verifierBackend{ + ctx: vm.ctx, + fx: &vm.fx, + rules: rules, + chainConfig: vm.InnerVM.Ethereum().BlockChain().Config(), + bootstrapped: vm.IsBootstrapped(), + blockFetcher: vm, + secpCache: &vm.secpCache, } for _, atomicTx := range txs { @@ -476,8 +467,9 @@ func (vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.I backend: atomicBackend, atx: atomicTx, parent: ancestor, + baseFee: baseFee, }); err != nil { - return fmt.Errorf("invalid block due to failed semanatic verify: %w at height %d", err, height) + return fmt.Errorf("invalid block due to failed semantic verify: %w at height %d", err, height) } txInputs := utx.InputUTXOs() if inputs.Overlaps(txInputs) { @@ -658,12 +650,12 @@ func (vm *VM) onFinalizeAndAssemble(header *types.Header, state *state.StateDB, return vm.postBatchOnFinalizeAndAssemble(header, state, txs) } -func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big.Int, *big.Int, error) { +func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB, chainConfig *params.ChainConfig) (*big.Int, *big.Int, error) { var ( batchContribution *big.Int = big.NewInt(0) batchGasUsed *big.Int = big.NewInt(0) header = block.Header() - rules = vm.InnerVM.Ethereum().BlockChain().Config().Rules(header.Number, header.Time) + rules = chainConfig.Rules(header.Number, header.Time) 
) txs, err := atomic.ExtractAtomicTxs(block.ExtData(), rules.IsApricotPhase5, atomic.Codec) diff --git a/plugin/evm/atomic/vm/vm_test.go b/plugin/evm/atomic/vm/vm_test.go index 54987cef0b..46411d90ed 100644 --- a/plugin/evm/atomic/vm/vm_test.go +++ b/plugin/evm/atomic/vm/vm_test.go @@ -591,8 +591,8 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { } }() - newHeadChan := make(chan core.ChainHeadEvent, 1) - vm.Ethereum().BlockChain().SubscribeChainHeadEvent(newHeadChan) + newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) + vm.Ethereum().TxPool().SubscribeNewReorgEvent(newTxPoolHeadChan) importTx0A, err := vm.newImportTx(vm.ctx.XChainID, key.Address, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key0}) if err != nil { @@ -623,8 +623,8 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { t.Fatal(err) } - newHead := <-newHeadChan - if newHead.Block.Hash() != common.Hash(blk0.ID()) { + newHead := <-newTxPoolHeadChan + if newHead.Head.Hash() != common.Hash(blk0.ID()) { t.Fatalf("Expected new block to match") } @@ -1178,7 +1178,7 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { } // Hack: test [onExtraStateChange] directly to ensure it catches the atomic gas limit error correctly. 
- if _, _, err := vm2.onExtraStateChange(ethBlk2, state); err == nil || !strings.Contains(err.Error(), "exceeds atomic gas limit") { + if _, _, err := vm2.onExtraStateChange(ethBlk2, state, vm2.Ethereum().BlockChain().Config()); err == nil || !strings.Contains(err.Error(), "exceeds atomic gas limit") { t.Fatalf("Expected block to fail verification due to exceeded atomic gas limit, but found error: %v", err) } } diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 4d9be76143..9140d6da44 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -72,9 +72,13 @@ func (b *Block) Accept(context.Context) error { return fmt.Errorf("could not create commit batch processing block[%s]: %w", b.ID(), err) } - // Apply any changes atomically with other pending changes to - // the vm's versionDB. - return b.blockManager.blockExtension.Accept(b, vdbBatch) + if b.blockManager.blockExtension != nil { + // Apply any changes atomically with other pending changes to + // the vm's versionDB. + return b.blockManager.blockExtension.OnAccept(b, vdbBatch) + } + + return vdbBatch.Write() } // handlePrecompileAccept calls Accept on any logs generated with an active precompile address that implements @@ -122,7 +126,10 @@ func (b *Block) Reject(context.Context) error { return fmt.Errorf("chain could not reject %s: %w", b.ID(), err) } - return b.blockManager.blockExtension.Reject(b) + if b.blockManager.blockExtension != nil { + return b.blockManager.blockExtension.OnReject(b) + } + return nil } // Parent implements the snowman.Block interface @@ -160,7 +167,7 @@ func (b *Block) syntacticVerify() error { // Verify implements the snowman.Block interface func (b *Block) Verify(context.Context) error { - return b.verify(&precompileconfig.PredicateContext{ + return b.semanticVerify(&precompileconfig.PredicateContext{ SnowCtx: b.blockManager.vm.ctx, ProposerVMBlockCtx: nil, }, true) @@ -191,7 +198,7 @@ func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) { // 
VerifyWithContext implements the block.WithVerifyContext interface func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) error { - return b.verify(&precompileconfig.PredicateContext{ + return b.semanticVerify(&precompileconfig.PredicateContext{ SnowCtx: b.blockManager.vm.ctx, ProposerVMBlockCtx: proposerVMBlockCtx, }, true) @@ -200,7 +207,7 @@ func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block // Verify the block is valid. // Enforces that the predicates are valid within [predicateContext]. // Writes the block details to disk and the state to the trie manager iff writes=true. -func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writes bool) error { +func (b *Block) semanticVerify(predicateContext *precompileconfig.PredicateContext, writes bool) error { vm := b.blockManager.vm if predicateContext.ProposerVMBlockCtx != nil { log.Debug("Verifying block with context", "block", b.ID(), "height", b.Height()) @@ -221,6 +228,12 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ } } + if b.blockManager.blockExtension != nil { + if err := b.blockManager.blockExtension.SemanticVerify(b); err != nil { + return fmt.Errorf("failed to verify block extension: %w", err) + } + } + // The engine may call VerifyWithContext multiple times on the same block with different contexts. // Since the engine will only call Accept/Reject once, we should only call InsertBlockManual once. 
// Additionally, if a block is already in processing, then it has already passed verification and @@ -231,8 +244,8 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ } err := vm.blockChain.InsertBlockManual(b.ethBlock, writes) - if err != nil || !writes { - b.blockManager.blockExtension.Cleanup(b) + if b.blockManager.blockExtension != nil && (err != nil || !writes) { + b.blockManager.blockExtension.OnCleanup(b) } return err } diff --git a/plugin/evm/block_builder.go b/plugin/evm/block_builder.go index ad28541b42..337137e36a 100644 --- a/plugin/evm/block_builder.go +++ b/plugin/evm/block_builder.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/txpool" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/holiman/uint256" "github.com/ava-labs/avalanchego/snow" @@ -24,17 +25,12 @@ const ( minBlockBuildingRetryDelay = 500 * time.Millisecond ) -type BuilderMempool interface { - Len() int - SubscribePendingTxs() <-chan struct{} -} - type blockBuilder struct { ctx *snow.Context chainConfig *params.ChainConfig txPool *txpool.TxPool - extraMempool BuilderMempool + extraMempool extension.BuilderMempool shutdownChan <-chan struct{} shutdownWg *sync.WaitGroup @@ -59,7 +55,7 @@ type blockBuilder struct { // NewBlockBuilder creates a new block builder. extraMempool is an optional mempool (can be nil) that // can be used to add transactions to the block builder, in addition to the txPool. 
-func (vm *VM) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message, extraMempool BuilderMempool) *blockBuilder { +func (vm *VM) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message, extraMempool extension.BuilderMempool) *blockBuilder { b := &blockBuilder{ ctx: vm.ctx, chainConfig: vm.chainConfig, @@ -111,7 +107,7 @@ func (b *blockBuilder) needToBuild() bool { size := b.txPool.PendingSize(txpool.PendingFilter{ MinTip: uint256.MustFromBig(b.txPool.GasTip()), }) - return size > 0 || (b.extraMempool != nil && b.extraMempool.Len() > 0) + return size > 0 || (b.extraMempool != nil && b.extraMempool.PendingLen() > 0) } // markBuilding adds a PendingTxs message to the toEngine channel. diff --git a/plugin/evm/block_manager.go b/plugin/evm/block_manager.go index 95a09d1915..c1610ea8fb 100644 --- a/plugin/evm/block_manager.go +++ b/plugin/evm/block_manager.go @@ -218,5 +218,8 @@ func (bm *blockManager) SyntacticVerify(b *Block, rules params.Rules) error { } } - return bm.blockExtension.SyntacticVerify(b, rules) + if bm.blockExtension != nil { + return bm.blockExtension.SyntacticVerify(b, rules) + } + return nil } diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go index e8e8b910ba..49b11728e2 100644 --- a/plugin/evm/extension/config.go +++ b/plugin/evm/extension/config.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow/consensus/snowman" avalanchecommon "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ethereum/go-ethereum/common" @@ -26,26 +27,45 @@ import ( ) type ExtensibleVM interface { - SetLastAcceptedBlock(lastAcceptedBlock snowman.Block) error - GetVMBlock(context.Context, ids.ID) (VMBlock, error) - NewVMBlock(*types.Block) (VMBlock, error) - LastAcceptedVMBlock() VMBlock + // SetExtensionConfig sets the configuration for the VM 
extension + // Should be called before any other method and only once + SetExtensionConfig(config *Config) error + // NewClient returns a client to send messages with for the given protocol NewClient(protocol uint64, options ...p2p.ClientOption) *p2p.Client // AddHandler registers a server handler for an application protocol AddHandler(protocol uint64, handler p2p.Handler) error - LastAcceptedBlockInternal() snowman.Block + // SetLastAcceptedBlock sets the last accepted block + SetLastAcceptedBlock(lastAcceptedBlock snowman.Block) error + // GetVMBlock returns the VMBlock for the given ID or an error if the block is not found + GetVMBlock(context.Context, ids.ID) (VMBlock, error) + // NewVMBlock returns a new VMBlock for the given Eth block + NewVMBlock(*types.Block) (VMBlock, error) + // LastAcceptedVMBlock returns the last accepted VM block + LastAcceptedVMBlock() VMBlock + // IsBootstrapped returns true if the VM is bootstrapped + IsBootstrapped() bool + + // Validators returns the validators for the network Validators() *p2p.Validators - SetExtensionConfig(config *Config) error + // Ethereum returns the Ethereum client Ethereum() *eth.Ethereum - Config() *config.Config + // Config returns the configuration for the VM + Config() config.Config + // MetricRegistry returns the metric registry for the VM MetricRegistry() *prometheus.Registry + // ReadLastAccepted returns the last accepted block hash and height ReadLastAccepted() (common.Hash, uint64, error) + // CurrentRules returns the current rules for the VM CurrentRules() params.Rules + // VersionDB returns the versioned database for the VM VersionDB() *versiondb.Database + // SyncerClient returns the syncer client for the VM SyncerClient() sync.Client } +// InnerVM is the interface that must be implemented by the VM +// that's being wrapped by the extension type InnerVM interface { ExtensibleVM avalanchecommon.VM @@ -54,36 +74,80 @@ type InnerVM interface { block.StateSyncableVM } +// VMBlock is a block that 
can be used by the extension type VMBlock interface { snowman.Block GetEthBlock() *types.Block } +// BlockManagerExtension is an extension for the block manager +// to handle BlockManager events type BlockManagerExtension interface { + // SemanticVerify verifies the block semantically + // it can be implemented to extend inner block verification + SemanticVerify(b VMBlock) error + // SyntacticVerify verifies the block syntactically + // it can be implemented to extend inner block verification SyntacticVerify(b VMBlock, rules params.Rules) error - Accept(b VMBlock, acceptedBatch database.Batch) error - Reject(b VMBlock) error - Cleanup(b VMBlock) + // OnAccept is called when a block is accepted by the block manager + OnAccept(b VMBlock, acceptedBatch database.Batch) error + // OnReject is called when a block is rejected by the block manager + OnReject(b VMBlock) error + // OnCleanup is called when a block cleanup is requested from the block manager + OnCleanup(b VMBlock) } +// BuilderMempool is a mempool that's used in the block builder type BuilderMempool interface { - Len() int + // PendingLen returns the number of pending transactions + // that are waiting to be included in a block + PendingLen() int + // SubscribePendingTxs returns a channel that's signaled when there are pending transactions SubscribePendingTxs() <-chan struct{} } +// LeafRequestConfig is the configuration to handle leaf requests +// in the network and syncer type LeafRequestConfig struct { - LeafType message.NodeType + // LeafType is the type of the leaf node + LeafType message.NodeType + // MetricName is the name of the metric to use for the leaf request MetricName string - Handler handlers.LeafRequestHandler + // Handler is the handler to use for the leaf request + Handler handlers.LeafRequestHandler } +// Config is the configuration for the VM extension type Config struct { - NetworkCodec codec.Manager - ConsensusCallbacks dummy.ConsensusCallbacks + // NetworkCodec is the codec manager to 
use
+ // for encoding and decoding network messages.
+ // It's required and should be non-nil
+ NetworkCodec codec.Manager
+ // ConsensusCallbacks is the consensus callbacks to use
+ // for the VM to be used in consensus engine.
+ // It's required and should be non-nil
+ ConsensusCallbacks dummy.ConsensusCallbacks
+ // SyncSummaryProvider is the sync summary provider to use
+ // for the VM to be used in syncer.
+ // It's required and should be non-nil
 SyncSummaryProvider sync.SummaryProvider
- SyncExtender sync.Extender
- SyncableParser message.SyncableParser
- BlockExtension BlockManagerExtension
- SyncLeafType *LeafRequestConfig
- ExtraMempool BuilderMempool
+ // SyncExtender can extend the syncer to handle custom sync logic.
+ // It's optional and can be nil
+ SyncExtender sync.Extender
+ // SyncableParser is used to parse summary messages from the network.
+ // It's required and should be non-nil
+ SyncableParser message.SyncableParser
+ // BlockExtension is the extension for the block manager
+ // to handle block manager events.
+ // It's optional and can be nil
+ BlockExtension BlockManagerExtension
+ // ExtraSyncLeafConfig is the extra configuration to handle leaf requests
+ // in the network and syncer. It's optional and can be nil
+ ExtraSyncLeafConfig *LeafRequestConfig
+ // ExtraMempool is the mempool to be used in the block builder.
+ // It's optional and can be nil
+ ExtraMempool BuilderMempool
+ // Clock is the clock to use for time-related operations.
+ // It's optional and can be nil + Clock *mockable.Clock } diff --git a/plugin/evm/extension/no_op_block_extension.go b/plugin/evm/extension/no_op_block_extension.go deleted file mode 100644 index 0859466c19..0000000000 --- a/plugin/evm/extension/no_op_block_extension.go +++ /dev/null @@ -1,33 +0,0 @@ -package extension - -import ( - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" -) - -var _ BlockManagerExtension = (*noOpBlockExtension)(nil) - -type noOpBlockExtension struct{} - -func NewNoOpBlockExtension() *noOpBlockExtension { - return &noOpBlockExtension{} -} - -func (noOpBlockExtension) InitializeExtraData(ethBlock *types.Block, chainConfig *params.ChainConfig) (interface{}, error) { - return nil, nil -} - -func (noOpBlockExtension) SyntacticVerify(b VMBlock, rules params.Rules) error { - return nil -} - -func (noOpBlockExtension) Accept(b VMBlock, acceptedBatch database.Batch) error { - return nil -} - -func (noOpBlockExtension) Reject(b VMBlock) error { - return nil -} - -func (noOpBlockExtension) Cleanup(b VMBlock) {} diff --git a/plugin/evm/sync/syncervm_client.go b/plugin/evm/sync/syncervm_client.go index 1657fbbefe..3fa4f7682a 100644 --- a/plugin/evm/sync/syncervm_client.go +++ b/plugin/evm/sync/syncervm_client.go @@ -31,23 +31,24 @@ import ( // The last 256 block hashes are necessary to support the BLOCKHASH opcode. const ParentsToFetch = 256 -var ( - stateSyncSummaryKey = []byte("stateSyncSummary") - - errExtenderAlreadySet = fmt.Errorf("sync extender already set") -) +var stateSyncSummaryKey = []byte("stateSyncSummary") type BlockAcceptor interface { PutLastAcceptedID(ids.ID) error } +// EthBlockWrapper is an interface that wraps the GetEthBlock method. type EthBlockWrapper interface { GetEthBlock() *types.Block } +// Extender is an interface that allows for extending the state sync process. 
type Extender interface { + // Sync is called to perform any extension-specific state sync logic. Sync(ctx context.Context, client syncclient.LeafClient, verdb *versiondb.Database, syncSummary message.Syncable) error + // OnFinishBeforeCommit is called after the state sync process has completed but before the state sync summary is committed. OnFinishBeforeCommit(lastAcceptedHeight uint64, syncSummary message.Syncable) error + // OnFinishAfterCommit is called after the state sync process has completed and the state sync summary is committed. OnFinishAfterCommit(summaryHeight uint64) error } @@ -72,7 +73,8 @@ type ClientConfig struct { // Extension points SyncableParser message.SyncableParser - SyncExtender Extender + // SyncExtender is an optional extension point for the state sync process, and can be nil. + SyncExtender Extender Client syncclient.Client @@ -169,8 +171,7 @@ func (client *stateSyncerClient) stateSync(ctx context.Context) error { return err } - // Sync the EVM trie and then the atomic trie. These steps could be done - // in parallel or in the opposite order. Keeping them serial for simplicity for now. + // Sync the EVM trie. 
if err := client.syncStateTrie(ctx); err != nil { return err } diff --git a/plugin/evm/testutils/genesis.go b/plugin/evm/testutils/genesis.go index 31eb777f82..8e1aebb475 100644 --- a/plugin/evm/testutils/genesis.go +++ b/plugin/evm/testutils/genesis.go @@ -12,6 +12,8 @@ import ( "github.com/ava-labs/avalanchego/snow" commoneng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/upgrade" + "github.com/ava-labs/avalanchego/utils/cb58" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" @@ -62,9 +64,12 @@ func genesisJSON(cfg *params.ChainConfig) string { } // Fund the test keys - for _, addr := range TestEthAddrs { + var b []byte + for _, key := range keys { + b, _ = cb58.Decode(key) + pk, _ := secp256k1.ToPrivateKey(b) balance := new(big.Int).Mul(big.NewInt(params.Ether), big.NewInt(10)) - g.Alloc[addr] = types.GenesisAccount{Balance: balance} + g.Alloc[pk.EthAddress()] = types.GenesisAccount{Balance: balance} } b, err := json.Marshal(g) diff --git a/plugin/evm/testutils/test_syncervm.go b/plugin/evm/testutils/test_syncervm.go index b3c804af8c..63235a0ffd 100644 --- a/plugin/evm/testutils/test_syncervm.go +++ b/plugin/evm/testutils/test_syncervm.go @@ -24,6 +24,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/coreth/accounts/keystore" "github.com/ava-labs/coreth/consensus/dummy" @@ -163,12 +164,10 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup if !hasItem { t.Fatal("expected nodeSet to contain at least 1 nodeID") } - go testSyncVMSetup.serverVM.vm.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) + go testSyncVMSetup.serverVM.VM.AppRequest(ctx, nodeID, 
requestID, time.Now().Add(1*time.Second), request) return nil } - // Disable metrics to prevent duplicate registerer stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}` - SetupVM(t, false, GenesisJSONLatest, stateSyncDisabledConfigJSON, "", syncDisabledVM) if err := syncDisabledVM.Initialize( context.Background(), testSyncVMSetup.syncerVM.SnowCtx, @@ -189,7 +188,7 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup } }() - if height := syncDisabledVM.LastAcceptedBlockInternal().Height(); height != 0 { + if height := syncDisabledVM.LastAcceptedVMBlock().Height(); height != 0 { t.Fatalf("Unexpected last accepted height: %d", height) } @@ -199,7 +198,7 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup // Process the first 10 blocks from the serverVM for i := uint64(1); i < 10; i++ { - ethBlock := testSyncVMSetup.serverVM.vm.Ethereum().BlockChain().GetBlockByNumber(i) + ethBlock := testSyncVMSetup.serverVM.VM.Ethereum().BlockChain().GetBlockByNumber(i) if ethBlock == nil { t.Fatalf("VM Server did not have a block available at height %d", i) } @@ -247,7 +246,7 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup } // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] - testSyncVMSetup.serverVM.appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + testSyncVMSetup.serverVM.AppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { if test.responseIntercept == nil { go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response) } else { @@ -260,7 +259,7 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup // connect peer to [syncerVM] assert.NoError(t, syncReEnabledVM.Connected( context.Background(), - testSyncVMSetup.serverVM.ctx.NodeID, + testSyncVMSetup.serverVM.SnowCtx.NodeID, 
statesyncclient.StateSyncVersion, )) @@ -305,9 +304,9 @@ func VMShutdownWhileSyncingTest(t *testing.T, testSetup *SyncTestSetup) { type SyncTestSetup struct { NewVM func() (extension.InnerVM, dummy.ConsensusCallbacks) // should not be initialized - AfterInit func(t *testing.T, testParams SyncTestParams, vm extension.InnerVM) + AfterInit func(t *testing.T, testParams SyncTestParams, vmSetup VMSetup, isServer bool) GenFn func(i int, vm extension.InnerVM, gen *core.BlockGen) - ExtraSyncerVMTest func(t *testing.T, syncerVM SyncerVMSetup) + ExtraSyncerVMTest func(t *testing.T, syncerVM VMSetup) } func initSyncServerAndClientVMs(t *testing.T, test SyncTestParams, numBlocks int, testSetup *SyncTestSetup) *testSyncVMSetup { @@ -317,14 +316,23 @@ func initSyncServerAndClientVMs(t *testing.T, test SyncTestParams, numBlocks int // This is necessary to support fetching a state summary. config := fmt.Sprintf(`{"commit-interval": %d, "state-sync-commit-interval": %d}`, test.SyncableInterval, test.SyncableInterval) serverVM, cb := testSetup.NewVM() - _, _, _, serverAppSender, serverCtx := SetupVM(t, true, GenesisJSONLatest, config, "", serverVM) + serverChan, serverDB, serverAtomicMem, serverAppSender, serverCtx := SetupVM(t, true, GenesisJSONLatest, config, "", serverVM) t.Cleanup(func() { log.Info("Shutting down server VM") require.NoError(serverVM.Shutdown(context.Background())) }) + serverVmSetup := VMSetup{ + VM: serverVM, + AppSender: serverAppSender, + SnowCtx: serverCtx, + ConsensusCallbacks: cb, + DB: serverDB, + EngineChan: serverChan, + AtomicMemory: serverAtomicMem, + } var err error if testSetup.AfterInit != nil { - testSetup.AfterInit(t, test, serverVM) + testSetup.AfterInit(t, test, serverVmSetup, true) } generateAndAcceptBlocks(t, serverVM, numBlocks, testSetup.GenFn, nil, cb) @@ -338,9 +346,11 @@ func initSyncServerAndClientVMs(t *testing.T, test SyncTestParams, numBlocks int patchedBlock := patchBlock(lastAccepted, root, serverVM.Ethereum().ChainDb()) 
blockBytes, err := rlp.EncodeToBytes(patchedBlock) require.NoError(err) - internalBlock, err := serverVM.ParseBlock(context.Background(), blockBytes) + internalWrappedBlock, err := serverVM.ParseBlock(context.Background(), blockBytes) require.NoError(err) - require.NoError(serverVM.SetLastAcceptedBlock(internalBlock)) + internalBlock, ok := internalWrappedBlock.(*chain.BlockWrapper) + require.True(ok) + require.NoError(serverVM.SetLastAcceptedBlock(internalBlock.Block)) // initialise [syncerVM] with blank genesis state // we also override [syncerVM]'s commit interval so the atomic trie works correctly. @@ -352,6 +362,20 @@ func initSyncServerAndClientVMs(t *testing.T, test SyncTestParams, numBlocks int t.Cleanup(func() { require.NoError(shutdownOnceSyncerVM.Shutdown(context.Background())) }) + syncerVmSetup := syncerVMSetup{ + VMSetup: VMSetup{ + VM: syncerVM, + ConsensusCallbacks: syncerCB, + SnowCtx: chainCtx, + DB: syncerDB, + EngineChan: syncerEngineChan, + AtomicMemory: syncerAtomicMemory, + }, + shutdownOnceSyncerVM: shutdownOnceSyncerVM, + } + if testSetup.AfterInit != nil { + testSetup.AfterInit(t, test, syncerVmSetup.VMSetup, false) + } require.NoError(syncerVM.SetState(context.Background(), snow.StateSyncing)) enabled, err := syncerVM.StateSyncEnabled(context.Background()) require.NoError(err) @@ -372,7 +396,7 @@ func initSyncServerAndClientVMs(t *testing.T, test SyncTestParams, numBlocks int require.NoError( syncerVM.Connected( context.Background(), - chainCtx.NodeID, + serverCtx.NodeID, statesyncclient.StateSyncVersion, ), ) @@ -387,46 +411,37 @@ func initSyncServerAndClientVMs(t *testing.T, test SyncTestParams, numBlocks int } return &testSyncVMSetup{ - serverVM: serverVMSetup{ - vm: serverVM, - appSender: serverAppSender, - ctx: serverCtx, + serverVM: VMSetup{ + VM: serverVM, + AppSender: serverAppSender, + SnowCtx: serverCtx, }, fundedAccounts: accounts, - syncerVM: SyncerVMSetup{ - VM: syncerVM, - ConsensusCallbacks: syncerCB, - SnowCtx: chainCtx, - 
DB: syncerDB, - EngineChan: syncerEngineChan, - AtomicMemory: syncerAtomicMemory, - shutdownOnceSyncerVM: shutdownOnceSyncerVM, - }, + syncerVM: syncerVmSetup, } } // testSyncVMSetup contains the required set up for a client VM to perform state sync // off of a server VM. type testSyncVMSetup struct { - serverVM serverVMSetup - syncerVM SyncerVMSetup + serverVM VMSetup + syncerVM syncerVMSetup fundedAccounts map[*keystore.Key]*types.StateAccount } -type serverVMSetup struct { - vm extension.InnerVM - ctx *snow.Context - appSender *enginetest.Sender +type VMSetup struct { + VM extension.InnerVM + SnowCtx *snow.Context + ConsensusCallbacks dummy.ConsensusCallbacks + DB avalanchedatabase.Database + EngineChan chan commonEng.Message + AtomicMemory *avalancheatomic.Memory + AppSender *enginetest.Sender } -type SyncerVMSetup struct { - VM extension.InnerVM - SnowCtx *snow.Context - ConsensusCallbacks dummy.ConsensusCallbacks - DB avalanchedatabase.Database - EngineChan chan commonEng.Message - AtomicMemory *avalancheatomic.Memory +type syncerVMSetup struct { + VMSetup shutdownOnceSyncerVM *shutdownOnceVM } @@ -450,11 +465,11 @@ type SyncTestParams struct { expectedErr error } -func testSyncerVM(t *testing.T, testSyncVMSetup *testSyncVMSetup, test SyncTestParams, extraSyncerVMTest func(t *testing.T, syncerVMSetup SyncerVMSetup)) { +func testSyncerVM(t *testing.T, testSyncVMSetup *testSyncVMSetup, test SyncTestParams, extraSyncerVMTest func(t *testing.T, syncerVMSetup VMSetup)) { t.Helper() var ( require = require.New(t) - serverVM = testSyncVMSetup.serverVM.vm + serverVM = testSyncVMSetup.serverVM.VM fundedAccounts = testSyncVMSetup.fundedAccounts syncerVM = testSyncVMSetup.syncerVM.VM syncerEngineChan = testSyncVMSetup.syncerVM.EngineChan @@ -502,6 +517,7 @@ func testSyncerVM(t *testing.T, testSyncVMSetup *testSyncVMSetup, test SyncTestP lastNumber := syncerVM.Ethereum().BlockChain().LastAcceptedBlock().NumberU64() // check the last block is indexed lastSyncedBlock := 
rawdb.ReadBlock(syncerVM.Ethereum().ChainDb(), rawdb.ReadCanonicalHash(syncerVM.Ethereum().ChainDb(), lastNumber), lastNumber) + require.NotNil(lastSyncedBlock, "last synced block not found") for _, tx := range lastSyncedBlock.Transactions() { index := rawdb.ReadTxLookupEntry(syncerVM.Ethereum().ChainDb(), tx.Hash()) require.NotNilf(index, "Miss transaction indices, number %d hash %s", lastNumber, tx.Hash().Hex()) @@ -584,7 +600,7 @@ func testSyncerVM(t *testing.T, testSyncVMSetup *testSyncVMSetup, test SyncTestP ) if extraSyncerVMTest != nil { - extraSyncerVMTest(t, testSyncVMSetup.syncerVM) + extraSyncerVMTest(t, testSyncVMSetup.syncerVM.VMSetup) } } diff --git a/plugin/evm/testutils/utils.go b/plugin/evm/testutils/utils.go index 7a94fe6c02..419df54ba2 100644 --- a/plugin/evm/testutils/utils.go +++ b/plugin/evm/testutils/utils.go @@ -16,16 +16,17 @@ var ( TestEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] TestShortIDAddrs []ids.ShortID InitialBaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) -) -func init() { - var b []byte - - for _, key := range []string{ + keys = []string{ "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", - } { + } +) + +func init() { + var b []byte + for _, key := range keys { b, _ = cb58.Decode(key) pk, _ := secp256k1.ToPrivateKey(b) TestKeys = append(TestKeys, pk) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index ee4b85a740..1b6a6d6783 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -78,6 +78,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -227,7 +228,7 @@ type VM struct { 
builder *blockBuilder - clock mockable.Clock + clock *mockable.Clock shutdownChan chan struct{} shutdownWg sync.WaitGroup @@ -288,6 +289,15 @@ func (vm *VM) Initialize( fxs []*commonEng.Fx, appSender commonEng.AppSender, ) error { + if vm.extensionConfig == nil { + return errors.New("extension config not set") + } + + vm.clock = &mockable.Clock{} + if vm.extensionConfig.Clock != nil { + vm.clock = vm.extensionConfig.Clock + } + vm.config.SetDefaults(defaultTxPoolConfig) if len(configBytes) > 0 { if err := json.Unmarshal(configBytes, &vm.config); err != nil { @@ -355,11 +365,14 @@ func (vm *VM) Initialize( g := new(core.Genesis) if err := json.Unmarshal(genesisBytes, g); err != nil { - return err + return fmt.Errorf("failed to unmarshal genesis %s", err) } - g.Config.NetworkUpgrades = params.GetNetworkUpgrades(chainCtx.NetworkUpgrades) - + // if the chainCtx.NetworkUpgrades is not empty, set the chain config + // normally it should not be empty, but some tests may not set it + if chainCtx.NetworkUpgrades != (upgrade.Config{}) { + g.Config.NetworkUpgrades = params.GetNetworkUpgrades(chainCtx.NetworkUpgrades) + } // If the Durango is activated, activate the Warp Precompile at the same time if g.Config.DurangoBlockTimestamp != nil { g.Config.PrecompileUpgrades = append(g.Config.PrecompileUpgrades, params.PrecompileUpgrade{ @@ -525,8 +538,8 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { vm.chaindb, eth.Settings{MaxBlocksPerRequest: vm.config.MaxBlocksPerRequest}, lastAcceptedHash, - dummy.NewFakerWithClock(vm.extensionConfig.ConsensusCallbacks, &vm.clock), - &vm.clock, + dummy.NewFakerWithClock(vm.extensionConfig.ConsensusCallbacks, vm.clock), + vm.clock, ) if err != nil { return err @@ -573,8 +586,8 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { ), }) - if vm.extensionConfig.SyncLeafType != nil { - leafConfigs = append(leafConfigs, vm.extensionConfig.SyncLeafType) + if vm.extensionConfig.ExtraSyncLeafConfig != nil { + 
leafConfigs = append(leafConfigs, vm.extensionConfig.ExtraSyncLeafConfig) } leafHandlers := make(LeafHandlers, len(leafConfigs)) @@ -889,7 +902,7 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo // We call verify without writes here to avoid generating a reference // to the blk state root in the triedb when we are going to call verify // again from the consensus engine with writes enabled. - if err := blk.verify(predicateCtx, false /*=writes*/); err != nil { + if err := blk.semanticVerify(predicateCtx, false /*=writes*/); err != nil { return nil, fmt.Errorf("%w: %w", vmerrs.ErrBlockVerificationFailed, err) } @@ -1218,6 +1231,11 @@ func (vm *VM) NewVMBlock(ethBlock *types.Block) (extension.VMBlock, error) { return blk, nil } +// IsBootstrapped returns true if the VM has finished bootstrapping +func (vm *VM) IsBootstrapped() bool { + return vm.bootstrapped.Get() +} + // CurrentRules returns the chain rules for the current block. func (vm *VM) CurrentRules() params.Rules { header := vm.eth.BlockChain().CurrentHeader() @@ -1228,8 +1246,8 @@ func (vm *VM) Ethereum() *eth.Ethereum { return vm.eth } -func (vm *VM) Config() *config.Config { - return &vm.config +func (vm *VM) Config() config.Config { + return vm.config } func (vm *VM) MetricRegistry() *prometheus.Registry { diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 3336543543..f0610f3e16 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -60,7 +60,8 @@ func defaultExtensions() (*extension.Config, error) { OnExtraStateChange: nil, }, SyncExtender: nil, - BlockExtension: extension.NewNoOpBlockExtension(), + BlockExtension: nil, + ExtraMempool: nil, }, nil } @@ -104,7 +105,7 @@ func TestVMConfig(t *testing.T) { txFeeCap := float64(11) enabledEthAPIs := []string{"debug"} configJSON := fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) - _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") + _, vm, _, 
_, _ := GenesisVM(t, false, testutils.GenesisJSONLatest, configJSON, "") require.Equal(t, vm.config.RPCTxFeeCap, txFeeCap, "Tx Fee Cap should be set") require.Equal(t, vm.config.EthAPIs(), enabledEthAPIs, "EnabledEthAPIs should be set") require.NoError(t, vm.Shutdown(context.Background())) @@ -114,23 +115,23 @@ func TestVMConfigDefaults(t *testing.T) { txFeeCap := float64(11) enabledEthAPIs := []string{"debug"} configJSON := fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) - _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") + _, vm, _, _, _ := GenesisVM(t, false, testutils.GenesisJSONLatest, configJSON, "") var vmConfig config.Config vmConfig.SetDefaults(defaultTxPoolConfig) vmConfig.RPCTxFeeCap = txFeeCap vmConfig.EnabledEthAPIs = enabledEthAPIs - require.Equal(t, vmConfig, vm.Config, "VM Config should match default with overrides") + require.Equal(t, vmConfig, vm.Config(), "VM Config should match default with overrides") require.NoError(t, vm.Shutdown(context.Background())) } func TestVMNilConfig(t *testing.T) { - _, vm, _, _, _ := GenesisVM(t, false, "", "", "") + _, vm, _, _, _ := GenesisVM(t, false, testutils.GenesisJSONLatest, "", "") // VM Config should match defaults if no config is passed in var vmConfig config.Config vmConfig.SetDefaults(defaultTxPoolConfig) - require.Equal(t, vmConfig, vm.Config, "VM Config should match default config") + require.Equal(t, vmConfig, vm.Config(), "VM Config should match default config") require.NoError(t, vm.Shutdown(context.Background())) } @@ -138,7 +139,7 @@ func TestVMContinuousProfiler(t *testing.T) { profilerDir := t.TempDir() profilerFrequency := 500 * time.Millisecond configJSON := fmt.Sprintf(`{"continuous-profiler-dir": %q,"continuous-profiler-frequency": "500ms"}`, profilerDir) - _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") + _, vm, _, _, _ := GenesisVM(t, false, testutils.GenesisJSONLatest, configJSON, "") require.Equal(t, 
vm.config.ContinuousProfilerDir, profilerDir, "profiler dir should be set") require.Equal(t, vm.config.ContinuousProfilerFrequency.Duration, profilerFrequency, "profiler frequency should be set") @@ -310,7 +311,7 @@ func TestBuildEthTxBlock(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testutils.TestEthAddrs[0], big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), testutils.TestKeys[0].ToECDSA()) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainID), testutils.TestKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } @@ -431,7 +432,7 @@ func TestSetPreferenceRace(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, big.NewInt(params.LaunchMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) @@ -660,10 +661,10 @@ func TestReorgProtection(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - key := testutils.TestKeys[0].ToECDSA() - address := testutils.TestEthAddrs[0] + key := testutils.TestKeys[1].ToECDSA() + address := testutils.TestEthAddrs[1] - tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, big.NewInt(params.LaunchMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) 
@@ -824,10 +825,10 @@ func TestNonCanonicalAccept(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - key := testutils.TestKeys[0].ToECDSA() - address := testutils.TestEthAddrs[0] + key := testutils.TestKeys[1].ToECDSA() + address := testutils.TestEthAddrs[1] - tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, big.NewInt(params.LaunchMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) @@ -1015,10 +1016,10 @@ func TestStickyPreference(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - key := testutils.TestKeys[0].ToECDSA() - address := testutils.TestEthAddrs[0] + key := testutils.TestKeys[1].ToECDSA() + address := testutils.TestEthAddrs[1] - tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, big.NewInt(params.LaunchMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) @@ -1269,10 +1270,10 @@ func TestUncleBlock(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - key := testutils.TestKeys[0].ToECDSA() - address := testutils.TestEthAddrs[0] + key := testutils.TestKeys[1].ToECDSA() + address := testutils.TestEthAddrs[1] - tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], 
big.NewInt(1), 21000, big.NewInt(params.LaunchMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) @@ -1454,10 +1455,10 @@ func TestAcceptReorg(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - key := testutils.TestKeys[0].ToECDSA() - address := testutils.TestEthAddrs[0] + key := testutils.TestKeys[1].ToECDSA() + address := testutils.TestEthAddrs[1] - tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, big.NewInt(params.LaunchMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) @@ -1641,7 +1642,7 @@ func TestFutureBlock(t *testing.T) { } }() - tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, big.NewInt(params.LaunchMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) @@ -1702,11 +1703,11 @@ func TestBuildApricotPhase1Block(t *testing.T) { newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - key := testutils.TestKeys[0].ToECDSA() - address := testutils.TestEthAddrs[0] + key := testutils.TestKeys[1].ToECDSA() + address := testutils.TestEthAddrs[1] tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) + signedTx, err := types.SignTx(tx, 
types.NewEIP155Signer(vm.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } @@ -1809,7 +1810,7 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { } }() - tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, testutils.InitialBaseFee, nil) + tx := types.NewTransaction(uint64(0), testutils.TestEthAddrs[1], big.NewInt(1), 21000, big.NewInt(params.LaunchMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testutils.TestKeys[0].ToECDSA()) if err != nil { t.Fatal(err) @@ -1948,7 +1949,6 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { defer func() { metrics.Enabled = true }() issuer, vm, dbManager, _, appSender := GenesisVM(t, true, testutils.GenesisJSONApricotPhase1, "", "") - defer func() { require.NoError(t, vm.Shutdown(context.Background())) }() // Since rewinding is permitted for last accepted height of 0, we must // accept one block to test the SkipUpgradeCheck functionality. 
@@ -1980,6 +1980,8 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { genesisWithUpgradeBytes, err := json.Marshal(genesisWithUpgrade) require.NoError(t, err) + require.NoError(t, vm.Shutdown(context.Background())) + // this will not be allowed err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, appSender) require.ErrorContains(t, err, "mismatching ApricotPhase2 fork block timestamp in database") @@ -2073,15 +2075,7 @@ func TestParentBeaconRootBlock(t *testing.T) { ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock header := types.CopyHeader(ethBlock.Header()) header.ParentBeaconRoot = test.beaconRoot - parentBeaconEthBlock := types.NewBlockWithExtData( - header, - nil, - nil, - nil, - new(trie.Trie), - ethBlock.ExtData(), - false, - ) + parentBeaconEthBlock := ethBlock.WithSeal(header) parentBeaconBlock, err := vm.blockManager.newBlock(parentBeaconEthBlock) if err != nil { From 6776c195a3f7392617753538c27328208ee9c13c Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 6 Feb 2025 02:31:34 +0300 Subject: [PATCH 64/91] fix linter --- plugin/evm/vm.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 1b6a6d6783..d1003d9e70 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -144,8 +144,6 @@ var ( errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") errNilBlockGasCostApricotPhase4 = errors.New("nil blockGasCost is invalid after apricotPhase4") errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") - errEthAlreadyInitialized = errors.New("ethereum already initialized") - errConsensusCallbacksAlreadySet = errors.New("consensus callbacks already set") errVMAlreadyInitialized = errors.New("vm already initialized") errExtensionConfigAlreadySet = errors.New("extension config already set") ) From f9f54efd0dae313add1ea5ba482bd4701713ad57 Mon Sep 17 00:00:00 2001 
From: Ceyhun Onur Date: Thu, 6 Feb 2025 02:37:33 +0300 Subject: [PATCH 65/91] move to factory pkg --- plugin/{ => factory}/factory.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename plugin/{ => factory}/factory.go (97%) diff --git a/plugin/factory.go b/plugin/factory/factory.go similarity index 97% rename from plugin/factory.go rename to plugin/factory/factory.go index a5e7907dfe..e1b3caa8e1 100644 --- a/plugin/factory.go +++ b/plugin/factory/factory.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package main +package factory import ( "github.com/ava-labs/avalanchego/ids" From b754ffa7ab3f9817718c1a0a51a1465bbd9de01e Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 6 Feb 2025 02:40:54 +0300 Subject: [PATCH 66/91] change main function --- plugin/main.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugin/main.go b/plugin/main.go index 2bf84f4798..46dfb41341 100644 --- a/plugin/main.go +++ b/plugin/main.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/vms/rpcchainvm" "github.com/ava-labs/coreth/plugin/evm" + "github.com/ava-labs/coreth/plugin/factory" ) func main() { @@ -29,7 +30,7 @@ func main() { fmt.Printf("failed to set fd limit correctly due to: %s", err) os.Exit(1) } - vm, err := NewPluginVM() + vm, err := factory.NewPluginVM() if err != nil { fmt.Printf("couldn't create evm plugin: %s", err) os.Exit(1) From 58a2bb8fba0baacfbe9e8df5b192079fde73e5cb Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 6 Feb 2025 02:47:03 +0300 Subject: [PATCH 67/91] bump avago --- go.mod | 3 +-- go.sum | 6 ++---- scripts/versions.sh | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 9e8a97ed02..7442e17f4f 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.12.2-0.20250116172728-54d8b06b8625 + 
github.com/ava-labs/avalanchego v1.12.3-0.20250205234502-286645b55326 github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 @@ -92,7 +92,6 @@ require ( github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/go.sum b/go.sum index 2090275d01..fa6b120cb2 100644 --- a/go.sum +++ b/go.sum @@ -54,8 +54,8 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.12.2-0.20250116172728-54d8b06b8625 h1:sbmfwhpetCKI7Unzw9jJ+2HWLRFM7vq7th0pH2LclCQ= -github.com/ava-labs/avalanchego v1.12.2-0.20250116172728-54d8b06b8625/go.mod h1:oK/C7ZGo5cAEayBKBoawh2EpOo3E9gD1rpd9NAM0RkQ= +github.com/ava-labs/avalanchego v1.12.3-0.20250205234502-286645b55326 h1:/ZhQ/yBU8i9vNTrGyNq3ioNb94owwtjREWsiIXAZack= +github.com/ava-labs/avalanchego v1.12.3-0.20250205234502-286645b55326/go.mod h1:0m9tYzjo53qEf9ZbfFZ4878/PQqkZ8erj9j6JBXp6P4= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -416,8 +416,6 @@ github.com/nats-io/jwt v0.3.0/go.mod 
h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5Vgl github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/scripts/versions.sh b/scripts/versions.sh index 42e3dcf479..615cc55d0a 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -6,4 +6,4 @@ set -euo pipefail # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'54d8b06b'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'286645b'} From f16b85b10e36802bb366bb3180c24da37381d364 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Fri, 7 Feb 2025 09:11:26 -0800 Subject: [PATCH 68/91] refactor: unexport commit from atomic trie --- plugin/evm/atomic/state/atomic_trie.go | 8 ++++---- plugin/evm/atomic/vm/syncervm_test.go | 5 ++++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/plugin/evm/atomic/state/atomic_trie.go b/plugin/evm/atomic/state/atomic_trie.go index a196ad3e08..08c93f0b91 100644 --- a/plugin/evm/atomic/state/atomic_trie.go +++ b/plugin/evm/atomic/state/atomic_trie.go @@ -136,8 +136,8 @@ func (a *AtomicTrie) OpenTrie(root common.Hash) (*trie.Trie, error) { return trie.New(trie.TrieID(root), a.trieDB) } -// Commit calls Commit on the underlying trieDB and updates metadata pointers. 
-func (a *AtomicTrie) Commit(height uint64, root common.Hash) error { +// commit calls commit on the underlying trieDB and updates metadata pointers. +func (a *AtomicTrie) commit(height uint64, root common.Hash) error { if err := a.trieDB.Commit(root, false); err != nil { return err } @@ -270,7 +270,7 @@ func (a *AtomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { // Because we do not accept the trie at every height, we may need to // populate roots at prior commit heights that were skipped. for nextCommitHeight := a.lastCommittedHeight + a.commitInterval; nextCommitHeight < height; nextCommitHeight += a.commitInterval { - if err := a.Commit(nextCommitHeight, a.lastAcceptedRoot); err != nil { + if err := a.commit(nextCommitHeight, a.lastAcceptedRoot); err != nil { return false, err } hasCommitted = true @@ -284,7 +284,7 @@ func (a *AtomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) { // Commit this root if we have reached the [commitInterval]. if height%a.commitInterval == 0 { - if err := a.Commit(height, root); err != nil { + if err := a.commit(height, root); err != nil { return false, err } hasCommitted = true diff --git a/plugin/evm/atomic/vm/syncervm_test.go b/plugin/evm/atomic/vm/syncervm_test.go index d0e30d43ca..a4b632b1e8 100644 --- a/plugin/evm/atomic/vm/syncervm_test.go +++ b/plugin/evm/atomic/vm/syncervm_test.go @@ -95,7 +95,10 @@ func TestAtomicSyncerVM(t *testing.T) { } if isServer { serverAtomicTrie := atomicVM.atomicBackend.AtomicTrie() - require.NoError(t, serverAtomicTrie.Commit(params.SyncableInterval, serverAtomicTrie.LastAcceptedRoot())) + // Calling AcceptTrie with SyncableInterval creates a commit for the atomic trie + committed, err := serverAtomicTrie.AcceptTrie(params.SyncableInterval, serverAtomicTrie.LastAcceptedRoot()) + require.NoError(t, err) + require.True(t, committed) require.NoError(t, atomicVM.VersionDB().Commit()) } } From 54e88011d2acfbcaa1eb2b34b58c51bcf6e7c4c1 Mon Sep 17 00:00:00 2001 
From: Ceyhun Onur Date: Fri, 7 Feb 2025 20:12:31 +0300 Subject: [PATCH 69/91] few nits --- core/blockchain_test.go | 13 +++----- eth/gasprice/gasprice_test.go | 2 +- plugin/evm/atomic/state/atomic_state.go | 5 +-- .../evm/atomic/sync/atomic_sync_extender.go | 1 + plugin/evm/atomic/txpool/mempool.go | 2 ++ plugin/evm/atomic/vm/api.go | 6 ++-- plugin/evm/atomic/vm/atomic_leaf_handler.go | 3 ++ plugin/evm/atomic/vm/block_extension.go | 15 +++++++++ plugin/evm/atomic/vm/formatting.go | 31 ++++++++---------- plugin/evm/atomic/vm/tx_semantic_verifier.go | 1 + plugin/evm/atomic/vm/vm.go | 32 ++++--------------- plugin/evm/atomic/vm/vm_test.go | 20 ++++++++++++ plugin/evm/block.go | 6 ++-- plugin/evm/block_manager.go | 20 ++++++++---- plugin/evm/extension/config.go | 26 ++++++++++++++- plugin/evm/vm.go | 2 +- 16 files changed, 115 insertions(+), 70 deletions(-) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index fe998a9a1d..601cb12f02 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -544,7 +544,7 @@ func TestCanonicalHashMarker(t *testing.T) { } func testCanonicalHashMarker(t *testing.T, scheme string) { - cases := []struct { + var cases = []struct { forkA int forkB int }{ @@ -713,7 +713,6 @@ func TestCreateThenDeletePreByzantium(t *testing.T) { testCreateThenDelete(t, &config) } - func TestCreateThenDeletePostByzantium(t *testing.T) { testCreateThenDelete(t, params.TestChainConfig) } @@ -738,8 +737,7 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { byte(vm.PUSH1), 0x1, byte(vm.SSTORE), // Get the runtime-code on the stack - byte(vm.PUSH32), - } + byte(vm.PUSH32)} initCode = append(initCode, code...) 
initCode = append(initCode, []byte{ byte(vm.PUSH1), 0x0, // offset @@ -781,8 +779,8 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { }) // Import the canonical chain chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{ - // Debug: true, - // Tracer: logger.NewJSONLogger(nil, os.Stdout), + //Debug: true, + //Tracer: logger.NewJSONLogger(nil, os.Stdout), }, common.Hash{}, false) if err != nil { t.Fatalf("failed to create tester chain: %v", err) @@ -941,8 +939,7 @@ func TestTransientStorageReset(t *testing.T) { byte(vm.TSTORE), // Get the runtime-code on the stack - byte(vm.PUSH32), - } + byte(vm.PUSH32)} initCode = append(initCode, code...) initCode = append(initCode, []byte{ byte(vm.PUSH1), 0x0, // offset diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 02d5294cf7..dda6fd808d 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -120,7 +120,7 @@ func newTestBackendFakerEngine(t *testing.T, config *params.ChainConfig, numBloc // newTestBackend creates a test backend. OBS: don't forget to invoke tearDown // after use, otherwise the blockchain instance will mem-leak via goroutines. func newTestBackend(t *testing.T, config *params.ChainConfig, numBlocks int, extDataGasUsage *big.Int, genBlocks func(i int, b *core.BlockGen)) *testBackend { - gspec := &core.Genesis{ + var gspec = &core.Genesis{ Config: config, Alloc: types.GenesisAlloc{addr: {Balance: bal}}, } diff --git a/plugin/evm/atomic/state/atomic_state.go b/plugin/evm/atomic/state/atomic_state.go index 940c987fe5..f4e171b0a2 100644 --- a/plugin/evm/atomic/state/atomic_state.go +++ b/plugin/evm/atomic/state/atomic_state.go @@ -31,9 +31,10 @@ func (a *atomicState) Root() common.Hash { // Accept applies the state change to VM's persistent storage. 
func (a *atomicState) Accept(commitBatch database.Batch) error { + isBonus := a.backend.IsBonus(a.blockHeight, a.blockHash) // Update the atomic tx repository. Note it is necessary to invoke // the correct method taking bonus blocks into consideration. - if a.backend.IsBonus(a.blockHeight, a.blockHash) { + if isBonus { if err := a.backend.repo.WriteBonus(a.blockHeight, a.txs); err != nil { return err } @@ -61,7 +62,7 @@ func (a *atomicState) Accept(commitBatch database.Batch) error { // If this is a bonus block, write [commitBatch] without applying atomic ops // to shared memory. - if a.backend.IsBonus(a.blockHeight, a.blockHash) { + if isBonus { log.Info("skipping atomic tx acceptance on bonus block", "block", a.blockHash) return avalancheatomic.WriteAll(commitBatch, atomicChangesBatch) } diff --git a/plugin/evm/atomic/sync/atomic_sync_extender.go b/plugin/evm/atomic/sync/atomic_sync_extender.go index 58c8bbed74..a5703606d5 100644 --- a/plugin/evm/atomic/sync/atomic_sync_extender.go +++ b/plugin/evm/atomic/sync/atomic_sync_extender.go @@ -42,6 +42,7 @@ type AtomicSyncExtender struct { stateSyncRequestSize uint16 } +// Initialize initializes the atomic sync extender with the atomic backend and atomic trie. func (a *AtomicSyncExtender) Initialize(backend AtomicBackend, atomicTrie AtomicTrie, stateSyncRequestSize uint16) { a.backend = backend a.atomicTrie = atomicTrie diff --git a/plugin/evm/atomic/txpool/mempool.go b/plugin/evm/atomic/txpool/mempool.go index 7839dceb08..d49fe91dd5 100644 --- a/plugin/evm/atomic/txpool/mempool.go +++ b/plugin/evm/atomic/txpool/mempool.go @@ -586,6 +586,8 @@ func (m *Mempool) addPending() { } } +// SubscribePendingTxs implements the BuilderMempool interface and returns a channel +// that signals when there are pending transactions in the mempool and a block should be built. 
func (m *Mempool) SubscribePendingTxs() <-chan struct{} { return m.Pending } diff --git a/plugin/evm/atomic/vm/api.go b/plugin/evm/atomic/vm/api.go index a9cb96ea9d..aac952d2ec 100644 --- a/plugin/evm/atomic/vm/api.go +++ b/plugin/evm/atomic/vm/api.go @@ -67,7 +67,7 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply addrSet := set.Set[ids.ShortID]{} for _, addrStr := range args.Addresses { - addr, err := service.vm.ParseServiceAddress(addrStr) + addr, err := ParseServiceAddress(service.vm.ctx, addrStr) if err != nil { return fmt.Errorf("couldn't parse address %q: %w", addrStr, err) } @@ -77,7 +77,7 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply startAddr := ids.ShortEmpty startUTXO := ids.Empty if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" { - startAddr, err = service.vm.ParseServiceAddress(args.StartIndex.Address) + startAddr, err = ParseServiceAddress(service.vm.ctx, args.StartIndex.Address) if err != nil { return fmt.Errorf("couldn't parse start index address %q: %w", args.StartIndex.Address, err) } @@ -114,7 +114,7 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply reply.UTXOs[i] = str } - endAddress, err := service.vm.FormatLocalAddress(endAddr) + endAddress, err := FormatLocalAddress(service.vm.ctx, endAddr) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } diff --git a/plugin/evm/atomic/vm/atomic_leaf_handler.go b/plugin/evm/atomic/vm/atomic_leaf_handler.go index e47339d385..0ac9d57de7 100644 --- a/plugin/evm/atomic/vm/atomic_leaf_handler.go +++ b/plugin/evm/atomic/vm/atomic_leaf_handler.go @@ -21,16 +21,19 @@ func (h *uninitializedHandler) OnLeafsRequest(ctx context.Context, nodeID ids.No return nil, errUninitialized } +// atomicLeafHandler is a wrapper around handlers.LeafRequestHandler that allows for initialization after creation type atomicLeafHandler struct { handlers.LeafRequestHandler } +// NewAtomicLeafHandler 
returns a new uninitialized atomicLeafHandler that can be later initialized func NewAtomicLeafHandler() *atomicLeafHandler { return &atomicLeafHandler{ LeafRequestHandler: &uninitializedHandler{}, } } +// Initialize initializes the atomicLeafHandler with the provided atomicTrieDB, trieKeyLength, and networkCodec func (a *atomicLeafHandler) Initialize(atomicTrieDB *triedb.Database, trieKeyLength int, networkCodec codec.Manager) { handlerStats := stats.NewHandlerStats(metrics.Enabled) a.LeafRequestHandler = handlers.NewLeafsRequestHandler(atomicTrieDB, trieKeyLength, nil, networkCodec, handlerStats) diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go index 107e988d06..d956210c9c 100644 --- a/plugin/evm/atomic/vm/block_extension.go +++ b/plugin/evm/atomic/vm/block_extension.go @@ -34,6 +34,7 @@ type blockExtension struct { vm *VM } +// newBlockExtension returns a new block extension. func newBlockExtension( extDataHashes map[common.Hash]common.Hash, vm *VM, @@ -46,8 +47,11 @@ func newBlockExtension( } } +// SyntacticVerify checks the syntactic validity of the block. This is called by the wrapper +// block manager's SyntacticVerify method. func (be *blockExtension) SyntacticVerify(b extension.VMBlock, rules params.Rules) error { ethBlock := b.GetEthBlock() + // should not happen if ethBlock == nil { return errNilEthBlock } @@ -137,6 +141,8 @@ func (be *blockExtension) SyntacticVerify(b extension.VMBlock, rules params.Rule return nil } +// SemanticVerify checks the semantic validity of the block. This is called by the wrapper +// block manager's SemanticVerify method. func (be *blockExtension) SemanticVerify(b extension.VMBlock) error { atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) if err != nil { @@ -145,6 +151,10 @@ func (be *blockExtension) SemanticVerify(b extension.VMBlock) error { return be.verifyUTXOsPresent(b, atomicTxs) } +// OnAccept is called when the block is accepted.
This is called by the wrapper +// block manager's OnAccept method. The acceptedBatch contains the changes that +// were made to the database as a result of accepting the block, and it's flushed +// to the database in this method. func (be *blockExtension) OnAccept(b extension.VMBlock, acceptedBatch database.Batch) error { atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) if err != nil { @@ -166,6 +176,8 @@ func (be *blockExtension) OnAccept(b extension.VMBlock, acceptedBatch database.B return atomicState.Accept(acceptedBatch) } +// OnReject is called when the block is rejected. This is called by the wrapper +// block manager's OnReject method. func (be *blockExtension) OnReject(b extension.VMBlock) error { atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) if err != nil { @@ -186,6 +198,8 @@ func (be *blockExtension) OnReject(b extension.VMBlock) error { return atomicState.Reject() } +// OnCleanup is called when the block is cleaned up. This is called by the wrapper +// block manager's OnCleanup method. func (be *blockExtension) OnCleanup(b extension.VMBlock) { if atomicState, err := be.vm.atomicBackend.GetVerifiedAtomicState(b.GetEthBlock().Hash()); err == nil { atomicState.Reject() @@ -215,6 +229,7 @@ func (be *blockExtension) verifyUTXOsPresent(b extension.VMBlock, atomicTxs []*a return nil } +// extractAtomicTxsFromBlock extracts atomic transactions from the block's extra data.
func extractAtomicTxsFromBlock(b extension.VMBlock, chainConfig *params.ChainConfig) ([]*atomic.Tx, error) { ethBlock := b.GetEthBlock() if ethBlock == nil { diff --git a/plugin/evm/atomic/vm/formatting.go b/plugin/evm/atomic/vm/formatting.go index e1914a7299..47ae7b71cc 100644 --- a/plugin/evm/atomic/vm/formatting.go +++ b/plugin/evm/atomic/vm/formatting.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" ) @@ -14,57 +15,51 @@ import ( // ParseServiceAddress get address ID from address string, being it either localized (using address manager, // doing also components validations), or not localized. // If both attempts fail, reports error from localized address parsing -func (vm *VM) ParseServiceAddress(addrStr string) (ids.ShortID, error) { +func ParseServiceAddress(ctx *snow.Context, addrStr string) (ids.ShortID, error) { addr, err := ids.ShortFromString(addrStr) if err == nil { return addr, nil } - return vm.ParseLocalAddress(addrStr) + return ParseLocalAddress(ctx, addrStr) } // ParseLocalAddress takes in an address for this chain and produces the ID -func (vm *VM) ParseLocalAddress(addrStr string) (ids.ShortID, error) { - chainID, addr, err := vm.ParseAddress(addrStr) +func ParseLocalAddress(ctx *snow.Context, addrStr string) (ids.ShortID, error) { + chainID, addr, err := ParseAddress(ctx, addrStr) if err != nil { return ids.ShortID{}, err } - if chainID != vm.ctx.ChainID { + if chainID != ctx.ChainID { return ids.ShortID{}, fmt.Errorf("expected chainID to be %q but was %q", - vm.ctx.ChainID, chainID) + ctx.ChainID, chainID) } return addr, nil } // FormatLocalAddress takes in a raw address and produces the formatted address -func (vm *VM) FormatLocalAddress(addr ids.ShortID) (string, error) { - return vm.FormatAddress(vm.ctx.ChainID, addr) -} - -// FormatAddress takes in a chainID and a raw 
address and produces the formatted -// address -func (vm *VM) FormatAddress(chainID ids.ID, addr ids.ShortID) (string, error) { - chainIDAlias, err := vm.ctx.BCLookup.PrimaryAlias(chainID) +func FormatLocalAddress(ctx *snow.Context, addr ids.ShortID) (string, error) { + chainIDAlias, err := ctx.BCLookup.PrimaryAlias(ctx.ChainID) if err != nil { return "", err } - hrp := constants.GetHRP(vm.ctx.NetworkID) + hrp := constants.GetHRP(ctx.NetworkID) return address.Format(chainIDAlias, hrp, addr.Bytes()) } // ParseAddress takes in an address and produces the ID of the chain it's for // the ID of the address -func (vm *VM) ParseAddress(addrStr string) (ids.ID, ids.ShortID, error) { +func ParseAddress(ctx *snow.Context, addrStr string) (ids.ID, ids.ShortID, error) { chainIDAlias, hrp, addrBytes, err := address.Parse(addrStr) if err != nil { return ids.ID{}, ids.ShortID{}, err } - chainID, err := vm.ctx.BCLookup.Lookup(chainIDAlias) + chainID, err := ctx.BCLookup.Lookup(chainIDAlias) if err != nil { return ids.ID{}, ids.ShortID{}, err } - expectedHRP := constants.GetHRP(vm.ctx.NetworkID) + expectedHRP := constants.GetHRP(ctx.NetworkID) if hrp != expectedHRP { return ids.ID{}, ids.ShortID{}, fmt.Errorf("expected hrp %q but got %q", expectedHRP, hrp) diff --git a/plugin/evm/atomic/vm/tx_semantic_verifier.go b/plugin/evm/atomic/vm/tx_semantic_verifier.go index 196190fc8c..b8794b9335 100644 --- a/plugin/evm/atomic/vm/tx_semantic_verifier.go +++ b/plugin/evm/atomic/vm/tx_semantic_verifier.go @@ -42,6 +42,7 @@ type verifierBackend struct { secpCache *secp256k1.RecoverCache } +// semanticVerifier is a visitor that checks the semantic validity of atomic transactions. 
type semanticVerifier struct { backend *verifierBackend atx *atomic.Tx diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index be56d438ce..a026fdc8f1 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -128,16 +128,19 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to create codec manager: %w", err) } + // Create the atomic extension structs + // some of them need to be initialized after the inner VM is initialized blockExtension := newBlockExtension(extDataHashes, vm) syncExtender := &atomicsync.AtomicSyncExtender{} syncProvider := &atomicsync.AtomicSummaryProvider{} + // Create and pass the leaf handler to the atomic extension + // it will be initialized after the inner VM is initialized leafHandler := NewAtomicLeafHandler() atomicLeafTypeConfig := &extension.LeafRequestConfig{ LeafType: atomicsync.AtomicTrieNode, MetricName: "sync_atomic_trie_leaves", Handler: leafHandler, } - vm.mempool = &txpool.Mempool{} extensionConfig := &extension.Config{ @@ -155,6 +158,7 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to set extension config: %w", err) } + // Initialize inner vm with the provided parameters if err := innerVM.Initialize( ctx, chainCtx, @@ -169,6 +173,7 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to initialize inner VM: %w", err) } + // Now we can initialize the mempool and so err = vm.mempool.Initialize(chainCtx, innerVM.MetricRegistry(), defaultMempoolSize, vm.verifyTxAtTip) if err != nil { return fmt.Errorf("failed to initialize mempool: %w", err) @@ -217,11 +222,7 @@ func (vm *VM) Initialize( // interface. The fx will register all of its types, which can be safely // ignored by the VM's codec. 
vm.baseCodec = linearcodec.NewDefault() - - if err := vm.fx.Initialize(vm); err != nil { - return err - } - return nil + return vm.fx.Initialize(vm) } func (vm *VM) SetState(ctx context.Context, state snow.State) error { @@ -768,25 +769,6 @@ func (vm *VM) GetAtomicUTXOs( ) } -func (vm *VM) newImportTx( - chainID ids.ID, // chain to import from - to common.Address, // Address of recipient - baseFee *big.Int, // fee to use post-AP3 - keys []*secp256k1.PrivateKey, // Keys to import the funds -) (*atomic.Tx, error) { - kc := secp256k1fx.NewKeychain() - for _, key := range keys { - kc.Add(key) - } - - atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1) - if err != nil { - return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) - } - - return atomic.NewImportTx(vm.ctx, vm.InnerVM.CurrentRules(), vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) -} - func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { return vm.BuildBlockWithContext(ctx, nil) } diff --git a/plugin/evm/atomic/vm/vm_test.go b/plugin/evm/atomic/vm/vm_test.go index 46411d90ed..b5679b8c77 100644 --- a/plugin/evm/atomic/vm/vm_test.go +++ b/plugin/evm/atomic/vm/vm_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "errors" + "fmt" "math/big" "strings" "testing" @@ -45,6 +46,25 @@ func newAtomicTestVM() *VM { return WrapVM(&evm.VM{}) } +func (vm *VM) newImportTx( + chainID ids.ID, // chain to import from + to common.Address, // Address of recipient + baseFee *big.Int, // fee to use post-AP3 + keys []*secp256k1.PrivateKey, // Keys to import the funds +) (*atomic.Tx, error) { + kc := secp256k1fx.NewKeychain() + for _, key := range keys { + kc.Add(key) + } + + atomicUTXOs, _, _, err := vm.GetAtomicUTXOs(chainID, kc.Addresses(), ids.ShortEmpty, ids.Empty, -1) + if err != nil { + return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) + } + + return atomic.NewImportTx(vm.ctx, vm.InnerVM.CurrentRules(), vm.clock.Unix(), 
chainID, to, baseFee, kc, atomicUTXOs) +} + func GenesisAtomicVM(t *testing.T, finishBootstrapping bool, genesisJSON string, diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 9140d6da44..a74d6ce911 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -228,10 +228,8 @@ func (b *Block) semanticVerify(predicateContext *precompileconfig.PredicateConte } } - if b.blockManager.blockExtension != nil { - if err := b.blockManager.blockExtension.SemanticVerify(b); err != nil { - return fmt.Errorf("failed to verify block extension: %w", err) - } + if err := b.blockManager.SemanticVerify(b); err != nil { + return fmt.Errorf("failed to verify block extension: %w", err) } // The engine may call VerifyWithContext multiple times on the same block with different contexts. diff --git a/plugin/evm/block_manager.go b/plugin/evm/block_manager.go index c1610ea8fb..1304795048 100644 --- a/plugin/evm/block_manager.go +++ b/plugin/evm/block_manager.go @@ -157,13 +157,6 @@ func (bm *blockManager) SyntacticVerify(b *Block, rules params.Rules) error { } } - // Make sure the block isn't too far in the future - // TODO: move this to only be part of semantic verification. - blockTimestamp := b.ethBlock.Time() - if maxBlockTime := uint64(bm.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { - return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) - } - // Ensure BaseFee is non-nil as of ApricotPhase3. 
if rules.IsApricotPhase3 { if ethHeader.BaseFee == nil { @@ -223,3 +216,16 @@ func (bm *blockManager) SyntacticVerify(b *Block, rules params.Rules) error { } return nil } + +func (bm *blockManager) SemanticVerify(b *Block) error { + // Make sure the block isn't too far in the future + blockTimestamp := b.ethBlock.Time() + if maxBlockTime := uint64(bm.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { + return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) + } + + if bm.blockExtension != nil { + return bm.blockExtension.SemanticVerify(b) + } + return nil +} diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go index 49b11728e2..59a21279bd 100644 --- a/plugin/evm/extension/config.go +++ b/plugin/evm/extension/config.go @@ -2,6 +2,7 @@ package extension import ( "context" + "errors" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" @@ -26,6 +27,13 @@ import ( "github.com/ava-labs/coreth/sync/handlers" ) +var ( + errNilConfig = errors.New("nil config") + errNilNetworkCodec = errors.New("nil network codec") + errNilSyncSummaryProvider = errors.New("nil sync summary provider") + errNilSyncableParser = errors.New("nil syncable parser") +) + type ExtensibleVM interface { // SetExtensionConfig sets the configuration for the VM extension // Should be called before any other method and only once @@ -125,7 +133,7 @@ type Config struct { NetworkCodec codec.Manager // ConsensusCallbacks is the consensus callbacks to use // for the VM to be used in consensus engine. - // It's required and should be non-nil + // Callback functions can be nil. ConsensusCallbacks dummy.ConsensusCallbacks // SyncSummaryProvider is the sync summary provider to use // for the VM to be used in syncer. 
@@ -151,3 +159,19 @@ type Config struct { // It's optional and can be nil Clock *mockable.Clock } + +func (c *Config) Validate() error { + if c == nil { + return errNilConfig + } + if c.NetworkCodec == nil { + return errNilNetworkCodec + } + if c.SyncSummaryProvider == nil { + return errNilSyncSummaryProvider + } + if c.SyncableParser == nil { + return errNilSyncableParser + } + return nil +} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 8a39229ae4..623190a347 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -287,7 +287,7 @@ func (vm *VM) Initialize( fxs []*commonEng.Fx, appSender commonEng.AppSender, ) error { - if vm.extensionConfig == nil { + if vm.extensionConfig.Validate() == nil { return errors.New("extension config not set") } From ce08bacfd968a03c6842256bb1da07528809c25b Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 7 Feb 2025 20:32:42 +0300 Subject: [PATCH 70/91] fix ext config validate --- plugin/evm/vm.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 623190a347..9528948394 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -287,8 +287,8 @@ func (vm *VM) Initialize( fxs []*commonEng.Fx, appSender commonEng.AppSender, ) error { - if vm.extensionConfig.Validate() == nil { - return errors.New("extension config not set") + if err := vm.extensionConfig.Validate(); err != nil { + return fmt.Errorf("failed to validate extension config: %w", err) } vm.clock = &mockable.Clock{} From 264ddbb55f8fab365a7a94c95e4913cc9aa018ca Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 7 Feb 2025 21:29:28 +0300 Subject: [PATCH 71/91] update releases.md --- RELEASES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/RELEASES.md b/RELEASES.md index 628e454843..1c4988fd31 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -2,6 +2,8 @@ ## Pending Release +- Moved atomic codebase from plugin/evm to plugin/evm/atomic package + ## 
[v0.14.1](https://github.com/ava-labs/coreth/releases/tag/v0.14.1) - Removed deprecated `ExportKey`, `ExportAVAX`, `Export`, `ImportKey`, `ImportAVAX`, `Import` APIs From c388601058e2110c285823d2656aa77ed699d0ad Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 10 Feb 2025 12:23:01 +0300 Subject: [PATCH 72/91] rename vmerrs to coreerors/move vmerrors to plugin --- core/state_transition.go | 8 +-- core/txpool/validation.go | 12 ++-- core/vm/contracts.go | 9 ++- core/vm/contracts_stateful_native_asset.go | 10 +-- core/vm/contracts_stateful_test.go | 20 +++--- core/vm/eips.go | 5 +- core/vm/evm.go | 61 +++++++++---------- core/vm/gas.go | 4 +- core/vm/gas_table.go | 61 ++++++++++--------- core/vm/gas_table_test.go | 12 ++-- core/vm/instructions.go | 44 ++++++------- core/vm/instructions_test.go | 22 ++++--- core/vm/interpreter.go | 10 +-- core/vm/operations_acl.go | 10 +-- vmerrs/vmerrs.go => coreerrors/errors.go | 5 +- eth/gasestimator/gasestimator.go | 4 +- eth/tracers/logger/logger.go | 4 +- eth/tracers/native/call.go | 4 +- eth/tracers/native/call_flat.go | 4 +- internal/ethapi/errors.go | 6 +- plugin/evm/atomic/vm/vm.go | 6 +- plugin/evm/vm.go | 8 +-- plugin/evm/vmerrors/errors.go | 9 +++ precompile/contract/utils.go | 4 +- precompile/contracts/warp/contract.go | 6 +- precompile/contracts/warp/contract_test.go | 18 +++--- .../contracts/warp/contract_warp_handler.go | 4 +- 27 files changed, 193 insertions(+), 177 deletions(-) rename vmerrs/vmerrs.go => coreerrors/errors.go (91%) create mode 100644 plugin/evm/vmerrors/errors.go diff --git a/core/state_transition.go b/core/state_transition.go index df8c679a7a..52b801ee4d 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -33,9 +33,9 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/utils" - "github.com/ava-labs/coreth/vmerrs" 
"github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -72,7 +72,7 @@ func (result *ExecutionResult) Return() []byte { // Revert returns the concrete revert reason if the execution is aborted by `REVERT` // opcode. Note the reason can be nil if no data supplied with revert opcode. func (result *ExecutionResult) Revert() []byte { - if result.Err != vmerrs.ErrExecutionReverted { + if result.Err != coreerrors.ErrExecutionReverted { return nil } return common.CopyBytes(result.ReturnData) @@ -353,7 +353,7 @@ func (st *StateTransition) preCheck() error { } // Make sure the sender is not prohibited if vm.IsProhibited(msg.From) { - return fmt.Errorf("%w: address %v", vmerrs.ErrAddrProhibited, msg.From) + return fmt.Errorf("%w: address %v", coreerrors.ErrAddrProhibited, msg.From) } } // Make sure that transaction gasFeeCap is greater than the baseFee (post london) @@ -477,7 +477,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { // Check whether the init code size has been exceeded. 
if rules.IsDurango && contractCreation && len(msg.Data) > params.MaxInitCodeSize { - return nil, fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(msg.Data), params.MaxInitCodeSize) + return nil, fmt.Errorf("%w: code size %v limit %v", coreerrors.ErrMaxInitCodeSizeExceeded, len(msg.Data), params.MaxInitCodeSize) } // Execute the preparatory steps for state transition which includes: diff --git a/core/txpool/validation.go b/core/txpool/validation.go index 374471e978..32473b1680 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -34,18 +34,16 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" ) -var ( - // blobTxMinBlobGasPrice is the big.Int version of the configured protocol - // parameter to avoid constucting a new big integer for every transaction. - blobTxMinBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice) -) +// blobTxMinBlobGasPrice is the big.Int version of the configured protocol +// parameter to avoid constucting a new big integer for every transaction. +var blobTxMinBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice) // ValidationOptions define certain differences between transaction validation // across the different pools without having to duplicate those checks. 
@@ -85,7 +83,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types } // Check whether the init code size has been exceeded if opts.Config.IsDurango(head.Time) && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { - return fmt.Errorf("%w: code size %v, limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) + return fmt.Errorf("%w: code size %v, limit %v", coreerrors.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) } // Transactions can't be negative. This may never happen using RLP decoded // transactions but may occur for transactions created using the RPC. diff --git a/core/vm/contracts.go b/core/vm/contracts.go index b14dea6b2a..151255adc3 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -33,10 +33,10 @@ import ( "fmt" "math/big" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/precompile/contract" "github.com/ava-labs/coreth/precompile/modules" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" @@ -284,7 +284,7 @@ func ActivePrecompiles(rules params.Rules) []common.Address { func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64) (ret []byte, remainingGas uint64, err error) { gasCost := p.RequiredGas(input) if suppliedGas < gasCost { - return nil, 0, vmerrs.ErrOutOfGas + return nil, 0, coreerrors.ErrOutOfGas } suppliedGas -= gasCost output, err := p.Run(input) @@ -339,6 +339,7 @@ type sha256hash struct{} func (c *sha256hash) RequiredGas(input []byte) uint64 { return uint64(len(input)+31)/32*params.Sha256PerWordGas + params.Sha256BaseGas } + func (c *sha256hash) Run(input []byte) ([]byte, error) { h := sha256.Sum256(input) return h[:], nil @@ -354,6 +355,7 @@ type ripemd160hash struct{} func (c *ripemd160hash) RequiredGas(input []byte) uint64 { return 
uint64(len(input)+31)/32*params.Ripemd160PerWordGas + params.Ripemd160BaseGas } + func (c *ripemd160hash) Run(input []byte) ([]byte, error) { ripemd := ripemd160.New() ripemd.Write(input) @@ -370,6 +372,7 @@ type dataCopy struct{} func (c *dataCopy) RequiredGas(input []byte) uint64 { return uint64(len(input)+31)/32*params.IdentityPerWordGas + params.IdentityBaseGas } + func (c *dataCopy) Run(in []byte) ([]byte, error) { return common.CopyBytes(in), nil } @@ -523,7 +526,7 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) { // Modulo 0 is undefined, return zero return common.LeftPadBytes([]byte{}, int(modLen)), nil case base.BitLen() == 1: // a bit length of 1 means it's 1 (or -1). - //If base == 1, then we can just return base % mod (if mod >= 1, which it is) + // If base == 1, then we can just return base % mod (if mod >= 1, which it is) v = base.Mod(base, mod).Bytes() default: v = base.Exp(base, exp, mod).Bytes() diff --git a/core/vm/contracts_stateful_native_asset.go b/core/vm/contracts_stateful_native_asset.go index 6791efe2e7..8b8d00a8cc 100644 --- a/core/vm/contracts_stateful_native_asset.go +++ b/core/vm/contracts_stateful_native_asset.go @@ -7,8 +7,8 @@ import ( "fmt" "math/big" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/precompile/contract" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -54,18 +54,18 @@ func UnpackNativeAssetBalanceInput(input []byte) (common.Address, common.Hash, e func (b *nativeAssetBalance) Run(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { // input: encodePacked(address 20 bytes, assetID 32 bytes) if suppliedGas < b.gasCost { - return nil, 0, vmerrs.ErrOutOfGas + return nil, 0, coreerrors.ErrOutOfGas } remainingGas = suppliedGas - b.gasCost address, assetID, err := UnpackNativeAssetBalanceInput(input) if 
err != nil { - return nil, remainingGas, vmerrs.ErrExecutionReverted + return nil, remainingGas, coreerrors.ErrExecutionReverted } res, overflow := uint256.FromBig(accessibleState.GetStateDB().GetBalanceMultiCoin(address, assetID)) if overflow { - return nil, remainingGas, vmerrs.ErrExecutionReverted + return nil, remainingGas, coreerrors.ErrExecutionReverted } return common.LeftPadBytes(res.Bytes(), 32), remainingGas, nil } @@ -109,5 +109,5 @@ func (c *nativeAssetCall) Run(accessibleState contract.AccessibleState, caller c type deprecatedContract struct{} func (*deprecatedContract) Run(accessibleState contract.AccessibleState, caller common.Address, addr common.Address, input []byte, suppliedGas uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { - return nil, suppliedGas, vmerrs.ErrExecutionReverted + return nil, suppliedGas, coreerrors.ErrExecutionReverted } diff --git a/core/vm/contracts_stateful_test.go b/core/vm/contracts_stateful_test.go index 9ac7652108..7cf5bb6ee4 100644 --- a/core/vm/contracts_stateful_test.go +++ b/core/vm/contracts_stateful_test.go @@ -9,8 +9,8 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/holiman/uint256" @@ -199,7 +199,7 @@ func TestStatefulPrecompile(t *testing.T) { value: big0, gasInput: params.AssetBalanceApricot, expectedGasRemaining: 0, - expectedErr: vmerrs.ErrExecutionReverted, + expectedErr: coreerrors.ErrExecutionReverted, expectedResult: nil, name: "native asset balance: invalid input data reverts", }, @@ -217,7 +217,7 @@ func TestStatefulPrecompile(t *testing.T) { value: big0, gasInput: params.AssetBalanceApricot - 1, expectedGasRemaining: 0, - expectedErr: vmerrs.ErrOutOfGas, + expectedErr: coreerrors.ErrOutOfGas, expectedResult: nil, name: "native asset 
balance: insufficient gas errors", }, @@ -235,7 +235,7 @@ func TestStatefulPrecompile(t *testing.T) { value: u256Hundred, gasInput: params.AssetBalanceApricot, expectedGasRemaining: params.AssetBalanceApricot, - expectedErr: vmerrs.ErrInsufficientBalance, + expectedErr: coreerrors.ErrInsufficientBalance, expectedResult: nil, name: "native asset balance: non-zero value with insufficient funds reverts before running pre-compile", }, @@ -324,7 +324,7 @@ func TestStatefulPrecompile(t *testing.T) { value: uint256.NewInt(50), gasInput: params.AssetCallApricot, expectedGasRemaining: 0, - expectedErr: vmerrs.ErrInsufficientBalance, + expectedErr: coreerrors.ErrInsufficientBalance, expectedResult: nil, name: "native asset call: insufficient multicoin funds", stateDBCheck: func(t *testing.T, stateDB StateDB) { @@ -356,7 +356,7 @@ func TestStatefulPrecompile(t *testing.T) { value: uint256.NewInt(51), gasInput: params.AssetCallApricot, expectedGasRemaining: params.AssetCallApricot, - expectedErr: vmerrs.ErrInsufficientBalance, + expectedErr: coreerrors.ErrInsufficientBalance, expectedResult: nil, name: "native asset call: insufficient funds", stateDBCheck: func(t *testing.T, stateDB StateDB) { @@ -388,7 +388,7 @@ func TestStatefulPrecompile(t *testing.T) { value: uint256.NewInt(50), gasInput: params.AssetCallApricot - 1, expectedGasRemaining: 0, - expectedErr: vmerrs.ErrOutOfGas, + expectedErr: coreerrors.ErrOutOfGas, expectedResult: nil, name: "native asset call: insufficient gas for native asset call", }, @@ -409,7 +409,7 @@ func TestStatefulPrecompile(t *testing.T) { value: uint256.NewInt(50), gasInput: params.AssetCallApricot + params.CallNewAccountGas - 1, expectedGasRemaining: 0, - expectedErr: vmerrs.ErrOutOfGas, + expectedErr: coreerrors.ErrOutOfGas, expectedResult: nil, name: "native asset call: insufficient gas to create new account", stateDBCheck: func(t *testing.T, stateDB StateDB) { @@ -441,7 +441,7 @@ func TestStatefulPrecompile(t *testing.T) { value: 
uint256.NewInt(50), gasInput: params.AssetCallApricot + params.CallNewAccountGas, expectedGasRemaining: params.CallNewAccountGas, - expectedErr: vmerrs.ErrExecutionReverted, + expectedErr: coreerrors.ErrExecutionReverted, expectedResult: nil, name: "native asset call: invalid input", }, @@ -462,7 +462,7 @@ func TestStatefulPrecompile(t *testing.T) { value: big0, gasInput: params.AssetCallApricot + params.CallNewAccountGas, expectedGasRemaining: params.AssetCallApricot + params.CallNewAccountGas, - expectedErr: vmerrs.ErrExecutionReverted, + expectedErr: coreerrors.ErrExecutionReverted, expectedResult: nil, name: "deprecated contract", }, diff --git a/core/vm/eips.go b/core/vm/eips.go index b573705dc2..793b3751dc 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -30,8 +30,8 @@ import ( "fmt" "sort" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/holiman/uint256" ) @@ -65,6 +65,7 @@ func ValidEip(eipNum int) bool { _, ok := activators[eipNum] return ok } + func ActivateableEips() []string { var nums []string for k := range activators { @@ -214,7 +215,7 @@ func opTload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by // opTstore implements TSTORE opcode func opTstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection + return nil, coreerrors.ErrWriteProtection } loc := scope.Stack.pop() val := scope.Stack.pop() diff --git a/core/vm/evm.go b/core/vm/evm.go index 0eb7ec12df..cb984ca7e0 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -33,12 +33,12 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/precompile/contract" 
"github.com/ava-labs/coreth/precompile/modules" "github.com/ava-labs/coreth/precompile/precompileconfig" "github.com/ava-labs/coreth/predicate" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" @@ -268,11 +268,11 @@ func (evm *EVM) Interpreter() *EVMInterpreter { func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *uint256.Int) (ret []byte, leftOverGas uint64, err error) { // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { - return nil, gas, vmerrs.ErrDepth + return nil, gas, coreerrors.ErrDepth } // Fail if we're trying to transfer more than the available balance if !value.IsZero() && !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) { - return nil, gas, vmerrs.ErrInsufficientBalance + return nil, gas, coreerrors.ErrInsufficientBalance } snapshot := evm.StateDB.Snapshot() p, isPrecompile := evm.precompile(addr) @@ -335,7 +335,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // when we're in homestead this also counts for code storage gas errors. if err != nil { evm.StateDB.RevertToSnapshot(snapshot) - if err != vmerrs.ErrExecutionReverted { + if err != coreerrors.ErrExecutionReverted { gas = 0 } // TODO: consider clearing up unused snapshots: @@ -355,7 +355,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, gas uint64, value *uint256.Int) (ret []byte, leftOverGas uint64, err error) { // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { - return nil, gas, vmerrs.ErrDepth + return nil, gas, coreerrors.ErrDepth } // Fail if we're trying to transfer more than the available balance // Note although it's noop to transfer X ether to caller itself. 
But @@ -365,9 +365,9 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, // that [value] will be popped from the stack and decoded to a *big.Int, which will // always yield a positive result. if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) { - return nil, gas, vmerrs.ErrInsufficientBalance + return nil, gas, coreerrors.ErrInsufficientBalance } - var snapshot = evm.StateDB.Snapshot() + snapshot := evm.StateDB.Snapshot() // Invoke tracer hooks that signal entering/exiting a call frame if evm.Config.Tracer != nil { @@ -391,7 +391,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, } if err != nil { evm.StateDB.RevertToSnapshot(snapshot) - if err != vmerrs.ErrExecutionReverted { + if err != coreerrors.ErrExecutionReverted { gas = 0 } } @@ -406,9 +406,9 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) { // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { - return nil, gas, vmerrs.ErrDepth + return nil, gas, coreerrors.ErrDepth } - var snapshot = evm.StateDB.Snapshot() + snapshot := evm.StateDB.Snapshot() // Invoke tracer hooks that signal entering/exiting a call frame if evm.Config.Tracer != nil { @@ -435,7 +435,7 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by } if err != nil { evm.StateDB.RevertToSnapshot(snapshot) - if err != vmerrs.ErrExecutionReverted { + if err != coreerrors.ErrExecutionReverted { gas = 0 } } @@ -449,14 +449,14 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) { // Fail if we're trying to execute above the call depth limit if evm.depth > 
int(params.CallCreateDepth) { - return nil, gas, vmerrs.ErrDepth + return nil, gas, coreerrors.ErrDepth } // We take a snapshot here. This is a bit counter-intuitive, and could probably be skipped. // However, even a staticcall is considered a 'touch'. On mainnet, static calls were introduced // after all empty accounts were deleted, so this is not required. However, if we omit this, // then certain tests start failing; stRevertTest/RevertPrecompiledTouchExactOOG.json. // We could change this, but for now it's left for legacy reasons - var snapshot = evm.StateDB.Snapshot() + snapshot := evm.StateDB.Snapshot() // We do an AddBalance of zero here, just in order to trigger a touch. // This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium, @@ -491,7 +491,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte } if err != nil { evm.StateDB.RevertToSnapshot(snapshot) - if err != vmerrs.ErrExecutionReverted { + if err != coreerrors.ErrExecutionReverted { gas = 0 } } @@ -515,22 +515,22 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, // Depth check execution. Fail if we're trying to execute above the // limit. if evm.depth > int(params.CallCreateDepth) { - return nil, common.Address{}, gas, vmerrs.ErrDepth + return nil, common.Address{}, gas, coreerrors.ErrDepth } // Note: it is not possible for a negative value to be passed in here due to the fact // that [value] will be popped from the stack and decoded to a *big.Int, which will // always yield a positive result. if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) { - return nil, common.Address{}, gas, vmerrs.ErrInsufficientBalance + return nil, common.Address{}, gas, coreerrors.ErrInsufficientBalance } // If there is any collision with a prohibited address, return an error instead // of allowing the contract to be created. 
if IsProhibited(address) { - return nil, common.Address{}, gas, vmerrs.ErrAddrProhibited + return nil, common.Address{}, gas, coreerrors.ErrAddrProhibited } nonce := evm.StateDB.GetNonce(caller.Address()) if nonce+1 < nonce { - return nil, common.Address{}, gas, vmerrs.ErrNonceUintOverflow + return nil, common.Address{}, gas, coreerrors.ErrNonceUintOverflow } evm.StateDB.SetNonce(caller.Address(), nonce+1) // We add this to the access list _before_ taking a snapshot. Even if the creation fails, @@ -541,7 +541,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, // Ensure there's no existing contract already at the designated address contractHash := evm.StateDB.GetCodeHash(address) if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) { - return nil, common.Address{}, 0, vmerrs.ErrContractAddressCollision + return nil, common.Address{}, 0, coreerrors.ErrContractAddressCollision } // Create a new account on the state snapshot := evm.StateDB.Snapshot() @@ -568,12 +568,12 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, // Check whether the max code size has been exceeded, assign err if the case. if err == nil && evm.chainRules.IsEIP158 && len(ret) > params.MaxCodeSize { - err = vmerrs.ErrMaxCodeSizeExceeded + err = coreerrors.ErrMaxCodeSizeExceeded } // Reject code starting with 0xEF if EIP-3541 is enabled. 
if err == nil && len(ret) >= 1 && ret[0] == 0xEF && evm.chainRules.IsApricotPhase3 { - err = vmerrs.ErrInvalidCode + err = coreerrors.ErrInvalidCode } // if the contract creation ran successfully and no errors were returned @@ -585,16 +585,16 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, if contract.UseGas(createDataGas) { evm.StateDB.SetCode(address, ret) } else { - err = vmerrs.ErrCodeStoreOutOfGas + err = coreerrors.ErrCodeStoreOutOfGas } } // When an error was returned by the EVM or when setting the creation code // above we revert to the snapshot and consume any gas remaining. Additionally // when we're in homestead this also counts for code storage gas errors. - if err != nil && (evm.chainRules.IsHomestead || err != vmerrs.ErrCodeStoreOutOfGas) { + if err != nil && (evm.chainRules.IsHomestead || err != coreerrors.ErrCodeStoreOutOfGas) { evm.StateDB.RevertToSnapshot(snapshot) - if err != vmerrs.ErrExecutionReverted { + if err != coreerrors.ErrExecutionReverted { contract.UseGas(contract.Gas) } } @@ -633,30 +633,30 @@ func (evm *EVM) GetChainConfig() precompileconfig.ChainConfig { return evm.chain func (evm *EVM) NativeAssetCall(caller common.Address, input []byte, suppliedGas uint64, gasCost uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { if suppliedGas < gasCost { - return nil, 0, vmerrs.ErrOutOfGas + return nil, 0, coreerrors.ErrOutOfGas } remainingGas = suppliedGas - gasCost if readOnly { - return nil, remainingGas, vmerrs.ErrExecutionReverted + return nil, remainingGas, coreerrors.ErrExecutionReverted } to, assetID, assetAmount, callData, err := UnpackNativeAssetCallInput(input) if err != nil { - return nil, remainingGas, vmerrs.ErrExecutionReverted + return nil, remainingGas, coreerrors.ErrExecutionReverted } // Note: it is not possible for a negative assetAmount to be passed in here due to the fact that decoding a // byte slice into a *big.Int type will always return a positive value. 
if assetAmount.Sign() != 0 && !evm.Context.CanTransferMC(evm.StateDB, caller, to, assetID, assetAmount) { - return nil, remainingGas, vmerrs.ErrInsufficientBalance + return nil, remainingGas, coreerrors.ErrInsufficientBalance } snapshot := evm.StateDB.Snapshot() if !evm.StateDB.Exist(to) { if remainingGas < params.CallNewAccountGas { - return nil, 0, vmerrs.ErrOutOfGas + return nil, 0, coreerrors.ErrOutOfGas } remainingGas -= params.CallNewAccountGas evm.StateDB.CreateAccount(to) @@ -669,13 +669,12 @@ func (evm *EVM) NativeAssetCall(caller common.Address, input []byte, suppliedGas // Send [assetAmount] of [assetID] to [to] address evm.Context.TransferMultiCoin(evm.StateDB, caller, to, assetID, assetAmount) ret, remainingGas, err = evm.Call(AccountRef(caller), to, callData, remainingGas, new(uint256.Int)) - // When an error was returned by the EVM or when setting the creation code // above we revert to the snapshot and consume any gas remaining. Additionally // when we're in homestead this also counts for code storage gas errors. 
if err != nil { evm.StateDB.RevertToSnapshot(snapshot) - if err != vmerrs.ErrExecutionReverted { + if err != coreerrors.ErrExecutionReverted { remainingGas = 0 } // TODO: consider clearing up unused snapshots: diff --git a/core/vm/gas.go b/core/vm/gas.go index 1a195acf2a..5a25f2c3df 100644 --- a/core/vm/gas.go +++ b/core/vm/gas.go @@ -27,7 +27,7 @@ package vm import ( - "github.com/ava-labs/coreth/vmerrs" + "github.com/ava-labs/coreth/coreerrors" "github.com/holiman/uint256" ) @@ -57,7 +57,7 @@ func callGas(isEip150 bool, availableGas, base uint64, callCost *uint256.Int) (u } } if !callCost.IsUint64() { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return callCost.Uint64(), nil diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 68a2ae2d58..d378f256d8 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -29,8 +29,8 @@ package vm import ( "errors" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" ) @@ -47,7 +47,7 @@ func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) { // overflow. The constant 0x1FFFFFFFE0 is the highest number that can be used // without overflowing the gas calculation. 
if newMemSize > 0x1FFFFFFFE0 { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } newMemSizeWords := toWordSize(newMemSize) newMemSize = newMemSizeWords * 32 @@ -84,15 +84,15 @@ func memoryCopierGas(stackpos int) gasFunc { // And gas for copying data, charged per word at param.CopyGas words, overflow := stack.Back(stackpos).Uint64WithOverflow() if overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } if words, overflow = math.SafeMul(toWordSize(words), params.CopyGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } if gas, overflow = math.SafeAdd(gas, words); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -272,7 +272,7 @@ func makeGasLog(n uint64) gasFunc { return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { requestedSize, overflow := stack.Back(1).Uint64WithOverflow() if overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } gas, err := memoryGasCost(mem, memorySize) @@ -281,18 +281,18 @@ func makeGasLog(n uint64) gasFunc { } if gas, overflow = math.SafeAdd(gas, params.LogGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } if gas, overflow = math.SafeAdd(gas, n*params.LogTopicGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } var memorySizeGas uint64 if memorySizeGas, overflow = math.SafeMul(requestedSize, params.LogDataGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } if gas, overflow = math.SafeAdd(gas, memorySizeGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -305,13 +305,13 @@ func gasKeccak256(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memor } wordGas, overflow := 
stack.Back(1).Uint64WithOverflow() if overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Keccak256WordGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } if gas, overflow = math.SafeAdd(gas, wordGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -339,13 +339,13 @@ func gasCreate2(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memoryS } wordGas, overflow := stack.Back(2).Uint64WithOverflow() if overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Keccak256WordGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } if gas, overflow = math.SafeAdd(gas, wordGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -357,15 +357,16 @@ func gasCreateEip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m } size, overflow := stack.Back(2).Uint64WithOverflow() if overflow || size > params.MaxInitCodeSize { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } // Since size <= params.MaxInitCodeSize, these multiplication cannot overflow moreGas := params.InitCodeWordGas * ((size + 31) / 32) if gas, overflow = math.SafeAdd(gas, moreGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } + func gasCreate2Eip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { gas, err := memoryGasCost(mem, memorySize) if err != nil { @@ -373,12 +374,12 @@ func gasCreate2Eip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, } size, overflow := stack.Back(2).Uint64WithOverflow() if overflow || size > 
params.MaxInitCodeSize { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } // Since size <= params.MaxInitCodeSize, these multiplication cannot overflow moreGas := (params.InitCodeWordGas + params.Keccak256WordGas) * ((size + 31) / 32) if gas, overflow = math.SafeAdd(gas, moreGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -391,7 +392,7 @@ func gasExpFrontier(evm *EVM, contract *Contract, stack *Stack, mem *Memory, mem overflow bool ) if gas, overflow = math.SafeAdd(gas, params.ExpGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -404,7 +405,7 @@ func gasExpEIP158(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memor overflow bool ) if gas, overflow = math.SafeAdd(gas, params.ExpGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -431,7 +432,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize } var overflow bool if gas, overflow = math.SafeAdd(gas, memoryGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0)) @@ -439,7 +440,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize return 0, err } if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -457,14 +458,14 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory gas += params.CallValueTransferGas } if gas, overflow = math.SafeAdd(gas, memoryGas); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } evm.callGasTemp, err = callGas(evm.chainRules.IsEIP150, contract.Gas, gas, stack.Back(0)) if 
err != nil { return 0, err } if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -480,7 +481,7 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me } var overflow bool if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -496,7 +497,7 @@ func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memo } var overflow bool if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -506,7 +507,7 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me // EIP150 homestead gas reprice fork: if evm.chainRules.IsEIP150 { gas = params.SelfdestructGasEIP150 - var address = common.Address(stack.Back(0).Bytes20()) + address := common.Address(stack.Back(0).Bytes20()) if evm.chainRules.IsEIP158 { // if empty and transfers value @@ -529,7 +530,7 @@ func gasSelfdestructAP1(evm *EVM, contract *Contract, stack *Stack, mem *Memory, // EIP150 homestead gas reprice fork: if evm.chainRules.IsEIP150 { gas = params.SelfdestructGasEIP150 - var address = common.Address(stack.Back(0).Bytes20()) + address := common.Address(stack.Back(0).Bytes20()) if evm.chainRules.IsEIP158 { // if empty and transfers value diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index c6da37c789..81a84ff2e1 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -36,8 +36,8 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/holiman/uint256" @@ -54,8 +54,8 @@ func TestMemoryGasCost(t *testing.T) { } for i, tt := range tests { v, err := memoryGasCost(&Memory{}, tt.size) - if (err == vmerrs.ErrGasUintOverflow) != tt.overflow { - t.Errorf("test %d: overflow mismatch: have %v, want %v", i, err == vmerrs.ErrGasUintOverflow, tt.overflow) + if (err == coreerrors.ErrGasUintOverflow) != tt.overflow { + t.Errorf("test %d: overflow mismatch: have %v, want %v", i, err == coreerrors.ErrGasUintOverflow, tt.overflow) } if v != tt.cost { t.Errorf("test %d: gas cost mismatch: have %v, want %v", i, v, tt.cost) @@ -88,7 +88,7 @@ var eip2200Tests = []struct { {1, math.MaxUint64, "0x60016000556001600055", 1612, 0, nil}, // 1 -> 1 -> 1 {0, math.MaxUint64, "0x600160005560006000556001600055", 40818, 19200, nil}, // 0 -> 1 -> 0 -> 1 {1, math.MaxUint64, "0x600060005560016000556000600055", 10818, 19200, nil}, // 1 -> 0 -> 1 -> 0 - {1, 2306, "0x6001600055", 2306, 0, vmerrs.ErrOutOfGas}, // 1 -> 1 (2300 sentry + 2xPUSH) + {1, 2306, "0x6001600055", 2306, 0, coreerrors.ErrOutOfGas}, // 1 -> 1 (2300 sentry + 2xPUSH) {1, 2307, "0x6001600055", 806, 0, nil}, // 1 -> 1 (2301 sentry + 2xPUSH) } @@ -145,7 +145,7 @@ var createGasTests = []struct { func TestCreateGas(t *testing.T) { for i, tt := range createGasTests { - var gasUsed = uint64(0) + gasUsed := uint64(0) doCheck := func(testGas int) bool { address := common.BytesToAddress([]byte("contract")) statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) @@ -165,7 +165,7 @@ func TestCreateGas(t *testing.T) { // Note: we use Cortina instead of AllEthashProtocolChanges (upstream) // because it is the last fork before the activation of EIP-3860 vmenv := NewEVM(vmctx, TxContext{}, statedb, params.TestCortinaChainConfig, config) - var startGas = uint64(testGas) + startGas := uint64(testGas) ret, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, startGas, 
new(uint256.Int)) if err != nil { return false diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 72047c260d..af0fd2d530 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -29,8 +29,8 @@ package vm import ( "math" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" @@ -339,14 +339,14 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte offset64, overflow := dataOffset.Uint64WithOverflow() if overflow { - return nil, vmerrs.ErrReturnDataOutOfBounds + return nil, coreerrors.ErrReturnDataOutOfBounds } // we can reuse dataOffset now (aliasing it for clarity) - var end = dataOffset + end := dataOffset end.Add(&dataOffset, &length) end64, overflow := end.Uint64WithOverflow() if overflow || uint64(len(interpreter.returnData)) < end64 { - return nil, vmerrs.ErrReturnDataOutOfBounds + return nil, coreerrors.ErrReturnDataOutOfBounds } scope.Memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[offset64:end64]) return nil, nil @@ -525,7 +525,7 @@ func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection + return nil, coreerrors.ErrWriteProtection } loc := scope.Stack.pop() val := scope.Stack.pop() @@ -539,7 +539,7 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } pos := scope.Stack.pop() if !scope.Contract.validJumpdest(&pos) { - return nil, vmerrs.ErrInvalidJump + return nil, coreerrors.ErrInvalidJump } *pc = pos.Uint64() - 1 // pc will be increased by the interpreter loop return nil, nil @@ -552,7 +552,7 @@ func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by pos, cond := 
scope.Stack.pop(), scope.Stack.pop() if !cond.IsZero() { if !scope.Contract.validJumpdest(&pos) { - return nil, vmerrs.ErrInvalidJump + return nil, coreerrors.ErrInvalidJump } *pc = pos.Uint64() - 1 // pc will be increased by the interpreter loop } @@ -580,7 +580,7 @@ func opGas(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection + return nil, coreerrors.ErrWriteProtection } var ( value = scope.Stack.pop() @@ -601,9 +601,9 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b // homestead we must check for CodeStoreOutOfGasError (homestead only // rule) and treat as an error, if the ruleset is frontier we must // ignore this error and pretend the operation was successful. - if interpreter.evm.chainRules.IsHomestead && suberr == vmerrs.ErrCodeStoreOutOfGas { + if interpreter.evm.chainRules.IsHomestead && suberr == coreerrors.ErrCodeStoreOutOfGas { stackvalue.Clear() - } else if suberr != nil && suberr != vmerrs.ErrCodeStoreOutOfGas { + } else if suberr != nil && suberr != coreerrors.ErrCodeStoreOutOfGas { stackvalue.Clear() } else { stackvalue.SetBytes(addr.Bytes()) @@ -611,7 +611,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b scope.Stack.push(&stackvalue) scope.Contract.Gas += returnGas - if suberr == vmerrs.ErrExecutionReverted { + if suberr == coreerrors.ErrExecutionReverted { interpreter.returnData = res // set REVERT data to return data buffer return res, nil } @@ -621,7 +621,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection + return nil, coreerrors.ErrWriteProtection } var ( endowment = scope.Stack.pop() @@ -646,7 +646,7 @@ 
func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] scope.Stack.push(&stackvalue) scope.Contract.Gas += returnGas - if suberr == vmerrs.ErrExecutionReverted { + if suberr == coreerrors.ErrExecutionReverted { interpreter.returnData = res // set REVERT data to return data buffer return res, nil } @@ -667,7 +667,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) if interpreter.readOnly && !value.IsZero() { - return nil, vmerrs.ErrWriteProtection + return nil, coreerrors.ErrWriteProtection } if !value.IsZero() { gas += params.CallStipend @@ -680,7 +680,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt temp.SetOne() } stack.push(&temp) - if err == nil || err == vmerrs.ErrExecutionReverted { + if err == nil || err == coreerrors.ErrExecutionReverted { scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -712,7 +712,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ temp.SetOne() } stack.push(&temp) - if err == nil || err == vmerrs.ErrExecutionReverted { + if err == nil || err == coreerrors.ErrExecutionReverted { scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -740,7 +740,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext temp.SetOne() } stack.push(&temp) - if err == nil || err == vmerrs.ErrExecutionReverted { + if err == nil || err == coreerrors.ErrExecutionReverted { scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -768,7 +768,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) temp.SetOne() } stack.push(&temp) - if err == nil || err == vmerrs.ErrExecutionReverted { + if err == nil || err == coreerrors.ErrExecutionReverted { 
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } scope.Contract.Gas += returnGas @@ -789,7 +789,7 @@ func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) interpreter.returnData = ret - return ret, vmerrs.ErrExecutionReverted + return ret, coreerrors.ErrExecutionReverted } func opUndefined(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { @@ -802,7 +802,7 @@ func opStop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection + return nil, coreerrors.ErrWriteProtection } beneficiary := scope.Stack.pop() balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) @@ -817,7 +817,7 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection + return nil, coreerrors.ErrWriteProtection } beneficiary := scope.Stack.pop() balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) @@ -837,7 +837,7 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon func makeLog(size int) executionFunc { return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection + return nil, coreerrors.ErrWriteProtection } topics := make([]common.Hash, size) stack := scope.Stack diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index cf28289974..4011772b5f 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -38,8 +38,8 @@ import ( "github.com/ava-labs/coreth/core/rawdb" 
"github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" @@ -57,9 +57,11 @@ type twoOperandParams struct { y string } -var alphabetSoup = "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff" -var commonParams []*twoOperandParams -var twoOpMethods map[string]executionFunc +var ( + alphabetSoup = "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff" + commonParams []*twoOperandParams + twoOpMethods map[string]executionFunc +) type contractRef struct { addr common.Address @@ -226,7 +228,8 @@ func TestAddMod(t *testing.T) { z string expected string }{ - {"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + { + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", @@ -282,7 +285,7 @@ func TestWriteExpectedValues(t *testing.T) { if err != nil { t.Fatal(err) } - _ = os.WriteFile(fmt.Sprintf("testdata/testcases_%v.json", name), data, 0644) + _ = os.WriteFile(fmt.Sprintf("testdata/testcases_%v.json", name), data, 0o644) if err != nil { t.Fatal(err) } @@ -471,11 +474,13 @@ func BenchmarkOpEq(b *testing.B) { opBenchmark(b, opEq, x, y) } + func BenchmarkOpEq2(b *testing.B) { x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff" y := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201fffffffe" opBenchmark(b, opEq, x, y) } + func BenchmarkOpAnd(b *testing.B) { x := alphabetSoup y := alphabetSoup @@ -526,18 +531,21 @@ func BenchmarkOpSHL(b *testing.B) { opBenchmark(b, opSHL, x, y) } + func BenchmarkOpSHR(b *testing.B) { x := 
"FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff" y := "ff" opBenchmark(b, opSHR, x, y) } + func BenchmarkOpSAR(b *testing.B) { x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff" y := "ff" opBenchmark(b, opSAR, x, y) } + func BenchmarkOpIsZero(b *testing.B) { x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff" opBenchmark(b, opIszero, x) @@ -917,7 +925,7 @@ func TestOpMCopy(t *testing.T) { } else { var overflow bool if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow { - t.Error(vmerrs.ErrGasUintOverflow) + t.Error(coreerrors.ErrGasUintOverflow) } } // and the dynamic cost diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 5d495f0af6..90f11975ac 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -27,7 +27,7 @@ package vm import ( - "github.com/ava-labs/coreth/vmerrs" + "github.com/ava-labs/coreth/coreerrors" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" @@ -200,7 +200,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack} } if !contract.UseGas(cost) { - return nil, vmerrs.ErrOutOfGas + return nil, coreerrors.ErrOutOfGas } if operation.dynamicGas != nil { @@ -213,12 +213,12 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( if operation.memorySize != nil { memSize, overflow := operation.memorySize(stack) if overflow { - return nil, vmerrs.ErrGasUintOverflow + return nil, coreerrors.ErrGasUintOverflow } // memory is expanded in words of 32 bytes. Gas // is also calculated in words. if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow { - return nil, vmerrs.ErrGasUintOverflow + return nil, coreerrors.ErrGasUintOverflow } } // Consume the gas and return an error if not enough gas is available. 
@@ -227,7 +227,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( dynamicCost, err = operation.dynamicGas(in.evm, contract, stack, mem, memorySize) cost += dynamicCost // for tracing if err != nil || !contract.UseGas(dynamicCost) { - return nil, vmerrs.ErrOutOfGas + return nil, coreerrors.ErrOutOfGas } // Do tracing before memory expansion if debug { diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 3cc56ef8be..f3f1bb47ad 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -29,8 +29,8 @@ package vm import ( "errors" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" ) @@ -78,7 +78,7 @@ func makeGasSStoreFunc() gasFunc { } // EIP-2200 original clause: - //return params.SloadGasEIP2200, nil // dirty update (2.2) + // return params.SloadGasEIP2200, nil // dirty update (2.2) return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2) } } @@ -119,7 +119,7 @@ func gasExtCodeCopyEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memo var overflow bool // We charge (cold-warm), since 'warm' is already charged as constantGas if gas, overflow = math.SafeAdd(gas, params.ColdAccountAccessCostEIP2929-params.WarmStorageReadCostEIP2929); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } @@ -158,7 +158,7 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc { // Charge the remaining difference here already, to correctly calculate available // gas for call if !contract.UseGas(coldCost) { - return 0, vmerrs.ErrOutOfGas + return 0, coreerrors.ErrOutOfGas } } // Now call the old calculator, which takes into account @@ -178,7 +178,7 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc { var overflow bool if gas, overflow = math.SafeAdd(gas, 
coldCost); overflow { - return 0, vmerrs.ErrGasUintOverflow + return 0, coreerrors.ErrGasUintOverflow } return gas, nil } diff --git a/vmerrs/vmerrs.go b/coreerrors/errors.go similarity index 91% rename from vmerrs/vmerrs.go rename to coreerrors/errors.go index 1518b2fb7d..38f8bcb01c 100644 --- a/vmerrs/vmerrs.go +++ b/coreerrors/errors.go @@ -24,7 +24,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package vmerrs +package coreerrors import ( "errors" @@ -47,7 +47,4 @@ var ( ErrInvalidCode = errors.New("invalid code: must not begin with 0xef") ErrNonceUintOverflow = errors.New("nonce uint64 overflow") ErrAddrProhibited = errors.New("prohibited address cannot be sender or created contract address") - ErrGenerateBlockFailed = errors.New("failed to generate block") - ErrBlockVerificationFailed = errors.New("failed to verify block") - ErrMakeNewBlockFailed = errors.New("failed to make new block") ) diff --git a/eth/gasestimator/gasestimator.go b/eth/gasestimator/gasestimator.go index 291e806b84..75abb6032e 100644 --- a/eth/gasestimator/gasestimator.go +++ b/eth/gasestimator/gasestimator.go @@ -37,8 +37,8 @@ import ( "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -128,7 +128,7 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin return 0, nil, err } if failed { - if result != nil && !errors.Is(result.Err, vmerrs.ErrOutOfGas) { + if result != nil && !errors.Is(result.Err, coreerrors.ErrOutOfGas) { return 0, result.Revert(), result.Err } return 0, nil, fmt.Errorf("gas required exceeds allowance (%d)", hi) diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index 
d94ffd3cf7..2eaac23c1e 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -27,8 +27,8 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" @@ -246,7 +246,7 @@ func (l *StructLogger) GetResult() (json.RawMessage, error) { returnData := common.CopyBytes(l.output) // Return data when successful and revert reason when reverted, otherwise empty. returnVal := fmt.Sprintf("%x", returnData) - if failed && l.err != vmerrs.ErrExecutionReverted { + if failed && l.err != coreerrors.ErrExecutionReverted { returnVal = "" } return json.Marshal(&ExecutionResult{ diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index ea2f0f8980..dd40911571 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -34,8 +34,8 @@ import ( "github.com/ava-labs/coreth/accounts/abi" "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/eth/tracers" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" @@ -91,7 +91,7 @@ func (f *callFrame) processOutput(output []byte, err error) { if f.Type == vm.CREATE || f.Type == vm.CREATE2 { f.To = nil } - if !errors.Is(err, vmerrs.ErrExecutionReverted) || len(output) == 0 { + if !errors.Is(err, coreerrors.ErrExecutionReverted) || len(output) == 0 { return } f.Output = output diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go index 7f5f2ca287..51e2de72b3 100644 --- a/eth/tracers/native/call_flat.go +++ b/eth/tracers/native/call_flat.go @@ -34,8 +34,8 @@ import ( "strings" "github.com/ava-labs/coreth/core/vm" + 
"github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/eth/tracers" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" ) @@ -276,7 +276,7 @@ func flatFromNested(input *callFrame, traceAddress []int, convertErrs bool, ctx // Revert output contains useful information (revert reason). // Otherwise discard result. - if input.Error != "" && input.Error != vmerrs.ErrExecutionReverted.Error() { + if input.Error != "" && input.Error != coreerrors.ErrExecutionReverted.Error() { frame.Result = nil } diff --git a/internal/ethapi/errors.go b/internal/ethapi/errors.go index 928dded8b7..c54bba265a 100644 --- a/internal/ethapi/errors.go +++ b/internal/ethapi/errors.go @@ -30,7 +30,7 @@ import ( "fmt" "github.com/ava-labs/coreth/accounts/abi" - "github.com/ava-labs/coreth/vmerrs" + "github.com/ava-labs/coreth/coreerrors" "github.com/ethereum/go-ethereum/common/hexutil" ) @@ -54,11 +54,11 @@ func (e *revertError) ErrorData() interface{} { // newRevertError creates a revertError instance with the provided revert data. 
func newRevertError(revert []byte) *revertError { - err := vmerrs.ErrExecutionReverted + err := coreerrors.ErrExecutionReverted reason, errUnpack := abi.UnpackRevert(revert) if errUnpack == nil { - err = fmt.Errorf("%w: %v", vmerrs.ErrExecutionReverted, reason) + err = fmt.Errorf("%w: %v", coreerrors.ErrExecutionReverted, reason) } return &revertError{ error: err, diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index a026fdc8f1..510d636c92 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -40,8 +40,8 @@ import ( "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/plugin/evm/gossip" "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/vmerrors" "github.com/ava-labs/coreth/utils" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -778,9 +778,9 @@ func (vm *VM) BuildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo // Handle errors and signal the mempool to take appropriate action switch { - case errors.Is(err, vmerrs.ErrGenerateBlockFailed), errors.Is(err, vmerrs.ErrBlockVerificationFailed): + case errors.Is(err, vmerrors.ErrGenerateBlockFailed), errors.Is(err, vmerrors.ErrBlockVerificationFailed): vm.mempool.CancelCurrentTxs() - case errors.Is(err, vmerrs.ErrMakeNewBlockFailed): + case errors.Is(err, vmerrors.ErrMakeNewBlockFailed): vm.mempool.DiscardCurrentTxs() case err != nil: // Marks the current transactions from the mempool as being successfully issued diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 9528948394..0bf6eeff62 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -38,6 +38,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/gossip" "github.com/ava-labs/coreth/plugin/evm/message" vmsync "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ava-labs/coreth/plugin/evm/vmerrors" warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp" 
"github.com/ava-labs/coreth/rpc" statesyncclient "github.com/ava-labs/coreth/sync/client" @@ -47,7 +48,6 @@ import ( "github.com/ava-labs/coreth/triedb" "github.com/ava-labs/coreth/triedb/hashdb" "github.com/ava-labs/coreth/utils" - "github.com/ava-labs/coreth/vmerrs" "github.com/ava-labs/coreth/warp" // Force-load tracer engine to trigger registration @@ -898,14 +898,14 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo block, err := vm.miner.GenerateBlock(predicateCtx) vm.builder.handleGenerateBlock() if err != nil { - return nil, fmt.Errorf("%w: %w", vmerrs.ErrGenerateBlockFailed, err) + return nil, fmt.Errorf("%w: %w", vmerrors.ErrGenerateBlockFailed, err) } // Note: the status of block is set by ChainState blk, err := vm.blockManager.newBlock(block) if err != nil { log.Debug("discarding txs due to error making new block", "err", err) - return nil, fmt.Errorf("%w: %w", vmerrs.ErrMakeNewBlockFailed, err) + return nil, fmt.Errorf("%w: %w", vmerrors.ErrMakeNewBlockFailed, err) } // Verify is called on a non-wr apped block here, such that this @@ -921,7 +921,7 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo // to the blk state root in the triedb when we are going to call verify // again from the consensus engine with writes enabled. 
if err := blk.semanticVerify(predicateCtx, false /*=writes*/); err != nil { - return nil, fmt.Errorf("%w: %w", vmerrs.ErrBlockVerificationFailed, err) + return nil, fmt.Errorf("%w: %w", vmerrors.ErrBlockVerificationFailed, err) } log.Debug(fmt.Sprintf("Built block %s", blk.ID())) diff --git a/plugin/evm/vmerrors/errors.go b/plugin/evm/vmerrors/errors.go new file mode 100644 index 0000000000..a92d34a6bc --- /dev/null +++ b/plugin/evm/vmerrors/errors.go @@ -0,0 +1,9 @@ +package vmerrors + +import "errors" + +var ( + ErrGenerateBlockFailed = errors.New("failed to generate block") + ErrBlockVerificationFailed = errors.New("failed to verify block") + ErrMakeNewBlockFailed = errors.New("failed to make new block") +) diff --git a/precompile/contract/utils.go b/precompile/contract/utils.go index 6b8ed7c832..977fe61ed2 100644 --- a/precompile/contract/utils.go +++ b/precompile/contract/utils.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/ava-labs/coreth/accounts/abi" - "github.com/ava-labs/coreth/vmerrs" + "github.com/ava-labs/coreth/coreerrors" "github.com/ethereum/go-ethereum/crypto" ) @@ -43,7 +43,7 @@ func CalculateFunctionSelector(functionSignature string) []byte { // DeductGas checks if [suppliedGas] is sufficient against [requiredGas] and deducts [requiredGas] from [suppliedGas]. 
func DeductGas(suppliedGas uint64, requiredGas uint64) (uint64, error) { if suppliedGas < requiredGas { - return 0, vmerrs.ErrOutOfGas + return 0, coreerrors.ErrOutOfGas } return suppliedGas - requiredGas, nil } diff --git a/precompile/contracts/warp/contract.go b/precompile/contracts/warp/contract.go index 7e30f9f9eb..62539be9d7 100644 --- a/precompile/contracts/warp/contract.go +++ b/precompile/contracts/warp/contract.go @@ -10,8 +10,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/coreth/accounts/abi" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/precompile/contract" - "github.com/ava-labs/coreth/vmerrs" _ "embed" @@ -236,13 +236,13 @@ func sendWarpMessage(accessibleState contract.AccessibleState, caller common.Add // This ensures that we charge gas before we unpack the variable sized input. payloadGas, overflow := math.SafeMul(SendWarpMessageGasCostPerByte, uint64(len(input))) if overflow { - return nil, 0, vmerrs.ErrOutOfGas + return nil, 0, coreerrors.ErrOutOfGas } if remainingGas, err = contract.DeductGas(remainingGas, payloadGas); err != nil { return nil, 0, err } if readOnly { - return nil, remainingGas, vmerrs.ErrWriteProtection + return nil, remainingGas, coreerrors.ErrWriteProtection } // unpack the arguments payloadData, err := UnpackSendWarpMessageInput(input) diff --git a/precompile/contracts/warp/contract_test.go b/precompile/contracts/warp/contract_test.go index a3bb563b3b..522fdcb495 100644 --- a/precompile/contracts/warp/contract_test.go +++ b/precompile/contracts/warp/contract_test.go @@ -14,11 +14,11 @@ import ( avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/precompile/contract" "github.com/ava-labs/coreth/precompile/testutils" 
"github.com/ava-labs/coreth/predicate" "github.com/ava-labs/coreth/utils" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) @@ -74,7 +74,7 @@ func TestGetBlockchainID(t *testing.T) { }, SuppliedGas: GetBlockchainIDGasCost - 1, ReadOnly: false, - ExpectedErr: vmerrs.ErrOutOfGas.Error(), + ExpectedErr: coreerrors.ErrOutOfGas.Error(), }, } @@ -108,21 +108,21 @@ func TestSendWarpMessage(t *testing.T) { InputFn: func(t testing.TB) []byte { return sendWarpMessageInput }, SuppliedGas: SendWarpMessageGasCost + uint64(len(sendWarpMessageInput[4:])*int(SendWarpMessageGasCostPerByte)), ReadOnly: true, - ExpectedErr: vmerrs.ErrWriteProtection.Error(), + ExpectedErr: coreerrors.ErrWriteProtection.Error(), }, "send warp message insufficient gas for first step": { Caller: callerAddr, InputFn: func(t testing.TB) []byte { return sendWarpMessageInput }, SuppliedGas: SendWarpMessageGasCost - 1, ReadOnly: false, - ExpectedErr: vmerrs.ErrOutOfGas.Error(), + ExpectedErr: coreerrors.ErrOutOfGas.Error(), }, "send warp message insufficient gas for payload bytes": { Caller: callerAddr, InputFn: func(t testing.TB) []byte { return sendWarpMessageInput }, SuppliedGas: SendWarpMessageGasCost + uint64(len(sendWarpMessageInput[4:])*int(SendWarpMessageGasCostPerByte)) - 1, ReadOnly: false, - ExpectedErr: vmerrs.ErrOutOfGas.Error(), + ExpectedErr: coreerrors.ErrOutOfGas.Error(), }, "send warp message invalid input": { Caller: callerAddr, @@ -361,7 +361,7 @@ func TestGetVerifiedWarpMessage(t *testing.T) { }, SuppliedGas: GetVerifiedWarpMessageBaseCost - 1, ReadOnly: false, - ExpectedErr: vmerrs.ErrOutOfGas.Error(), + ExpectedErr: coreerrors.ErrOutOfGas.Error(), }, "get message out of gas": { Caller: callerAddr, @@ -374,7 +374,7 @@ func TestGetVerifiedWarpMessage(t *testing.T) { }, SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)) - 1, ReadOnly: false, - ExpectedErr: 
vmerrs.ErrOutOfGas.Error(), + ExpectedErr: coreerrors.ErrOutOfGas.Error(), }, "get message invalid predicate packing": { Caller: callerAddr, @@ -639,7 +639,7 @@ func TestGetVerifiedWarpBlockHash(t *testing.T) { }, SuppliedGas: GetVerifiedWarpMessageBaseCost - 1, ReadOnly: false, - ExpectedErr: vmerrs.ErrOutOfGas.Error(), + ExpectedErr: coreerrors.ErrOutOfGas.Error(), }, "get message out of gas": { Caller: callerAddr, @@ -652,7 +652,7 @@ func TestGetVerifiedWarpBlockHash(t *testing.T) { }, SuppliedGas: GetVerifiedWarpMessageBaseCost + GasCostPerWarpMessageBytes*uint64(len(warpMessagePredicateBytes)) - 1, ReadOnly: false, - ExpectedErr: vmerrs.ErrOutOfGas.Error(), + ExpectedErr: coreerrors.ErrOutOfGas.Error(), }, "get message invalid predicate packing": { Caller: callerAddr, diff --git a/precompile/contracts/warp/contract_warp_handler.go b/precompile/contracts/warp/contract_warp_handler.go index 71142ed084..31e0677565 100644 --- a/precompile/contracts/warp/contract_warp_handler.go +++ b/precompile/contracts/warp/contract_warp_handler.go @@ -9,9 +9,9 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/coreth/coreerrors" "github.com/ava-labs/coreth/precompile/contract" "github.com/ava-labs/coreth/predicate" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" ) @@ -71,7 +71,7 @@ func handleWarpMessage(accessibleState contract.AccessibleState, input []byte, s // EVM execution because each execution incurs an additional read cost. 
msgBytesGas, overflow := math.SafeMul(GasCostPerWarpMessageBytes, uint64(len(predicateBytes))) if overflow { - return nil, 0, vmerrs.ErrOutOfGas + return nil, 0, coreerrors.ErrOutOfGas } if remainingGas, err = contract.DeductGas(remainingGas, msgBytesGas); err != nil { return nil, 0, err From 7f9b8940e4b164fc0cc693dd3c0c2ba3c74e3c44 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 10 Feb 2025 13:29:06 +0300 Subject: [PATCH 73/91] revert format changes --- core/txpool/validation.go | 8 +++++--- core/vm/contracts.go | 5 +---- core/vm/eips.go | 1 - core/vm/evm.go | 6 +++--- core/vm/gas_table.go | 5 ++--- core/vm/gas_table_test.go | 4 ++-- core/vm/instructions.go | 2 +- core/vm/instructions_test.go | 18 +++++------------- core/vm/operations_acl.go | 2 +- eth/gasprice/gasprice_test.go | 2 +- 10 files changed, 21 insertions(+), 32 deletions(-) diff --git a/core/txpool/validation.go b/core/txpool/validation.go index 32473b1680..21ebdf5cf0 100644 --- a/core/txpool/validation.go +++ b/core/txpool/validation.go @@ -41,9 +41,11 @@ import ( "github.com/ethereum/go-ethereum/log" ) -// blobTxMinBlobGasPrice is the big.Int version of the configured protocol -// parameter to avoid constucting a new big integer for every transaction. -var blobTxMinBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice) +var ( + // blobTxMinBlobGasPrice is the big.Int version of the configured protocol + // parameter to avoid constucting a new big integer for every transaction. + blobTxMinBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice) +) // ValidationOptions define certain differences between transaction validation // across the different pools without having to duplicate those checks. 
diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 151255adc3..80ba532689 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -339,7 +339,6 @@ type sha256hash struct{} func (c *sha256hash) RequiredGas(input []byte) uint64 { return uint64(len(input)+31)/32*params.Sha256PerWordGas + params.Sha256BaseGas } - func (c *sha256hash) Run(input []byte) ([]byte, error) { h := sha256.Sum256(input) return h[:], nil @@ -355,7 +354,6 @@ type ripemd160hash struct{} func (c *ripemd160hash) RequiredGas(input []byte) uint64 { return uint64(len(input)+31)/32*params.Ripemd160PerWordGas + params.Ripemd160BaseGas } - func (c *ripemd160hash) Run(input []byte) ([]byte, error) { ripemd := ripemd160.New() ripemd.Write(input) @@ -372,7 +370,6 @@ type dataCopy struct{} func (c *dataCopy) RequiredGas(input []byte) uint64 { return uint64(len(input)+31)/32*params.IdentityPerWordGas + params.IdentityBaseGas } - func (c *dataCopy) Run(in []byte) ([]byte, error) { return common.CopyBytes(in), nil } @@ -526,7 +523,7 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) { // Modulo 0 is undefined, return zero return common.LeftPadBytes([]byte{}, int(modLen)), nil case base.BitLen() == 1: // a bit length of 1 means it's 1 (or -1). 
- // If base == 1, then we can just return base % mod (if mod >= 1, which it is) + //If base == 1, then we can just return base % mod (if mod >= 1, which it is) v = base.Mod(base, mod).Bytes() default: v = base.Exp(base, exp, mod).Bytes() diff --git a/core/vm/eips.go b/core/vm/eips.go index 793b3751dc..e318a76863 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -65,7 +65,6 @@ func ValidEip(eipNum int) bool { _, ok := activators[eipNum] return ok } - func ActivateableEips() []string { var nums []string for k := range activators { diff --git a/core/vm/evm.go b/core/vm/evm.go index cb984ca7e0..1e89e04817 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -367,7 +367,7 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) { return nil, gas, coreerrors.ErrInsufficientBalance } - snapshot := evm.StateDB.Snapshot() + var snapshot = evm.StateDB.Snapshot() // Invoke tracer hooks that signal entering/exiting a call frame if evm.Config.Tracer != nil { @@ -408,7 +408,7 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by if evm.depth > int(params.CallCreateDepth) { return nil, gas, coreerrors.ErrDepth } - snapshot := evm.StateDB.Snapshot() + var snapshot = evm.StateDB.Snapshot() // Invoke tracer hooks that signal entering/exiting a call frame if evm.Config.Tracer != nil { @@ -456,7 +456,7 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte // after all empty accounts were deleted, so this is not required. However, if we omit this, // then certain tests start failing; stRevertTest/RevertPrecompiledTouchExactOOG.json. // We could change this, but for now it's left for legacy reasons - snapshot := evm.StateDB.Snapshot() + var snapshot = evm.StateDB.Snapshot() // We do an AddBalance of zero here, just in order to trigger a touch. 
// This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium, diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index d378f256d8..484d15ce48 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -366,7 +366,6 @@ func gasCreateEip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m } return gas, nil } - func gasCreate2Eip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { gas, err := memoryGasCost(mem, memorySize) if err != nil { @@ -507,7 +506,7 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me // EIP150 homestead gas reprice fork: if evm.chainRules.IsEIP150 { gas = params.SelfdestructGasEIP150 - address := common.Address(stack.Back(0).Bytes20()) + var address = common.Address(stack.Back(0).Bytes20()) if evm.chainRules.IsEIP158 { // if empty and transfers value @@ -530,7 +529,7 @@ func gasSelfdestructAP1(evm *EVM, contract *Contract, stack *Stack, mem *Memory, // EIP150 homestead gas reprice fork: if evm.chainRules.IsEIP150 { gas = params.SelfdestructGasEIP150 - address := common.Address(stack.Back(0).Bytes20()) + var address = common.Address(stack.Back(0).Bytes20()) if evm.chainRules.IsEIP158 { // if empty and transfers value diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index 81a84ff2e1..03d7e3153d 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -145,7 +145,7 @@ var createGasTests = []struct { func TestCreateGas(t *testing.T) { for i, tt := range createGasTests { - gasUsed := uint64(0) + var gasUsed = uint64(0) doCheck := func(testGas int) bool { address := common.BytesToAddress([]byte("contract")) statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) @@ -165,7 +165,7 @@ func TestCreateGas(t *testing.T) { // Note: we use Cortina instead of AllEthashProtocolChanges (upstream) // because it is the last fork before the activation of EIP-3860 
vmenv := NewEVM(vmctx, TxContext{}, statedb, params.TestCortinaChainConfig, config) - startGas := uint64(testGas) + var startGas = uint64(testGas) ret, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, startGas, new(uint256.Int)) if err != nil { return false diff --git a/core/vm/instructions.go b/core/vm/instructions.go index af0fd2d530..c20020abda 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -342,7 +342,7 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte return nil, coreerrors.ErrReturnDataOutOfBounds } // we can reuse dataOffset now (aliasing it for clarity) - end := dataOffset + var end = dataOffset end.Add(&dataOffset, &length) end64, overflow := end.Uint64WithOverflow() if overflow || uint64(len(interpreter.returnData)) < end64 { diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 4011772b5f..482020ce21 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -57,11 +57,9 @@ type twoOperandParams struct { y string } -var ( - alphabetSoup = "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff" - commonParams []*twoOperandParams - twoOpMethods map[string]executionFunc -) +var alphabetSoup = "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff" +var commonParams []*twoOperandParams +var twoOpMethods map[string]executionFunc type contractRef struct { addr common.Address @@ -228,8 +226,7 @@ func TestAddMod(t *testing.T) { z string expected string }{ - { - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + {"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe", @@ -285,7 +282,7 @@ func TestWriteExpectedValues(t *testing.T) { if err != nil { t.Fatal(err) } - _ = 
os.WriteFile(fmt.Sprintf("testdata/testcases_%v.json", name), data, 0o644) + _ = os.WriteFile(fmt.Sprintf("testdata/testcases_%v.json", name), data, 0644) if err != nil { t.Fatal(err) } @@ -474,13 +471,11 @@ func BenchmarkOpEq(b *testing.B) { opBenchmark(b, opEq, x, y) } - func BenchmarkOpEq2(b *testing.B) { x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff" y := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201fffffffe" opBenchmark(b, opEq, x, y) } - func BenchmarkOpAnd(b *testing.B) { x := alphabetSoup y := alphabetSoup @@ -531,21 +526,18 @@ func BenchmarkOpSHL(b *testing.B) { opBenchmark(b, opSHL, x, y) } - func BenchmarkOpSHR(b *testing.B) { x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff" y := "ff" opBenchmark(b, opSHR, x, y) } - func BenchmarkOpSAR(b *testing.B) { x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff" y := "ff" opBenchmark(b, opSAR, x, y) } - func BenchmarkOpIsZero(b *testing.B) { x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff" opBenchmark(b, opIszero, x) diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index f3f1bb47ad..8641c346f8 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -78,7 +78,7 @@ func makeGasSStoreFunc() gasFunc { } // EIP-2200 original clause: - // return params.SloadGasEIP2200, nil // dirty update (2.2) + //return params.SloadGasEIP2200, nil // dirty update (2.2) return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2) } } diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index dda6fd808d..3a5bf6ac3e 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -93,7 +93,7 @@ func (b *testBackend) teardown() { } func newTestBackendFakerEngine(t *testing.T, config *params.ChainConfig, numBlocks int, extDataGasUsage *big.Int, genBlocks func(i int, b *core.BlockGen)) *testBackend { - gspec := &core.Genesis{ + var gspec = 
&core.Genesis{ Config: config, Alloc: types.GenesisAlloc{addr: {Balance: bal}}, } From d54a5392aa822ae32813c6b0acba6ef61d227ce9 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 10 Feb 2025 14:51:20 +0300 Subject: [PATCH 74/91] reviews --- plugin/evm/atomic/atomictest/tx.go | 2 +- plugin/evm/atomic/import_tx.go | 4 +- plugin/evm/atomic/state/atomic_backend.go | 6 +- ..._tx_repository.go => atomic_repository.go} | 32 ++--- ...tory_test.go => atomic_repository_test.go} | 4 +- .../atomic/sync/atomic_summary_provider.go | 2 +- plugin/evm/atomic/tx.go | 3 +- plugin/evm/atomic/vm/api.go | 3 + plugin/evm/atomic/vm/atomic_leaf_handler.go | 5 +- plugin/evm/atomic/vm/block_extension.go | 5 +- plugin/evm/atomic/vm/export_tx_test.go | 14 +-- plugin/evm/atomic/vm/ext_data_hashes.go | 3 + plugin/evm/atomic/vm/import_tx_test.go | 2 +- plugin/evm/atomic/vm/syncervm_test.go | 2 +- plugin/evm/atomic/vm/tx_gossip_test.go | 6 +- plugin/evm/atomic/vm/tx_semantic_verifier.go | 10 +- plugin/evm/atomic/vm/tx_test.go | 4 +- plugin/evm/atomic/vm/vm.go | 33 +++-- plugin/evm/atomic/vm/vm_test.go | 30 ++--- plugin/evm/block.go | 2 +- plugin/evm/extension/config.go | 19 +-- .../testutils/test_block_sync_summary.go | 16 +++ plugin/evm/sync/syncervm_client.go | 4 +- {utils => plugin/evm/utils}/handler.go | 0 plugin/evm/vm.go | 119 +++--------------- plugin/evm/vm_extensible.go | 107 ++++++++++++++++ plugin/evm/vm_warp_test.go | 12 +- sync/client/client_test.go | 12 +- sync/handlers/leafs_request_test.go | 12 +- sync/handlers/stats/stats.go | 4 +- sync/statesync/sync_test.go | 12 +- warp/handlers/signature_request_test.go | 12 +- 32 files changed, 260 insertions(+), 241 deletions(-) rename plugin/evm/atomic/state/{atomic_tx_repository.go => atomic_repository.go} (90%) rename plugin/evm/atomic/state/{atomic_tx_repository_test.go => atomic_repository_test.go} (97%) create mode 100644 plugin/evm/message/testutils/test_block_sync_summary.go rename {utils => plugin/evm/utils}/handler.go (100%) 
create mode 100644 plugin/evm/vm_extensible.go diff --git a/plugin/evm/atomic/atomictest/tx.go b/plugin/evm/atomic/atomictest/tx.go index 1118e5e199..5ad8d8cdad 100644 --- a/plugin/evm/atomic/atomictest/tx.go +++ b/plugin/evm/atomic/atomictest/tx.go @@ -83,7 +83,7 @@ func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } // InputUTXOs implements the UnsignedAtomicTx interface func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } -// SemanticVerify implements the UnsignedAtomicTx interface +// Visit implements the UnsignedAtomicTx interface func (t *TestUnsignedTx) Visit(v atomic.Visitor) error { return t.VisitV } diff --git a/plugin/evm/atomic/import_tx.go b/plugin/evm/atomic/import_tx.go index dc81355270..e97d352c76 100644 --- a/plugin/evm/atomic/import_tx.go +++ b/plugin/evm/atomic/import_tx.go @@ -183,8 +183,8 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { } // AtomicOps returns imported inputs spent on this transaction -// We spend imported UTXOs here rather than in semanticVerify because -// we don't want to remove an imported UTXO in semanticVerify +// We spend imported UTXOs here rather than in verification because +// we don't want to remove an imported UTXO in verification // only to have the transaction not be Accepted. This would be inconsistent. // Recall that imported UTXOs are not kept in a versionDB. func (utx *UnsignedImportTx) AtomicOps() (ids.ID, *atomic.Requests, error) { diff --git a/plugin/evm/atomic/state/atomic_backend.go b/plugin/evm/atomic/state/atomic_backend.go index 79ebae32dc..ab7024ee85 100644 --- a/plugin/evm/atomic/state/atomic_backend.go +++ b/plugin/evm/atomic/state/atomic_backend.go @@ -26,13 +26,13 @@ const ( ) // AtomicBackend implements the AtomicBackend interface using -// the AtomicTrie, AtomicTxRepository, and the VM's shared memory. +// the AtomicTrie, AtomicRepository, and the VM's shared memory. 
type AtomicBackend struct { codec codec.Manager bonusBlocks map[uint64]ids.ID // Map of height to blockID for blocks to skip indexing sharedMemory avalancheatomic.SharedMemory - repo *AtomicTxRepository + repo *AtomicRepository atomicTrie *AtomicTrie lastAcceptedHash common.Hash @@ -42,7 +42,7 @@ type AtomicBackend struct { // NewAtomicBackend creates an AtomicBackend from the specified dependencies func NewAtomicBackend( sharedMemory avalancheatomic.SharedMemory, - bonusBlocks map[uint64]ids.ID, repo *AtomicTxRepository, + bonusBlocks map[uint64]ids.ID, repo *AtomicRepository, lastAcceptedHeight uint64, lastAcceptedHash common.Hash, commitInterval uint64, ) (*AtomicBackend, error) { codec := repo.codec diff --git a/plugin/evm/atomic/state/atomic_tx_repository.go b/plugin/evm/atomic/state/atomic_repository.go similarity index 90% rename from plugin/evm/atomic/state/atomic_tx_repository.go rename to plugin/evm/atomic/state/atomic_repository.go index 83794fea75..cf5d0d6c8d 100644 --- a/plugin/evm/atomic/state/atomic_tx_repository.go +++ b/plugin/evm/atomic/state/atomic_repository.go @@ -39,8 +39,8 @@ var ( // bonusBlocksRepairedKey = []byte("bonusBlocksRepaired") ) -// atomicTxRepository is a prefixdb implementation of the AtomicTxRepository interface -type AtomicTxRepository struct { +// AtomicRepository manages the database interactions for atomic operations. +type AtomicRepository struct { // [acceptedAtomicTxDB] maintains an index of [txID] => [height]+[atomic tx] for all accepted atomic txs. 
acceptedAtomicTxDB database.Database @@ -64,8 +64,8 @@ type AtomicTxRepository struct { func NewAtomicTxRepository( db *versiondb.Database, codec codec.Manager, lastAcceptedHeight uint64, -) (*AtomicTxRepository, error) { - repo := &AtomicTxRepository{ +) (*AtomicRepository, error) { + repo := &AtomicRepository{ atomicTrieDB: prefixdb.New(atomicTrieDBPrefix, db), metadataDB: prefixdb.New(atomicTrieMetaDBPrefix, db), acceptedAtomicTxDB: prefixdb.New(atomicTxIDDBPrefix, db), @@ -82,7 +82,7 @@ func NewAtomicTxRepository( // initializeHeightIndex initializes the atomic repository and takes care of any required migration from the previous database // format which did not have a height -> txs index. -func (a *AtomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) error { +func (a *AtomicRepository) initializeHeightIndex(lastAcceptedHeight uint64) error { startTime := time.Now() lastLogTime := startTime @@ -182,7 +182,7 @@ func (a *AtomicTxRepository) initializeHeightIndex(lastAcceptedHeight uint64) er } // GetIndexHeight returns the last height that was indexed by the atomic repository -func (a *AtomicTxRepository) GetIndexHeight() (uint64, error) { +func (a *AtomicRepository) GetIndexHeight() (uint64, error) { indexHeightBytes, err := a.atomicRepoMetadataDB.Get(maxIndexedHeightKey) if err != nil { return 0, err @@ -198,7 +198,7 @@ func (a *AtomicTxRepository) GetIndexHeight() (uint64, error) { // GetByTxID queries [acceptedAtomicTxDB] for the [txID], parses a [*atomic.Tx] object // if an entry is found, and returns it with the block height the atomic tx it // represents was accepted on, along with an optional error. 
-func (a *AtomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) { +func (a *AtomicRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) { indexedTxBytes, err := a.acceptedAtomicTxDB.Get(txID[:]) if err != nil { return nil, 0, err @@ -226,14 +226,14 @@ func (a *AtomicTxRepository) GetByTxID(txID ids.ID) (*atomic.Tx, uint64, error) // no atomic transactions in the block accepted at [height]. // If [height] is greater than the last accepted height, then this will always return // [database.ErrNotFound] -func (a *AtomicTxRepository) GetByHeight(height uint64) ([]*atomic.Tx, error) { +func (a *AtomicRepository) GetByHeight(height uint64) ([]*atomic.Tx, error) { heightBytes := make([]byte, wrappers.LongLen) binary.BigEndian.PutUint64(heightBytes, height) return a.getByHeightBytes(heightBytes) } -func (a *AtomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, error) { +func (a *AtomicRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, error) { txsBytes, err := a.acceptedAtomicTxByHeightDB.Get(heightBytes) if err != nil { return nil, err @@ -245,17 +245,17 @@ func (a *AtomicTxRepository) getByHeightBytes(heightBytes []byte) ([]*atomic.Tx, // by txID or height. This method must be called only once per height, // and [txs] must include all atomic txs for the block accepted at the // corresponding height. -func (a *AtomicTxRepository) Write(height uint64, txs []*atomic.Tx) error { +func (a *AtomicRepository) Write(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, false) } // WriteBonus is similar to Write, except the [txID] => [height] is not // overwritten if already exists. 
-func (a *AtomicTxRepository) WriteBonus(height uint64, txs []*atomic.Tx) error { +func (a *AtomicRepository) WriteBonus(height uint64, txs []*atomic.Tx) error { return a.write(height, txs, true) } -func (a *AtomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) error { +func (a *AtomicRepository) write(height uint64, txs []*atomic.Tx, bonus bool) error { if len(txs) > 1 { // txs should be stored in order of txID to ensure consistency // with txs initialized from the txID index. @@ -297,7 +297,7 @@ func (a *AtomicTxRepository) write(height uint64, txs []*atomic.Tx, bonus bool) // indexTxByID writes [tx] into the [acceptedAtomicTxDB] stored as // [height] + [tx bytes] -func (a *AtomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) error { +func (a *AtomicRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) error { txBytes, err := a.codec.Marshal(atomic.CodecVersion, tx) if err != nil { return err @@ -317,7 +317,7 @@ func (a *AtomicTxRepository) indexTxByID(heightBytes []byte, tx *atomic.Tx) erro } // indexTxsAtHeight adds [height] -> [txs] to the [acceptedAtomicTxByHeightDB] -func (a *AtomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic.Tx) error { +func (a *AtomicRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic.Tx) error { txsBytes, err := a.codec.Marshal(atomic.CodecVersion, txs) if err != nil { return err @@ -332,7 +332,7 @@ func (a *AtomicTxRepository) indexTxsAtHeight(heightBytes []byte, txs []*atomic. // [tx] to the slice of transactions stored there. // This function is used while initializing the atomic repository to re-index the atomic transactions // by txID into the height -> txs index. 
-func (a *AtomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomic.Tx) error { +func (a *AtomicRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomic.Tx) error { txs, err := a.getByHeightBytes(heightBytes) if err != nil && err != database.ErrNotFound { return err @@ -353,7 +353,7 @@ func (a *AtomicTxRepository) appendTxToHeightIndex(heightBytes []byte, tx *atomi // IterateByHeight returns an iterator beginning at [height]. // Note [height] must be greater than 0 since we assume there are no // atomic txs in genesis. -func (a *AtomicTxRepository) IterateByHeight(height uint64) database.Iterator { +func (a *AtomicRepository) IterateByHeight(height uint64) database.Iterator { heightBytes := make([]byte, wrappers.LongLen) binary.BigEndian.PutUint64(heightBytes, height) return a.acceptedAtomicTxByHeightDB.NewIteratorWithStart(heightBytes) diff --git a/plugin/evm/atomic/state/atomic_tx_repository_test.go b/plugin/evm/atomic/state/atomic_repository_test.go similarity index 97% rename from plugin/evm/atomic/state/atomic_tx_repository_test.go rename to plugin/evm/atomic/state/atomic_repository_test.go index 8886864dad..d2c6e51071 100644 --- a/plugin/evm/atomic/state/atomic_tx_repository_test.go +++ b/plugin/evm/atomic/state/atomic_repository_test.go @@ -69,7 +69,7 @@ func constTxsPerHeight(txCount int) func(uint64) int { // writeTxs writes [txsPerHeight] txs for heights ranging in [fromHeight, toHeight) through the Write call on [repo], // storing the resulting transactions in [txMap] if non-nil and the resulting atomic operations in [operationsMap] // if non-nil. 
-func writeTxs(t testing.TB, repo *AtomicTxRepository, fromHeight uint64, toHeight uint64, +func writeTxs(t testing.TB, repo *AtomicRepository, fromHeight uint64, toHeight uint64, txsPerHeight func(height uint64) int, txMap map[uint64][]*atomic.Tx, operationsMap map[uint64]map[ids.ID]*avalancheatomic.Requests, ) { for height := fromHeight; height < toHeight; height++ { @@ -95,7 +95,7 @@ func writeTxs(t testing.TB, repo *AtomicTxRepository, fromHeight uint64, toHeigh } // verifyTxs asserts [repo] can find all txs in [txMap] by height and txID -func verifyTxs(t testing.TB, repo *AtomicTxRepository, txMap map[uint64][]*atomic.Tx) { +func verifyTxs(t testing.TB, repo *AtomicRepository, txMap map[uint64][]*atomic.Tx) { // We should be able to fetch indexed txs by height: for height, expectedTxs := range txMap { txs, err := repo.GetByHeight(height) diff --git a/plugin/evm/atomic/sync/atomic_summary_provider.go b/plugin/evm/atomic/sync/atomic_summary_provider.go index cddf2cdabb..808bf8ca67 100644 --- a/plugin/evm/atomic/sync/atomic_summary_provider.go +++ b/plugin/evm/atomic/sync/atomic_summary_provider.go @@ -12,7 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" ) -var _ sync.SummaryProvider = &AtomicSummaryProvider{} +var _ sync.SummaryProvider = (*AtomicSummaryProvider)(nil) type AtomicSummaryProvider struct { atomicTrie AtomicTrie diff --git a/plugin/evm/atomic/tx.go b/plugin/evm/atomic/tx.go index 0400529865..2c572add9c 100644 --- a/plugin/evm/atomic/tx.go +++ b/plugin/evm/atomic/tx.go @@ -152,7 +152,8 @@ type UnsignedAtomicTx interface { InputUTXOs() set.Set[ids.ID] // Verify attempts to verify that the transaction is well formed Verify(ctx *snow.Context, rules params.Rules) error - // Allow vm to execute custom logic against the underlying transaction types. + // Visit calls the corresponding method for the underlying transaction type. + // This is used in semantic verification of the tx. 
Visit(v Visitor) error // AtomicOps returns the blockchainID and set of atomic requests that // must be applied to shared memory for this transaction to be accepted. diff --git a/plugin/evm/atomic/vm/api.go b/plugin/evm/atomic/vm/api.go index aac952d2ec..3c99106f7a 100644 --- a/plugin/evm/atomic/vm/api.go +++ b/plugin/evm/atomic/vm/api.go @@ -1,3 +1,6 @@ +// (c) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + package vm import ( diff --git a/plugin/evm/atomic/vm/atomic_leaf_handler.go b/plugin/evm/atomic/vm/atomic_leaf_handler.go index 0ac9d57de7..6896c48a58 100644 --- a/plugin/evm/atomic/vm/atomic_leaf_handler.go +++ b/plugin/evm/atomic/vm/atomic_leaf_handler.go @@ -1,3 +1,6 @@ +// (c) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + package vm import ( @@ -35,6 +38,6 @@ func NewAtomicLeafHandler() *atomicLeafHandler { // Initialize initializes the atomicLeafHandler with the provided atomicTrieDB, trieKeyLength, and networkCodec func (a *atomicLeafHandler) Initialize(atomicTrieDB *triedb.Database, trieKeyLength int, networkCodec codec.Manager) { - handlerStats := stats.NewHandlerStats(metrics.Enabled) + handlerStats := stats.GetOrRegisterHandlerStats(metrics.Enabled) a.LeafRequestHandler = handlers.NewLeafsRequestHandler(atomicTrieDB, trieKeyLength, nil, networkCodec, handlerStats) } diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go index d956210c9c..c158a474db 100644 --- a/plugin/evm/atomic/vm/block_extension.go +++ b/plugin/evm/atomic/vm/block_extension.go @@ -198,9 +198,8 @@ func (be *blockExtension) OnReject(b extension.VMBlock) error { return atomicState.Reject() } -// OnCleanup is called when the block is cleaned up. This is called the wrapper -// block manager's OnCleanup method. -func (be *blockExtension) OnCleanup(b extension.VMBlock) { +// OnError is called when the block is cleaned up after a failed insertion. 
+func (be *blockExtension) OnError(b extension.VMBlock) { if atomicState, err := be.vm.atomicBackend.GetVerifiedAtomicState(b.GetEthBlock().Hash()); err == nil { atomicState.Reject() } diff --git a/plugin/evm/atomic/vm/export_tx_test.go b/plugin/evm/atomic/vm/export_tx_test.go index 686ae985ed..a910c63d19 100644 --- a/plugin/evm/atomic/vm/export_tx_test.go +++ b/plugin/evm/atomic/vm/export_tx_test.go @@ -110,7 +110,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, t.Fatal(err) } for _, addr := range testutils.TestShortIDAddrs { - exportTx, err := atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) + exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -948,7 +948,7 @@ func TestExportTxSemanticVerify(t *testing.T) { err := exportTx.Visit(&semanticVerifier{ backend: backend, - atx: tx, + tx: tx, parent: parent, baseFee: test.baseFee, }) @@ -1810,7 +1810,7 @@ func TestNewExportTx(t *testing.T) { backend := &verifierBackend{ ctx: vm.ctx, fx: &vm.fx, - rules: vm.CurrentRules(), + rules: vm.currentRules(), chainConfig: vm.Ethereum().BlockChain().Config(), bootstrapped: vm.IsBootstrapped(), blockFetcher: vm, @@ -1819,7 +1819,7 @@ func TestNewExportTx(t *testing.T) { if err := exportTx.Visit(&semanticVerifier{ backend: backend, - atx: tx, + tx: tx, parent: parent, baseFee: parent.GetEthBlock().BaseFee(), }); err != nil { @@ -2017,7 +2017,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - tx, err = atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, tid, exportAmount, vm.ctx.XChainID, exportId, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) + tx, err = atomic.NewExportTx(vm.ctx, vm.currentRules(), state, tid, exportAmount, 
vm.ctx.XChainID, exportId, testutils.InitialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -2026,7 +2026,7 @@ func TestNewExportTxMulticoin(t *testing.T) { backend := &verifierBackend{ ctx: vm.ctx, fx: &vm.fx, - rules: vm.CurrentRules(), + rules: vm.currentRules(), chainConfig: vm.Ethereum().BlockChain().Config(), bootstrapped: vm.IsBootstrapped(), blockFetcher: vm, @@ -2035,7 +2035,7 @@ func TestNewExportTxMulticoin(t *testing.T) { if err := exportTx.Visit(&semanticVerifier{ backend: backend, - atx: tx, + tx: tx, parent: parent, baseFee: parent.GetEthBlock().BaseFee(), }); err != nil { diff --git a/plugin/evm/atomic/vm/ext_data_hashes.go b/plugin/evm/atomic/vm/ext_data_hashes.go index 82748c24c0..28489e7f7f 100644 --- a/plugin/evm/atomic/vm/ext_data_hashes.go +++ b/plugin/evm/atomic/vm/ext_data_hashes.go @@ -1,3 +1,6 @@ +// (c) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + package vm import ( diff --git a/plugin/evm/atomic/vm/import_tx_test.go b/plugin/evm/atomic/vm/import_tx_test.go index 8a3bc2a51d..0b23069148 100644 --- a/plugin/evm/atomic/vm/import_tx_test.go +++ b/plugin/evm/atomic/vm/import_tx_test.go @@ -450,7 +450,7 @@ func TestNewImportTx(t *testing.T) { if err != nil { t.Fatal(err) } - rules := vm.CurrentRules() + rules := vm.currentRules() switch { case rules.IsApricotPhase3: actualCost, err := importTx.GasUsed(rules.IsApricotPhase5) diff --git a/plugin/evm/atomic/vm/syncervm_test.go b/plugin/evm/atomic/vm/syncervm_test.go index a4b632b1e8..bd21a351af 100644 --- a/plugin/evm/atomic/vm/syncervm_test.go +++ b/plugin/evm/atomic/vm/syncervm_test.go @@ -51,7 +51,7 @@ func TestAtomicSyncerVM(t *testing.T) { } exportTx, err := atomic.NewExportTx( atomicVM.ctx, - atomicVM.CurrentRules(), + atomicVM.currentRules(), state, atomicVM.ctx.AVAXAssetID, importAmount/2, diff --git a/plugin/evm/atomic/vm/tx_gossip_test.go b/plugin/evm/atomic/vm/tx_gossip_test.go index 0fcfca3963..54f773063f 
100644 --- a/plugin/evm/atomic/vm/tx_gossip_test.go +++ b/plugin/evm/atomic/vm/tx_gossip_test.go @@ -144,7 +144,7 @@ func TestAtomicTxGossip(t *testing.T) { pk.Address(), ) require.NoError(err) - tx, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, testutils.InitialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, testutils.InitialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) @@ -226,7 +226,7 @@ func TestAtomicTxPushGossipOutbound(t *testing.T) { pk.Address(), ) require.NoError(err) - tx, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, testutils.InitialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, testutils.InitialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) vm.atomicTxPushGossiper.Add(&atomic.GossipAtomicTx{Tx: tx}) @@ -294,7 +294,7 @@ func TestAtomicTxPushGossipInbound(t *testing.T) { pk.Address(), ) require.NoError(err) - tx, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, testutils.InitialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) + tx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, address, testutils.InitialBaseFee, secp256k1fx.NewKeychain(pk), []*avax.UTXO{utxo}) require.NoError(err) require.NoError(vm.mempool.AddLocalTx(tx)) diff --git a/plugin/evm/atomic/vm/tx_semantic_verifier.go b/plugin/evm/atomic/vm/tx_semantic_verifier.go index b8794b9335..836d6560b9 100644 --- a/plugin/evm/atomic/vm/tx_semantic_verifier.go +++ b/plugin/evm/atomic/vm/tx_semantic_verifier.go @@ -45,17 +45,17 @@ type 
verifierBackend struct { // semanticVerifier is a visitor that checks the semantic validity of atomic transactions. type semanticVerifier struct { backend *verifierBackend - atx *atomic.Tx + tx *atomic.Tx parent extension.VMBlock baseFee *big.Int } -// SemanticVerify this transaction is valid. +// ImportTx verifies this transaction is valid. func (s *semanticVerifier) ImportTx(utx *atomic.UnsignedImportTx) error { backend := s.backend ctx := backend.ctx rules := backend.rules - stx := s.atx + stx := s.tx if err := utx.Verify(ctx, rules); err != nil { return err } @@ -173,11 +173,11 @@ func conflicts(backend *verifierBackend, inputs set.Set[ids.ID], ancestor extens return nil } -// SemanticVerify this transaction is valid. +// ExportTx verifies this transaction is valid. func (s *semanticVerifier) ExportTx(utx *atomic.UnsignedExportTx) error { ctx := s.backend.ctx rules := s.backend.rules - stx := s.atx + stx := s.tx if err := utx.Verify(ctx, rules); err != nil { return err } diff --git a/plugin/evm/atomic/vm/tx_test.go b/plugin/evm/atomic/vm/tx_test.go index c5fdcb832e..08d92cad2d 100644 --- a/plugin/evm/atomic/vm/tx_test.go +++ b/plugin/evm/atomic/vm/tx_test.go @@ -100,7 +100,7 @@ type atomicTxTest struct { func executeTxTest(t *testing.T, test atomicTxTest) { issuer, vm, _, sharedMemory, _ := GenesisAtomicVM(t, !test.bootstrapping, test.genesisJSON, test.configJSON, test.upgradeJSON) - rules := vm.CurrentRules() + rules := vm.currentRules() tx := test.setup(t, vm, sharedMemory) @@ -124,7 +124,7 @@ func executeTxTest(t *testing.T, test atomicTxTest) { if err := tx.UnsignedAtomicTx.Visit( &semanticVerifier{ backend: backend, - atx: tx, + tx: tx, parent: lastAcceptedBlock, baseFee: baseFee, }); len(test.semanticVerifyErr) == 0 && err != nil { diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index 510d636c92..7e2e98507e 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -40,8 +40,8 @@ import ( 
"github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/plugin/evm/gossip" "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/plugin/evm/utils" "github.com/ava-labs/coreth/plugin/evm/vmerrors" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -76,7 +76,7 @@ type VM struct { // [atomicTxRepository] maintains two indexes on accepted atomic txs. // - txID to accepted atomic tx // - block height to list of atomic txs accepted on block at that height - atomicTxRepository *atomicstate.AtomicTxRepository + atomicTxRepository *atomicstate.AtomicRepository // [atomicBackend] abstracts verification and processing of atomic transactions atomicBackend *atomicstate.AtomicBackend @@ -144,15 +144,15 @@ func (vm *VM) Initialize( vm.mempool = &txpool.Mempool{} extensionConfig := &extension.Config{ - NetworkCodec: networkCodec, - ConsensusCallbacks: vm.createConsensusCallbacks(), - BlockExtension: blockExtension, - SyncableParser: atomicsync.NewAtomicSyncSummaryParser(), - SyncExtender: syncExtender, - SyncSummaryProvider: syncProvider, - ExtraSyncLeafConfig: atomicLeafTypeConfig, - ExtraMempool: vm.mempool, - Clock: &vm.clock, + NetworkCodec: networkCodec, + ConsensusCallbacks: vm.createConsensusCallbacks(), + BlockExtension: blockExtension, + SyncableParser: atomicsync.NewAtomicSyncSummaryParser(), + SyncExtender: syncExtender, + SyncSummaryProvider: syncProvider, + ExtraSyncLeafHandlerConfig: atomicLeafTypeConfig, + ExtraMempool: vm.mempool, + Clock: &vm.clock, } if err := innerVM.SetExtensionConfig(extensionConfig); err != nil { return fmt.Errorf("failed to set extension config: %w", err) @@ -432,7 +432,7 @@ func (vm *VM) verifyTx(tx *atomic.Tx, parentHash common.Hash, baseFee *big.Int, } if err := tx.UnsignedAtomicTx.Visit(&semanticVerifier{ backend: atomicBackend, - atx: tx, + tx: tx, parent: parent, baseFee: baseFee, }); err != nil { @@ -476,7 +476,7 @@ func 
(vm *VM) verifyTxs(txs []*atomic.Tx, parentHash common.Hash, baseFee *big.I utx := atomicTx.UnsignedAtomicTx if err := utx.Visit(&semanticVerifier{ backend: atomicBackend, - atx: atomicTx, + tx: atomicTx, parent: ancestor, baseFee: baseFee, }); err != nil { @@ -789,3 +789,10 @@ func (vm *VM) BuildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo } return blk, err } + +// currentRules returns the chain rules for the current block. +func (vm *VM) currentRules() params.Rules { + header := vm.Ethereum().BlockChain().CurrentHeader() + chainConfig := vm.Ethereum().BlockChain().Config() + return chainConfig.Rules(header.Number, header.Time) +} diff --git a/plugin/evm/atomic/vm/vm_test.go b/plugin/evm/atomic/vm/vm_test.go index b5679b8c77..48d9791578 100644 --- a/plugin/evm/atomic/vm/vm_test.go +++ b/plugin/evm/atomic/vm/vm_test.go @@ -62,7 +62,7 @@ func (vm *VM) newImportTx( return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) } - return atomic.NewImportTx(vm.ctx, vm.InnerVM.CurrentRules(), vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) + return atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), chainID, to, baseFee, kc, atomicUTXOs) } func GenesisAtomicVM(t *testing.T, @@ -226,7 +226,7 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatal(err) } - exportTx, err := atomic.NewExportTx(vm.ctx, vm.CurrentRules(), state, vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testutils.TestShortIDAddrs[0], testutils.InitialBaseFee, testutils.TestKeys[0:1]) + exportTx, err := atomic.NewExportTx(vm.ctx, vm.currentRules(), state, vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testutils.TestShortIDAddrs[0], testutils.InitialBaseFee, testutils.TestKeys[0:1]) if err != nil { t.Fatal(err) } @@ -375,7 +375,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { validEthBlock := validBlock.(*chain.BlockWrapper).Block.(extension.VMBlock).GetEthBlock() - rules := 
vm.CurrentRules() + rules := vm.currentRules() var extraData []byte switch { case rules.IsApricotPhase5: @@ -457,11 +457,11 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { if err != nil { t.Fatal(err) } - tx1, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo}) + tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } - tx2, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(common.Big2, testutils.InitialBaseFee), kc, []*avax.UTXO{utxo}) + tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(common.Big2, testutils.InitialBaseFee), kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } @@ -484,11 +484,11 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { if err != nil { t.Fatal(err) } - tx1, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo1, utxo2}) + tx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo1, utxo2}) if err != nil { t.Fatal(err) } - tx2, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(common.Big2, testutils.InitialBaseFee), kc, []*avax.UTXO{utxo1}) + tx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(common.Big2, testutils.InitialBaseFee), kc, []*avax.UTXO{utxo1}) if err != nil { t.Fatal(err) } @@ -512,17 +512,17 @@ func 
TestReissueAtomicTxHigherGasPrice(t *testing.T) { t.Fatal(err) } - importTx1, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo1}) + importTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo1}) if err != nil { t.Fatal(err) } - importTx2, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(big.NewInt(3), testutils.InitialBaseFee), kc, []*avax.UTXO{utxo2}) + importTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(big.NewInt(3), testutils.InitialBaseFee), kc, []*avax.UTXO{utxo2}) if err != nil { t.Fatal(err) } - reissuanceTx1, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(big.NewInt(2), testutils.InitialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) + reissuanceTx1, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(big.NewInt(2), testutils.InitialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) if err != nil { t.Fatal(err) } @@ -542,7 +542,7 @@ func TestReissueAtomicTxHigherGasPrice(t *testing.T) { assert.True(t, vm.mempool.Has(importTx2.ID())) assert.False(t, vm.mempool.Has(reissuanceTx1.ID())) - reissuanceTx2, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(big.NewInt(4), testutils.InitialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) + reissuanceTx2, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], new(big.Int).Mul(big.NewInt(4), testutils.InitialBaseFee), kc, []*avax.UTXO{utxo1, utxo2}) 
if err != nil { t.Fatal(err) } @@ -1095,7 +1095,7 @@ func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { utxo, err := addUTXO(sharedMemory, vm.ctx, txID, uint32(i), vm.ctx.AVAXAssetID, importAmount, testutils.TestShortIDAddrs[0]) assert.NoError(t, err) - importTx, err := atomic.NewImportTx(vm.ctx, vm.CurrentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo}) + importTx, err := atomic.NewImportTx(vm.ctx, vm.currentRules(), vm.clock.Unix(), vm.ctx.XChainID, testutils.TestEthAddrs[0], testutils.InitialBaseFee, kc, []*avax.UTXO{utxo}) if err != nil { t.Fatal(err) } @@ -1366,7 +1366,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice*3), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.CurrentRules().ChainID), key) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.currentRules().ChainID), key) if err != nil { t.Fatal(err) } @@ -1532,7 +1532,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 5; i++ { tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.LaunchMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.CurrentRules().ChainID), key) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.currentRules().ChainID), key) if err != nil { t.Fatal(err) } @@ -1540,7 +1540,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { } for i := 5; i < 10; i++ { tx := types.NewTransaction(uint64(i), address, big.NewInt(10), 21000, big.NewInt(params.ApricotPhase1MinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.CurrentRules().ChainID), key) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.currentRules().ChainID), key) if err != nil { t.Fatal(err) 
} diff --git a/plugin/evm/block.go b/plugin/evm/block.go index a74d6ce911..6446688fe1 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -243,7 +243,7 @@ func (b *Block) semanticVerify(predicateContext *precompileconfig.PredicateConte err := vm.blockChain.InsertBlockManual(b.ethBlock, writes) if b.blockManager.blockExtension != nil && (err != nil || !writes) { - b.blockManager.blockExtension.OnCleanup(b) + b.blockManager.blockExtension.OnError(b) } return err } diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go index 59a21279bd..cf987d1543 100644 --- a/plugin/evm/extension/config.go +++ b/plugin/evm/extension/config.go @@ -64,8 +64,6 @@ type ExtensibleVM interface { MetricRegistry() *prometheus.Registry // ReadLastAccepted returns the last accepted block hash and height ReadLastAccepted() (common.Hash, uint64, error) - // CurrentRules returns the current rules for the VM - CurrentRules() params.Rules // VersionDB returns the versioned database for the VM VersionDB() *versiondb.Database // SyncerClient returns the syncer client for the VM @@ -91,18 +89,21 @@ type VMBlock interface { // BlockManagerExtension is an extension for the block manager // to handle BlockManager events type BlockManagerExtension interface { - // SemanticVerify verifies the block semantically - // it can be implemented to extend inner block verification - SemanticVerify(b VMBlock) error // SyntacticVerify verifies the block syntactically // it can be implemented to extend inner block verification SyntacticVerify(b VMBlock, rules params.Rules) error + // SemanticVerify verifies the block semantically + // it can be implemented to extend inner block verification + SemanticVerify(b VMBlock) error + // OnError is called when a block has passed SemanticVerify and SyntacticVerify, + // but failed insertion in the chain. This allows the block manager to perform any + // needed cleanup. 
This does not return any error because the block manager + // propagates the original error. + OnError(b VMBlock) // OnAccept is called when a block is accepted by the block manager OnAccept(b VMBlock, acceptedBatch database.Batch) error // OnReject is called when a block is rejected by the block manager OnReject(b VMBlock) error - // OnCleanup is called when a block cleanup is requested from the block manager - OnCleanup(b VMBlock) } // BuilderMempool is a mempool that's used in the block builder @@ -149,9 +150,9 @@ type Config struct { // to handle block manager events. // It's optional and can be nil BlockExtension BlockManagerExtension - // ExtraSyncLeafConfig is the extra configuration to handle leaf requests + // ExtraSyncLeafHandlerConfig is the extra configuration to handle leaf requests // in the network and syncer. It's optional and can be nil - ExtraSyncLeafConfig *LeafRequestConfig + ExtraSyncLeafHandlerConfig *LeafRequestConfig // ExtraMempool is the mempool to be used in the block builder. 
// It's optional and can be nil ExtraMempool BuilderMempool diff --git a/plugin/evm/message/testutils/test_block_sync_summary.go b/plugin/evm/message/testutils/test_block_sync_summary.go new file mode 100644 index 0000000000..543678c491 --- /dev/null +++ b/plugin/evm/message/testutils/test_block_sync_summary.go @@ -0,0 +1,16 @@ +package testutils + +import ( + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/coreth/plugin/evm/message" +) + +var TestBlockSyncSummaryCodec codec.Manager + +func init() { + var err error + TestBlockSyncSummaryCodec, err = message.NewCodec(message.BlockSyncSummary{}) + if err != nil { + panic(err) + } +} diff --git a/plugin/evm/sync/syncervm_client.go b/plugin/evm/sync/syncervm_client.go index 3fa4f7682a..cad1afb56b 100644 --- a/plugin/evm/sync/syncervm_client.go +++ b/plugin/evm/sync/syncervm_client.go @@ -37,7 +37,9 @@ type BlockAcceptor interface { PutLastAcceptedID(ids.ID) error } -// EthBlockWrapper is an interface that wraps the GetEthBlock method. +// EthBlockWrapper can be implemented by a concrete block wrapper type to +// return *types.Block, which is needed to update chain pointers at the +// end of the sync operation. 
type EthBlockWrapper interface { GetEthBlock() *types.Block } diff --git a/utils/handler.go b/plugin/evm/utils/handler.go similarity index 100% rename from utils/handler.go rename to plugin/evm/utils/handler.go diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 0bf6eeff62..5788bbb66b 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -38,6 +38,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/gossip" "github.com/ava-labs/coreth/plugin/evm/message" vmsync "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ava-labs/coreth/plugin/evm/utils" "github.com/ava-labs/coreth/plugin/evm/vmerrors" warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp" "github.com/ava-labs/coreth/rpc" @@ -47,7 +48,6 @@ import ( handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ava-labs/coreth/triedb" "github.com/ava-labs/coreth/triedb/hashdb" - "github.com/ava-labs/coreth/utils" "github.com/ava-labs/coreth/warp" // Force-load tracer engine to trigger registration @@ -144,8 +144,6 @@ var ( errNilBaseFeeApricotPhase3 = errors.New("nil base fee is invalid after apricotPhase3") errNilBlockGasCostApricotPhase4 = errors.New("nil blockGasCost is invalid after apricotPhase4") errInvalidHeaderPredicateResults = errors.New("invalid header predicate results") - errVMAlreadyInitialized = errors.New("vm already initialized") - errExtensionConfigAlreadySet = errors.New("extension config already set") ) var originalStderr *os.File @@ -264,17 +262,6 @@ type VM struct { rpcHandlers []interface{ Stop() } } -func (vm *VM) SetExtensionConfig(config *extension.Config) error { - if vm.ctx != nil { - return errVMAlreadyInitialized - } - if vm.extensionConfig != nil { - return errExtensionConfigAlreadySet - } - vm.extensionConfig = config - return nil -} - // Initialize implements the snowman.ChainVM interface func (vm *VM) Initialize( _ context.Context, @@ -429,6 +416,7 @@ func (vm *VM) Initialize( vm.ethConfig.SnapshotDelayInit = 
vm.stateSyncEnabled(lastAcceptedHeight) vm.ethConfig.SnapshotWait = vm.config.SnapshotWait vm.ethConfig.SnapshotVerify = vm.config.SnapshotVerify + vm.ethConfig.HistoricalProofQueryWindow = vm.config.HistoricalProofQueryWindow vm.ethConfig.OfflinePruning = vm.config.OfflinePruning vm.ethConfig.OfflinePruningBloomFilterSize = vm.config.OfflinePruningBloomFilterSize vm.ethConfig.OfflinePruningDataDirectory = vm.config.OfflinePruningDataDirectory @@ -571,10 +559,10 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { }, }, ) - var leafConfigs []*extension.LeafRequestConfig - syncStats := handlerstats.NewHandlerStats(metrics.Enabled) + var leafHandlerConfigs []*extension.LeafRequestConfig + syncStats := handlerstats.GetOrRegisterHandlerStats(metrics.Enabled) // register default leaf request handler for state trie - leafConfigs = append(leafConfigs, &extension.LeafRequestConfig{ + leafHandlerConfigs = append(leafHandlerConfigs, &extension.LeafRequestConfig{ LeafType: message.StateTrieNode, MetricName: "sync_state_trie_leaves", Handler: handlers.NewLeafsRequestHandler(evmTrieDB, @@ -584,12 +572,12 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { ), }) - if vm.extensionConfig.ExtraSyncLeafConfig != nil { - leafConfigs = append(leafConfigs, vm.extensionConfig.ExtraSyncLeafConfig) + if vm.extensionConfig.ExtraSyncLeafHandlerConfig != nil { + leafHandlerConfigs = append(leafHandlerConfigs, vm.extensionConfig.ExtraSyncLeafHandlerConfig) } - leafHandlers := make(LeafHandlers, len(leafConfigs)) - for _, leafConfig := range leafConfigs { + leafHandlers := make(LeafHandlers, len(leafHandlerConfigs)) + for _, leafConfig := range leafHandlerConfigs { if _, exists := leafHandlers[leafConfig.LeafType]; exists { return fmt.Errorf("duplicate leaf type %v", leafConfig.LeafType) } @@ -622,8 +610,8 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { } // Initialize the state sync client - leafMetricsNames := 
make(map[message.NodeType]string, len(leafConfigs)) - for _, leafConfig := range leafConfigs { + leafMetricsNames := make(map[message.NodeType]string, len(leafHandlerConfigs)) + for _, leafConfig := range leafHandlerConfigs { leafMetricsNames[leafConfig.LeafType] = leafConfig.MetricName } vm.Client = vmsync.NewClient(&vmsync.ClientConfig{ @@ -1024,10 +1012,6 @@ func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, erro return ids.ID(hash), nil } -func (vm *VM) Version(context.Context) (string, error) { - return Version, nil -} - // CreateHandlers makes new http handlers that can handle API calls func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { handler := rpc.NewServer(vm.config.APIMaxDuration.Duration) @@ -1096,6 +1080,12 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er }, nil } +// currentRules returns the chain rules for the current block. +func (vm *VM) currentRules() params.Rules { + header := vm.eth.BlockChain().CurrentHeader() + return vm.chainConfig.Rules(header.Number, header.Time) +} + /* *********************************** Helpers ********************************** */ @@ -1104,7 +1094,7 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]http.Handler, er // network must be signed by the primary network validators. // This is necessary when the subnet is not validating the primary network. 
func (vm *VM) requirePrimaryNetworkSigners() bool { - switch c := vm.CurrentRules().ActivePrecompiles[warpcontract.ContractAddress].(type) { + switch c := vm.currentRules().ActivePrecompiles[warpcontract.ContractAddress].(type) { case *warpcontract.Config: return c.RequirePrimaryNetworkSigners default: // includes nil due to non-presence @@ -1214,76 +1204,3 @@ func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool { func (vm *VM) PutLastAcceptedID(ID ids.ID) error { return vm.acceptedBlockDB.Put(lastAcceptedKey, ID[:]) } - -/* - *********************************** ExtensibleVM functions ********************************** - // All these methods assumes that VM is already initialized -*/ - -func (vm *VM) GetVMBlock(ctx context.Context, blkID ids.ID) (extension.VMBlock, error) { - // Since each internal handler used by [vm.State] always returns a block - // with non-nil ethBlock value, GetBlockInternal should never return a - // (*Block) with a nil ethBlock value. - blk, err := vm.GetBlockInternal(ctx, blkID) - if err != nil { - return nil, err - } - - return blk.(*Block), nil -} - -func (vm *VM) LastAcceptedVMBlock() extension.VMBlock { - lastAcceptedBlock := vm.LastAcceptedBlockInternal() - if lastAcceptedBlock == nil { - return nil - } - return lastAcceptedBlock.(*Block) -} - -func (vm *VM) NewVMBlock(ethBlock *types.Block) (extension.VMBlock, error) { - blk, err := vm.blockManager.newBlock(ethBlock) - if err != nil { - return nil, err - } - - return blk, nil -} - -// IsBootstrapped returns true if the VM has finished bootstrapping -func (vm *VM) IsBootstrapped() bool { - return vm.bootstrapped.Get() -} - -// CurrentRules returns the chain rules for the current block. 
-func (vm *VM) CurrentRules() params.Rules { - header := vm.eth.BlockChain().CurrentHeader() - return vm.chainConfig.Rules(header.Number, header.Time) -} - -func (vm *VM) Ethereum() *eth.Ethereum { - return vm.eth -} - -func (vm *VM) Config() config.Config { - return vm.config -} - -func (vm *VM) MetricRegistry() *prometheus.Registry { - return vm.sdkMetrics -} - -func (vm *VM) Validators() *p2p.Validators { - return vm.p2pValidators -} - -func (vm *VM) VersionDB() *versiondb.Database { - return vm.versiondb -} - -func (vm *VM) EthChainDB() ethdb.Database { - return vm.chaindb -} - -func (vm *VM) SyncerClient() vmsync.Client { - return vm.Client -} diff --git a/plugin/evm/vm_extensible.go b/plugin/evm/vm_extensible.go new file mode 100644 index 0000000000..b8e90bae13 --- /dev/null +++ b/plugin/evm/vm_extensible.go @@ -0,0 +1,107 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "context" + "errors" + + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/eth" + "github.com/ava-labs/coreth/plugin/evm/config" + "github.com/ava-labs/coreth/plugin/evm/extension" + vmsync "github.com/ava-labs/coreth/plugin/evm/sync" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + errVMAlreadyInitialized = errors.New("vm already initialized") + errExtensionConfigAlreadySet = errors.New("extension config already set") +) + +/* + *********************************** ExtensibleVM functions ********************************** + // All these methods assumes that VM is already initialized +*/ + +func (vm *VM) GetVMBlock(ctx context.Context, blkID ids.ID) (extension.VMBlock, error) { + // Since each internal handler used by [vm.State] always returns a block + // with non-nil ethBlock value, 
GetBlockInternal should never return a + // (*Block) with a nil ethBlock value. + blk, err := vm.GetBlockInternal(ctx, blkID) + if err != nil { + return nil, err + } + + return blk.(*Block), nil +} + +func (vm *VM) LastAcceptedVMBlock() extension.VMBlock { + lastAcceptedBlock := vm.LastAcceptedBlockInternal() + if lastAcceptedBlock == nil { + return nil + } + return lastAcceptedBlock.(*Block) +} + +func (vm *VM) NewVMBlock(ethBlock *types.Block) (extension.VMBlock, error) { + blk, err := vm.blockManager.newBlock(ethBlock) + if err != nil { + return nil, err + } + + return blk, nil +} + +// IsBootstrapped returns true if the VM has finished bootstrapping +func (vm *VM) IsBootstrapped() bool { + return vm.bootstrapped.Get() +} + +func (vm *VM) Ethereum() *eth.Ethereum { + return vm.eth +} + +func (vm *VM) Config() config.Config { + return vm.config +} + +func (vm *VM) MetricRegistry() *prometheus.Registry { + return vm.sdkMetrics +} + +func (vm *VM) Validators() *p2p.Validators { + return vm.p2pValidators +} + +func (vm *VM) VersionDB() *versiondb.Database { + return vm.versiondb +} + +func (vm *VM) EthChainDB() ethdb.Database { + return vm.chaindb +} + +func (vm *VM) SyncerClient() vmsync.Client { + return vm.Client +} + +func (vm *VM) Version(context.Context) (string, error) { + return Version, nil +} + +func (vm *VM) SetExtensionConfig(config *extension.Config) error { + if vm.ctx != nil { + return errVMAlreadyInitialized + } + if vm.extensionConfig != nil { + return errExtensionConfigAlreadySet + } + vm.extensionConfig = config + return nil +} diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index ca9d890844..22b9cf0caa 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -12,7 +12,6 @@ import ( _ "embed" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" @@ -33,6 +32,7 @@ 
import ( "github.com/ava-labs/coreth/eth/tracers" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/message" + messagetest "github.com/ava-labs/coreth/plugin/evm/message/testutils" "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/precompile/contract" warpcontract "github.com/ava-labs/coreth/precompile/contracts/warp" @@ -69,15 +69,7 @@ const ( testNetworkID uint32 = 10 ) -var networkCodec codec.Manager - -func init() { - var err error - networkCodec, err = message.NewCodec(message.BlockSyncSummary{}) - if err != nil { - panic(err) - } -} +var networkCodec = messagetest.TestBlockSyncSummaryCodec func TestSendWarpMessage(t *testing.T) { require := require.New(t) diff --git a/sync/client/client_test.go b/sync/client/client_test.go index 31658dafb4..552646db2f 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -13,7 +13,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/consensus/dummy" @@ -22,6 +21,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/plugin/evm/message" + messagetest "github.com/ava-labs/coreth/plugin/evm/message/testutils" clientstats "github.com/ava-labs/coreth/sync/client/stats" "github.com/ava-labs/coreth/sync/handlers" handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" @@ -31,15 +31,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) -var networkCodec codec.Manager - -func init() { - var err error - networkCodec, err = message.NewCodec(message.BlockSyncSummary{}) - if err != nil { - panic(err) - } -} +var networkCodec = messagetest.TestBlockSyncSummaryCodec func TestGetCode(t *testing.T) { mockNetClient := &mockNetwork{} diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go index eb57362e01..bb4fac0599 100644 --- a/sync/handlers/leafs_request_test.go 
+++ b/sync/handlers/leafs_request_test.go @@ -9,12 +9,12 @@ import ( "math/rand" "testing" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/message" + messagetest "github.com/ava-labs/coreth/plugin/evm/message/testutils" "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ava-labs/coreth/sync/syncutils" "github.com/ava-labs/coreth/trie" @@ -25,15 +25,7 @@ import ( "github.com/stretchr/testify/assert" ) -var networkCodec codec.Manager - -func init() { - var err error - networkCodec, err = message.NewCodec(message.BlockSyncSummary{}) - if err != nil { - panic(err) - } -} +var networkCodec = messagetest.TestBlockSyncSummaryCodec func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { rand.Seed(1) diff --git a/sync/handlers/stats/stats.go b/sync/handlers/stats/stats.go index cff8185b42..56dbe35455 100644 --- a/sync/handlers/stats/stats.go +++ b/sync/handlers/stats/stats.go @@ -166,9 +166,9 @@ func (h *handlerStats) IncSnapshotReadSuccess() { h.snapshotReadSuccess.Inc(1 func (h *handlerStats) IncSnapshotSegmentValid() { h.snapshotSegmentValid.Inc(1) } func (h *handlerStats) IncSnapshotSegmentInvalid() { h.snapshotSegmentInvalid.Inc(1) } -// NewHandlerStats returns a new HandlerStats instance to track state sync handler metrics. +// GetOrRegisterHandlerStats returns a new HandlerStats instance to track state sync handler metrics. // Calling this multiple times will return same registered metrics. 
-func NewHandlerStats(enabled bool) HandlerStats { +func GetOrRegisterHandlerStats(enabled bool) HandlerStats { if !enabled { return NewNoopHandlerStats() } diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index 99c5d17b0e..c105504c07 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -13,11 +13,11 @@ import ( "testing" "time" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/plugin/evm/message" + messagetest "github.com/ava-labs/coreth/plugin/evm/message/testutils" statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/handlers" handlerstats "github.com/ava-labs/coreth/sync/handlers/stats" @@ -35,15 +35,7 @@ const testSyncTimeout = 30 * time.Second var errInterrupted = errors.New("interrupted sync") -var networkCodec codec.Manager - -func init() { - var err error - networkCodec, err = message.NewCodec(message.BlockSyncSummary{}) - if err != nil { - panic(err) - } -} +var networkCodec = messagetest.TestBlockSyncSummaryCodec type syncTest struct { ctx context.Context diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index 7bbd434641..8c06e138c1 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -16,21 +15,14 @@ import ( avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/coreth/plugin/evm/message" + messagetest "github.com/ava-labs/coreth/plugin/evm/message/testutils" 
"github.com/ava-labs/coreth/utils" "github.com/ava-labs/coreth/warp" "github.com/ava-labs/coreth/warp/warptest" "github.com/stretchr/testify/require" ) -var networkCodec codec.Manager - -func init() { - var err error - networkCodec, err = message.NewCodec(message.BlockSyncSummary{}) - if err != nil { - panic(err) - } -} +var networkCodec = messagetest.TestBlockSyncSummaryCodec func TestMessageSignatureHandler(t *testing.T) { database := memdb.New() From da82817dd71a92b9b2fecf7950894a680bc51c5c Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 11 Feb 2025 17:02:29 +0300 Subject: [PATCH 75/91] nits and reviews --- core/vm/evm.go | 1 + .../internal/tracetest/calltrace_test.go | 3 +++ params/config_extra.go | 19 ------------------- plugin/evm/atomic/state/atomic_backend.go | 2 +- plugin/evm/atomic/state/atomic_trie.go | 4 ++-- plugin/evm/vm_extensible.go | 1 - 6 files changed, 7 insertions(+), 23 deletions(-) diff --git a/core/vm/evm.go b/core/vm/evm.go index 1e89e04817..ce1e2731ab 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -669,6 +669,7 @@ func (evm *EVM) NativeAssetCall(caller common.Address, input []byte, suppliedGas // Send [assetAmount] of [assetID] to [to] address evm.Context.TransferMultiCoin(evm.StateDB, caller, to, assetID, assetAmount) ret, remainingGas, err = evm.Call(AccountRef(caller), to, callData, remainingGas, new(uint256.Int)) + // When an error was returned by the EVM or when setting the creation code // above we revert to the snapshot and consume any gas remaining. Additionally // when we're in homestead this also counts for code storage gas errors. 
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index 87c6ff5979..a6614b7ac1 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -47,6 +47,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/require" ) type callContext struct { @@ -289,6 +290,8 @@ func TestInternals(t *testing.T) { GasLimit: uint64(6000000), } ) + config2 := params.TestApricotPhase2Config + require.Equal(t, config, config2) mkTracer := func(name string, cfg json.RawMessage) tracers.Tracer { tr, err := tracers.DefaultDirectory.New(name, nil, cfg) if err != nil { diff --git a/params/config_extra.go b/params/config_extra.go index a9449d5206..4f9a4c78cb 100644 --- a/params/config_extra.go +++ b/params/config_extra.go @@ -178,25 +178,6 @@ func (c *ChainConfig) ToWithUpgradesJSON() *ChainConfigWithUpgradesJSON { } } -func GetChainConfig(agoUpgrade upgrade.Config, chainID *big.Int) *ChainConfig { - c := &ChainConfig{ - ChainID: chainID, - HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - NetworkUpgrades: GetNetworkUpgrades(agoUpgrade), - } - return c -} - func (r *Rules) PredicatersExist() bool { return len(r.Predicaters) > 0 } diff --git a/plugin/evm/atomic/state/atomic_backend.go b/plugin/evm/atomic/state/atomic_backend.go index ab7024ee85..db799c7d68 100644 --- a/plugin/evm/atomic/state/atomic_backend.go +++ b/plugin/evm/atomic/state/atomic_backend.go @@ -47,7 +47,7 @@ func NewAtomicBackend( ) (*AtomicBackend, error) { codec := repo.codec - atomicTrie, 
err := NewAtomicTrie(repo.atomicTrieDB, repo.metadataDB, codec, lastAcceptedHeight, commitInterval) + atomicTrie, err := newAtomicTrie(repo.atomicTrieDB, repo.metadataDB, codec, lastAcceptedHeight, commitInterval) if err != nil { return nil, err } diff --git a/plugin/evm/atomic/state/atomic_trie.go b/plugin/evm/atomic/state/atomic_trie.go index 08c93f0b91..2b87a7fd02 100644 --- a/plugin/evm/atomic/state/atomic_trie.go +++ b/plugin/evm/atomic/state/atomic_trie.go @@ -51,9 +51,9 @@ type AtomicTrie struct { tipBuffer *core.BoundedBuffer[common.Hash] } -// NewAtomicTrie returns a new instance of a atomicTrie with a configurable commitHeightInterval, used in testing. +// newAtomicTrie returns a new instance of an atomicTrie with a configurable commitHeightInterval, used in testing. // Initializes the trie before returning it. -func NewAtomicTrie( +func newAtomicTrie( atomicTrieDB avalanchedatabase.Database, metadataDB avalanchedatabase.Database, codec codec.Manager, lastAcceptedHeight uint64, commitHeightInterval uint64, ) (*AtomicTrie, error) { diff --git a/plugin/evm/vm_extensible.go b/plugin/evm/vm_extensible.go index b8e90bae13..f4ea1f58d6 100644 --- a/plugin/evm/vm_extensible.go +++ b/plugin/evm/vm_extensible.go @@ -25,7 +25,6 @@ var ( ) /* - *********************************** ExtensibleVM functions ********************************** // All these methods assumes that VM is already initialized */
"testing" - "github.com/ava-labs/avalanchego/upgrade" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" @@ -47,7 +45,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/rlp" - "github.com/stretchr/testify/require" ) type callContext struct { @@ -273,7 +270,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { func TestInternals(t *testing.T) { var ( - config = params.GetChainConfig(upgrade.GetConfig(constants.MainnetID), params.AvalancheMainnetChainID) + config = params.TestLaunchConfig to = common.HexToAddress("0x00000000000000000000000000000000deadbeef") origin = common.HexToAddress("0x00000000000000000000000000000000feed") txContext = vm.TxContext{ @@ -290,8 +287,6 @@ func TestInternals(t *testing.T) { GasLimit: uint64(6000000), } ) - config2 := params.TestApricotPhase2Config - require.Equal(t, config, config2) mkTracer := func(name string, cfg json.RawMessage) tracers.Tracer { tr, err := tracers.DefaultDirectory.New(name, nil, cfg) if err != nil { From f9f4824a192e06fb4b63098d922655de9029a915 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 11 Feb 2025 18:46:13 +0300 Subject: [PATCH 77/91] set block eth upgrades --- params/config_extra.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/params/config_extra.go b/params/config_extra.go index 4f9a4c78cb..9d5be4bfa9 100644 --- a/params/config_extra.go +++ b/params/config_extra.go @@ -35,11 +35,20 @@ type AvalancheContext struct { // SetEthUpgrades enables Etheruem network upgrades using the same time as // the Avalanche network upgrade that enables them. -// -// TODO: Prior to Cancun, Avalanche upgrades are referenced inline in the -// code in place of their Ethereum counterparts. 
The original Ethereum names -// should be restored for maintainability. func (c *ChainConfig) SetEthUpgrades() { + // Set Ethereum block upgrades to initially activated as they were already activated on launch. + c.HomesteadBlock = big.NewInt(0) + c.DAOForkBlock = big.NewInt(0) + c.DAOForkSupport = true + c.EIP150Block = big.NewInt(0) + c.EIP155Block = big.NewInt(0) + c.EIP158Block = big.NewInt(0) + c.ByzantiumBlock = big.NewInt(0) + c.ConstantinopleBlock = big.NewInt(0) + c.PetersburgBlock = big.NewInt(0) + c.IstanbulBlock = big.NewInt(0) + c.MuirGlacierBlock = big.NewInt(0) + if c.ChainID != nil && AvalancheFujiChainID.Cmp(c.ChainID) == 0 { c.BerlinBlock = big.NewInt(184985) // https://testnet.snowtrace.io/block/184985?chainid=43113, AP2 activation block c.LondonBlock = big.NewInt(805078) // https://testnet.snowtrace.io/block/805078?chainid=43113, AP3 activation block From 61455466fe70b5e9928e00d84cb492b9ff378804 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 11 Feb 2025 19:56:45 +0300 Subject: [PATCH 78/91] enable dao in test chain configs --- params/config.go | 60 ++++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/params/config.go b/params/config.go index f95a815ef4..6186a946d8 100644 --- a/params/config.go +++ b/params/config.go @@ -52,8 +52,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -85,8 +85,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -116,8 +116,8 @@ var ( 
AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -147,8 +147,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -179,8 +179,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -212,8 +212,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -245,8 +245,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -278,8 +278,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -311,8 +311,8 @@ var ( AvalancheContext: 
AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -344,8 +344,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -377,8 +377,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -410,8 +410,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -443,8 +443,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -477,8 +477,8 @@ var ( AvalancheContext: AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), @@ -512,8 +512,8 @@ var ( AvalancheContext: 
AvalancheContext{utils.TestSnowContext()}, ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), From c372ec8b825bd0c94a78cdc093fc2e5874b35eef Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 11 Feb 2025 09:22:41 -0800 Subject: [PATCH 79/91] simplify: remove blockManager type (#786) --- plugin/evm/atomic/vm/block_extension.go | 2 +- plugin/evm/block.go | 53 ++++++++++++------------- plugin/evm/block_manager.go | 36 ++++++----------- plugin/evm/extension/config.go | 10 ++--- plugin/evm/vm.go | 10 ++--- plugin/evm/vm_extensible.go | 2 +- plugin/evm/vm_test.go | 8 ++-- 7 files changed, 51 insertions(+), 70 deletions(-) diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go index c158a474db..e15b865310 100644 --- a/plugin/evm/atomic/vm/block_extension.go +++ b/plugin/evm/atomic/vm/block_extension.go @@ -20,7 +20,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/extension" ) -var _ extension.BlockManagerExtension = (*blockExtension)(nil) +var _ extension.BlockExtension = (*blockExtension)(nil) var ( errNilExtDataGasUsedApricotPhase4 = errors.New("nil extDataGasUsed is invalid after apricotPhase4") diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 6446688fe1..7374fa4ece 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/precompile/precompileconfig" "github.com/ava-labs/coreth/predicate" @@ -32,9 +33,10 @@ var ( // Block implements the snowman.Block interface type Block struct { - id ids.ID - ethBlock *types.Block - blockManager *blockManager + id ids.ID + ethBlock *types.Block + extension 
extension.BlockExtension + vm *VM } // ID implements the snowman.Block interface @@ -42,7 +44,7 @@ func (b *Block) ID() ids.ID { return b.id } // Accept implements the snowman.Block interface func (b *Block) Accept(context.Context) error { - vm := b.blockManager.vm + vm := b.vm // Although returning an error from Accept is considered fatal, it is good // practice to cleanup the batch we were modifying in the case of an error. @@ -72,10 +74,10 @@ func (b *Block) Accept(context.Context) error { return fmt.Errorf("could not create commit batch processing block[%s]: %w", b.ID(), err) } - if b.blockManager.blockExtension != nil { + if b.extension != nil { // Apply any changes atomically with other pending changes to // the vm's versionDB. - return b.blockManager.blockExtension.OnAccept(b, vdbBatch) + return b.extension.OnAccept(b, vdbBatch) } return vdbBatch.Write() @@ -84,7 +86,7 @@ func (b *Block) Accept(context.Context) error { // handlePrecompileAccept calls Accept on any logs generated with an active precompile address that implements // contract.Accepter func (b *Block) handlePrecompileAccept(rules params.Rules) error { - vm := b.blockManager.vm + vm := b.vm // Short circuit early if there are no precompile accepters to execute if len(rules.AccepterPrecompiles) == 0 { return nil @@ -119,15 +121,14 @@ func (b *Block) handlePrecompileAccept(rules params.Rules) error { // Reject implements the snowman.Block interface // If [b] contains an atomic transaction, attempt to re-issue it func (b *Block) Reject(context.Context) error { - vm := b.blockManager.vm log.Debug(fmt.Sprintf("Rejecting block %s (%s) at height %d", b.ID().Hex(), b.ID(), b.Height())) - if err := vm.blockChain.Reject(b.ethBlock); err != nil { + if err := b.vm.blockChain.Reject(b.ethBlock); err != nil { return fmt.Errorf("chain could not reject %s: %w", b.ID(), err) } - if b.blockManager.blockExtension != nil { - return b.blockManager.blockExtension.OnReject(b) + if b.extension != nil { + return 
b.extension.OnReject(b) } return nil } @@ -153,29 +154,27 @@ func (b *Block) syntacticVerify() error { return errInvalidBlock } - vm := b.blockManager.vm - header := b.ethBlock.Header() // Skip verification of the genesis block since it should already be marked as accepted. - if b.ethBlock.Hash() == vm.genesisHash { + if b.ethBlock.Hash() == b.vm.genesisHash { return nil } - rules := vm.chainConfig.Rules(header.Number, header.Time) - return b.blockManager.SyntacticVerify(b, rules) + rules := b.vm.chainConfig.Rules(header.Number, header.Time) + return b.SyntacticVerify(rules) } // Verify implements the snowman.Block interface func (b *Block) Verify(context.Context) error { return b.semanticVerify(&precompileconfig.PredicateContext{ - SnowCtx: b.blockManager.vm.ctx, + SnowCtx: b.vm.ctx, ProposerVMBlockCtx: nil, }, true) } // ShouldVerifyWithContext implements the block.WithVerifyContext interface func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) { - predicates := b.blockManager.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()).Predicaters + predicates := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()).Predicaters // Short circuit early if there are no predicates to verify if len(predicates) == 0 { return false, nil @@ -199,7 +198,7 @@ func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) { // VerifyWithContext implements the block.WithVerifyContext interface func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) error { return b.semanticVerify(&precompileconfig.PredicateContext{ - SnowCtx: b.blockManager.vm.ctx, + SnowCtx: b.vm.ctx, ProposerVMBlockCtx: proposerVMBlockCtx, }, true) } @@ -208,7 +207,6 @@ func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block // Enforces that the predicates are valid within [predicateContext]. // Writes the block details to disk and the state to the trie manager iff writes=true. 
func (b *Block) semanticVerify(predicateContext *precompileconfig.PredicateContext, writes bool) error { - vm := b.blockManager.vm if predicateContext.ProposerVMBlockCtx != nil { log.Debug("Verifying block with context", "block", b.ID(), "height", b.Height()) } else { @@ -222,13 +220,13 @@ func (b *Block) semanticVerify(predicateContext *precompileconfig.PredicateConte // If the chain is still bootstrapping, we can assume that all blocks we are verifying have // been accepted by the network (so the predicate was validated by the network when the // block was originally verified). - if vm.bootstrapped.Get() { + if b.vm.bootstrapped.Get() { if err := b.verifyPredicates(predicateContext); err != nil { return fmt.Errorf("failed to verify predicates: %w", err) } } - if err := b.blockManager.SemanticVerify(b); err != nil { + if err := b.SemanticVerify(); err != nil { return fmt.Errorf("failed to verify block extension: %w", err) } @@ -237,21 +235,20 @@ func (b *Block) semanticVerify(predicateContext *precompileconfig.PredicateConte // Additionally, if a block is already in processing, then it has already passed verification and // at this point we have checked the predicates are still valid in the different context so we // can return nil. - if vm.State.IsProcessing(b.id) { + if b.vm.State.IsProcessing(b.id) { return nil } - err := vm.blockChain.InsertBlockManual(b.ethBlock, writes) - if b.blockManager.blockExtension != nil && (err != nil || !writes) { - b.blockManager.blockExtension.OnError(b) + err := b.vm.blockChain.InsertBlockManual(b.ethBlock, writes) + if b.extension != nil && (err != nil || !writes) { + b.extension.OnError(b) } return err } // verifyPredicates verifies the predicates in the block are valid according to predicateContext. 
func (b *Block) verifyPredicates(predicateContext *precompileconfig.PredicateContext) error { - vm := b.blockManager.vm - rules := vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()) + rules := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()) switch { case !rules.IsDurango && rules.PredicatersExist(): diff --git a/plugin/evm/block_manager.go b/plugin/evm/block_manager.go index 1304795048..c20e33b81b 100644 --- a/plugin/evm/block_manager.go +++ b/plugin/evm/block_manager.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/trie" ) @@ -24,28 +23,17 @@ var ( apricotPhase1MinGasPrice = big.NewInt(params.ApricotPhase1MinGasPrice) ) -type blockManager struct { - blockExtension extension.BlockManagerExtension - vm *VM -} - -func newBlockManager(vm *VM, blockExtension extension.BlockManagerExtension) *blockManager { - return &blockManager{ - blockExtension: blockExtension, - vm: vm, - } -} - // newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface -func (bm *blockManager) newBlock(ethBlock *types.Block) (*Block, error) { +func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { return &Block{ - id: ids.ID(ethBlock.Hash()), - ethBlock: ethBlock, - blockManager: bm, + id: ids.ID(ethBlock.Hash()), + ethBlock: ethBlock, + extension: vm.extensionConfig.BlockExtension, + vm: vm, }, nil } -func (bm *blockManager) SyntacticVerify(b *Block, rules params.Rules) error { +func (b *Block) SyntacticVerify(rules params.Rules) error { ethHeader := b.ethBlock.Header() // Perform block and header sanity checks @@ -211,21 +199,21 @@ func (bm *blockManager) SyntacticVerify(b *Block, rules params.Rules) error { } } - if bm.blockExtension != nil { - return bm.blockExtension.SyntacticVerify(b, rules) + if b.extension != nil { + return 
b.extension.SyntacticVerify(b, rules) } return nil } -func (bm *blockManager) SemanticVerify(b *Block) error { +func (b *Block) SemanticVerify() error { // Make sure the block isn't too far in the future blockTimestamp := b.ethBlock.Time() - if maxBlockTime := uint64(bm.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { + if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) } - if bm.blockExtension != nil { - return bm.blockExtension.SemanticVerify(b) + if b.extension != nil { + return b.extension.SemanticVerify(b) } return nil } diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go index cf987d1543..54ac9a3049 100644 --- a/plugin/evm/extension/config.go +++ b/plugin/evm/extension/config.go @@ -86,9 +86,8 @@ type VMBlock interface { GetEthBlock() *types.Block } -// BlockManagerExtension is an extension for the block manager -// to handle BlockManager events -type BlockManagerExtension interface { +// BlockExtension allows the VM extension to handle block processing events. +type BlockExtension interface { // SyntacticVerify verifies the block syntactically // it can be implemented to extend inner block verification SyntacticVerify(b VMBlock, rules params.Rules) error @@ -146,10 +145,9 @@ type Config struct { // SyncableParser is to parse summary messages from the network. // It's required and should be non-nil SyncableParser message.SyncableParser - // BlockManagerExtension is the extension for the block manager - // to handle block manager events. + // BlockExtension allows the VM extension to handle block processing events. // It's optional and can be nil - BlockExtension BlockManagerExtension + BlockExtension BlockExtension // ExtraSyncLeafHandlerConfig is the extra configuration to handle leaf requests // in the network and syncer. 
It's optional and can be nil ExtraSyncLeafHandlerConfig *LeafRequestConfig diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 5788bbb66b..1188b5cfb2 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -193,7 +193,6 @@ type VM struct { // Extension Points extensionConfig *extension.Config - blockManager *blockManager // pointers to eth constructs eth *eth.Ethereum @@ -650,8 +649,7 @@ func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { } func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { - vm.blockManager = newBlockManager(vm, vm.extensionConfig.BlockExtension) - block, err := vm.blockManager.newBlock(lastAcceptedBlock) + block, err := vm.newBlock(lastAcceptedBlock) if err != nil { return fmt.Errorf("failed to create block wrapper for the last accepted block: %w", err) } @@ -890,7 +888,7 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo } // Note: the status of block is set by ChainState - blk, err := vm.blockManager.newBlock(block) + blk, err := vm.newBlock(block) if err != nil { log.Debug("discarding txs due to error making new block", "err", err) return nil, fmt.Errorf("%w: %w", vmerrors.ErrMakeNewBlockFailed, err) @@ -924,7 +922,7 @@ func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { } // Note: the status of block is set by ChainState - block, err := vm.blockManager.newBlock(ethBlock) + block, err := vm.newBlock(ethBlock) if err != nil { return nil, err } @@ -955,7 +953,7 @@ func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { return nil, avalanchedatabase.ErrNotFound } // Note: the status of block is set by ChainState - return vm.blockManager.newBlock(ethBlock) + return vm.newBlock(ethBlock) } // GetAcceptedBlock attempts to retrieve block [blkID] from the VM. 
This method diff --git a/plugin/evm/vm_extensible.go b/plugin/evm/vm_extensible.go index f4ea1f58d6..8a74ab4c1e 100644 --- a/plugin/evm/vm_extensible.go +++ b/plugin/evm/vm_extensible.go @@ -49,7 +49,7 @@ func (vm *VM) LastAcceptedVMBlock() extension.VMBlock { } func (vm *VM) NewVMBlock(ethBlock *types.Block) (extension.VMBlock, error) { - blk, err := vm.blockManager.newBlock(ethBlock) + blk, err := vm.newBlock(ethBlock) if err != nil { return nil, err } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index f0610f3e16..7decbd3c87 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1413,7 +1413,7 @@ func TestUncleBlock(t *testing.T) { blkDEthBlock.ExtData(), false, ) - uncleBlock, err := vm2.blockManager.newBlock(uncleEthBlock) + uncleBlock, err := vm2.newBlock(uncleEthBlock) if err != nil { t.Fatal(err) } @@ -1678,7 +1678,7 @@ func TestFutureBlock(t *testing.T) { false, ) - futureBlock, err := vm.blockManager.newBlock(modifiedBlock) + futureBlock, err := vm.newBlock(modifiedBlock) if err != nil { t.Fatal(err) } @@ -2077,7 +2077,7 @@ func TestParentBeaconRootBlock(t *testing.T) { header.ParentBeaconRoot = test.beaconRoot parentBeaconEthBlock := ethBlock.WithSeal(header) - parentBeaconBlock, err := vm.blockManager.newBlock(parentBeaconEthBlock) + parentBeaconBlock, err := vm.newBlock(parentBeaconEthBlock) if err != nil { t.Fatal(err) } @@ -2138,7 +2138,7 @@ func TestNoBlobsAllowed(t *testing.T) { defer func() { require.NoError(vm.Shutdown(ctx)) }() // Verification should fail - vmBlock, err := vm.blockManager.newBlock(blocks[0]) + vmBlock, err := vm.newBlock(blocks[0]) require.NoError(err) _, err = vm.ParseBlock(ctx, vmBlock.Bytes()) require.ErrorContains(err, "blobs not enabled on avalanche networks") From f4b2a10326452508c2cafa80d4e85286735b7ad7 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 11 Feb 2025 20:39:10 +0300 Subject: [PATCH 80/91] remove block manager and move to block --- plugin/evm/block.go | 232 
++++++++++++++++++++++++++++++++---- plugin/evm/block_manager.go | 219 ---------------------------------- plugin/evm/vm.go | 14 ++- 3 files changed, 224 insertions(+), 241 deletions(-) delete mode 100644 plugin/evm/block_manager.go diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 7374fa4ece..e21cb1af95 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -8,11 +8,14 @@ import ( "context" "errors" "fmt" + "math/big" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" + "github.com/ava-labs/coreth/constants" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" @@ -20,6 +23,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/extension" "github.com/ava-labs/coreth/precompile/precompileconfig" "github.com/ava-labs/coreth/predicate" + "github.com/ava-labs/coreth/trie" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" @@ -31,6 +35,11 @@ var ( _ block.WithVerifyContext = (*Block)(nil) ) +var ( + apricotPhase0MinGasPrice = big.NewInt(params.LaunchMinGasPrice) + apricotPhase1MinGasPrice = big.NewInt(params.ApricotPhase1MinGasPrice) +) + // Block implements the snowman.Block interface type Block struct { id ids.ID @@ -42,6 +51,205 @@ type Block struct { // ID implements the snowman.Block interface func (b *Block) ID() ids.ID { return b.id } +// SyntacticVerify verifies that a *Block is well-formed. +func (b *Block) SyntacticVerify() error { + if b == nil || b.ethBlock == nil { + return errInvalidBlock + } + + header := b.ethBlock.Header() + + // Skip verification of the genesis block since it should already be marked as accepted. 
+ if b.ethBlock.Hash() == b.vm.genesisHash { + return nil + } + + rules := b.vm.chainConfig.Rules(header.Number, header.Time) + ethHeader := b.ethBlock.Header() + + // Perform block and header sanity checks + if !ethHeader.Number.IsUint64() { + return fmt.Errorf("invalid block number: %v", ethHeader.Number) + } + if !ethHeader.Difficulty.IsUint64() || ethHeader.Difficulty.Cmp(common.Big1) != 0 { + return fmt.Errorf("invalid difficulty: %d", ethHeader.Difficulty) + } + if ethHeader.Nonce.Uint64() != 0 { + return fmt.Errorf( + "expected nonce to be 0 but got %d: %w", + ethHeader.Nonce.Uint64(), errInvalidNonce, + ) + } + + if ethHeader.MixDigest != (common.Hash{}) { + return fmt.Errorf("invalid mix digest: %v", ethHeader.MixDigest) + } + + // Enforce static gas limit after ApricotPhase1 (prior to ApricotPhase1 it's handled in processing). + if rules.IsCortina { + if ethHeader.GasLimit != params.CortinaGasLimit { + return fmt.Errorf( + "expected gas limit to be %d after cortina but got %d", + params.CortinaGasLimit, ethHeader.GasLimit, + ) + } + } else if rules.IsApricotPhase1 { + if ethHeader.GasLimit != params.ApricotPhase1GasLimit { + return fmt.Errorf( + "expected gas limit to be %d after apricot phase 1 but got %d", + params.ApricotPhase1GasLimit, ethHeader.GasLimit, + ) + } + } + + // Check that the size of the header's Extra data field is correct for [rules]. 
+ headerExtraDataSize := len(ethHeader.Extra) + switch { + case rules.IsDurango: + if headerExtraDataSize < params.DynamicFeeExtraDataSize { + return fmt.Errorf( + "expected header ExtraData to be len >= %d but got %d", + params.DynamicFeeExtraDataSize, len(ethHeader.Extra), + ) + } + case rules.IsApricotPhase3: + if headerExtraDataSize != params.DynamicFeeExtraDataSize { + return fmt.Errorf( + "expected header ExtraData to be len %d but got %d", + params.DynamicFeeExtraDataSize, headerExtraDataSize, + ) + } + case rules.IsApricotPhase1: + if headerExtraDataSize != 0 { + return fmt.Errorf( + "expected header ExtraData to be 0 but got %d", + headerExtraDataSize, + ) + } + default: + if uint64(headerExtraDataSize) > params.MaximumExtraDataSize { + return fmt.Errorf( + "expected header ExtraData to be <= %d but got %d", + params.MaximumExtraDataSize, headerExtraDataSize, + ) + } + } + + if b.ethBlock.Version() != 0 { + return fmt.Errorf("invalid version: %d", b.ethBlock.Version()) + } + + // Check that the tx hash in the header matches the body + txsHash := types.DeriveSha(b.ethBlock.Transactions(), trie.NewStackTrie(nil)) + if txsHash != ethHeader.TxHash { + return fmt.Errorf("invalid txs hash %v does not match calculated txs hash %v", ethHeader.TxHash, txsHash) + } + // Check that the uncle hash in the header matches the body + uncleHash := types.CalcUncleHash(b.ethBlock.Uncles()) + if uncleHash != ethHeader.UncleHash { + return fmt.Errorf("invalid uncle hash %v does not match calculated uncle hash %v", ethHeader.UncleHash, uncleHash) + } + // Coinbase must match the BlackholeAddr on C-Chain + if ethHeader.Coinbase != constants.BlackholeAddr { + return fmt.Errorf("invalid coinbase %v does not match required blackhole address %v", ethHeader.Coinbase, constants.BlackholeAddr) + } + // Block must not have any uncles + if len(b.ethBlock.Uncles()) > 0 { + return errUnclesUnsupported + } + + // Enforce minimum gas prices here prior to dynamic fees going into effect. 
+ switch { + case !rules.IsApricotPhase1: + // If we are in ApricotPhase0, enforce each transaction has a minimum gas price of at least the LaunchMinGasPrice + for _, tx := range b.ethBlock.Transactions() { + if tx.GasPrice().Cmp(apricotPhase0MinGasPrice) < 0 { + return fmt.Errorf("block contains tx %s with gas price too low (%d < %d)", tx.Hash(), tx.GasPrice(), params.LaunchMinGasPrice) + } + } + case !rules.IsApricotPhase3: + // If we are prior to ApricotPhase3, enforce each transaction has a minimum gas price of at least the ApricotPhase1MinGasPrice + for _, tx := range b.ethBlock.Transactions() { + if tx.GasPrice().Cmp(apricotPhase1MinGasPrice) < 0 { + return fmt.Errorf("block contains tx %s with gas price too low (%d < %d)", tx.Hash(), tx.GasPrice(), params.ApricotPhase1MinGasPrice) + } + } + } + + // Ensure BaseFee is non-nil as of ApricotPhase3. + if rules.IsApricotPhase3 { + if ethHeader.BaseFee == nil { + return errNilBaseFeeApricotPhase3 + } + // TODO: this should be removed as 256 is the maximum possible bit length of a big int + if bfLen := ethHeader.BaseFee.BitLen(); bfLen > 256 { + return fmt.Errorf("too large base fee: bitlen %d", bfLen) + } + } + + if rules.IsApricotPhase4 { + switch { + // Make sure BlockGasCost is not nil + // NOTE: ethHeader.BlockGasCost correctness is checked in header verification + case ethHeader.BlockGasCost == nil: + return errNilBlockGasCostApricotPhase4 + case !ethHeader.BlockGasCost.IsUint64(): + return fmt.Errorf("too large blockGasCost: %d", ethHeader.BlockGasCost) + } + } + + // Verify the existence / non-existence of excessBlobGas + cancun := rules.IsCancun + if !cancun && ethHeader.ExcessBlobGas != nil { + return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", *ethHeader.ExcessBlobGas) + } + if !cancun && ethHeader.BlobGasUsed != nil { + return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *ethHeader.BlobGasUsed) + } + if cancun && ethHeader.ExcessBlobGas == nil { + return errors.New("header is 
missing excessBlobGas") + } + if cancun && ethHeader.BlobGasUsed == nil { + return errors.New("header is missing blobGasUsed") + } + if !cancun && ethHeader.ParentBeaconRoot != nil { + return fmt.Errorf("invalid parentBeaconRoot: have %x, expected nil", *ethHeader.ParentBeaconRoot) + } + + if cancun { + switch { + case ethHeader.ParentBeaconRoot == nil: + return errors.New("header is missing parentBeaconRoot") + case *ethHeader.ParentBeaconRoot != (common.Hash{}): + return fmt.Errorf("invalid parentBeaconRoot: have %x, expected empty hash", ethHeader.ParentBeaconRoot) + } + if ethHeader.BlobGasUsed == nil { + return fmt.Errorf("blob gas used must not be nil in Cancun") + } else if *ethHeader.BlobGasUsed > 0 { + return fmt.Errorf("blobs not enabled on avalanche networks: used %d blob gas, expected 0", *ethHeader.BlobGasUsed) + } + } + + if b.extension != nil { + return b.extension.SyntacticVerify(b, rules) + } + return nil +} + +// SemanticVerify verifies that a *Block is internally consistent. +func (b *Block) SemanticVerify() error { + // Make sure the block isn't too far in the future + blockTimestamp := b.ethBlock.Time() + if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { + return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) + } + + if b.extension != nil { + return b.extension.SemanticVerify(b) + } + return nil +} + // Accept implements the snowman.Block interface func (b *Block) Accept(context.Context) error { vm := b.vm @@ -148,25 +356,9 @@ func (b *Block) Timestamp() time.Time { return time.Unix(int64(b.ethBlock.Time()), 0) } -// syntacticVerify verifies that a *Block is well-formed. -func (b *Block) syntacticVerify() error { - if b == nil || b.ethBlock == nil { - return errInvalidBlock - } - - header := b.ethBlock.Header() - - // Skip verification of the genesis block since it should already be marked as accepted. 
- if b.ethBlock.Hash() == b.vm.genesisHash { - return nil - } - rules := b.vm.chainConfig.Rules(header.Number, header.Time) - return b.SyntacticVerify(rules) -} - // Verify implements the snowman.Block interface func (b *Block) Verify(context.Context) error { - return b.semanticVerify(&precompileconfig.PredicateContext{ + return b.verify(&precompileconfig.PredicateContext{ SnowCtx: b.vm.ctx, ProposerVMBlockCtx: nil, }, true) @@ -197,7 +389,7 @@ func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) { // VerifyWithContext implements the block.WithVerifyContext interface func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block.Context) error { - return b.semanticVerify(&precompileconfig.PredicateContext{ + return b.verify(&precompileconfig.PredicateContext{ SnowCtx: b.vm.ctx, ProposerVMBlockCtx: proposerVMBlockCtx, }, true) @@ -206,13 +398,13 @@ func (b *Block) VerifyWithContext(ctx context.Context, proposerVMBlockCtx *block // Verify the block is valid. // Enforces that the predicates are valid within [predicateContext]. // Writes the block details to disk and the state to the trie manager iff writes=true. -func (b *Block) semanticVerify(predicateContext *precompileconfig.PredicateContext, writes bool) error { +func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writes bool) error { if predicateContext.ProposerVMBlockCtx != nil { log.Debug("Verifying block with context", "block", b.ID(), "height", b.Height()) } else { log.Debug("Verifying block without context", "block", b.ID(), "height", b.Height()) } - if err := b.syntacticVerify(); err != nil { + if err := b.SyntacticVerify(); err != nil { return fmt.Errorf("syntactic block verification failed: %w", err) } diff --git a/plugin/evm/block_manager.go b/plugin/evm/block_manager.go deleted file mode 100644 index c20e33b81b..0000000000 --- a/plugin/evm/block_manager.go +++ /dev/null @@ -1,219 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package evm - -import ( - "errors" - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/common" - - "github.com/ava-labs/avalanchego/ids" - - "github.com/ava-labs/coreth/constants" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/trie" -) - -var ( - apricotPhase0MinGasPrice = big.NewInt(params.LaunchMinGasPrice) - apricotPhase1MinGasPrice = big.NewInt(params.ApricotPhase1MinGasPrice) -) - -// newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface -func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { - return &Block{ - id: ids.ID(ethBlock.Hash()), - ethBlock: ethBlock, - extension: vm.extensionConfig.BlockExtension, - vm: vm, - }, nil -} - -func (b *Block) SyntacticVerify(rules params.Rules) error { - ethHeader := b.ethBlock.Header() - - // Perform block and header sanity checks - if !ethHeader.Number.IsUint64() { - return fmt.Errorf("invalid block number: %v", ethHeader.Number) - } - if !ethHeader.Difficulty.IsUint64() || ethHeader.Difficulty.Cmp(common.Big1) != 0 { - return fmt.Errorf("invalid difficulty: %d", ethHeader.Difficulty) - } - if ethHeader.Nonce.Uint64() != 0 { - return fmt.Errorf( - "expected nonce to be 0 but got %d: %w", - ethHeader.Nonce.Uint64(), errInvalidNonce, - ) - } - - if ethHeader.MixDigest != (common.Hash{}) { - return fmt.Errorf("invalid mix digest: %v", ethHeader.MixDigest) - } - - // Enforce static gas limit after ApricotPhase1 (prior to ApricotPhase1 it's handled in processing). 
- if rules.IsCortina { - if ethHeader.GasLimit != params.CortinaGasLimit { - return fmt.Errorf( - "expected gas limit to be %d after cortina but got %d", - params.CortinaGasLimit, ethHeader.GasLimit, - ) - } - } else if rules.IsApricotPhase1 { - if ethHeader.GasLimit != params.ApricotPhase1GasLimit { - return fmt.Errorf( - "expected gas limit to be %d after apricot phase 1 but got %d", - params.ApricotPhase1GasLimit, ethHeader.GasLimit, - ) - } - } - - // Check that the size of the header's Extra data field is correct for [rules]. - headerExtraDataSize := len(ethHeader.Extra) - switch { - case rules.IsDurango: - if headerExtraDataSize < params.DynamicFeeExtraDataSize { - return fmt.Errorf( - "expected header ExtraData to be len >= %d but got %d", - params.DynamicFeeExtraDataSize, len(ethHeader.Extra), - ) - } - case rules.IsApricotPhase3: - if headerExtraDataSize != params.DynamicFeeExtraDataSize { - return fmt.Errorf( - "expected header ExtraData to be len %d but got %d", - params.DynamicFeeExtraDataSize, headerExtraDataSize, - ) - } - case rules.IsApricotPhase1: - if headerExtraDataSize != 0 { - return fmt.Errorf( - "expected header ExtraData to be 0 but got %d", - headerExtraDataSize, - ) - } - default: - if uint64(headerExtraDataSize) > params.MaximumExtraDataSize { - return fmt.Errorf( - "expected header ExtraData to be <= %d but got %d", - params.MaximumExtraDataSize, headerExtraDataSize, - ) - } - } - - if b.ethBlock.Version() != 0 { - return fmt.Errorf("invalid version: %d", b.ethBlock.Version()) - } - - // Check that the tx hash in the header matches the body - txsHash := types.DeriveSha(b.ethBlock.Transactions(), trie.NewStackTrie(nil)) - if txsHash != ethHeader.TxHash { - return fmt.Errorf("invalid txs hash %v does not match calculated txs hash %v", ethHeader.TxHash, txsHash) - } - // Check that the uncle hash in the header matches the body - uncleHash := types.CalcUncleHash(b.ethBlock.Uncles()) - if uncleHash != ethHeader.UncleHash { - return 
fmt.Errorf("invalid uncle hash %v does not match calculated uncle hash %v", ethHeader.UncleHash, uncleHash) - } - // Coinbase must match the BlackholeAddr on C-Chain - if ethHeader.Coinbase != constants.BlackholeAddr { - return fmt.Errorf("invalid coinbase %v does not match required blackhole address %v", ethHeader.Coinbase, constants.BlackholeAddr) - } - // Block must not have any uncles - if len(b.ethBlock.Uncles()) > 0 { - return errUnclesUnsupported - } - - // Enforce minimum gas prices here prior to dynamic fees going into effect. - switch { - case !rules.IsApricotPhase1: - // If we are in ApricotPhase0, enforce each transaction has a minimum gas price of at least the LaunchMinGasPrice - for _, tx := range b.ethBlock.Transactions() { - if tx.GasPrice().Cmp(apricotPhase0MinGasPrice) < 0 { - return fmt.Errorf("block contains tx %s with gas price too low (%d < %d)", tx.Hash(), tx.GasPrice(), params.LaunchMinGasPrice) - } - } - case !rules.IsApricotPhase3: - // If we are prior to ApricotPhase3, enforce each transaction has a minimum gas price of at least the ApricotPhase1MinGasPrice - for _, tx := range b.ethBlock.Transactions() { - if tx.GasPrice().Cmp(apricotPhase1MinGasPrice) < 0 { - return fmt.Errorf("block contains tx %s with gas price too low (%d < %d)", tx.Hash(), tx.GasPrice(), params.ApricotPhase1MinGasPrice) - } - } - } - - // Ensure BaseFee is non-nil as of ApricotPhase3. 
- if rules.IsApricotPhase3 { - if ethHeader.BaseFee == nil { - return errNilBaseFeeApricotPhase3 - } - // TODO: this should be removed as 256 is the maximum possible bit length of a big int - if bfLen := ethHeader.BaseFee.BitLen(); bfLen > 256 { - return fmt.Errorf("too large base fee: bitlen %d", bfLen) - } - } - - if rules.IsApricotPhase4 { - switch { - // Make sure BlockGasCost is not nil - // NOTE: ethHeader.BlockGasCost correctness is checked in header verification - case ethHeader.BlockGasCost == nil: - return errNilBlockGasCostApricotPhase4 - case !ethHeader.BlockGasCost.IsUint64(): - return fmt.Errorf("too large blockGasCost: %d", ethHeader.BlockGasCost) - } - } - - // Verify the existence / non-existence of excessBlobGas - cancun := rules.IsCancun - if !cancun && ethHeader.ExcessBlobGas != nil { - return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", *ethHeader.ExcessBlobGas) - } - if !cancun && ethHeader.BlobGasUsed != nil { - return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", *ethHeader.BlobGasUsed) - } - if cancun && ethHeader.ExcessBlobGas == nil { - return errors.New("header is missing excessBlobGas") - } - if cancun && ethHeader.BlobGasUsed == nil { - return errors.New("header is missing blobGasUsed") - } - if !cancun && ethHeader.ParentBeaconRoot != nil { - return fmt.Errorf("invalid parentBeaconRoot: have %x, expected nil", *ethHeader.ParentBeaconRoot) - } - - if cancun { - switch { - case ethHeader.ParentBeaconRoot == nil: - return errors.New("header is missing parentBeaconRoot") - case *ethHeader.ParentBeaconRoot != (common.Hash{}): - return fmt.Errorf("invalid parentBeaconRoot: have %x, expected empty hash", ethHeader.ParentBeaconRoot) - } - if ethHeader.BlobGasUsed == nil { - return fmt.Errorf("blob gas used must not be nil in Cancun") - } else if *ethHeader.BlobGasUsed > 0 { - return fmt.Errorf("blobs not enabled on avalanche networks: used %d blob gas, expected 0", *ethHeader.BlobGasUsed) - } - } - - if b.extension != 
nil { - return b.extension.SyntacticVerify(b, rules) - } - return nil -} - -func (b *Block) SemanticVerify() error { - // Make sure the block isn't too far in the future - blockTimestamp := b.ethBlock.Time() - if maxBlockTime := uint64(b.vm.clock.Time().Add(maxFutureBlockTime).Unix()); blockTimestamp > maxBlockTime { - return fmt.Errorf("block timestamp is too far in the future: %d > allowed %d", blockTimestamp, maxBlockTime) - } - - if b.extension != nil { - return b.extension.SemanticVerify(b) - } - return nil -} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 1188b5cfb2..43e258d091 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -865,6 +865,16 @@ func (vm *VM) Shutdown(context.Context) error { return nil } +// newBlock returns a new Block wrapping the ethBlock type and implementing the snowman.Block interface +func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { + return &Block{ + id: ids.ID(ethBlock.Hash()), + ethBlock: ethBlock, + extension: vm.extensionConfig.BlockExtension, + vm: vm, + }, nil +} + // buildBlock builds a block to be wrapped by ChainState func (vm *VM) buildBlock(ctx context.Context) (snowman.Block, error) { return vm.buildBlockWithContext(ctx, nil) @@ -906,7 +916,7 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo // We call verify without writes here to avoid generating a reference // to the blk state root in the triedb when we are going to call verify // again from the consensus engine with writes enabled. - if err := blk.semanticVerify(predicateCtx, false /*=writes*/); err != nil { + if err := blk.verify(predicateCtx, false /*=writes*/); err != nil { return nil, fmt.Errorf("%w: %w", vmerrors.ErrBlockVerificationFailed, err) } @@ -928,7 +938,7 @@ func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { } // Performing syntactic verification in ParseBlock allows for // short-circuiting bad blocks before they are processed by the VM. 
- if err := block.syntacticVerify(); err != nil { + if err := block.SyntacticVerify(); err != nil { return nil, fmt.Errorf("syntactic block verification failed: %w", err) } return block, nil From 9d7bfc74d915d7c527b0b30c487ebdd2503993c5 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 11 Feb 2025 20:56:10 +0300 Subject: [PATCH 81/91] bump avalanchego dep --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7442e17f4f..7ba3c6b35b 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.12.3-0.20250205234502-286645b55326 + github.com/ava-labs/avalanchego v1.12.3-0.20250211171948-c2c6e5dd9f6a github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index fa6b120cb2..6aa5f77eaf 100644 --- a/go.sum +++ b/go.sum @@ -54,8 +54,8 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.12.3-0.20250205234502-286645b55326 h1:/ZhQ/yBU8i9vNTrGyNq3ioNb94owwtjREWsiIXAZack= -github.com/ava-labs/avalanchego v1.12.3-0.20250205234502-286645b55326/go.mod h1:0m9tYzjo53qEf9ZbfFZ4878/PQqkZ8erj9j6JBXp6P4= +github.com/ava-labs/avalanchego v1.12.3-0.20250211171948-c2c6e5dd9f6a h1:eTTraMsAXkYajANjyz5+vj5pRr9HJwOSv3vrboOzEZg= +github.com/ava-labs/avalanchego v1.12.3-0.20250211171948-c2c6e5dd9f6a/go.mod h1:V8Rb40UUnYPHRp4l7SDo4+h8SMBBiB3q+hHGuQP+vJ4= github.com/aymerick/raymond 
v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From 3eb68c6132f24d7056a2c8d9c789d6d9f169164f Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 11 Feb 2025 21:04:47 +0300 Subject: [PATCH 82/91] bump e2e version --- scripts/versions.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/versions.sh b/scripts/versions.sh index 615cc55d0a..01b2eecf0f 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -6,4 +6,4 @@ set -euo pipefail # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'286645b'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'c2c6e5dd'} From a0767d313d21d37a9fb18b282e8677e7150bfc75 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 12 Feb 2025 23:13:06 +0300 Subject: [PATCH 83/91] Update plugin/evm/vm.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/vm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 43e258d091..8d11ee97e9 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -543,7 +543,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { return vm.initChainState(vm.blockChain.LastAcceptedBlock()) } -// initializeStateSync initializes the client for performing state sync. +// initializeStateSync initializes the vm for performing state sync and responding to peer requests. // If state sync is disabled, this function will wipe any ongoing summary from // disk to ensure that we do not continue syncing from an invalid snapshot. 
func (vm *VM) initializeStateSync(lastAcceptedHeight uint64) error { From 463491726073bf829906d26bd2dd8fadda8c771d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 12 Feb 2025 23:13:15 +0300 Subject: [PATCH 84/91] Update plugin/evm/vm.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/vm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 8d11ee97e9..a093ca9cc3 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -904,7 +904,7 @@ func (vm *VM) buildBlockWithContext(ctx context.Context, proposerVMBlockCtx *blo return nil, fmt.Errorf("%w: %w", vmerrors.ErrMakeNewBlockFailed, err) } - // Verify is called on a non-wr apped block here, such that this + // Verify is called on a non-wrapped block here, such that this // does not add [blk] to the processing blocks map in ChainState. // // TODO cache verification since Verify() will be called by the From a30fae0abc4102537faa249e0d8e7e60a1afec39 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 12 Feb 2025 23:31:58 +0300 Subject: [PATCH 85/91] rename onerror --- plugin/evm/atomic/vm/block_extension.go | 4 ++-- plugin/evm/block.go | 2 +- plugin/evm/extension/config.go | 9 ++++----- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go index e15b865310..553aa1dd8e 100644 --- a/plugin/evm/atomic/vm/block_extension.go +++ b/plugin/evm/atomic/vm/block_extension.go @@ -198,8 +198,8 @@ func (be *blockExtension) OnReject(b extension.VMBlock) error { return atomicState.Reject() } -// OnError is called when the block is cleaned up after a failed insertion. -func (be *blockExtension) OnError(b extension.VMBlock) { +// CleanupVerified is called when the block is cleaned up after a failed insertion. 
+func (be *blockExtension) CleanupVerified(b extension.VMBlock) { if atomicState, err := be.vm.atomicBackend.GetVerifiedAtomicState(b.GetEthBlock().Hash()); err == nil { atomicState.Reject() } diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 7374fa4ece..c70a8d8046 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -241,7 +241,7 @@ func (b *Block) semanticVerify(predicateContext *precompileconfig.PredicateConte err := b.vm.blockChain.InsertBlockManual(b.ethBlock, writes) if b.extension != nil && (err != nil || !writes) { - b.extension.OnError(b) + b.extension.CleanupVerified(b) } return err } diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go index 54ac9a3049..c28d7c40cd 100644 --- a/plugin/evm/extension/config.go +++ b/plugin/evm/extension/config.go @@ -94,11 +94,10 @@ type BlockExtension interface { // SemanticVerify verifies the block semantically // it can be implemented to extend inner block verification SemanticVerify(b VMBlock) error - // OnError is called when a block has passed SemanticVerify and SynctacticVerify, - // but failed insertion in the chain. This allows the block manager to perform any - // needed cleanup. This does not return any error because the block manager - // propagates the original error. - OnError(b VMBlock) + // CleanupVerified is called when a block has passed SemanticVerify and SyntacticVerify, + // and should be cleaned up due to an error or because verification ran in non-write mode. This + // does not return an error because the block has already been verified.
+ CleanupVerified(b VMBlock) // OnAccept is called when a block is accepted by the block manager OnAccept(b VMBlock, acceptedBatch database.Batch) error // OnReject is called when a block is rejected by the block manager From 7a79b9efd8fc29939b3783ebf3ca40050bfe9d4e Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 12 Feb 2025 23:32:10 +0300 Subject: [PATCH 86/91] move set extension fn above --- plugin/evm/vm_extensible.go | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/plugin/evm/vm_extensible.go b/plugin/evm/vm_extensible.go index 8a74ab4c1e..2f70093622 100644 --- a/plugin/evm/vm_extensible.go +++ b/plugin/evm/vm_extensible.go @@ -24,9 +24,18 @@ var ( errExtensionConfigAlreadySet = errors.New("extension config already set") ) -/* - // All these methods assumes that VM is already initialized -*/ +func (vm *VM) SetExtensionConfig(config *extension.Config) error { + if vm.ctx != nil { + return errVMAlreadyInitialized + } + if vm.extensionConfig != nil { + return errExtensionConfigAlreadySet + } + vm.extensionConfig = config + return nil +} + +// All these methods below assume that VM is already initialized func (vm *VM) GetVMBlock(ctx context.Context, blkID ids.ID) (extension.VMBlock, error) { // Since each internal handler used by [vm.State] always returns a block @@ -93,14 +102,3 @@ func (vm *VM) SyncerClient() vmsync.Client { func (vm *VM) Version(context.Context) (string, error) { return Version, nil } - -func (vm *VM) SetExtensionConfig(config *extension.Config) error { - if vm.ctx != nil { - return errVMAlreadyInitialized - } - if vm.extensionConfig != nil { - return errExtensionConfigAlreadySet - } - vm.extensionConfig = config - return nil -} From d83860bae9858df24193f3762a05c50f1ea9f677 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 13 Feb 2025 20:18:07 +0300 Subject: [PATCH 87/91] reorder/fix semantic verify --- plugin/evm/atomic/vm/block_extension.go | 4 ++++ plugin/evm/block.go | 8 ++++---- 2
files changed, 8 insertions(+), 4 deletions(-) diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go index 553aa1dd8e..dc4a4d4015 100644 --- a/plugin/evm/atomic/vm/block_extension.go +++ b/plugin/evm/atomic/vm/block_extension.go @@ -144,6 +144,10 @@ func (be *blockExtension) SyntacticVerify(b extension.VMBlock, rules params.Rule // SemanticVerify checks the semantic validity of the block. This is called the wrapper // block manager's SemanticVerify method. func (be *blockExtension) SemanticVerify(b extension.VMBlock) error { + // If the VM is not bootstrapped, we cannot verify atomic transactions + if !be.vm.IsBootstrapped() { + return nil + } atomicTxs, err := extractAtomicTxsFromBlock(b, be.vm.Ethereum().BlockChain().Config()) if err != nil { return err diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 75fa9c85b9..999f850db0 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -408,6 +408,10 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ return fmt.Errorf("syntactic block verification failed: %w", err) } + if err := b.SemanticVerify(); err != nil { + return fmt.Errorf("failed to verify block extension: %w", err) + } + // Only enforce predicates if the chain has already bootstrapped. // If the chain is still bootstrapping, we can assume that all blocks we are verifying have // been accepted by the network (so the predicate was validated by the network when the @@ -418,10 +422,6 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ } } - if err := b.SemanticVerify(); err != nil { - return fmt.Errorf("failed to verify block extension: %w", err) - } - // The engine may call VerifyWithContext multiple times on the same block with different contexts. // Since the engine will only call Accept/Reject once, we should only call InsertBlockManual once. 
// Additionally, if a block is already in processing, then it has already passed verification and From b6bc1bbd593d6f1d69547a2289a0ab05ca86c180 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 14 Feb 2025 23:09:43 +0300 Subject: [PATCH 88/91] nits --- plugin/evm/atomic/state/atomic_backend.go | 5 ++--- plugin/evm/client/utils.go | 13 ------------- 2 files changed, 2 insertions(+), 16 deletions(-) delete mode 100644 plugin/evm/client/utils.go diff --git a/plugin/evm/atomic/state/atomic_backend.go b/plugin/evm/atomic/state/atomic_backend.go index db799c7d68..57616e7a23 100644 --- a/plugin/evm/atomic/state/atomic_backend.go +++ b/plugin/evm/atomic/state/atomic_backend.go @@ -19,10 +19,9 @@ import ( "github.com/ethereum/go-ethereum/log" ) -var sharedMemoryApplyBatchSize = 10_000 // specifies the number of atomic operations to batch progress updates - const ( - progressLogFrequency = 30 * time.Second + sharedMemoryApplyBatchSize = 10_000 // specifies the number of atomic operations to batch progress updates + progressLogFrequency = 30 * time.Second ) // AtomicBackend implements the AtomicBackend interface using diff --git a/plugin/evm/client/utils.go b/plugin/evm/client/utils.go deleted file mode 100644 index 5ea43f4a20..0000000000 --- a/plugin/evm/client/utils.go +++ /dev/null @@ -1,13 +0,0 @@ -// (c) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package client - -import "github.com/ethereum/go-ethereum/common" - -func ParseEthAddress(addrStr string) (common.Address, error) { - if !common.IsHexAddress(addrStr) { - return common.Address{}, errInvalidAddr - } - return common.HexToAddress(addrStr), nil -} From 314f4471605997e3e2bd0bae3e6f2c7a1f28e87b Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 14 Feb 2025 23:48:27 +0300 Subject: [PATCH 89/91] remove unused err --- plugin/evm/client/client.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugin/evm/client/client.go b/plugin/evm/client/client.go index b92b8bb0c0..6de9ffe834 100644 --- a/plugin/evm/client/client.go +++ b/plugin/evm/client/client.go @@ -5,7 +5,6 @@ package client import ( "context" - "errors" "fmt" "golang.org/x/exp/slog" @@ -24,8 +23,6 @@ import ( // Interface compliance var _ Client = (*client)(nil) -var errInvalidAddr = errors.New("invalid hex address") - // Client interface for interacting with EVM [chain] type Client interface { IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) From cfdd80b8f5c4a801e8c2b29811e178cc51097bd6 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 21 Feb 2025 18:16:50 +0300 Subject: [PATCH 90/91] fix ap3 import --- plugin/evm/vm_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index b7a1447121..1d1e9e283a 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -29,6 +29,7 @@ import ( "github.com/ava-labs/coreth/plugin/evm/header" "github.com/ava-labs/coreth/plugin/evm/upgrade/ap0" "github.com/ava-labs/coreth/plugin/evm/upgrade/ap1" + "github.com/ava-labs/coreth/plugin/evm/upgrade/ap3" "github.com/ava-labs/coreth/trie" "github.com/ava-labs/coreth/utils" @@ -73,7 +74,7 @@ var ( testKeys []*secp256k1.PrivateKey testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] testShortIDAddrs []ids.ShortID - initialBaseFee = big.NewInt(params.ApricotPhase3InitialBaseFee) + 
initialBaseFee = big.NewInt(ap3.InitialBaseFee) genesisJSON = func(cfg *params.ChainConfig) string { g := new(core.Genesis) From 950e83a18cde09f7a14ee1778a4303cf8c4cc0d0 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 24 Feb 2025 18:42:57 +0300 Subject: [PATCH 91/91] reintroduce ap3 check in tip --- plugin/evm/atomic/vm/vm.go | 10 ++++++---- plugin/evm/atomic/vm/vm_test.go | 3 +-- plugin/evm/testutils/test_syncervm.go | 16 +++++++++------- warp/handlers/signature_request_test.go | 1 + 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/plugin/evm/atomic/vm/vm.go b/plugin/evm/atomic/vm/vm.go index f7352ba809..7944cc49a7 100644 --- a/plugin/evm/atomic/vm/vm.go +++ b/plugin/evm/atomic/vm/vm.go @@ -402,10 +402,12 @@ func (vm *VM) verifyTxAtTip(tx *atomic.Tx) error { parentHeader := preferredBlock var nextBaseFee *big.Int timestamp := uint64(vm.clock.Time().Unix()) - nextBaseFee, err = header.EstimateNextBaseFee(chainConfig, parentHeader, timestamp) - if err != nil { - // Return extremely detailed error since CalcBaseFee should never encounter an issue here - return fmt.Errorf("failed to calculate base fee with parent timestamp (%d), parent ExtraData: (0x%x), and current timestamp (%d): %w", parentHeader.Time, parentHeader.Extra, timestamp, err) + if chainConfig.IsApricotPhase3(timestamp) { + nextBaseFee, err = header.EstimateNextBaseFee(chainConfig, parentHeader, timestamp) + if err != nil { + // Return extremely detailed error since CalcBaseFee should never encounter an issue here + return fmt.Errorf("failed to calculate base fee with parent timestamp (%d), parent ExtraData: (0x%x), and current timestamp (%d): %w", parentHeader.Time, parentHeader.Extra, timestamp, err) + } } // We don’t need to revert the state here in case verifyTx errors, because diff --git a/plugin/evm/atomic/vm/vm_test.go b/plugin/evm/atomic/vm/vm_test.go index 30038c7e66..1a897e8c3f 100644 --- a/plugin/evm/atomic/vm/vm_test.go +++ b/plugin/evm/atomic/vm/vm_test.go @@ -24,7 +24,6 
@@ import ( "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/avalanchego/vms/secp256k1fx" accountKeystore "github.com/ava-labs/coreth/accounts/keystore" - "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" @@ -1580,7 +1579,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { if ethBlk.ExtDataGasUsed() == nil || ethBlk.ExtDataGasUsed().Cmp(common.Big0) != 0 { t.Fatalf("expected extDataGasUsed to be 0 but got %d", ethBlk.ExtDataGasUsed()) } - minRequiredTip, err = dummy.MinRequiredTip(vm.Ethereum().BlockChain().Config(), ethBlk.Header()) + minRequiredTip, err = header.EstimateRequiredTip(vm.Ethereum().BlockChain().Config(), ethBlk.Header()) if err != nil { t.Fatal(err) } diff --git a/plugin/evm/testutils/test_syncervm.go b/plugin/evm/testutils/test_syncervm.go index 1573fa904a..2a0b893c3e 100644 --- a/plugin/evm/testutils/test_syncervm.go +++ b/plugin/evm/testutils/test_syncervm.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/api/metrics" avalancheatomic "github.com/ava-labs/avalanchego/chains/atomic" avalanchedatabase "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" @@ -42,7 +43,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" ) @@ -116,12 +116,6 @@ func StateSyncFromScratchExceedParentTest(t *testing.T, testSetup *SyncTestSetup func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup) { rand.Seed(1) - // Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice. 
- metrics.Enabled = false - defer func() { - metrics.Enabled = true - }() - var lock sync.Mutex reqCount := 0 test := SyncTestParams{ @@ -167,6 +161,7 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup go testSyncVMSetup.serverVM.VM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) return nil } + resetMetrics(testSyncVMSetup.syncerVM.SnowCtx) stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}` if err := syncDisabledVM.Initialize( context.Background(), @@ -231,6 +226,7 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup `{"state-sync-enabled":true, "state-sync-min-blocks":%d}`, test.StateSyncMinBlocks, ) + resetMetrics(testSyncVMSetup.syncerVM.SnowCtx) if err := syncReEnabledVM.Initialize( context.Background(), testSyncVMSetup.syncerVM.SnowCtx, @@ -681,3 +677,9 @@ func assertSyncPerformedHeights(t *testing.T, db ethdb.Iteratee, expected map[ui require.NoError(t, it.Error()) require.Equal(t, expected, found) } + +// resetMetrics resets the vm avalanchego metrics, and allows +// for the VM to be re-initialized in tests. +func resetMetrics(snowCtx *snow.Context) { + snowCtx.Metrics = metrics.NewPrefixGatherer() +} diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index 45c95aed4d..20ccf81097 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/coreth/plugin/evm/message" messagetest "github.com/ava-labs/coreth/plugin/evm/message/testutils" + "github.com/ava-labs/coreth/plugin/evm/testutils" "github.com/ava-labs/coreth/utils" "github.com/ava-labs/coreth/warp" "github.com/ava-labs/coreth/warp/warptest"