From 5c0e1f2aa1e13cfd084fdd5c1e1ca31887f25c66 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Wed, 22 Jan 2025 21:57:26 +0100 Subject: [PATCH 01/28] wip --- client/blockchain-service/src/handler.rs | 24 +++++++++++++++++++--- client/common/src/types.rs | 1 + client/rpc/src/lib.rs | 11 ++-------- node/src/rpc.rs | 6 ++---- pallets/file-system/runtime-api/src/lib.rs | 10 ++++++++- pallets/file-system/src/utils.rs | 17 +++++++++++++++ runtime/src/apis.rs | 9 ++++++-- xcm-simulator/src/storagehub/apis.rs | 7 ++++++- 8 files changed, 65 insertions(+), 20 deletions(-) diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index b798976fc..ac4f96d31 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -40,8 +40,7 @@ use shc_actors_framework::actor::{Actor, ActorEventLoop}; use shc_common::{ blockchain_utils::{convert_raw_multiaddresses_to_multiaddr, get_events_at_block}, types::{ - BlockNumber, EitherBucketOrBspId, Fingerprint, ParachainClient, StorageProviderId, - TickNumber, BCSV_KEY_TYPE, + BlockNumber, EitherBucketOrBspId, Fingerprint, ParachainClient, StorageProviderId, StorageRequestMetadata, TickNumber, BCSV_KEY_TYPE }, }; use shp_file_metadata::FileKey; @@ -1195,9 +1194,28 @@ where // TODO: Send events to check that this node has a Forest Storage for the BSP that it manages. // TODO: Catch up to Forest root writes in the BSP Forest. } - Some(StorageProviderId::MainStorageProvider(_msp_id)) => { + Some(StorageProviderId::MainStorageProvider(msp_id)) => { // TODO: Send events to check that this node has a Forest Storage for each Bucket this MSP manages. // TODO: Catch up to Forest root writes in the Bucket's Forests. 
+ + // TODO: Call runtime here + let storage_requests: Vec = self + .client + .runtime_api() + .storage_requests_by_msp(block_hash, msp_id).unwrap(); + + for sr in storage_requests { + self.emit(NewStorageRequest { + who: sr.owner, + file_key: , + bucket_id: sr.bucket_id, + location: sr.location, + fingerprint: sr.fingerprint.into(), + size: sr.size, + user_peer_ids: , + expires_at: , + }) + } } None => { warn!(target: LOG_TARGET, "No Provider ID found. This node is not managing a Provider."); diff --git a/client/common/src/types.rs b/client/common/src/types.rs index 48e86e0c5..5d2c35cae 100644 --- a/client/common/src/types.rs +++ b/client/common/src/types.rs @@ -76,6 +76,7 @@ pub type Balance = pallet_storage_providers::types::BalanceOf; pub type OpaqueBlock = storage_hub_runtime::opaque::Block; pub type BlockHash = ::Hash; pub type PeerId = pallet_file_system::types::PeerId; +pub type StorageRequestMetadata = pallet_file_system::types::StorageRequestMetadata; /// Type alias for the events vector. 
/// diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index e1b1124fb..68c0b5f10 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -14,15 +14,7 @@ use tokio::{fs, fs::create_dir_all, sync::RwLock}; use pallet_file_system_runtime_api::FileSystemApi as FileSystemRuntimeApi; use pallet_proofs_dealer_runtime_api::ProofsDealerApi as ProofsDealerRuntimeApi; -use shc_common::{ - consts::CURRENT_FOREST_KEY, - types::{ - BackupStorageProviderId, BlockNumber, BucketId, ChunkId, CustomChallenge, FileMetadata, - ForestLeaf, HashT, KeyProof, KeyProofs, MainStorageProviderId, ProofsDealerProviderId, - Proven, RandomnessOutput, StorageProof, StorageProofsMerkleTrieLayout, BCSV_KEY_TYPE, - FILE_CHUNK_SIZE, - }, -}; +use shc_common::{consts::CURRENT_FOREST_KEY, types::*}; use shc_file_manager::traits::{ExcludeType, FileDataTrie, FileStorage, FileStorageError}; use shc_forest_manager::traits::{ForestStorage, ForestStorageHandler}; use sp_core::{sr25519::Pair as Sr25519Pair, Encode, Pair, H256}; @@ -253,6 +245,7 @@ where BlockNumber, ChunkId, BucketId, + StorageRequestMetadata, >, FL: FileStorage + Send + Sync, FSH: ForestStorageHandler + Send + Sync + 'static, diff --git a/node/src/rpc.rs b/node/src/rpc.rs index 792da3810..6c41cfa41 100644 --- a/node/src/rpc.rs +++ b/node/src/rpc.rs @@ -15,10 +15,7 @@ use sc_consensus_manual_seal::{ }; use sc_rpc::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; -use shc_common::types::{ - BackupStorageProviderId, BlockNumber, BucketId, ChunkId, CustomChallenge, ForestLeaf, - MainStorageProviderId, ProofsDealerProviderId, RandomnessOutput, -}; +use shc_common::types::*; use shc_forest_manager::traits::ForestStorageHandler; use shc_rpc::{StorageHubClientApiServer, StorageHubClientRpc, StorageHubClientRpcConfig}; use sp_api::ProvideRuntimeApi; @@ -73,6 +70,7 @@ where BlockNumber, ChunkId, BucketId, + StorageRequestMetadata, >, P: TransactionPool + Send + Sync + 'static, FL: FileStorageT, diff --git 
a/pallets/file-system/runtime-api/src/lib.rs b/pallets/file-system/runtime-api/src/lib.rs index 425db8b9f..1c62f1c71 100644 --- a/pallets/file-system/runtime-api/src/lib.rs +++ b/pallets/file-system/runtime-api/src/lib.rs @@ -51,9 +51,15 @@ pub enum GenericApplyDeltaEventInfoError { DecodeError, } +/// Error type for the `storage_requests_by_msp`. +#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum StorageRequestsByMSPError { + FailedToRetrieveStorageRequests, +} + sp_api::decl_runtime_apis! { #[api_version(1)] - pub trait FileSystemApi + pub trait FileSystemApi where BackupStorageProviderId: Codec, MainStorageProviderId: Codec, @@ -61,11 +67,13 @@ sp_api::decl_runtime_apis! { TickNumber: Codec, ChunkId: Codec, GenericApplyDeltaEventInfo: Codec, + StorageRequestMetadata: Codec, { fn is_storage_request_open_to_volunteers(file_key: FileKey) -> Result; fn query_earliest_file_volunteer_tick(bsp_id: BackupStorageProviderId, file_key: FileKey) -> Result; fn query_bsp_confirm_chunks_to_prove_for_file(bsp_id: BackupStorageProviderId, file_key: FileKey) -> Result, QueryBspConfirmChunksToProveForFileError>; fn query_msp_confirm_chunks_to_prove_for_file(msp_id: MainStorageProviderId, file_key: FileKey) -> Result, QueryMspConfirmChunksToProveForFileError>; fn decode_generic_apply_delta_event_info(encoded_event_info: Vec) -> Result; + fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec; } } diff --git a/pallets/file-system/src/utils.rs b/pallets/file-system/src/utils.rs index 6a4b53f5f..f457a741d 100644 --- a/pallets/file-system/src/utils.rs +++ b/pallets/file-system/src/utils.rs @@ -11,6 +11,7 @@ use frame_support::{ }, }; use num_bigint::BigUint; +use sp_core::H256; use sp_runtime::{ traits::{ Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Convert, ConvertBack, Hash, One, @@ -2817,6 +2818,22 @@ where ) -> Result, codec::Error> { BucketIdFor::::decode(&mut encoded_event_info.as_ref()) } + + pub fn storage_requests_by_msp( + 
msp_id: ProviderIdFor, + ) -> Vec<(MerkleHash, StorageRequestMetadata)> { + // Get the storeage requests for a specific MSP + StorageRequests::::iter() + .filter(|(_, metadata)| { + if let Some(msp) = metadata.msp { + msp.0 == msp_id + } else { + false + } + }) + .map(|(file_key, storage)| (file_key, storage)) + .collect() + } } mod hooks { diff --git a/runtime/src/apis.rs b/runtime/src/apis.rs index 2415f1f8d..3e25305f3 100644 --- a/runtime/src/apis.rs +++ b/runtime/src/apis.rs @@ -4,6 +4,7 @@ use frame_support::{ weights::Weight, }; use pallet_aura::Authorities; +use pallet_file_system::types::StorageRequestMetadata; use pallet_file_system_runtime_api::*; use pallet_payment_streams_runtime_api::*; use pallet_proofs_dealer::types::{ @@ -328,7 +329,7 @@ impl_runtime_apis! { } } - impl pallet_file_system_runtime_api::FileSystemApi, MainStorageProviderId, H256, BlockNumber, ChunkId, BucketId> for Runtime { + impl pallet_file_system_runtime_api::FileSystemApi, MainStorageProviderId, H256, BlockNumber, ChunkId, BucketId, StorageRequestMetadata> for Runtime { fn is_storage_request_open_to_volunteers(file_key: H256) -> Result { FileSystem::is_storage_request_open_to_volunteers(file_key) } @@ -345,9 +346,13 @@ impl_runtime_apis! 
{ FileSystem::query_msp_confirm_chunks_to_prove_for_file(msp_id, file_key) } - fn decode_generic_apply_delta_event_info(encoded_event_info: Vec) -> Result, GenericApplyDeltaEventInfoError> { + fn decode_generic_apply_delta_event_info(encoded_event_info: Vec) -> Result, GenericApplyDeltaEventInfoError> { FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } + + fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec> { + FileSystem::storage_requests_by_msp(msp_id) + } } impl pallet_payment_streams_runtime_api::PaymentStreamsApi, Balance, AccountId> for Runtime { diff --git a/xcm-simulator/src/storagehub/apis.rs b/xcm-simulator/src/storagehub/apis.rs index d5e0bc2fc..62e88a690 100644 --- a/xcm-simulator/src/storagehub/apis.rs +++ b/xcm-simulator/src/storagehub/apis.rs @@ -5,6 +5,7 @@ use frame_support::{ weights::Weight, }; use pallet_aura::Authorities; +use pallet_file_system::types::StorageRequestMetadata; use pallet_file_system_runtime_api::*; use pallet_payment_streams_runtime_api::*; use pallet_proofs_dealer::types::{ @@ -326,7 +327,7 @@ impl_runtime_apis! { } } - impl pallet_file_system_runtime_api::FileSystemApi, MainStorageProviderId, H256, BlockNumber, ChunkId, BucketId> for Runtime { + impl pallet_file_system_runtime_api::FileSystemApi, MainStorageProviderId, H256, BlockNumber, ChunkId, BucketId, StorageRequestMetadata> for Runtime { fn is_storage_request_open_to_volunteers(file_key: H256) -> Result { FileSystem::is_storage_request_open_to_volunteers(file_key) } @@ -346,6 +347,10 @@ impl_runtime_apis! 
{ fn decode_generic_apply_delta_event_info(encoded_event_info: Vec) -> Result, GenericApplyDeltaEventInfoError> { FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } + + fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec> { + FileSystem::storage_requests_by_msp(msp_id) + } } impl pallet_payment_streams_runtime_api::PaymentStreamsApi, Balance, AccountId> for Runtime { From 0bccfb6580cf4937233b952788d8545ce2d0bf53 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Thu, 23 Jan 2025 13:35:05 +0100 Subject: [PATCH 02/28] needed file key value and some minors fixes --- Cargo.lock | 1 + client/blockchain-service/src/handler.rs | 19 ++++++++++--------- pallets/file-system/runtime-api/Cargo.toml | 1 + pallets/file-system/runtime-api/src/lib.rs | 2 ++ pallets/file-system/src/utils.rs | 1 - runtime/src/apis.rs | 2 +- xcm-simulator/src/storagehub/apis.rs | 2 +- 7 files changed, 16 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 019ed2403..edc7e23fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7834,6 +7834,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-api 34.0.0", + "sp-core 34.0.0", "sp-runtime 39.0.5", ] diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index ac4f96d31..b87dc0b9a 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -40,7 +40,8 @@ use shc_actors_framework::actor::{Actor, ActorEventLoop}; use shc_common::{ blockchain_utils::{convert_raw_multiaddresses_to_multiaddr, get_events_at_block}, types::{ - BlockNumber, EitherBucketOrBspId, Fingerprint, ParachainClient, StorageProviderId, StorageRequestMetadata, TickNumber, BCSV_KEY_TYPE + BlockNumber, EitherBucketOrBspId, Fingerprint, ParachainClient, StorageProviderId, + StorageRequestMetadata, TickNumber, BCSV_KEY_TYPE, }, }; use shp_file_metadata::FileKey; @@ -1198,22 +1199,22 @@ where // TODO: Send events to check that this node has a Forest 
Storage for each Bucket this MSP manages. // TODO: Catch up to Forest root writes in the Bucket's Forests. - // TODO: Call runtime here - let storage_requests: Vec = self + let storage_requests: Vec<(H256, StorageRequestMetadata)> = self .client .runtime_api() - .storage_requests_by_msp(block_hash, msp_id).unwrap(); + .storage_requests_by_msp(block_hash, msp_id) + .unwrap(); - for sr in storage_requests { + for (file_key, sr) in storage_requests { self.emit(NewStorageRequest { who: sr.owner, - file_key: , + file_key: file_key.into(), bucket_id: sr.bucket_id, location: sr.location, - fingerprint: sr.fingerprint.into(), + fingerprint: Fingerprint::from(sr.fingerprint.as_bytes()), size: sr.size, - user_peer_ids: , - expires_at: , + user_peer_ids: sr.user_peer_ids, + expires_at: sr.expires_at, }) } } diff --git a/pallets/file-system/runtime-api/Cargo.toml b/pallets/file-system/runtime-api/Cargo.toml index c95521948..0a83f4a89 100644 --- a/pallets/file-system/runtime-api/Cargo.toml +++ b/pallets/file-system/runtime-api/Cargo.toml @@ -18,6 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { workspace = true, features = ["derive"] } scale-info = { workspace = true } sp-api = { workspace = true } +sp-core = { workspace = true } sp-runtime = { workspace = true } [features] diff --git a/pallets/file-system/runtime-api/src/lib.rs b/pallets/file-system/runtime-api/src/lib.rs index 1c62f1c71..b195e2311 100644 --- a/pallets/file-system/runtime-api/src/lib.rs +++ b/pallets/file-system/runtime-api/src/lib.rs @@ -3,6 +3,7 @@ use codec::{Codec, Decode, Encode}; use scale_info::prelude::vec::Vec; use scale_info::TypeInfo; +use sp_core::H256; use sp_runtime::RuntimeDebug; /// Error type for the `is_storage_request_open_to_volunteers` runtime API call. @@ -75,5 +76,6 @@ sp_api::decl_runtime_apis! 
{ fn query_msp_confirm_chunks_to_prove_for_file(msp_id: MainStorageProviderId, file_key: FileKey) -> Result, QueryMspConfirmChunksToProveForFileError>; fn decode_generic_apply_delta_event_info(encoded_event_info: Vec) -> Result; fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec; + fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)>; } } diff --git a/pallets/file-system/src/utils.rs b/pallets/file-system/src/utils.rs index f457a741d..ad9343eba 100644 --- a/pallets/file-system/src/utils.rs +++ b/pallets/file-system/src/utils.rs @@ -11,7 +11,6 @@ use frame_support::{ }, }; use num_bigint::BigUint; -use sp_core::H256; use sp_runtime::{ traits::{ Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Convert, ConvertBack, Hash, One, diff --git a/runtime/src/apis.rs b/runtime/src/apis.rs index 3e25305f3..8aa5e6f23 100644 --- a/runtime/src/apis.rs +++ b/runtime/src/apis.rs @@ -350,7 +350,7 @@ impl_runtime_apis! { FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } - fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec> { + fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { FileSystem::storage_requests_by_msp(msp_id) } } diff --git a/xcm-simulator/src/storagehub/apis.rs b/xcm-simulator/src/storagehub/apis.rs index 62e88a690..6ef104822 100644 --- a/xcm-simulator/src/storagehub/apis.rs +++ b/xcm-simulator/src/storagehub/apis.rs @@ -348,7 +348,7 @@ impl_runtime_apis! 
{ FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } - fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec> { + fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { FileSystem::storage_requests_by_msp(msp_id) } } From d7c056a1d982910ab58e43890ea4f0935062f7e1 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Wed, 29 Jan 2025 16:45:24 +0100 Subject: [PATCH 03/28] added an integration test but it is failing --- pallets/file-system/src/utils.rs | 2 +- .../integration/msp/catch-up-storage.test.ts | 67 +++++++++++++++++++ 2 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 test/suites/integration/msp/catch-up-storage.test.ts diff --git a/pallets/file-system/src/utils.rs b/pallets/file-system/src/utils.rs index ad9343eba..087d69d69 100644 --- a/pallets/file-system/src/utils.rs +++ b/pallets/file-system/src/utils.rs @@ -2830,7 +2830,7 @@ where false } }) - .map(|(file_key, storage)| (file_key, storage)) + .map(|(file_key, metadata)| (file_key, metadata)) .collect() } } diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts new file mode 100644 index 000000000..d9f3776ff --- /dev/null +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -0,0 +1,67 @@ +import assert, { strictEqual } from "node:assert"; +import { describeMspNet, shUser, type EnrichedBspApi, sleep } from "../../../util"; + +describeMspNet( + "MSP catching up with chain and volunteering for storage request", + { initialised: true, only: true }, + ({ before, createMsp1Api, it, createUserApi, getLaunchResponse }) => { + let userApi: EnrichedBspApi; + let mspApi: EnrichedBspApi; + + before(async () => { + userApi = await createUserApi(); + const maybeMspApi = await createMsp1Api(); + + assert(maybeMspApi, "MSP API not available"); + mspApi = maybeMspApi; + }); + + it("Network launches and can be queried", async () => { + const userNodePeerId 
= await userApi.rpc.system.localPeerId(); + strictEqual(userNodePeerId.toString(), userApi.shConsts.NODE_INFOS.user.expectedPeerId); + + const mspNodePeerId = await mspApi.rpc.system.localPeerId(); + strictEqual(mspNodePeerId.toString(), userApi.shConsts.NODE_INFOS.msp1.expectedPeerId); + }); + + it("MSP accepts subsequent storage request for the same file key", async () => { + const source = "res/whatsup.jpg"; + const destination = "test/smile.jpg"; + const initialised = await getLaunchResponse(); + const bucketId = initialised?.fileMetadata.bucketId; + + assert(bucketId, "Bucket ID not found"); + + const localBucketRoot = await mspApi.rpc.storagehubclient.getForestRoot(bucketId.toString()); + + await userApi.docker.pauseBspContainer("docker-sh-msp-1"); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS[source].fingerprint, + userApi.shConsts.TEST_ARTEFACTS[source].size, + userApi.shConsts.DUMMY_MSP_ID, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + null + ) + ], + signer: shUser + }); + + await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + + // Advancing 10 blocks to see if MSP catchup + await userApi.block.skip(50); + + await userApi.docker.restartBspContainer({ containerName: "docker-sh-msp-1" }); + + await sleep(50000); + + await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); + + }); + } +); From 3fdb9072da4eacf4d342e9bf55dfbfb5dd8dc57d Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Mon, 3 Feb 2025 17:06:07 +0100 Subject: [PATCH 04/28] failing integration test --- client/blockchain-service/src/handler.rs | 4 ++ pallets/file-system/src/utils.rs | 2 +- .../integration/msp/catch-up-storage.test.ts | 56 +++++++++++++++---- test/util/bspNet/waits.ts | 1 + 4 files changed, 51 insertions(+), 12 deletions(-) diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index 
b87dc0b9a..b0d34b4ae 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -1199,12 +1199,16 @@ where // TODO: Send events to check that this node has a Forest Storage for each Bucket this MSP manages. // TODO: Catch up to Forest root writes in the Bucket's Forests. + info!(target: LOG_TARGET, "Checking for storage requests for this MSP"); + let storage_requests: Vec<(H256, StorageRequestMetadata)> = self .client .runtime_api() .storage_requests_by_msp(block_hash, msp_id) .unwrap(); + dbg!(&storage_requests); + for (file_key, sr) in storage_requests { self.emit(NewStorageRequest { who: sr.owner, diff --git a/pallets/file-system/src/utils.rs b/pallets/file-system/src/utils.rs index 087d69d69..1a309ebcd 100644 --- a/pallets/file-system/src/utils.rs +++ b/pallets/file-system/src/utils.rs @@ -2825,7 +2825,7 @@ where StorageRequests::::iter() .filter(|(_, metadata)| { if let Some(msp) = metadata.msp { - msp.0 == msp_id + msp.0 == msp_id && !msp.1 } else { false } diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index d9f3776ff..651b8794f 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -1,10 +1,10 @@ import assert, { strictEqual } from "node:assert"; -import { describeMspNet, shUser, type EnrichedBspApi, sleep } from "../../../util"; +import { describeMspNet, shUser, type EnrichedBspApi, sleep, waitFor } from "../../../util"; describeMspNet( "MSP catching up with chain and volunteering for storage request", - { initialised: true, only: true }, - ({ before, createMsp1Api, it, createUserApi, getLaunchResponse }) => { + { initialised: false, only: true }, + ({ before, createMsp1Api, it, createUserApi, createApi }) => { let userApi: EnrichedBspApi; let mspApi: EnrichedBspApi; @@ -27,19 +27,32 @@ describeMspNet( it("MSP accepts subsequent storage request for the same file key", 
async () => { const source = "res/whatsup.jpg"; const destination = "test/smile.jpg"; - const initialised = await getLaunchResponse(); - const bucketId = initialised?.fileMetadata.bucketId; + const bucketName = "trying-things"; - assert(bucketId, "Bucket ID not found"); + const newBucketEventEvent = await userApi.createBucket(bucketName); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - const localBucketRoot = await mspApi.rpc.storagehubclient.getForestRoot(bucketId.toString()); + assert(newBucketEventDataBlob, "Event doesn't match Type"); + + const { + file_metadata + } = await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); await userApi.docker.pauseBspContainer("docker-sh-msp-1"); + // We need to wait so it won't try to answer the request storage + await sleep(10000); + await userApi.block.seal({ calls: [ userApi.tx.fileSystem.issueStorageRequest( - bucketId, + newBucketEventDataBlob.bucketId, destination, userApi.shConsts.TEST_ARTEFACTS[source].fingerprint, userApi.shConsts.TEST_ARTEFACTS[source].size, @@ -51,14 +64,35 @@ describeMspNet( signer: shUser }); - await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + const { event } = await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + const newStorageRequestDataBlob = + userApi.events.fileSystem.NewStorageRequest.is(event) && event.data; + assert( + newStorageRequestDataBlob, + "NewStorageRequest event data does not match expected type" + ); // Advancing 10 blocks to see if MSP catchup - await userApi.block.skip(50); + await userApi.block.skip(10); await userApi.docker.restartBspContainer({ containerName: "docker-sh-msp-1" }); - await sleep(50000); + // need to wait for the container to be up again + await sleep(10000); + + // NOTE: + // We shouldn't have to recarete an API but any other attempt 
to reconnect failed + // Also had to guess for the port of MSP 1 + await using newMspApi = await createApi(`ws://127.0.0.1:9777`); + + // Required to trigger out of sync mode + await userApi.rpc.engine.createBlock(true, true); + + await waitFor({ + lambda: async () => + (await newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)).isFileFound + }); + await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); diff --git a/test/util/bspNet/waits.ts b/test/util/bspNet/waits.ts index 4dc4cb098..a0da07089 100644 --- a/test/util/bspNet/waits.ts +++ b/test/util/bspNet/waits.ts @@ -312,6 +312,7 @@ export const waitForBspToCatchUpToChainTip = async ( await sleep(delay); const syncedBestBlock = await syncedApi.rpc.chain.getHeader(); const bspBehindBestBlock = await bspBehindApi.rpc.chain.getHeader(); + assert( syncedBestBlock.hash.toString() === bspBehindBestBlock.hash.toString(), "BSP did not catch up to the chain tip" From ee2c27bc5ffb81f2885fea89231f916be048cf2d Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Tue, 4 Feb 2025 14:52:21 +0100 Subject: [PATCH 05/28] fmt --- .../integration/msp/catch-up-storage.test.ts | 190 +++++++++--------- 1 file changed, 94 insertions(+), 96 deletions(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 651b8794f..e317558f7 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -2,100 +2,98 @@ import assert, { strictEqual } from "node:assert"; import { describeMspNet, shUser, type EnrichedBspApi, sleep, waitFor } from "../../../util"; describeMspNet( - "MSP catching up with chain and volunteering for storage request", - { initialised: false, only: true }, - ({ before, createMsp1Api, it, createUserApi, createApi }) => { - let userApi: EnrichedBspApi; - let mspApi: EnrichedBspApi; - - before(async () => { - userApi = await createUserApi(); - const 
maybeMspApi = await createMsp1Api(); - - assert(maybeMspApi, "MSP API not available"); - mspApi = maybeMspApi; - }); - - it("Network launches and can be queried", async () => { - const userNodePeerId = await userApi.rpc.system.localPeerId(); - strictEqual(userNodePeerId.toString(), userApi.shConsts.NODE_INFOS.user.expectedPeerId); - - const mspNodePeerId = await mspApi.rpc.system.localPeerId(); - strictEqual(mspNodePeerId.toString(), userApi.shConsts.NODE_INFOS.msp1.expectedPeerId); - }); - - it("MSP accepts subsequent storage request for the same file key", async () => { - const source = "res/whatsup.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "trying-things"; - - const newBucketEventEvent = await userApi.createBucket(bucketName); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - assert(newBucketEventDataBlob, "Event doesn't match Type"); - - const { - file_metadata - } = await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - userApi.shConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.docker.pauseBspContainer("docker-sh-msp-1"); - - // We need to wait so it won't try to answer the request storage - await sleep(10000); - - await userApi.block.seal({ - calls: [ - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - destination, - userApi.shConsts.TEST_ARTEFACTS[source].fingerprint, - userApi.shConsts.TEST_ARTEFACTS[source].size, - userApi.shConsts.DUMMY_MSP_ID, - [userApi.shConsts.NODE_INFOS.user.expectedPeerId], - null - ) - ], - signer: shUser - }); - - const { event } = await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); - const newStorageRequestDataBlob = - userApi.events.fileSystem.NewStorageRequest.is(event) && event.data; - assert( - newStorageRequestDataBlob, - "NewStorageRequest event data does not match expected type" - ); - - // Advancing 10 
blocks to see if MSP catchup - await userApi.block.skip(10); - - await userApi.docker.restartBspContainer({ containerName: "docker-sh-msp-1" }); - - // need to wait for the container to be up again - await sleep(10000); - - // NOTE: - // We shouldn't have to recarete an API but any other attempt to reconnect failed - // Also had to guess for the port of MSP 1 - await using newMspApi = await createApi(`ws://127.0.0.1:9777`); - - // Required to trigger out of sync mode - await userApi.rpc.engine.createBlock(true, true); - - await waitFor({ - lambda: async () => - (await newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)).isFileFound - }); - - - await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); - - }); - } + "MSP catching up with chain and volunteering for storage request", + { initialised: false, only: true }, + ({ before, createMsp1Api, it, createUserApi, createApi }) => { + let userApi: EnrichedBspApi; + let mspApi: EnrichedBspApi; + + before(async () => { + userApi = await createUserApi(); + const maybeMspApi = await createMsp1Api(); + + assert(maybeMspApi, "MSP API not available"); + mspApi = maybeMspApi; + }); + + it("Network launches and can be queried", async () => { + const userNodePeerId = await userApi.rpc.system.localPeerId(); + strictEqual(userNodePeerId.toString(), userApi.shConsts.NODE_INFOS.user.expectedPeerId); + + const mspNodePeerId = await mspApi.rpc.system.localPeerId(); + strictEqual(mspNodePeerId.toString(), userApi.shConsts.NODE_INFOS.msp1.expectedPeerId); + }); + + it("MSP accepts subsequent storage request for the same file key", async () => { + const source = "res/whatsup.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "trying-things"; + + const newBucketEventEvent = await userApi.createBucket(bucketName); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + assert(newBucketEventDataBlob, "Event 
doesn't match Type"); + + const { file_metadata } = await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.docker.pauseBspContainer("docker-sh-msp-1"); + + // We need to wait so it won't try to answer the request storage + await sleep(10000); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS[source].fingerprint, + userApi.shConsts.TEST_ARTEFACTS[source].size, + userApi.shConsts.DUMMY_MSP_ID, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + null + ) + ], + signer: shUser + }); + + const { event } = await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + const newStorageRequestDataBlob = + userApi.events.fileSystem.NewStorageRequest.is(event) && event.data; + assert( + newStorageRequestDataBlob, + "NewStorageRequest event data does not match expected type" + ); + + // Advancing 10 blocks to see if MSP catchup + await userApi.block.skip(10); + + await userApi.docker.restartBspContainer({ containerName: "docker-sh-msp-1" }); + + // need to wait for the container to be up again + await sleep(10000); + + // NOTE: + // We shouldn't have to recarete an API but any other attempt to reconnect failed + // Also had to guess for the port of MSP 1 + await using newMspApi = await createApi(`ws://127.0.0.1:9777`); + + // Required to trigger out of sync mode + await userApi.rpc.engine.createBlock(true, true); + + await sleep(10000); + + await waitFor({ + lambda: async () => + (await newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)).isFileFound + }); + + await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); + }); + } ); From 5b4461aaac091108bb92a3d5c10386654b70e7cf Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Wed, 5 Feb 2025 15:48:48 +0100 Subject: [PATCH 06/28] 
change name of the rpc function; incomplete integration test --- client/blockchain-service/src/handler.rs | 4 +--- node/src/tasks/bsp_charge_fees.rs | 2 +- pallets/file-system/runtime-api/src/lib.rs | 5 ++--- pallets/file-system/src/utils.rs | 2 +- runtime/src/apis.rs | 4 ++-- .../integration/msp/catch-up-storage.test.ts | 16 +++++++--------- xcm-simulator/src/storagehub/apis.rs | 4 ++-- 7 files changed, 16 insertions(+), 21 deletions(-) diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index b0d34b4ae..09619e3da 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -1204,11 +1204,9 @@ where let storage_requests: Vec<(H256, StorageRequestMetadata)> = self .client .runtime_api() - .storage_requests_by_msp(block_hash, msp_id) + .unresponded_storage_requests_by_msp(block_hash, msp_id) .unwrap(); - dbg!(&storage_requests); - for (file_key, sr) in storage_requests { self.emit(NewStorageRequest { who: sr.owner, diff --git a/node/src/tasks/bsp_charge_fees.rs b/node/src/tasks/bsp_charge_fees.rs index 70aa88127..24ef2f629 100644 --- a/node/src/tasks/bsp_charge_fees.rs +++ b/node/src/tasks/bsp_charge_fees.rs @@ -272,7 +272,7 @@ where .map_err(|e| anyhow!("Failed to get metadata from Forest: {:?}", e))?; if !user_files.is_empty() { - let (file_key, metadata) = user_files.first().expect("User files is not empty"); + let (file_key, metadata) = user_files.first().expect("User files is not empty"); // NOTE: WHY ONLY THE FIRST ONE ? 
let bucket_id = H256::from_slice(metadata.bucket_id.as_ref()); let location = sp_runtime::BoundedVec::truncate_from(metadata.location.clone()); let owner = insolvent_user.clone(); diff --git a/pallets/file-system/runtime-api/src/lib.rs b/pallets/file-system/runtime-api/src/lib.rs index b195e2311..02a858ee7 100644 --- a/pallets/file-system/runtime-api/src/lib.rs +++ b/pallets/file-system/runtime-api/src/lib.rs @@ -52,7 +52,7 @@ pub enum GenericApplyDeltaEventInfoError { DecodeError, } -/// Error type for the `storage_requests_by_msp`. +/// Error type for the `unresponded_storage_requests_by_msp`. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum StorageRequestsByMSPError { FailedToRetrieveStorageRequests, @@ -75,7 +75,6 @@ sp_api::decl_runtime_apis! { fn query_bsp_confirm_chunks_to_prove_for_file(bsp_id: BackupStorageProviderId, file_key: FileKey) -> Result, QueryBspConfirmChunksToProveForFileError>; fn query_msp_confirm_chunks_to_prove_for_file(msp_id: MainStorageProviderId, file_key: FileKey) -> Result, QueryMspConfirmChunksToProveForFileError>; fn decode_generic_apply_delta_event_info(encoded_event_info: Vec) -> Result; - fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec; - fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)>; + fn unresponded_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)>; } } diff --git a/pallets/file-system/src/utils.rs b/pallets/file-system/src/utils.rs index 1a309ebcd..1c7d08458 100644 --- a/pallets/file-system/src/utils.rs +++ b/pallets/file-system/src/utils.rs @@ -2818,7 +2818,7 @@ where BucketIdFor::::decode(&mut encoded_event_info.as_ref()) } - pub fn storage_requests_by_msp( + pub fn unresponded_storage_requests_by_msp( msp_id: ProviderIdFor, ) -> Vec<(MerkleHash, StorageRequestMetadata)> { // Get the storeage requests for a specific MSP diff --git a/runtime/src/apis.rs b/runtime/src/apis.rs index 
8aa5e6f23..6e4d52440 100644 --- a/runtime/src/apis.rs +++ b/runtime/src/apis.rs @@ -350,8 +350,8 @@ impl_runtime_apis! { FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } - fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { - FileSystem::storage_requests_by_msp(msp_id) + fn unresponded_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { + FileSystem::unresponded_storage_requests_by_msp(msp_id) } } diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index e317558f7..31f5b343a 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -3,7 +3,7 @@ import { describeMspNet, shUser, type EnrichedBspApi, sleep, waitFor } from "../ describeMspNet( "MSP catching up with chain and volunteering for storage request", - { initialised: false, only: true }, + { initialised: false }, ({ before, createMsp1Api, it, createUserApi, createApi }) => { let userApi: EnrichedBspApi; let mspApi: EnrichedBspApi; @@ -79,21 +79,19 @@ describeMspNet( await sleep(10000); // NOTE: - // We shouldn't have to recarete an API but any other attempt to reconnect failed + // We shouldn't have to recreate an API but any other attempt to reconnect failed // Also had to guess for the port of MSP 1 await using newMspApi = await createApi(`ws://127.0.0.1:9777`); // Required to trigger out of sync mode await userApi.rpc.engine.createBlock(true, true); - await sleep(10000); - - await waitFor({ - lambda: async () => - (await newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)).isFileFound - }); + // await waitFor({ + // lambda: async () => + // (await newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)).isFileFound + // }); - await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); + // await 
userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); }); } ); diff --git a/xcm-simulator/src/storagehub/apis.rs b/xcm-simulator/src/storagehub/apis.rs index 6ef104822..37ceb0ecf 100644 --- a/xcm-simulator/src/storagehub/apis.rs +++ b/xcm-simulator/src/storagehub/apis.rs @@ -348,8 +348,8 @@ impl_runtime_apis! { FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } - fn storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { - FileSystem::storage_requests_by_msp(msp_id) + fn unresponded_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { + FileSystem::unresponded_storage_requests_by_msp(msp_id) } } From 327be011b5e63ace286c226fcc82bd6b3441b03f Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Wed, 5 Feb 2025 16:10:19 +0100 Subject: [PATCH 07/28] linter --- test/suites/integration/msp/catch-up-storage.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 31f5b343a..d2a9df054 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -81,7 +81,7 @@ describeMspNet( // NOTE: // We shouldn't have to recreate an API but any other attempt to reconnect failed // Also had to guess for the port of MSP 1 - await using newMspApi = await createApi(`ws://127.0.0.1:9777`); + await using newMspApi = await createApi("ws://127.0.0.1:9777"); // Required to trigger out of sync mode await userApi.rpc.engine.createBlock(true, true); From 95adc492331b7a57e86924fd3090dd3c598f7ed2 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Wed, 5 Feb 2025 16:32:27 +0100 Subject: [PATCH 08/28] typecheck --- test/suites/integration/msp/catch-up-storage.test.ts | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git 
a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index d2a9df054..bf34ba24f 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -1,10 +1,10 @@ import assert, { strictEqual } from "node:assert"; -import { describeMspNet, shUser, type EnrichedBspApi, sleep, waitFor } from "../../../util"; +import { describeMspNet, shUser, type EnrichedBspApi, sleep } from "../../../util"; describeMspNet( "MSP catching up with chain and volunteering for storage request", { initialised: false }, - ({ before, createMsp1Api, it, createUserApi, createApi }) => { + ({ before, createMsp1Api, it, createUserApi }) => { let userApi: EnrichedBspApi; let mspApi: EnrichedBspApi; @@ -35,7 +35,7 @@ describeMspNet( assert(newBucketEventDataBlob, "Event doesn't match Type"); - const { file_metadata } = await userApi.rpc.storagehubclient.loadFileInStorage( + await userApi.rpc.storagehubclient.loadFileInStorage( source, destination, userApi.shConsts.NODE_INFOS.user.AddressId, @@ -56,7 +56,9 @@ describeMspNet( userApi.shConsts.TEST_ARTEFACTS[source].size, userApi.shConsts.DUMMY_MSP_ID, [userApi.shConsts.NODE_INFOS.user.expectedPeerId], - null + { + Basic: null + } ) ], signer: shUser @@ -81,7 +83,7 @@ describeMspNet( // NOTE: // We shouldn't have to recreate an API but any other attempt to reconnect failed // Also had to guess for the port of MSP 1 - await using newMspApi = await createApi("ws://127.0.0.1:9777"); + // await using newMspApi = await createApi("ws://127.0.0.1:9777"); // Required to trigger out of sync mode await userApi.rpc.engine.createBlock(true, true); From 0bd3cb26acb6947fe4aa6c0cf5b3b9617f28b18c Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Thu, 6 Feb 2025 12:57:45 +0100 Subject: [PATCH 09/28] remove sleep --- test/suites/integration/msp/catch-up-storage.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index bf34ba24f..1b8aa0bee 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -45,7 +45,7 @@ describeMspNet( await userApi.docker.pauseBspContainer("docker-sh-msp-1"); // We need to wait so it won't try to answer the request storage - await sleep(10000); + await sleep(5000); await userApi.block.seal({ calls: [ @@ -78,7 +78,7 @@ describeMspNet( await userApi.docker.restartBspContainer({ containerName: "docker-sh-msp-1" }); // need to wait for the container to be up again - await sleep(10000); + // await sleep(5000); // NOTE: // We shouldn't have to recreate an API but any other attempt to reconnect failed @@ -86,7 +86,7 @@ describeMspNet( // await using newMspApi = await createApi("ws://127.0.0.1:9777"); // Required to trigger out of sync mode - await userApi.rpc.engine.createBlock(true, true); + // await userApi.rpc.engine.createBlock(true, true); // await waitFor({ // lambda: async () => From 680bca7dcc58041f82f1e1f9a48adb29589367b2 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Fri, 7 Feb 2025 13:40:27 +0100 Subject: [PATCH 10/28] update comment; change unresponded_ to pending_ function name; --- client/blockchain-service/src/handler.rs | 2 +- node/src/tasks/bsp_charge_fees.rs | 4 +++- pallets/file-system/runtime-api/src/lib.rs | 4 ++-- pallets/file-system/src/utils.rs | 2 +- runtime/src/apis.rs | 4 ++-- xcm-simulator/src/storagehub/apis.rs | 4 ++-- 6 files changed, 11 insertions(+), 9 deletions(-) diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index 09619e3da..8bdd224de 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -1204,7 +1204,7 @@ where let storage_requests: Vec<(H256, StorageRequestMetadata)> = self .client .runtime_api() - 
.unresponded_storage_requests_by_msp(block_hash, msp_id) + .pending_storage_requests_by_msp(block_hash, msp_id) .unwrap(); for (file_key, sr) in storage_requests { diff --git a/node/src/tasks/bsp_charge_fees.rs b/node/src/tasks/bsp_charge_fees.rs index 24ef2f629..91c36ee88 100644 --- a/node/src/tasks/bsp_charge_fees.rs +++ b/node/src/tasks/bsp_charge_fees.rs @@ -272,7 +272,9 @@ where .map_err(|e| anyhow!("Failed to get metadata from Forest: {:?}", e))?; if !user_files.is_empty() { - let (file_key, metadata) = user_files.first().expect("User files is not empty"); // NOTE: WHY ONLY THE FIRST ONE ? + // We only take the first file of the list in order to generate a proof submit it with an extrinsic and then release the lock to process the next file and generate the next proof. + // It is not ideal because it means one extrinsic per file but batch deletion is not yet implemented. + let (file_key, metadata) = user_files.first().expect("User files is not empty"); let bucket_id = H256::from_slice(metadata.bucket_id.as_ref()); let location = sp_runtime::BoundedVec::truncate_from(metadata.location.clone()); let owner = insolvent_user.clone(); diff --git a/pallets/file-system/runtime-api/src/lib.rs b/pallets/file-system/runtime-api/src/lib.rs index 02a858ee7..b1ea653a7 100644 --- a/pallets/file-system/runtime-api/src/lib.rs +++ b/pallets/file-system/runtime-api/src/lib.rs @@ -52,7 +52,7 @@ pub enum GenericApplyDeltaEventInfoError { DecodeError, } -/// Error type for the `unresponded_storage_requests_by_msp`. +/// Error type for the `pending_storage_requests_by_msp`. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum StorageRequestsByMSPError { FailedToRetrieveStorageRequests, @@ -75,6 +75,6 @@ sp_api::decl_runtime_apis! 
{ fn query_bsp_confirm_chunks_to_prove_for_file(bsp_id: BackupStorageProviderId, file_key: FileKey) -> Result, QueryBspConfirmChunksToProveForFileError>; fn query_msp_confirm_chunks_to_prove_for_file(msp_id: MainStorageProviderId, file_key: FileKey) -> Result, QueryMspConfirmChunksToProveForFileError>; fn decode_generic_apply_delta_event_info(encoded_event_info: Vec) -> Result; - fn unresponded_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)>; + fn pending_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)>; } } diff --git a/pallets/file-system/src/utils.rs b/pallets/file-system/src/utils.rs index 1c7d08458..a962104bf 100644 --- a/pallets/file-system/src/utils.rs +++ b/pallets/file-system/src/utils.rs @@ -2818,7 +2818,7 @@ where BucketIdFor::::decode(&mut encoded_event_info.as_ref()) } - pub fn unresponded_storage_requests_by_msp( + pub fn pending_storage_requests_by_msp( msp_id: ProviderIdFor, ) -> Vec<(MerkleHash, StorageRequestMetadata)> { // Get the storeage requests for a specific MSP diff --git a/runtime/src/apis.rs b/runtime/src/apis.rs index 6e4d52440..a6dcbd8bf 100644 --- a/runtime/src/apis.rs +++ b/runtime/src/apis.rs @@ -350,8 +350,8 @@ impl_runtime_apis! { FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } - fn unresponded_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { - FileSystem::unresponded_storage_requests_by_msp(msp_id) + fn pending_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { + FileSystem::pending_storage_requests_by_msp(msp_id) } } diff --git a/xcm-simulator/src/storagehub/apis.rs b/xcm-simulator/src/storagehub/apis.rs index 37ceb0ecf..5248017bb 100644 --- a/xcm-simulator/src/storagehub/apis.rs +++ b/xcm-simulator/src/storagehub/apis.rs @@ -348,8 +348,8 @@ impl_runtime_apis! 
{ FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } - fn unresponded_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { - FileSystem::unresponded_storage_requests_by_msp(msp_id) + fn pending_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { + FileSystem::pending_storage_requests_by_msp(msp_id) } } From aceacadd519434600686753a0de86e4b28ac2ba7 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Wed, 12 Feb 2025 12:18:03 +0100 Subject: [PATCH 11/28] convert Vec of storage requests to HashMap; minor changes; --- client/blockchain-service/src/handler.rs | 13 +++++++++++-- node/src/tasks/bsp_charge_fees.rs | 1 + pallets/file-system/runtime-api/src/lib.rs | 2 +- pallets/file-system/src/utils.rs | 2 +- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index 8bdd224de..321253903 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -45,6 +45,7 @@ use shc_common::{ }, }; use shp_file_metadata::FileKey; +use std::collections::HashMap; use storage_hub_runtime::RuntimeEvent; use crate::{ @@ -1201,12 +1202,20 @@ where info!(target: LOG_TARGET, "Checking for storage requests for this MSP"); - let storage_requests: Vec<(H256, StorageRequestMetadata)> = self + let storage_requests: HashMap = match self .client .runtime_api() .pending_storage_requests_by_msp(block_hash, msp_id) - .unwrap(); + { + Ok(sr) => sr.into_iter().collect(), + Err(_) => { + // If querying for pending storage requests fail, do not try to answer them + warn!(target: LOG_TARGET, "Failed to get pending storage request"); + return; + } + }; + // loop over each pending storage requests to start a new storage request task for the MSP for (file_key, sr) in storage_requests { self.emit(NewStorageRequest { who: sr.owner, diff --git 
a/node/src/tasks/bsp_charge_fees.rs b/node/src/tasks/bsp_charge_fees.rs index 91c36ee88..523c8a94e 100644 --- a/node/src/tasks/bsp_charge_fees.rs +++ b/node/src/tasks/bsp_charge_fees.rs @@ -274,6 +274,7 @@ where if !user_files.is_empty() { // We only take the first file of the list in order to generate a proof submit it with an extrinsic and then release the lock to process the next file and generate the next proof. // It is not ideal because it means one extrinsic per file but batch deletion is not yet implemented. + // TODO: Improve it once batch deletion is implemented. let (file_key, metadata) = user_files.first().expect("User files is not empty"); let bucket_id = H256::from_slice(metadata.bucket_id.as_ref()); let location = sp_runtime::BoundedVec::truncate_from(metadata.location.clone()); diff --git a/pallets/file-system/runtime-api/src/lib.rs b/pallets/file-system/runtime-api/src/lib.rs index b1ea653a7..a3c4bfb21 100644 --- a/pallets/file-system/runtime-api/src/lib.rs +++ b/pallets/file-system/runtime-api/src/lib.rs @@ -54,7 +54,7 @@ pub enum GenericApplyDeltaEventInfoError { /// Error type for the `pending_storage_requests_by_msp`. 
#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] -pub enum StorageRequestsByMSPError { +pub enum StorageRequestsByMspError { FailedToRetrieveStorageRequests, } diff --git a/pallets/file-system/src/utils.rs b/pallets/file-system/src/utils.rs index a962104bf..4b28fe473 100644 --- a/pallets/file-system/src/utils.rs +++ b/pallets/file-system/src/utils.rs @@ -2821,7 +2821,7 @@ where pub fn pending_storage_requests_by_msp( msp_id: ProviderIdFor, ) -> Vec<(MerkleHash, StorageRequestMetadata)> { - // Get the storeage requests for a specific MSP + // Get the storage requests for a specific MSP StorageRequests::::iter() .filter(|(_, metadata)| { if let Some(msp) = metadata.msp { From 43a883d20c43e9ea066bcefba23e92f1b32b7fc6 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Thu, 13 Feb 2025 15:26:09 +0100 Subject: [PATCH 12/28] use BTreeMap from the sp-std lib; remove sleep; --- Cargo.lock | 1 + client/blockchain-service/src/handler.rs | 4 ++-- pallets/file-system/runtime-api/Cargo.toml | 3 ++- pallets/file-system/runtime-api/src/lib.rs | 3 ++- pallets/file-system/src/utils.rs | 3 ++- runtime/src/apis.rs | 3 ++- test/suites/integration/msp/catch-up-storage.test.ts | 8 +++----- xcm-simulator/src/storagehub/apis.rs | 3 ++- 8 files changed, 16 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index edc7e23fe..bd809d98f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7836,6 +7836,7 @@ dependencies = [ "sp-api 34.0.0", "sp-core 34.0.0", "sp-runtime 39.0.5", + "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?branch=stable2409)", ] [[package]] diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index 321253903..2159c50b8 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -1202,12 +1202,12 @@ where info!(target: LOG_TARGET, "Checking for storage requests for this MSP"); - let storage_requests: HashMap = match self + let 
storage_requests: BTreeMap = match self .client .runtime_api() .pending_storage_requests_by_msp(block_hash, msp_id) { - Ok(sr) => sr.into_iter().collect(), + Ok(sr) => sr, Err(_) => { // If querying for pending storage requests fail, do not try to answer them warn!(target: LOG_TARGET, "Failed to get pending storage request"); diff --git a/pallets/file-system/runtime-api/Cargo.toml b/pallets/file-system/runtime-api/Cargo.toml index 0a83f4a89..d2148f3af 100644 --- a/pallets/file-system/runtime-api/Cargo.toml +++ b/pallets/file-system/runtime-api/Cargo.toml @@ -20,7 +20,8 @@ scale-info = { workspace = true } sp-api = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } +sp-std = { workspace = true } [features] default = ["std"] -std = ["codec/std", "sp-api/std", "sp-runtime/std"] +std = ["codec/std", "sp-api/std", "sp-runtime/std", "sp-std/std"] diff --git a/pallets/file-system/runtime-api/src/lib.rs b/pallets/file-system/runtime-api/src/lib.rs index a3c4bfb21..30c566e41 100644 --- a/pallets/file-system/runtime-api/src/lib.rs +++ b/pallets/file-system/runtime-api/src/lib.rs @@ -5,6 +5,7 @@ use scale_info::prelude::vec::Vec; use scale_info::TypeInfo; use sp_core::H256; use sp_runtime::RuntimeDebug; +use sp_std::collections::btree_map::BTreeMap; /// Error type for the `is_storage_request_open_to_volunteers` runtime API call. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] @@ -75,6 +76,6 @@ sp_api::decl_runtime_apis! 
{ fn query_bsp_confirm_chunks_to_prove_for_file(bsp_id: BackupStorageProviderId, file_key: FileKey) -> Result, QueryBspConfirmChunksToProveForFileError>; fn query_msp_confirm_chunks_to_prove_for_file(msp_id: MainStorageProviderId, file_key: FileKey) -> Result, QueryMspConfirmChunksToProveForFileError>; fn decode_generic_apply_delta_event_info(encoded_event_info: Vec) -> Result; - fn pending_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)>; + fn pending_storage_requests_by_msp(msp_id: MainStorageProviderId) -> BTreeMap; } } diff --git a/pallets/file-system/src/utils.rs b/pallets/file-system/src/utils.rs index 4b28fe473..9ba75bce8 100644 --- a/pallets/file-system/src/utils.rs +++ b/pallets/file-system/src/utils.rs @@ -34,6 +34,7 @@ use shp_traits::{ ReadBucketsInterface, ReadProvidersInterface, ReadStorageProvidersInterface, ReadUserSolvencyInterface, TrieAddMutation, TrieRemoveMutation, }; +use sp_std::collections::btree_map::BTreeMap; use crate::{ pallet, @@ -2820,7 +2821,7 @@ where pub fn pending_storage_requests_by_msp( msp_id: ProviderIdFor, - ) -> Vec<(MerkleHash, StorageRequestMetadata)> { + ) -> BTreeMap, StorageRequestMetadata> { // Get the storage requests for a specific MSP StorageRequests::::iter() .filter(|(_, metadata)| { diff --git a/runtime/src/apis.rs b/runtime/src/apis.rs index a6dcbd8bf..471ec4765 100644 --- a/runtime/src/apis.rs +++ b/runtime/src/apis.rs @@ -25,6 +25,7 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, ExtrinsicInclusionMode, }; +use sp_std::collections::btree_map::BTreeMap; use sp_std::prelude::Vec; use sp_version::RuntimeVersion; use xcm::{ @@ -350,7 +351,7 @@ impl_runtime_apis! 
{ FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } - fn pending_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { + fn pending_storage_requests_by_msp(msp_id: MainStorageProviderId) -> BTreeMap> { FileSystem::pending_storage_requests_by_msp(msp_id) } } diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 1b8aa0bee..ddd5b3938 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -29,6 +29,9 @@ describeMspNet( const destination = "test/smile.jpg"; const bucketName = "trying-things"; + // Stop the msp container so it will be behind when we restart the node. + await userApi.docker.pauseBspContainer("docker-sh-msp-1"); + const newBucketEventEvent = await userApi.createBucket(bucketName); const newBucketEventDataBlob = userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; @@ -42,11 +45,6 @@ describeMspNet( newBucketEventDataBlob.bucketId ); - await userApi.docker.pauseBspContainer("docker-sh-msp-1"); - - // We need to wait so it won't try to answer the request storage - await sleep(5000); - await userApi.block.seal({ calls: [ userApi.tx.fileSystem.issueStorageRequest( diff --git a/xcm-simulator/src/storagehub/apis.rs b/xcm-simulator/src/storagehub/apis.rs index 5248017bb..888c4abc2 100644 --- a/xcm-simulator/src/storagehub/apis.rs +++ b/xcm-simulator/src/storagehub/apis.rs @@ -26,6 +26,7 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, ExtrinsicInclusionMode, }; +use sp_std::collections::btree_map::BTreeMap; use sp_std::prelude::Vec; use sp_version::RuntimeVersion; use xcm_runtime_apis::{ @@ -348,7 +349,7 @@ impl_runtime_apis! 
{ FileSystem::decode_generic_apply_delta_event_info(encoded_event_info) } - fn pending_storage_requests_by_msp(msp_id: MainStorageProviderId) -> Vec<(H256, StorageRequestMetadata)> { + fn pending_storage_requests_by_msp(msp_id: MainStorageProviderId) -> BTreeMap> { FileSystem::pending_storage_requests_by_msp(msp_id) } } From 5b6ecc4398d247a9625e8d2ffad1e1e22cb555d3 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Thu, 13 Feb 2025 15:48:41 +0100 Subject: [PATCH 13/28] typegen --- client/blockchain-service/src/handler.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index 2159c50b8..f48e72720 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -45,7 +45,6 @@ use shc_common::{ }, }; use shp_file_metadata::FileKey; -use std::collections::HashMap; use storage_hub_runtime::RuntimeEvent; use crate::{ From 302a0fff89de0206886c7431beb79a5328acf000 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Thu, 13 Feb 2025 16:50:54 +0100 Subject: [PATCH 14/28] remove unused imports --- runtime/src/apis.rs | 3 +-- test/suites/integration/msp/catch-up-storage.test.ts | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/runtime/src/apis.rs b/runtime/src/apis.rs index 471ec4765..7c35cd5d1 100644 --- a/runtime/src/apis.rs +++ b/runtime/src/apis.rs @@ -25,8 +25,7 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, ExtrinsicInclusionMode, }; -use sp_std::collections::btree_map::BTreeMap; -use sp_std::prelude::Vec; +use sp_std::{collections::btree_map::BTreeMap, prelude::Vec}; use sp_version::RuntimeVersion; use xcm::{ latest::prelude::AssetId, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm, diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index ddd5b3938..1cb971dd6 100644 --- 
a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -1,5 +1,5 @@ import assert, { strictEqual } from "node:assert"; -import { describeMspNet, shUser, type EnrichedBspApi, sleep } from "../../../util"; +import { describeMspNet, shUser, type EnrichedBspApi } from "../../../util"; describeMspNet( "MSP catching up with chain and volunteering for storage request", From 9dc88465a8f4cd867c2e9aa38466179ac1edc58c Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Wed, 19 Feb 2025 15:41:37 +0100 Subject: [PATCH 15/28] wip --- node/src/tasks/user_sends_file.rs | 9 ++++++--- .../integration/msp/catch-up-storage.test.ts | 20 +++++++++---------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/node/src/tasks/user_sends_file.rs b/node/src/tasks/user_sends_file.rs index 4e24764dd..ffa369c5c 100644 --- a/node/src/tasks/user_sends_file.rs +++ b/node/src/tasks/user_sends_file.rs @@ -14,6 +14,7 @@ use shp_constants::FILE_CHUNK_SIZE; use shp_file_metadata::ChunkId; use sp_core::H256; use sp_runtime::AccountId32; +use tokio::time::Timeout; use crate::services::{handler::StorageHubHandler, types::ShNodeType}; @@ -305,6 +306,7 @@ where break; } Err(RequestError::RequestFailure(RequestFailure::Refused)) + | Err(RequestError::RequestFailure(RequestFailure::Network(_))) if retry_attempts < 3 => { warn!( @@ -316,7 +318,7 @@ where retry_attempts += 1; // Wait for a short time before retrying - tokio::time::sleep(std::time::Duration::from_secs(1)).await; + tokio::time::sleep(std::time::Duration::from_secs(60)).await; } Err(RequestError::RequestFailure(RequestFailure::Refused)) => { // Return an error if the provider refused to answer. 
@@ -398,7 +400,8 @@ where break; } Err(RequestError::RequestFailure(RequestFailure::Refused)) - if retry_attempts < 3 => + | Err(RequestError::RequestFailure(RequestFailure::Network(_))) + if retry_attempts < 10 => { warn!( target: LOG_TARGET, @@ -409,7 +412,7 @@ where retry_attempts += 1; // Wait for a short time before retrying - tokio::time::sleep(std::time::Duration::from_secs(1)).await; + tokio::time::sleep(std::time::Duration::from_secs(60)).await; } Err(RequestError::RequestFailure(RequestFailure::Refused)) => { // Return an error if the provider refused to answer. diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 1cb971dd6..7bf8715f5 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -1,10 +1,10 @@ import assert, { strictEqual } from "node:assert"; -import { describeMspNet, shUser, type EnrichedBspApi } from "../../../util"; +import { describeMspNet, shUser, type EnrichedBspApi, waitFor } from "../../../util"; describeMspNet( "MSP catching up with chain and volunteering for storage request", - { initialised: false }, - ({ before, createMsp1Api, it, createUserApi }) => { + { initialised: false, only: true }, + ({ before, createMsp1Api, it, createUserApi, createApi }) => { let userApi: EnrichedBspApi; let mspApi: EnrichedBspApi; @@ -81,17 +81,17 @@ describeMspNet( // NOTE: // We shouldn't have to recreate an API but any other attempt to reconnect failed // Also had to guess for the port of MSP 1 - // await using newMspApi = await createApi("ws://127.0.0.1:9777"); + await using newMspApi = await createApi("ws://127.0.0.1:9777"); // Required to trigger out of sync mode - // await userApi.rpc.engine.createBlock(true, true); + await userApi.rpc.engine.createBlock(true, true); - // await waitFor({ - // lambda: async () => - // (await 
newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)).isFileFound - // }); + await waitFor({ + lambda: async () => + (await newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)).isFileFound + }); - // await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); + await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); }); } ); From dff7e02cd013711ec25720f5e65a559a225131d0 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Thu, 20 Feb 2025 13:48:28 +0100 Subject: [PATCH 16/28] wip --- node/src/tasks/user_sends_file.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/src/tasks/user_sends_file.rs b/node/src/tasks/user_sends_file.rs index ffa369c5c..7d2023e03 100644 --- a/node/src/tasks/user_sends_file.rs +++ b/node/src/tasks/user_sends_file.rs @@ -14,7 +14,6 @@ use shp_constants::FILE_CHUNK_SIZE; use shp_file_metadata::ChunkId; use sp_core::H256; use sp_runtime::AccountId32; -use tokio::time::Timeout; use crate::services::{handler::StorageHubHandler, types::ShNodeType}; @@ -380,7 +379,7 @@ where .upload_request(peer_id, file_key.as_ref().into(), proof.clone(), None) .await; - match upload_response { + match upload_response.as_ref() { Ok(r) => { debug!( target: LOG_TARGET, @@ -403,6 +402,7 @@ where | Err(RequestError::RequestFailure(RequestFailure::Network(_))) if retry_attempts < 10 => { + dbg!(upload_response.as_ref()); warn!( target: LOG_TARGET, "Final batch upload rejected by peer {:?}, retrying... 
(attempt {})", From a09b3b2a80fedc17947b07bdd6fc0b17a07e5a99 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Fri, 21 Feb 2025 12:04:36 +0100 Subject: [PATCH 17/28] add retry logic and complete integration test; --- client/blockchain-service/src/handler.rs | 5 ++ node/src/tasks/user_sends_file.rs | 51 +++++++++++++++---- .../integration/msp/catch-up-storage.test.ts | 14 +++-- 3 files changed, 57 insertions(+), 13 deletions(-) diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index f48e72720..f5e41cadd 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -1214,6 +1214,11 @@ where } }; + info!( + "We have {} pending storage requests", + storage_requests.len() + ); + // loop over each pending storage requests to start a new storage request task for the MSP for (file_key, sr) in storage_requests { self.emit(NewStorageRequest { diff --git a/node/src/tasks/user_sends_file.rs b/node/src/tasks/user_sends_file.rs index 7d2023e03..56948a2ec 100644 --- a/node/src/tasks/user_sends_file.rs +++ b/node/src/tasks/user_sends_file.rs @@ -305,21 +305,38 @@ where break; } Err(RequestError::RequestFailure(RequestFailure::Refused)) - | Err(RequestError::RequestFailure(RequestFailure::Network(_))) if retry_attempts < 3 => { warn!( target: LOG_TARGET, - "Batch upload rejected by peer {:?}, retrying... (attempt {})", + "Final batch upload rejected by peer {:?}, retrying... (attempt {})", peer_id, retry_attempts + 1 ); retry_attempts += 1; // Wait for a short time before retrying - tokio::time::sleep(std::time::Duration::from_secs(60)).await; + tokio::time::sleep(std::time::Duration::from_secs(1)).await; } - Err(RequestError::RequestFailure(RequestFailure::Refused)) => { + Err(RequestError::RequestFailure(RequestFailure::Network(_))) + if retry_attempts < 10 => + { + warn!( + target: LOG_TARGET, + "Unable to upload final batch to peer {:?}, retrying... 
(attempt {})", + peer_id, + retry_attempts + 1 + ); + retry_attempts += 1; + + // Wait a bit for the MSP to be online + self.storage_hub_handler + .blockchain + .wait_for_block(10) + .await?; + } + Err(RequestError::RequestFailure(RequestFailure::Refused)) + | Err(RequestError::RequestFailure(RequestFailure::Network(_))) => { // Return an error if the provider refused to answer. return Err(anyhow::anyhow!("Failed to send file {:?}", file_key)); } @@ -399,10 +416,8 @@ where break; } Err(RequestError::RequestFailure(RequestFailure::Refused)) - | Err(RequestError::RequestFailure(RequestFailure::Network(_))) - if retry_attempts < 10 => + if retry_attempts < 3 => { - dbg!(upload_response.as_ref()); warn!( target: LOG_TARGET, "Final batch upload rejected by peer {:?}, retrying... (attempt {})", @@ -412,9 +427,27 @@ where retry_attempts += 1; // Wait for a short time before retrying - tokio::time::sleep(std::time::Duration::from_secs(60)).await; + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + Err(RequestError::RequestFailure(RequestFailure::Network(_))) + if retry_attempts < 10 => + { + warn!( + target: LOG_TARGET, + "Unable to upload final batch to peer {:?}, retrying... (attempt {})", + peer_id, + retry_attempts + 1 + ); + retry_attempts += 1; + + // Wait a bit for the MSP to be online + self.storage_hub_handler + .blockchain + .wait_for_block(10) + .await?; } - Err(RequestError::RequestFailure(RequestFailure::Refused)) => { + Err(RequestError::RequestFailure(RequestFailure::Refused)) + | Err(RequestError::RequestFailure(RequestFailure::Network(_))) => { // Return an error if the provider refused to answer. 
return Err(anyhow::anyhow!("Failed to send file {:?}", file_key)); } diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 7bf8715f5..7664515f6 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -1,9 +1,9 @@ import assert, { strictEqual } from "node:assert"; -import { describeMspNet, shUser, type EnrichedBspApi, waitFor } from "../../../util"; +import { describeMspNet, shUser, type EnrichedBspApi, waitFor, sleep } from "../../../util"; describeMspNet( "MSP catching up with chain and volunteering for storage request", - { initialised: false, only: true }, + { initialised: false }, ({ before, createMsp1Api, it, createUserApi, createApi }) => { let userApi: EnrichedBspApi; let mspApi: EnrichedBspApi; @@ -75,8 +75,10 @@ describeMspNet( await userApi.docker.restartBspContainer({ containerName: "docker-sh-msp-1" }); - // need to wait for the container to be up again - // await sleep(5000); + await userApi.docker.waitForLog({ searchString: "💾 StorageHub's Blockchain Service starting up!", containerName: "docker-sh-msp-1" }); + + // IMPORTANT!!! DO NOT REMOVE!!! Need to wait for the container to be up again. + await sleep(10000); // NOTE: // We shouldn't have to recreate an API but any other attempt to reconnect failed @@ -86,11 +88,15 @@ describeMspNet( // Required to trigger out of sync mode await userApi.rpc.engine.createBlock(true, true); + await userApi.docker.waitForLog({ searchString: "🥱 Handling coming out of sync mode", containerName: "docker-sh-msp-1" }); + await userApi.docker.waitForLog({ searchString: 'File upload complete. 
Peer PeerId("12D3KooWSUvz8QM5X4tfAaSLErAZjR2puojo16pULBHyqTMGKtNV") has the entire file', containerName: "docker-sh-user-1", timeout: 120000 }); + await waitFor({ lambda: async () => (await newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)).isFileFound }); + await userApi.block.seal(); await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); }); } From d046d26dbb00ba9ab5887d0736027851ba587ac4 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Fri, 21 Feb 2025 12:45:31 +0100 Subject: [PATCH 18/28] fmt --- .../integration/msp/catch-up-storage.test.ts | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 7664515f6..7727e3109 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -75,7 +75,10 @@ describeMspNet( await userApi.docker.restartBspContainer({ containerName: "docker-sh-msp-1" }); - await userApi.docker.waitForLog({ searchString: "💾 StorageHub's Blockchain Service starting up!", containerName: "docker-sh-msp-1" }); + await userApi.docker.waitForLog({ + searchString: "💾 StorageHub's Blockchain Service starting up!", + containerName: "docker-sh-msp-1" + }); // IMPORTANT!!! DO NOT REMOVE!!! Need to wait for the container to be up again. await sleep(10000); @@ -88,8 +91,16 @@ describeMspNet( // Required to trigger out of sync mode await userApi.rpc.engine.createBlock(true, true); - await userApi.docker.waitForLog({ searchString: "🥱 Handling coming out of sync mode", containerName: "docker-sh-msp-1" }); - await userApi.docker.waitForLog({ searchString: 'File upload complete. 
Peer PeerId("12D3KooWSUvz8QM5X4tfAaSLErAZjR2puojo16pULBHyqTMGKtNV") has the entire file', containerName: "docker-sh-user-1", timeout: 120000 }); + await userApi.docker.waitForLog({ + searchString: "🥱 Handling coming out of sync mode", + containerName: "docker-sh-msp-1" + }); + await userApi.docker.waitForLog({ + searchString: + 'File upload complete. Peer PeerId("12D3KooWSUvz8QM5X4tfAaSLErAZjR2puojo16pULBHyqTMGKtNV") has the entire file', + containerName: "docker-sh-user-1", + timeout: 120000 + }); await waitFor({ lambda: async () => From 6eb9df6486d2aade094b08488522612eb428ac04 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Fri, 21 Feb 2025 13:53:27 +0100 Subject: [PATCH 19/28] update integration tests --- test/suites/integration/msp/catch-up-storage.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 7727e3109..5b51dccf9 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -24,13 +24,13 @@ describeMspNet( strictEqual(mspNodePeerId.toString(), userApi.shConsts.NODE_INFOS.msp1.expectedPeerId); }); - it("MSP accepts subsequent storage request for the same file key", async () => { + it("MSP accept storage request after catching up with blockchain and user properly retry sending file", async () => { const source = "res/whatsup.jpg"; const destination = "test/smile.jpg"; const bucketName = "trying-things"; // Stop the msp container so it will be behind when we restart the node. 
- await userApi.docker.pauseBspContainer("docker-sh-msp-1"); + await userApi.docker.pauseContainer("docker-sh-msp-1"); const newBucketEventEvent = await userApi.createBucket(bucketName); const newBucketEventDataBlob = @@ -73,7 +73,7 @@ describeMspNet( // Advancing 10 blocks to see if MSP catchup await userApi.block.skip(10); - await userApi.docker.restartBspContainer({ containerName: "docker-sh-msp-1" }); + await userApi.docker.restartContainer({ containerName: "docker-sh-msp-1" }); await userApi.docker.waitForLog({ searchString: "💾 StorageHub's Blockchain Service starting up!", From 8ca37495acd1c1b5a83147d04cf91c41f0ebc158 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Fri, 21 Feb 2025 16:19:41 +0100 Subject: [PATCH 20/28] created new utility function wait_for_num_blocks that wait for an amount of blocks to pass; fix test; --- client/blockchain-service/src/commands.rs | 20 +++++++++++++++++ client/blockchain-service/src/handler.rs | 22 +++++++++++++++++++ node/src/tasks/user_sends_file.rs | 12 ++++++---- .../integration/msp/catch-up-storage.test.ts | 6 +++-- 4 files changed, 54 insertions(+), 6 deletions(-) diff --git a/client/blockchain-service/src/commands.rs b/client/blockchain-service/src/commands.rs index c68382d8f..2a5e98de2 100644 --- a/client/blockchain-service/src/commands.rs +++ b/client/blockchain-service/src/commands.rs @@ -62,6 +62,10 @@ pub enum BlockchainServiceCommand { block_number: BlockNumber, callback: tokio::sync::oneshot::Sender>, }, + WaitForNumBlocks { + number_of_blocks: BlockNumber, + callback: tokio::sync::oneshot::Sender>, + }, WaitForTick { tick_number: TickNumber, callback: @@ -226,6 +230,9 @@ pub trait BlockchainServiceInterface { /// Wait for a block number. async fn wait_for_block(&self, block_number: BlockNumber) -> Result<()>; + /// Wait for a number blocks to pass. + async fn wait_for_num_blocks(&self, number_of_blocks: BlockNumber) -> Result<()>; + /// Wait for a tick number. 
async fn wait_for_tick(&self, tick_number: TickNumber) -> Result<(), ApiError>; @@ -459,6 +466,19 @@ where Ok(()) } + async fn wait_for_num_blocks(&self, number_of_blocks: BlockNumber) -> Result<()> { + let (callback, rx) = tokio::sync::oneshot::channel(); + // Build command to send to blockchain service. + let message = BlockchainServiceCommand::WaitForNumBlocks { + number_of_blocks, + callback, + }; + self.send(message).await; + let rx = rx.await.expect("Failed to receive response from BlockchainService. Probably means BlockchainService has crashed."); + rx.await.expect("Failed to wait for block"); + Ok(()) + } + async fn wait_for_tick(&self, tick_number: TickNumber) -> Result<(), ApiError> { let (callback, rx) = tokio::sync::oneshot::channel(); // Build command to send to blockchain service. diff --git a/client/blockchain-service/src/handler.rs b/client/blockchain-service/src/handler.rs index f5e41cadd..fb1a51fbd 100644 --- a/client/blockchain-service/src/handler.rs +++ b/client/blockchain-service/src/handler.rs @@ -386,6 +386,28 @@ where } } } + BlockchainServiceCommand::WaitForNumBlocks { + number_of_blocks, + callback, + } => { + let current_block_number = self.client.info().best_number; + + let (tx, rx) = tokio::sync::oneshot::channel(); + + self.wait_for_block_request_by_number + .entry(current_block_number + number_of_blocks) + .or_insert_with(Vec::new) + .push(tx); + + match callback.send(rx) { + Ok(_) => { + trace!(target: LOG_TARGET, "Block message receiver sent successfully"); + } + Err(e) => { + error!(target: LOG_TARGET, "Failed to send block message receiver: {:?}", e); + } + } + } BlockchainServiceCommand::WaitForTick { tick_number, callback, diff --git a/node/src/tasks/user_sends_file.rs b/node/src/tasks/user_sends_file.rs index 56948a2ec..b1eecd3d3 100644 --- a/node/src/tasks/user_sends_file.rs +++ b/node/src/tasks/user_sends_file.rs @@ -319,6 +319,7 @@ where tokio::time::sleep(std::time::Duration::from_secs(1)).await; } 
Err(RequestError::RequestFailure(RequestFailure::Network(_))) + | Err(RequestError::RequestFailure(RequestFailure::NotConnected)) if retry_attempts < 10 => { warn!( @@ -332,11 +333,12 @@ where // Wait a bit for the MSP to be online self.storage_hub_handler .blockchain - .wait_for_block(10) + .wait_for_num_blocks(5) .await?; } Err(RequestError::RequestFailure(RequestFailure::Refused)) - | Err(RequestError::RequestFailure(RequestFailure::Network(_))) => { + | Err(RequestError::RequestFailure(RequestFailure::Network(_))) + | Err(RequestError::RequestFailure(RequestFailure::NotConnected)) => { // Return an error if the provider refused to answer. return Err(anyhow::anyhow!("Failed to send file {:?}", file_key)); } @@ -430,6 +432,7 @@ where tokio::time::sleep(std::time::Duration::from_secs(1)).await; } Err(RequestError::RequestFailure(RequestFailure::Network(_))) + | Err(RequestError::RequestFailure(RequestFailure::NotConnected)) if retry_attempts < 10 => { warn!( @@ -443,11 +446,12 @@ where // Wait a bit for the MSP to be online self.storage_hub_handler .blockchain - .wait_for_block(10) + .wait_for_num_blocks(5) .await?; } Err(RequestError::RequestFailure(RequestFailure::Refused)) - | Err(RequestError::RequestFailure(RequestFailure::Network(_))) => { + | Err(RequestError::RequestFailure(RequestFailure::Network(_))) + | Err(RequestError::RequestFailure(RequestFailure::NotConnected)) => { // Return an error if the provider refused to answer. 
return Err(anyhow::anyhow!("Failed to send file {:?}", file_key)); } diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 5b51dccf9..b44444fef 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -3,7 +3,7 @@ import { describeMspNet, shUser, type EnrichedBspApi, waitFor, sleep } from "../ describeMspNet( "MSP catching up with chain and volunteering for storage request", - { initialised: false }, + { initialised: false, only: true }, ({ before, createMsp1Api, it, createUserApi, createApi }) => { let userApi: EnrichedBspApi; let mspApi: EnrichedBspApi; @@ -95,11 +95,13 @@ describeMspNet( searchString: "🥱 Handling coming out of sync mode", containerName: "docker-sh-msp-1" }); + + await userApi.block.skip(4); // user retry every 5 blocks. The one we created before and this one + await userApi.docker.waitForLog({ searchString: 'File upload complete. Peer PeerId("12D3KooWSUvz8QM5X4tfAaSLErAZjR2puojo16pULBHyqTMGKtNV") has the entire file', containerName: "docker-sh-user-1", - timeout: 120000 }); await waitFor({ From db47403acc86f36753174682979ac1355e0572f0 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Fri, 21 Feb 2025 16:21:47 +0100 Subject: [PATCH 21/28] linter --- test/suites/integration/msp/catch-up-storage.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index b44444fef..5ae5982df 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -101,7 +101,7 @@ describeMspNet( await userApi.docker.waitForLog({ searchString: 'File upload complete. 
Peer PeerId("12D3KooWSUvz8QM5X4tfAaSLErAZjR2puojo16pULBHyqTMGKtNV") has the entire file', - containerName: "docker-sh-user-1", + containerName: "docker-sh-user-1" }); await waitFor({ From 51039c4cd6721a74bef6114c413e200f7c5e2445 Mon Sep 17 00:00:00 2001 From: Facundo Farall <37149322+ffarall@users.noreply.github.com> Date: Wed, 26 Feb 2025 11:00:33 -0300 Subject: [PATCH 22/28] feat: :construction: Trying to fix this, passing it to lola --- .../integration/msp/catch-up-storage.test.ts | 39 ++++++++++++------- test/util/bspNet/docker.ts | 15 +++++++ 2 files changed, 40 insertions(+), 14 deletions(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 5ae5982df..130fb58d7 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -1,5 +1,12 @@ import assert, { strictEqual } from "node:assert"; -import { describeMspNet, shUser, type EnrichedBspApi, waitFor, sleep } from "../../../util"; +import { + describeMspNet, + shUser, + type EnrichedBspApi, + waitFor, + sleep, + clearLogs +} from "../../../util"; describeMspNet( "MSP catching up with chain and volunteering for storage request", @@ -30,6 +37,8 @@ describeMspNet( const bucketName = "trying-things"; // Stop the msp container so it will be behind when we restart the node. + // TODO: clearLogs is not working, fix it. + await clearLogs({ containerName: "docker-sh-msp-1" }); await userApi.docker.pauseContainer("docker-sh-msp-1"); const newBucketEventEvent = await userApi.createBucket(bucketName); @@ -73,23 +82,24 @@ describeMspNet( // Advancing 10 blocks to see if MSP catchup await userApi.block.skip(10); - await userApi.docker.restartContainer({ containerName: "docker-sh-msp-1" }); + // Closing mspApi gracefully before restarting the container + // IMPORTANT: If this is not done, the api connection cannot close properly and the test + // runner will hang. 
+ await mspApi.disconnect(); - await userApi.docker.waitForLog({ - searchString: "💾 StorageHub's Blockchain Service starting up!", - containerName: "docker-sh-msp-1" - }); + // Restarting the MSP container. This will start the Substrate node from scratch. + await userApi.docker.restartContainer({ containerName: "docker-sh-msp-1" }); - // IMPORTANT!!! DO NOT REMOVE!!! Need to wait for the container to be up again. - await sleep(10000); + // TODO: Wait for the container logs of starting up - // NOTE: - // We shouldn't have to recreate an API but any other attempt to reconnect failed - // Also had to guess for the port of MSP 1 - await using newMspApi = await createApi("ws://127.0.0.1:9777"); + // Creating a new MSP API to connect to the newly restarted container. + // TODO: Make this prettier + const maybeMspApi = await createApi("ws://127.0.0.1:9777"); + assert(maybeMspApi, "MSP API not available"); + const newMspApi = maybeMspApi; - // Required to trigger out of sync mode - await userApi.rpc.engine.createBlock(true, true); + // Waiting for the MSP node to be in sync with the chain. 
+ await userApi.wait.bspCatchUpToChainTip(newMspApi); await userApi.docker.waitForLog({ searchString: "🥱 Handling coming out of sync mode", @@ -111,6 +121,7 @@ describeMspNet( await userApi.block.seal(); await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); + console.log("HELLO THERE"); }); } ); diff --git a/test/util/bspNet/docker.ts b/test/util/bspNet/docker.ts index a0d4d2ec3..2b6fbac33 100644 --- a/test/util/bspNet/docker.ts +++ b/test/util/bspNet/docker.ts @@ -182,6 +182,21 @@ export const restartContainer = async (options: { await container.restart(); }; +export const clearLogs = async (options: { + containerName: string; +}) => { + const docker = new Docker(); + const container = docker.getContainer(options.containerName); + const exec = await container.exec({ + AttachStdout: true, + AttachStderr: true, + Cmd: ["sh", "-c", `> /var/lib/docker/containers/${options.containerName}/*.log`] + }); + + await exec.start({}); + console.log(`Logs cleared for container ${options.containerName}`); +}; + export const resumeContainer = async (options: { containerName: string; }) => { From b17af7ffef6489b67d2810fd71f66d32f3ec5095 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Wed, 26 Feb 2025 18:26:28 +0100 Subject: [PATCH 23/28] fix test --- .../integration/msp/catch-up-storage.test.ts | 20 ++++++++++++------- test/util/bspNet/docker.ts | 2 +- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 130fb58d7..16fe374b1 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -38,7 +38,7 @@ describeMspNet( // Stop the msp container so it will be behind when we restart the node. // TODO: clearLogs is not working, fix it. 
- await clearLogs({ containerName: "docker-sh-msp-1" }); + // await clearLogs({ containerName: "docker-sh-msp-1" }); await userApi.docker.pauseContainer("docker-sh-msp-1"); const newBucketEventEvent = await userApi.createBucket(bucketName); @@ -91,15 +91,19 @@ describeMspNet( await userApi.docker.restartContainer({ containerName: "docker-sh-msp-1" }); // TODO: Wait for the container logs of starting up + await userApi.docker.waitForLog({ + searchString: "💤 Idle (3 peers)", + containerName: "docker-sh-msp-1" + }); + + // Doesn't work without this because there is no log that tell us when the websocket is ready + await sleep(5000); // Creating a new MSP API to connect to the newly restarted container. - // TODO: Make this prettier - const maybeMspApi = await createApi("ws://127.0.0.1:9777"); - assert(maybeMspApi, "MSP API not available"); - const newMspApi = maybeMspApi; + const newMspApi = await createApi(`ws://127.0.0.1:${userApi.shConsts.NODE_INFOS.msp1.port}`); // Waiting for the MSP node to be in sync with the chain. - await userApi.wait.bspCatchUpToChainTip(newMspApi); + await userApi.rpc.engine.createBlock(true, true); await userApi.docker.waitForLog({ searchString: "🥱 Handling coming out of sync mode", @@ -121,7 +125,9 @@ describeMspNet( await userApi.block.seal(); await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); - console.log("HELLO THERE"); + + // IMPORTANT!! 
Without this the test suite never finishes + newMspApi.disconnect(); }); } ); diff --git a/test/util/bspNet/docker.ts b/test/util/bspNet/docker.ts index 2b6fbac33..04f2dd839 100644 --- a/test/util/bspNet/docker.ts +++ b/test/util/bspNet/docker.ts @@ -253,7 +253,7 @@ export const waitForLog = async (options: { const container = docker.getContainer(options.containerName); container.logs( - { follow: true, stdout: true, stderr: true, tail: undefined, timestamps: false }, + { follow: true, stdout: true, stderr: true, tail: 10, timestamps: false }, // set tail default to 10 to get the 10 last lines of logs printed (err, stream) => { if (err) { return reject(err); From 643be9849602b6d1c1acf69f13d90f18297761d6 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Wed, 26 Feb 2025 18:36:42 +0100 Subject: [PATCH 24/28] remove unused import --- test/suites/integration/msp/catch-up-storage.test.ts | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 16fe374b1..817dc7fa6 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -1,12 +1,5 @@ import assert, { strictEqual } from "node:assert"; -import { - describeMspNet, - shUser, - type EnrichedBspApi, - waitFor, - sleep, - clearLogs -} from "../../../util"; +import { describeMspNet, shUser, type EnrichedBspApi, waitFor, sleep } from "../../../util"; describeMspNet( "MSP catching up with chain and volunteering for storage request", From b9e8149d777a131bd90ec4bf4646ee469e99880c Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Thu, 27 Feb 2025 14:36:32 +0100 Subject: [PATCH 25/28] make the tail value an option and default back to undefined --- test/suites/integration/msp/catch-up-storage.test.ts | 3 ++- test/util/bspNet/docker.ts | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git 
a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 817dc7fa6..367132971 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -86,7 +86,8 @@ describeMspNet( // TODO: Wait for the container logs of starting up await userApi.docker.waitForLog({ searchString: "💤 Idle (3 peers)", - containerName: "docker-sh-msp-1" + containerName: "docker-sh-msp-1", + tail: 10 }); // Doesn't work without this because there is no log that tell us when the websocket is ready diff --git a/test/util/bspNet/docker.ts b/test/util/bspNet/docker.ts index 04f2dd839..6cdefaa0e 100644 --- a/test/util/bspNet/docker.ts +++ b/test/util/bspNet/docker.ts @@ -247,13 +247,14 @@ export const waitForLog = async (options: { searchString: string; containerName: string; timeout?: number; + tail?: number; }): Promise => { return new Promise((resolve, reject) => { const docker = new Docker(); const container = docker.getContainer(options.containerName); container.logs( - { follow: true, stdout: true, stderr: true, tail: 10, timestamps: false }, // set tail default to 10 to get the 10 last lines of logs printed + { follow: true, stdout: true, stderr: true, tail: options.tail, timestamps: false }, // tail comes from options.tail and defaults to undefined (stream the full log) (err, stream) => { if (err) { return reject(err); From 0ed222142d104ca2aa612a94860b0c75be38db52 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Thu, 27 Feb 2025 14:37:41 +0100 Subject: [PATCH 26/28] remove only:true --- test/suites/integration/msp/catch-up-storage.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index 367132971..d25890a27 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -3,7 +3,7 @@ 
import { describeMspNet, shUser, type EnrichedBspApi, waitFor, sleep } from "../ describeMspNet( "MSP catching up with chain and volunteering for storage request", - { initialised: false, only: true }, + { initialised: false }, ({ before, createMsp1Api, it, createUserApi, createApi }) => { let userApi: EnrichedBspApi; let mspApi: EnrichedBspApi; From aefdf1ab1316c548cfce149bcfadeacd8e7acba5 Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Fri, 28 Feb 2025 10:21:20 +0100 Subject: [PATCH 27/28] still need sleep --- test/suites/integration/msp/catch-up-storage.test.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index d25890a27..edc3c2914 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -24,7 +24,7 @@ describeMspNet( strictEqual(mspNodePeerId.toString(), userApi.shConsts.NODE_INFOS.msp1.expectedPeerId); }); - it("MSP accept storage request after catching up with blockchain and user properly retry sending file", async () => { + it("MSP accept storage request after catching up with blockchain and user properly retry sending file", { timeout: 50000 }, async () => { const source = "res/whatsup.jpg"; const destination = "test/smile.jpg"; const bucketName = "trying-things"; @@ -91,11 +91,13 @@ describeMspNet( }); // Doesn't work without this because there is no log that tell us when the websocket is ready - await sleep(5000); + await sleep(15000); // Creating a new MSP API to connect to the newly restarted container. const newMspApi = await createApi(`ws://127.0.0.1:${userApi.shConsts.NODE_INFOS.msp1.port}`); + console.log("Connected"); + // Waiting for the MSP node to be in sync with the chain. 
await userApi.rpc.engine.createBlock(true, true); From c202fcfaf8bb0951efba64cdf3de02e3c444b24e Mon Sep 17 00:00:00 2001 From: undercover-cactus Date: Fri, 28 Feb 2025 14:01:44 +0100 Subject: [PATCH 28/28] fix linter --- .../integration/msp/catch-up-storage.test.ts | 209 +++++++++--------- 1 file changed, 108 insertions(+), 101 deletions(-) diff --git a/test/suites/integration/msp/catch-up-storage.test.ts b/test/suites/integration/msp/catch-up-storage.test.ts index edc3c2914..4eb7475bc 100644 --- a/test/suites/integration/msp/catch-up-storage.test.ts +++ b/test/suites/integration/msp/catch-up-storage.test.ts @@ -24,106 +24,113 @@ describeMspNet( strictEqual(mspNodePeerId.toString(), userApi.shConsts.NODE_INFOS.msp1.expectedPeerId); }); - it("MSP accept storage request after catching up with blockchain and user properly retry sending file", { timeout: 50000 }, async () => { - const source = "res/whatsup.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "trying-things"; - - // Stop the msp container so it will be behind when we restart the node. - // TODO: clearLogs is not working, fix it. 
- // await clearLogs({ containerName: "docker-sh-msp-1" }); - await userApi.docker.pauseContainer("docker-sh-msp-1"); - - const newBucketEventEvent = await userApi.createBucket(bucketName); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - assert(newBucketEventDataBlob, "Event doesn't match Type"); - - await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - userApi.shConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.block.seal({ - calls: [ - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - destination, - userApi.shConsts.TEST_ARTEFACTS[source].fingerprint, - userApi.shConsts.TEST_ARTEFACTS[source].size, - userApi.shConsts.DUMMY_MSP_ID, - [userApi.shConsts.NODE_INFOS.user.expectedPeerId], - { - Basic: null - } - ) - ], - signer: shUser - }); - - const { event } = await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); - const newStorageRequestDataBlob = - userApi.events.fileSystem.NewStorageRequest.is(event) && event.data; - assert( - newStorageRequestDataBlob, - "NewStorageRequest event data does not match expected type" - ); - - // Advancing 10 blocks to see if MSP catchup - await userApi.block.skip(10); - - // Closing mspApi gracefully before restarting the container - // IMPORTANT: If this is not done, the api connection cannot close properly and the test - // runner will hang. - await mspApi.disconnect(); - - // Restarting the MSP container. This will start the Substrate node from scratch. 
- await userApi.docker.restartContainer({ containerName: "docker-sh-msp-1" }); - - // TODO: Wait for the container logs of starting up - await userApi.docker.waitForLog({ - searchString: "💤 Idle (3 peers)", - containerName: "docker-sh-msp-1", - tail: 10 - }); - - // Doesn't work without this because there is no log that tell us when the websocket is ready - await sleep(15000); - - // Creating a new MSP API to connect to the newly restarted container. - const newMspApi = await createApi(`ws://127.0.0.1:${userApi.shConsts.NODE_INFOS.msp1.port}`); - - console.log("Connected"); - - // Waiting for the MSP node to be in sync with the chain. - await userApi.rpc.engine.createBlock(true, true); - - await userApi.docker.waitForLog({ - searchString: "🥱 Handling coming out of sync mode", - containerName: "docker-sh-msp-1" - }); - - await userApi.block.skip(4); // user retry every 5 blocks. The one we created before and this one - - await userApi.docker.waitForLog({ - searchString: - 'File upload complete. Peer PeerId("12D3KooWSUvz8QM5X4tfAaSLErAZjR2puojo16pULBHyqTMGKtNV") has the entire file', - containerName: "docker-sh-user-1" - }); - - await waitFor({ - lambda: async () => - (await newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)).isFileFound - }); - - await userApi.block.seal(); - await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); - - // IMPORTANT!! Without this the test suite never finish - newMspApi.disconnect(); - }); + it( + "MSP accept storage request after catching up with blockchain and user properly retry sending file", + { timeout: 50000 }, + async () => { + const source = "res/whatsup.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "trying-things"; + + // Stop the msp container so it will be behind when we restart the node. + // TODO: clearLogs is not working, fix it. 
+ // await clearLogs({ containerName: "docker-sh-msp-1" }); + await userApi.docker.pauseContainer("docker-sh-msp-1"); + + const newBucketEventEvent = await userApi.createBucket(bucketName); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + assert(newBucketEventDataBlob, "Event doesn't match Type"); + + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + userApi.shConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.block.seal({ + calls: [ + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + destination, + userApi.shConsts.TEST_ARTEFACTS[source].fingerprint, + userApi.shConsts.TEST_ARTEFACTS[source].size, + userApi.shConsts.DUMMY_MSP_ID, + [userApi.shConsts.NODE_INFOS.user.expectedPeerId], + { + Basic: null + } + ) + ], + signer: shUser + }); + + const { event } = await userApi.assert.eventPresent("fileSystem", "NewStorageRequest"); + const newStorageRequestDataBlob = + userApi.events.fileSystem.NewStorageRequest.is(event) && event.data; + assert( + newStorageRequestDataBlob, + "NewStorageRequest event data does not match expected type" + ); + + // Advancing 10 blocks to see if MSP catchup + await userApi.block.skip(10); + + // Closing mspApi gracefully before restarting the container + // IMPORTANT: If this is not done, the api connection cannot close properly and the test + // runner will hang. + await mspApi.disconnect(); + + // Restarting the MSP container. This will start the Substrate node from scratch. 
+ await userApi.docker.restartContainer({ containerName: "docker-sh-msp-1" }); + + // TODO: Wait for the container logs of starting up + await userApi.docker.waitForLog({ + searchString: "💤 Idle (3 peers)", + containerName: "docker-sh-msp-1", + tail: 10 + }); + + // Doesn't work without this because there is no log that tell us when the websocket is ready + await sleep(15000); + + // Creating a new MSP API to connect to the newly restarted container. + const newMspApi = await createApi( + `ws://127.0.0.1:${userApi.shConsts.NODE_INFOS.msp1.port}` + ); + + console.log("Connected"); + + // Waiting for the MSP node to be in sync with the chain. + await userApi.rpc.engine.createBlock(true, true); + + await userApi.docker.waitForLog({ + searchString: "🥱 Handling coming out of sync mode", + containerName: "docker-sh-msp-1" + }); + + await userApi.block.skip(4); // user retry every 5 blocks. The one we created before and this one + + await userApi.docker.waitForLog({ + searchString: + 'File upload complete. Peer PeerId("12D3KooWSUvz8QM5X4tfAaSLErAZjR2puojo16pULBHyqTMGKtNV") has the entire file', + containerName: "docker-sh-user-1" + }); + + await waitFor({ + lambda: async () => + (await newMspApi.rpc.storagehubclient.isFileInFileStorage(event.data.fileKey)) + .isFileFound + }); + + await userApi.block.seal(); + await userApi.assert.eventPresent("fileSystem", "MspAcceptedStorageRequest"); + + // IMPORTANT!! Without this the test suite never finish + newMspApi.disconnect(); + } + ); } );