diff --git a/Cargo.lock b/Cargo.lock index 18517f94788a..2ad1483887e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10737,6 +10737,7 @@ dependencies = [ name = "pallet-assets" version = "29.1.0" dependencies = [ + "binary-merkle-tree", "frame-benchmarking", "frame-support", "frame-system", @@ -22471,6 +22472,7 @@ dependencies = [ name = "sp-runtime" version = "31.0.1" dependencies = [ + "binary-merkle-tree", "docify", "either", "hash256-std-hasher", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_foreign.rs index c76c1137335a..22dff5cf5081 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_foreign.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_foreign.rs @@ -541,4 +541,20 @@ impl pallet_assets::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + + fn mint_distribution() -> Weight { + Weight::default() + } + + fn claim_distribution() -> Weight { + Weight::default() + } + + fn end_distribution() -> Weight { + Weight::default() + } + + fn destroy_distribution(_n: u32) -> Weight { + Weight::default() + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_local.rs index cf4f60042bc6..c2abde128200 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_local.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_local.rs @@ -538,4 +538,20 @@ impl pallet_assets::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + + fn mint_distribution() -> Weight { + Weight::default() + } + + fn claim_distribution() -> Weight { + Weight::default() + } + + fn end_distribution() -> Weight { + Weight::default() + } + + fn destroy_distribution(_n: u32) -> Weight { + Weight::default() + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_pool.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_pool.rs index 2cd85de00989..5c4f4110cf17 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_pool.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_assets_pool.rs @@ -538,4 +538,20 @@ impl pallet_assets::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + + fn mint_distribution() -> Weight { + Weight::default() + } + + fn claim_distribution() -> Weight { + Weight::default() + } + + fn end_distribution() -> Weight { + Weight::default() + } + + fn destroy_distribution(_n: u32) -> Weight { + Weight::default() + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_foreign.rs index 2692de9aeb50..f759123025a8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_foreign.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_foreign.rs @@ -547,4 +547,20 @@ impl pallet_assets::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1_u64)) 
.saturating_add(T::DbWeight::get().writes(1_u64)) } + + fn mint_distribution() -> Weight { + Weight::default() + } + + fn claim_distribution() -> Weight { + Weight::default() + } + + fn end_distribution() -> Weight { + Weight::default() + } + + fn destroy_distribution(_n: u32) -> Weight { + Weight::default() + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_local.rs index d2e12549a45c..440dfb3fe0c2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_local.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_local.rs @@ -545,4 +545,20 @@ impl pallet_assets::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + + fn mint_distribution() -> Weight { + Weight::default() + } + + fn claim_distribution() -> Weight { + Weight::default() + } + + fn end_distribution() -> Weight { + Weight::default() + } + + fn destroy_distribution(_n: u32) -> Weight { + Weight::default() + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_pool.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_pool.rs index 8368f6e583cc..2977ef295a5c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_pool.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_assets_pool.rs @@ -539,4 +539,20 @@ impl pallet_assets::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + + fn mint_distribution() -> Weight { + Weight::default() + } + + fn claim_distribution() -> Weight { + Weight::default() + } + + fn end_distribution() -> Weight { + Weight::default() + } + + fn destroy_distribution(_n: u32) -> Weight { + Weight::default() + } } diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index e20b576d0836..5f1a876a5dc3 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -32,6 +32,7 @@ sp-core = { workspace = true } [dev-dependencies] sp-io = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } +binary-merkle-tree = { workspace = true } [features] default = ["std"] diff --git a/substrate/frame/assets/src/benchmarking.rs b/substrate/frame/assets/src/benchmarking.rs index 8988323c1985..a0e2b3ded546 100644 --- a/substrate/frame/assets/src/benchmarking.rs +++ b/substrate/frame/assets/src/benchmarking.rs @@ -139,6 +139,29 @@ fn assert_event, I: 'static>(generic_event: >::Runti frame_system::Pallet::::assert_has_event(generic_event.into()); } +// fn generate_merkle_trie, I: 'static>(items: u32) -> (DistributionHashOf, +// DistributionProofOf) { use codec::Encode; +// let flat_distribution = Vec::>::with_capacity(items as usize); +// for i in 0..items { +// let account: T::AccountId = account("target", i, SEED); +// let balance: T::Balance = i.into(); + +// flat_distribution.push((account, balance).encode()); +// } + +// let root = binary_merkle_tree::merkle_root::<::Hashing, _>( +// flat_distribution.clone(), +// ); + +// let proof = binary_merkle_tree::merkle_proof::< +// ::Hashing, +// _, +// _, +// >(flat_distribution.clone(), items); + +// return (root, proof) +// } + benchmarks_instance_pallet! 
{ create { let asset_id = default_asset_id::(); @@ -564,5 +587,61 @@ benchmarks_instance_pallet! { assert_last_event::(Event::Transferred { asset_id: asset_id.into(), from: caller, to: target, amount }.into()); } + // This function is O(1), so placing any hash as a merkle root should work. + mint_distribution { + let (asset_id, caller, _) = create_default_asset::(true); + let before_count = MerklizedDistribution::::count(); + }: _(SystemOrigin::Signed(caller.clone()), asset_id.clone(), DistributionHashOf::::default()) + verify { + let count = MerklizedDistribution::::count(); + assert_eq!(count, before_count + 1); + assert_last_event::(Event::DistributionIssued { distribution_id: before_count, asset_id: asset_id.into(), merkle_root: DistributionHashOf::::default() }.into()); + } + + // This function is O(1), so ending any distribution should work. + end_distribution { + let (asset_id, caller, _) = create_default_asset::(true); + let before_count = MerklizedDistribution::::count(); + Assets::::mint_distribution( + SystemOrigin::Signed(caller.clone()).into(), + asset_id.clone(), + DistributionHashOf::::default(), + )?; + let count = MerklizedDistribution::::count(); + assert_eq!(count, before_count + 1); + assert_last_event::(Event::DistributionIssued { distribution_id: before_count, asset_id: asset_id.into(), merkle_root: DistributionHashOf::::default() }.into()); + }: _(SystemOrigin::Signed(caller.clone()), before_count) + verify { + assert_last_event::(Event::DistributionEnded { distribution_id: before_count }.into()); + } + + // This function is O(N), where N is the number of items destroyed in one extrinsic call. + // This benchmark cheats a little to avoid having to do hundreds or thousands of merkle proofs. + // Instead we call low level storage to populate the `MerklizedDistributionTracker` for our needs. + // If the logic of the `do_destroy_distribution` function changes, then this also needs to be updated. + destroy_distribution { + let c in 0 .. T::RemoveItemsLimit::get(); + let (asset_id, caller, _) = create_default_asset::(true); + let before_count = MerklizedDistribution::::count(); + Assets::::mint_distribution( + SystemOrigin::Signed(caller.clone()).into(), + asset_id.clone(), + DistributionHashOf::::default(), + )?; + Assets::::end_distribution( + SystemOrigin::Signed(caller.clone()).into(), + before_count, + )?; + for i in 0..c { + let account_id: T::AccountId = account("target", i, SEED); + MerklizedDistributionTracker::::insert(before_count, account_id, ()); + } + assert_eq!(MerklizedDistributionTracker::::iter().count() as u32, c); + }: _(SystemOrigin::Signed(caller.clone()), before_count) + verify { + assert_last_event::(Event::DistributionCleaned { distribution_id: before_count }.into()); + assert_eq!(MerklizedDistributionTracker::::iter().count() as u32, 0); + } + impl_benchmark_test_suite!(Assets, crate::mock::new_test_ext(), crate::mock::Test) } diff --git a/substrate/frame/assets/src/functions.rs b/substrate/frame/assets/src/functions.rs index c218c4ddc952..756f08849c81 100644 --- a/substrate/frame/assets/src/functions.rs +++ b/substrate/frame/assets/src/functions.rs @@ -438,6 +438,128 @@ impl, I: 'static> Pallet { Ok(()) } + /// Creates a distribution in storage for asset `id`, which can be claimed via + /// `do_claim_distribution`. 
+    pub(super) fn do_mint_distribution(
+        id: T::AssetId,
+        merkle_root: DistributionHashOf<T, I>,
+        maybe_check_issuer: Option<T::AccountId>,
+    ) -> DispatchResult {
+        let details = Asset::<T, I>::get(&id).ok_or(Error::<T, I>::Unknown)?;
+        ensure!(details.status == AssetStatus::Live, Error::<T, I>::AssetNotLive);
+
+        if let Some(check_issuer) = maybe_check_issuer {
+            ensure!(check_issuer == details.issuer, Error::<T, I>::NoPermission);
+        }
+
+        let info = DistributionInfo {
+            asset_id: id.clone(),
+            merkle_root: merkle_root.clone(),
+            active: true,
+        };
+
+        let distribution_id: u32 = MerklizedDistribution::<T, I>::count();
+        MerklizedDistribution::<T, I>::insert(&distribution_id, info);
+
+        Self::deposit_event(Event::DistributionIssued {
+            distribution_id,
+            asset_id: id,
+            merkle_root,
+        });
+
+        Ok(())
+    }
+
+    /// A wrapper around `do_mint`, allowing a `merkle_proof` to control the amount minted and to
+    /// whom.
+    pub(super) fn do_claim_distribution(
+        distribution_id: DistributionCounter,
+        merkle_proof: Vec<u8>,
+    ) -> DispatchResult {
+        let proof =
+            codec::Decode::decode(&mut &merkle_proof[..]).map_err(|_| Error::<T, I>::BadProof)?;
+
+        let DistributionInfo { asset_id, merkle_root, active } =
+            MerklizedDistribution::<T, I>::get(distribution_id).ok_or(Error::<T, I>::Unknown)?;
+
+        ensure!(active, Error::<T, I>::DistributionEnded);
+
+        let leaf = T::VerifyExistenceProof::verify_proof(proof, &merkle_root)
+            .map_err(|()| Error::<T, I>::BadProof)?;
+        let (beneficiary, amount) =
+            codec::Decode::decode(&mut &leaf[..]).map_err(|_| Error::<T, I>::CannotDecodeLeaf)?;
+
+        ensure!(
+            !MerklizedDistributionTracker::<T, I>::contains_key(distribution_id, &beneficiary),
+            Error::<T, I>::AlreadyClaimed
+        );
+
+        Self::do_mint(asset_id, &beneficiary, amount, None)?;
+        MerklizedDistributionTracker::<T, I>::insert(&distribution_id, &beneficiary, ());
+
+        Ok(())
+    }
+
+    /// Ends the asset distribution of `distribution_id`.
+    pub(super) fn do_end_distribution(
+        distribution_id: DistributionCounter,
+        maybe_check_issuer: Option<T::AccountId>,
+    ) -> DispatchResult {
+        let mut info =
+            MerklizedDistribution::<T, I>::get(&distribution_id).ok_or(Error::<T, I>::Unknown)?;
+        let details = Asset::<T, I>::get(&info.asset_id).ok_or(Error::<T, I>::Unknown)?;
+
+        if let Some(check_issuer) = maybe_check_issuer {
+            ensure!(check_issuer == details.issuer, Error::<T, I>::NoPermission);
+        }
+
+        info.active = false;
+
+        MerklizedDistribution::<T, I>::insert(&distribution_id, info);
+
+        Self::deposit_event(Event::DistributionEnded { distribution_id });
+
+        Ok(())
+    }
+
+    /// Cleans up the distribution tracker of `distribution_id`.
+    /// Iterates over and removes entries from the `MerklizedDistributionTracker` map,
+    /// `RemoveItemsLimit` items at a time. This function may need to be called multiple times to
+    /// complete successfully.
+    pub(super) fn do_destroy_distribution(
+        distribution_id: DistributionCounter,
+    ) -> DispatchResultWithPostInfo {
+        let info =
+            MerklizedDistribution::<T, I>::get(&distribution_id).ok_or(Error::<T, I>::Unknown)?;
+
+        ensure!(!info.active, Error::<T, I>::DistributionActive);
+
+        let mut refund_count = 0u32;
+        let distribution_iterator =
+            MerklizedDistributionTracker::<T, I>::iter_key_prefix(&distribution_id);
+
+        let mut all_refunded = true;
+        for who in distribution_iterator {
+            if refund_count >= T::RemoveItemsLimit::get() {
+                // Not every entry could be removed this time around.
+                all_refunded = false;
+                break
+            }
+
+            MerklizedDistributionTracker::<T, I>::remove(&distribution_id, &who);
+            refund_count += 1;
+        }
+
+        if all_refunded {
+            Self::deposit_event(Event::<T, I>::DistributionCleaned { distribution_id });
+            // Refund weight: only charge for the entries we actually removed.
+            Ok(Some(T::WeightInfo::destroy_distribution(refund_count)).into())
+        } else {
+            Self::deposit_event(Event::<T, I>::DistributionPartiallyCleaned { distribution_id });
+            // No weight to refund since we did not finish the loop.
+            Ok(().into())
+        }
+    }
+
     /// Increases the asset `id` balance of `beneficiary` by `amount`.
     ///
     /// LOW-LEVEL: Does not alter the supply of asset or emit an event. Use `do_mint` if you need
diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs
index e909932bfc82..6aea2f18d4fa 100644
--- a/substrate/frame/assets/src/lib.rs
+++ b/substrate/frame/assets/src/lib.rs
@@ -190,6 +190,7 @@ use frame_support::{
         },
         BalanceStatus::Reserved,
         Currency, EnsureOriginWithArg, Incrementable, ReservableCurrency, StoredMap,
+        VerifyExistenceProof,
     },
 };
 use frame_system::Config as SystemConfig;
@@ -298,6 +299,7 @@ pub mod pallet {
             type Extra = ();
             type CallbackHandle = ();
             type WeightInfo = ();
+            type VerifyExistenceProof = ();
             #[cfg(feature = "runtime-benchmarks")]
             type BenchmarkHelper = ();
         }
@@ -321,7 +323,8 @@ pub mod pallet {
             + MaxEncodedLen
             + TypeInfo;
 
-        /// Max number of items to destroy per `destroy_accounts` and `destroy_approvals` call.
+        /// Max number of items to destroy per `destroy_accounts`, `destroy_approvals`, and
+        /// `destroy_distribution` call.
         ///
         /// Must be configured to result in a weight that makes each call fit in a block.
         #[pallet::constant]
@@ -404,6 +407,9 @@ pub mod pallet {
         /// used to set up auto-incrementing asset IDs for this collection.
         type CallbackHandle: AssetsCallback<Self::AssetId, Self::AccountId>;
 
+        /// A type used to verify merkle proofs used for distributions.
+        type VerifyExistenceProof: VerifyExistenceProof;
+
         /// Weight information for extrinsics in this pallet.
         type WeightInfo: WeightInfo;
 
@@ -456,6 +462,28 @@ pub mod pallet {
         ValueQuery,
     >;
 
+    #[pallet::storage]
+    /// Merklized distribution of an asset.
+    pub(super) type MerklizedDistribution<T: Config<I>, I: 'static = ()> = CountedStorageMap<
+        _,
+        Blake2_128Concat,
+        DistributionCounter,
+        DistributionInfo<T::AssetId, DistributionHashOf<T, I>>,
+        OptionQuery,
+    >;
+
+    #[pallet::storage]
+    /// Tracks the merklized distribution of an asset so that assets are only claimed once.
+    pub(super) type MerklizedDistributionTracker<T: Config<I>, I: 'static = ()> = StorageDoubleMap<
+        _,
+        Blake2_128Concat,
+        DistributionCounter,
+        Blake2_128Concat,
+        T::AccountId,
+        (),
+        OptionQuery,
+    >;
+
     /// The asset ID enforced for the next asset creation, if any present. Otherwise, this storage
     /// item has no effect.
     ///
@@ -639,6 +667,18 @@ pub mod pallet {
         Deposited { asset_id: T::AssetId, who: T::AccountId, amount: T::Balance },
         /// Some assets were withdrawn from the account (e.g. for transaction fees).
         Withdrawn { asset_id: T::AssetId, who: T::AccountId, amount: T::Balance },
+        /// A distribution of assets was issued.
+        DistributionIssued {
+            distribution_id: DistributionCounter,
+            asset_id: T::AssetId,
+            merkle_root: DistributionHashOf<T, I>,
+        },
+        /// A distribution has ended.
+        DistributionEnded { distribution_id: DistributionCounter },
+        /// A distribution has been partially cleaned. There are still more items to clean up.
+        DistributionPartiallyCleaned { distribution_id: DistributionCounter },
+        /// A distribution has been fully cleaned.
+        DistributionCleaned { distribution_id: DistributionCounter },
     }
 
     #[pallet::error]
@@ -688,6 +728,16 @@ pub mod pallet {
         CallbackFailed,
         /// The asset ID must be equal to the [`NextAssetId`].
         BadAssetId,
+        /// The asset distribution was already claimed.
+        AlreadyClaimed,
+        /// The asset distribution is no longer active.
+        DistributionEnded,
+        /// The asset distribution is still active.
+        DistributionActive,
+        /// The proof provided could not be verified.
+        BadProof,
+        /// A leaf node was extracted from the proof, but it did not match the expected format.
+        CannotDecodeLeaf,
     }
 
     #[pallet::call(weight(<T as Config<I>>::WeightInfo))]
@@ -1798,6 +1848,92 @@ pub mod pallet {
             )?;
             Ok(())
         }
+
+        /// Mint a distribution of assets of a particular class.
+        ///
+        /// The origin must be Signed and the sender must be the Issuer of the asset `id`.
+        ///
+        /// - `id`: The identifier of the asset to have some amount minted.
+        /// - `merkle_root`: The merkle root of the distribution, in the format expected by
+        ///   `Config::VerifyExistenceProof`, used to authorize minting.
+        ///
+        /// Emits `DistributionIssued` event when successful.
+        ///
+        /// Weight: `O(1)`
+        #[pallet::call_index(33)]
+        pub fn mint_distribution(
+            origin: OriginFor<T>,
+            id: T::AssetIdParameter,
+            merkle_root: DistributionHashOf<T, I>,
+        ) -> DispatchResult {
+            let origin = ensure_signed(origin)?;
+            let id: T::AssetId = id.into();
+            Self::do_mint_distribution(id, merkle_root, Some(origin))?;
+            Ok(())
+        }
+
+        /// Claim a distribution of assets of a particular class.
+        ///
+        /// Any signed origin may call this function.
+        ///
+        /// - `distribution_id`: The identifier of the distribution.
+        /// - `merkle_proof`: The encoded merkle proof of the account and balance, in the format
+        ///   expected by `Config::VerifyExistenceProof`, used to authorize minting.
+        ///
+        /// Emits `Issued` event when successful.
+        ///
+        /// Weight: `O(P)` where `P` is the size of the merkle proof.
+        #[pallet::call_index(34)]
+        pub fn claim_distribution(
+            origin: OriginFor<T>,
+            distribution_id: DistributionCounter,
+            merkle_proof: Vec<u8>,
+        ) -> DispatchResult {
+            ensure_signed(origin)?;
+            Self::do_claim_distribution(distribution_id, merkle_proof)?;
+            Ok(())
+        }
+
+        /// End the distribution of assets by distribution id.
+        ///
+        /// The origin must be Signed and the sender must be the Issuer of the asset underlying
+        /// the distribution.
+        ///
+        /// - `distribution_id`: The identifier of the distribution.
+        ///
+        /// Emits `DistributionEnded` event when successful.
+        ///
+        /// Weight: `O(1)`
+        #[pallet::call_index(35)]
+        pub fn end_distribution(
+            origin: OriginFor<T>,
+            distribution_id: DistributionCounter,
+        ) -> DispatchResult {
+            let origin = ensure_signed(origin)?;
+            Self::do_end_distribution(distribution_id, Some(origin))?;
+            Ok(())
+        }
+
+        /// Clean up the distribution tracker of an ended distribution. This function might need to
+        /// be called multiple times to remove all the items from the distribution tracker.
+        ///
+        /// Any signed origin may call this function.
+        ///
+        /// - `distribution_id`: The identifier of the distribution to clean. It cannot be active.
+        ///
+        /// Emits `DistributionPartiallyCleaned` event when some elements have been removed, but
+        /// there are still some left. Emits `DistributionCleaned` when all of the distribution
+        /// history has been removed.
+        ///
+        /// Weight: `O(N)` where `N` is the maximum number of elements that can be removed at once.
+        #[pallet::call_index(36)]
+        #[pallet::weight(T::WeightInfo::destroy_distribution(T::RemoveItemsLimit::get()))]
+        pub fn destroy_distribution(
+            origin: OriginFor<T>,
+            distribution_id: DistributionCounter,
+        ) -> DispatchResultWithPostInfo {
+            ensure_signed(origin)?;
+            Self::do_destroy_distribution(distribution_id)
+        }
     }
 
     /// Implements [`AccountTouch`] trait.
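The merkle root passed to `mint_distribution` and the proof bytes passed to `claim_distribution` are produced offchain. Below is a minimal sketch of that offchain step, mirroring the mock and test in the next files (a binary merkle tree over SCALE-encoded `(account, balance)` leaves, hashed with `BlakeTwo256`); the standalone binary, the `u64` account and balance types, and the direct `binary-merkle-tree`/`parity-scale-codec`/`sp-runtime` dependencies are illustrative assumptions, not part of this diff:

```rust
use codec::Encode;
use sp_runtime::traits::BlakeTwo256;

fn main() {
    // SCALE-encoded (account, balance) leaves, exactly the shape `do_claim_distribution` decodes.
    let distribution: Vec<(u64, u64)> = (0..100u64).map(|i| (i, i)).collect();
    let leaves: Vec<Vec<u8>> = distribution.iter().map(|leaf| leaf.encode()).collect();

    // The root to submit via `mint_distribution`.
    let root = binary_merkle_tree::merkle_root::<BlakeTwo256, _>(leaves.clone());

    // A proof for the leaf at index 6; its SCALE encoding is the `merkle_proof`
    // argument of `claim_distribution`.
    let proof = binary_merkle_tree::merkle_proof::<BlakeTwo256, _, _>(leaves, 6).encode();

    println!("root = {:?}, proof = {} bytes", root, proof.len());
}
```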
diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs index 2c160840e147..7aa57ada0f5a 100644 --- a/substrate/frame/assets/src/mock.rs +++ b/substrate/frame/assets/src/mock.rs @@ -23,7 +23,7 @@ use crate as pallet_assets; use codec::Encode; use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{AsEnsureOriginWithArg, ConstU32}, + traits::{AsEnsureOriginWithArg, BinaryMerkleTreeProver, ConstU32}, }; use sp_io::storage; use sp_runtime::BuildStorage; @@ -39,8 +39,9 @@ construct_runtime!( } ); -type AccountId = u64; -type AssetId = u32; +pub(crate) type AccountId = u64; +pub(crate) type AssetId = u32; +pub(crate) type Balance = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { @@ -104,6 +105,7 @@ impl Config for Test { type ForceOrigin = frame_system::EnsureRoot; type Freezer = TestFreezer; type CallbackHandle = (AssetsCallbackHandle, AutoIncAssetId); + type VerifyExistenceProof = BinaryMerkleTreeProver; } use std::collections::HashMap; diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs index af605c5a3c64..21a98a958910 100644 --- a/substrate/frame/assets/src/tests.rs +++ b/substrate/frame/assets/src/tests.rs @@ -19,6 +19,7 @@ use super::*; use crate::{mock::*, Error}; +use codec::Encode; use frame_support::{ assert_noop, assert_ok, dispatch::GetDispatchInfo, @@ -1921,3 +1922,66 @@ fn asset_id_cannot_be_reused() { assert!(Asset::::contains_key(7)); }); } + +#[test] +fn merklized_distribution_works() { + new_test_ext().execute_with(|| { + use alloc::collections::BTreeMap; + + // Create asset id 0 controlled by user 1, sufficient so it does not need ED. + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + + // Offchain, user 1 creates a distribution of tokens. + let mut distribution = BTreeMap::::new(); + for i in 0..100u64 { + distribution.insert(i, i.into()); + } + + // Maybe the owner gives himself a little extra ;) + distribution.insert(1, 1337); + + let flat_distribution: Vec> = + distribution.into_iter().map(|item| item.encode()).collect(); + + let root = binary_merkle_tree::merkle_root::<::Hashing, _>( + flat_distribution.clone(), + ); + + let proof_for_69 = binary_merkle_tree::merkle_proof::< + ::Hashing, + _, + _, + >(flat_distribution.clone(), 69); + let proof_for_1 = binary_merkle_tree::merkle_proof::< + ::Hashing, + _, + _, + >(flat_distribution.clone(), 1); + let proof_for_6 = binary_merkle_tree::merkle_proof::< + ::Hashing, + _, + _, + >(flat_distribution, 6); + + // Use this trie root for the distribution + assert_ok!(Assets::mint_distribution(RuntimeOrigin::signed(1), 0, root)); + + // Now users claim their distributions permissionlessly with a proof. + assert_ok!(Assets::claim_distribution(RuntimeOrigin::signed(1), 0, proof_for_1.encode())); + assert_eq!(Assets::balance(0, 1), 1337); + + // Other users can claim their tokens. + assert_ok!(Assets::claim_distribution(RuntimeOrigin::signed(55), 0, proof_for_69.encode())); + assert_eq!(Assets::balance(0, 69), 69); + + // Owner (or anyone) can also distribute on behalf of the other users. + assert_ok!(Assets::claim_distribution(RuntimeOrigin::signed(1), 0, proof_for_6.encode())); + assert_eq!(Assets::balance(0, 6), 6); + + // You cannot double claim. 
+ assert_noop!( + Assets::claim_distribution(RuntimeOrigin::signed(6), 0, proof_for_6.encode()), + Error::::AlreadyClaimed + ); + }); +} diff --git a/substrate/frame/assets/src/types.rs b/substrate/frame/assets/src/types.rs index 11edc7d3fcb5..678499cb85b4 100644 --- a/substrate/frame/assets/src/types.rs +++ b/substrate/frame/assets/src/types.rs @@ -317,3 +317,19 @@ where .saturating_mul_int(balance)) } } + +pub type DistributionCounter = u32; +pub type DistributionProofOf = + <>::VerifyExistenceProof as VerifyExistenceProof>::Proof; +pub type DistributionHashOf = + <>::VerifyExistenceProof as VerifyExistenceProof>::Hash; + +#[derive(Eq, PartialEq, Copy, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] +pub struct DistributionInfo { + // The asset id we are distributing. + pub asset_id: AssetId, + // The merkle root which represents all the balances to distribute. + pub merkle_root: Hash, + // Whether the distribution is still active. + pub active: bool, +} diff --git a/substrate/frame/assets/src/weights.rs b/substrate/frame/assets/src/weights.rs index 57f7e951b73c..7755789c79b9 100644 --- a/substrate/frame/assets/src/weights.rs +++ b/substrate/frame/assets/src/weights.rs @@ -84,6 +84,10 @@ pub trait WeightInfo { fn refund_other() -> Weight; fn block() -> Weight; fn transfer_all() -> Weight; + fn mint_distribution() -> Weight; + fn claim_distribution() -> Weight; + fn end_distribution() -> Weight; + fn destroy_distribution(n: u32) -> Weight; } /// Weights for `pallet_assets` using the Substrate node and recommended hardware. @@ -541,6 +545,23 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + + fn mint_distribution() -> Weight { + Weight::default() + } + + fn claim_distribution() -> Weight { + Weight::default() + } + + fn end_distribution() -> Weight { + Weight::default() + } + + fn destroy_distribution(_n: u32) -> Weight { + Weight::default() + } + } // For backwards compatibility and tests. @@ -997,4 +1018,20 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + + fn mint_distribution() -> Weight { + Weight::default() + } + + fn claim_distribution() -> Weight { + Weight::default() + } + + fn end_distribution() -> Weight { + Weight::default() + } + + fn destroy_distribution(_n: u32) -> Weight { + Weight::default() + } } diff --git a/substrate/frame/support/src/traits/proving.rs b/substrate/frame/support/src/traits/proving.rs index dc44f4cd68e7..549f8f719ba4 100644 --- a/substrate/frame/support/src/traits/proving.rs +++ b/substrate/frame/support/src/traits/proving.rs @@ -24,9 +24,9 @@ use sp_core::Hasher; /// Something that can verify the existence of some data in a given proof. pub trait VerifyExistenceProof { /// The proof type. - type Proof; + type Proof: Encode + Decode; /// The hash type. - type Hash; + type Hash: Encode + Decode; /// Verify the given `proof`. /// @@ -77,7 +77,10 @@ pub struct SixteenPatriciaMerkleTreeExistenceProof { /// Implements [`VerifyExistenceProof`] using a 16-patricia merkle tree. 
pub struct SixteenPatriciaMerkleTreeProver(core::marker::PhantomData); -impl VerifyExistenceProof for SixteenPatriciaMerkleTreeProver { +impl VerifyExistenceProof for SixteenPatriciaMerkleTreeProver +where + H::Out: Decode + Encode, +{ type Proof = SixteenPatriciaMerkleTreeExistenceProof; type Hash = H::Out; @@ -92,6 +95,15 @@ impl VerifyExistenceProof for SixteenPatriciaMerkleTreeProver { } } +/// An implementation which always returns an error when this feature is unavailable. +impl VerifyExistenceProof for () { + type Proof = (); + type Hash = (); + fn verify_proof(_proof: Self::Proof, _root: &Self::Hash) -> Result, ()> { + Err(()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 800bf4bd0737..eebd3a2b3322 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -36,6 +36,7 @@ sp-trie = { workspace = true } sp-weights = { workspace = true } docify = { workspace = true } tracing = { workspace = true, features = ["log"], default-features = false } +binary-merkle-tree = { workspace = true } simple-mermaid = { version = "0.1.1", optional = true } @@ -73,6 +74,7 @@ std = [ "sp-trie/std", "sp-weights/std", "tracing/std", + "binary-merkle-tree/std", ] # Serde support without relying on std features. diff --git a/substrate/primitives/runtime/src/proving_trie.rs b/substrate/primitives/runtime/src/proving_trie/base16.rs similarity index 62% rename from substrate/primitives/runtime/src/proving_trie.rs rename to substrate/primitives/runtime/src/proving_trie/base16.rs index 9a423f18284f..69dbe77fa9d6 100644 --- a/substrate/primitives/runtime/src/proving_trie.rs +++ b/substrate/primitives/runtime/src/proving_trie/base16.rs @@ -24,105 +24,14 @@ //! Proofs are created with latest substrate trie format (`LayoutV1`), and are not compatible with //! proofs using `LayoutV0`. -use crate::{Decode, DispatchError, Encode, MaxEncodedLen, TypeInfo}; -#[cfg(feature = "serde")] -use crate::{Deserialize, Serialize}; - +use super::TrieError; +use crate::{Decode, DispatchError, Encode}; use sp_std::vec::Vec; use sp_trie::{ - trie_types::{TrieDBBuilder, TrieDBMutBuilderV1, TrieError as SpTrieError}, - LayoutV1, MemoryDB, Trie, TrieMut, VerifyError, + trie_types::{TrieDBBuilder, TrieDBMutBuilderV1}, + LayoutV1, MemoryDB, Trie, TrieMut, }; -type HashOf = ::Out; - -/// A runtime friendly error type for tries. -#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum TrieError { - /* From TrieError */ - /// Attempted to create a trie with a state root not in the DB. - InvalidStateRoot, - /// Trie item not found in the database, - IncompleteDatabase, - /// A value was found in the trie with a nibble key that was not byte-aligned. - ValueAtIncompleteKey, - /// Corrupt Trie item. - DecoderError, - /// Hash is not value. - InvalidHash, - /* From VerifyError */ - /// The statement being verified contains multiple key-value pairs with the same key. - DuplicateKey, - /// The proof contains at least one extraneous node. - ExtraneousNode, - /// The proof contains at least one extraneous value which should have been omitted from the - /// proof. - ExtraneousValue, - /// The proof contains at least one extraneous hash reference the should have been omitted. - ExtraneousHashReference, - /// The proof contains an invalid child reference that exceeds the hash length. 
- InvalidChildReference, - /// The proof indicates that an expected value was not found in the trie. - ValueMismatch, - /// The proof is missing trie nodes required to verify. - IncompleteProof, - /// The root hash computed from the proof is incorrect. - RootMismatch, - /// One of the proof nodes could not be decoded. - DecodeError, -} - -impl From> for TrieError { - fn from(error: SpTrieError) -> Self { - match error { - SpTrieError::InvalidStateRoot(..) => Self::InvalidStateRoot, - SpTrieError::IncompleteDatabase(..) => Self::IncompleteDatabase, - SpTrieError::ValueAtIncompleteKey(..) => Self::ValueAtIncompleteKey, - SpTrieError::DecoderError(..) => Self::DecoderError, - SpTrieError::InvalidHash(..) => Self::InvalidHash, - } - } -} - -impl From> for TrieError { - fn from(error: VerifyError) -> Self { - match error { - VerifyError::DuplicateKey(..) => Self::DuplicateKey, - VerifyError::ExtraneousNode => Self::ExtraneousNode, - VerifyError::ExtraneousValue(..) => Self::ExtraneousValue, - VerifyError::ExtraneousHashReference(..) => Self::ExtraneousHashReference, - VerifyError::InvalidChildReference(..) => Self::InvalidChildReference, - VerifyError::ValueMismatch(..) => Self::ValueMismatch, - VerifyError::IncompleteProof => Self::IncompleteProof, - VerifyError::RootMismatch(..) => Self::RootMismatch, - VerifyError::DecodeError(..) => Self::DecodeError, - } - } -} - -impl From for &'static str { - fn from(e: TrieError) -> &'static str { - match e { - TrieError::InvalidStateRoot => "The state root is not in the database.", - TrieError::IncompleteDatabase => "A trie item was not found in the database.", - TrieError::ValueAtIncompleteKey => - "A value was found with a key that is not byte-aligned.", - TrieError::DecoderError => "A corrupt trie item was encountered.", - TrieError::InvalidHash => "The hash does not match the expected value.", - TrieError::DuplicateKey => "The proof contains duplicate keys.", - TrieError::ExtraneousNode => "The proof contains extraneous nodes.", - TrieError::ExtraneousValue => "The proof contains extraneous values.", - TrieError::ExtraneousHashReference => "The proof contains extraneous hash references.", - TrieError::InvalidChildReference => "The proof contains an invalid child reference.", - TrieError::ValueMismatch => "The proof indicates a value mismatch.", - TrieError::IncompleteProof => "The proof is incomplete.", - TrieError::RootMismatch => "The root hash computed from the proof is incorrect.", - TrieError::DecodeError => "One of the proof nodes could not be decoded.", - } - } -} - /// A helper structure for building a basic base-16 merkle trie and creating compact proofs for that /// trie. Proofs are created with latest substrate trie format (`LayoutV1`), and are not compatible /// with proofs using `LayoutV0`. @@ -131,7 +40,7 @@ where Hashing: sp_core::Hasher, { db: MemoryDB, - root: HashOf, + root: Hashing::Out, _phantom: core::marker::PhantomData<(Key, Value)>, } @@ -161,7 +70,7 @@ where } /// Access the underlying trie root. - pub fn root(&self) -> &HashOf { + pub fn root(&self) -> &Hashing::Out { &self.root } @@ -178,8 +87,6 @@ where } /// Create a compact merkle proof needed to prove all `keys` and their values are in the trie. - /// Returns `None` if the nodes within the current `MemoryDB` are insufficient to create a - /// proof. /// /// This function makes a proof with latest substrate trie format (`LayoutV1`), and is not /// compatible with `LayoutV0`. 
@@ -187,22 +94,21 @@ where /// When verifying the proof created by this function, you must include all of the keys and /// values of the proof, else the verifier will complain that extra nodes are provided in the /// proof that are not needed. - pub fn create_proof(&self, keys: &[Key]) -> Result>, DispatchError> { + pub fn create_proof(&self, keys: &[Key]) -> Result, DispatchError> { sp_trie::generate_trie_proof::, _, _, _>( &self.db, self.root, &keys.into_iter().map(|k| k.encode()).collect::>>(), ) .map_err(|err| TrieError::from(*err).into()) + .map(|structured_proof| structured_proof.encode()) } /// Create a compact merkle proof needed to prove a single key and its value are in the trie. - /// Returns `None` if the nodes within the current `MemoryDB` are insufficient to create a - /// proof. /// /// This function makes a proof with latest substrate trie format (`LayoutV1`), and is not /// compatible with `LayoutV0`. - pub fn create_single_value_proof(&self, key: Key) -> Result>, DispatchError> { + pub fn create_single_value_proof(&self, key: Key) -> Result, DispatchError> { self.create_proof(&[key]) } } @@ -211,8 +117,8 @@ where /// /// Proofs must be created with latest substrate trie format (`LayoutV1`). pub fn verify_single_value_proof( - root: HashOf, - proof: &[Vec], + root: Hashing::Out, + proof: &[u8], key: Key, maybe_value: Option, ) -> Result<(), DispatchError> @@ -221,9 +127,11 @@ where Key: Encode, Value: Encode, { + let structured_proof: Vec> = + Decode::decode(&mut &proof[..]).map_err(|_| TrieError::DecodeError)?; sp_trie::verify_trie_proof::, _, _, _>( &root, - proof, + &structured_proof, &[(key.encode(), maybe_value.map(|value| value.encode()))], ) .map_err(|err| TrieError::from(err).into()) @@ -233,8 +141,8 @@ where /// /// Proofs must be created with latest substrate trie format (`LayoutV1`). pub fn verify_proof( - root: HashOf, - proof: &[Vec], + root: Hashing::Out, + proof: &[u8], items: &[(Key, Option)], ) -> Result<(), DispatchError> where @@ -242,13 +150,19 @@ where Key: Encode, Value: Encode, { + let structured_proof: Vec> = + Decode::decode(&mut &proof[..]).map_err(|_| TrieError::DecodeError)?; let items_encoded = items .into_iter() .map(|(key, maybe_value)| (key.encode(), maybe_value.as_ref().map(|value| value.encode()))) .collect::, Option>)>>(); - sp_trie::verify_trie_proof::, _, _, _>(&root, proof, &items_encoded) - .map_err(|err| TrieError::from(err).into()) + sp_trie::verify_trie_proof::, _, _, _>( + &root, + &structured_proof, + &items_encoded, + ) + .map_err(|err| TrieError::from(err).into()) } #[cfg(test)] @@ -382,10 +296,13 @@ mod tests { Err(TrieError::RootMismatch.into()) ); - // Fail to verify proof with wrong data + // Crete a bad proof. + let bad_proof = balance_trie.create_single_value_proof(99u32).unwrap(); + + // Fail to verify data with the wrong proof assert_eq!( - verify_single_value_proof::(root, &[], 6u32, Some(6u128)), - Err(TrieError::IncompleteProof.into()) + verify_single_value_proof::(root, &bad_proof, 6u32, Some(6u128)), + Err(TrieError::ExtraneousHashReference.into()) ); } } diff --git a/substrate/primitives/runtime/src/proving_trie/base2.rs b/substrate/primitives/runtime/src/proving_trie/base2.rs new file mode 100644 index 000000000000..216910f2c7d0 --- /dev/null +++ b/substrate/primitives/runtime/src/proving_trie/base2.rs @@ -0,0 +1,242 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types for a base-2 merkle tree used for checking and generating proofs within the +//! runtime. The `binary-merkle-tree` crate exposes all of these same functionality (and more), but +//! this library is designed to work more easily with runtime native types, which simply need to +//! implement `Encode`/`Decode`. + +use super::TrieError; +use crate::{Decode, DispatchError, Encode}; +use binary_merkle_tree::{merkle_proof, merkle_root, verify_proof, MerkleProof}; +use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; + +/// A helper structure for building a basic base-2 merkle trie and creating compact proofs for that +/// trie. +pub struct BasicProvingTrie +where + Hashing: sp_core::Hasher, +{ + // Deduplicated and flattened list of key value pairs. + db: BTreeMap, + root: Hashing::Out, + _phantom: core::marker::PhantomData<(Key, Value)>, +} + +impl BasicProvingTrie +where + Hashing: sp_core::Hasher, + Key: Encode + Ord, + Value: Encode, +{ + /// Create a new instance of a `ProvingTrie` using an iterator of key/value pairs. + pub fn generate_for(items: I) -> Result + where + I: IntoIterator, + { + let mut db = BTreeMap::default(); + for (key, value) in items.into_iter() { + db.insert(key, value); + } + let root = merkle_root::(db.iter().map(|item| item.encode())); + Ok(Self { db, root, _phantom: Default::default() }) + } + + /// Access the underlying trie root. + pub fn root(&self) -> &Hashing::Out { + &self.root + } + + /// Query a value contained within the current trie. Returns `None` if the + /// nodes within the current `db` are insufficient to query the item. + pub fn query(&self, key: Key) -> Option + where + Value: Decode + Clone, + { + self.db.get(&key).cloned() + } + + /// Create a compact merkle proof needed to prove a single key and its value are in the trie. + /// Returns `None` if the nodes within the current `db` are insufficient to create a + /// proof. + pub fn create_single_value_proof(&self, key: Key) -> Result, DispatchError> + where + Hashing::Out: Encode, + { + let mut encoded = Vec::with_capacity(self.db.len()); + let mut found_index = None; + + // Find the index of our key, and encode the (key, value) pair. + for (i, (k, v)) in self.db.iter().enumerate() { + // If we found the key we are looking for, save it. + if *k == key { + found_index = Some(i); + } + + encoded.push((k, v).encode()); + } + + let index = found_index.ok_or(TrieError::IncompleteDatabase)?; + let proof = merkle_proof::>, Vec>(encoded, index as u32); + Ok(proof.encode()) + } +} + +/// Verify the existence of `key` and `value` in a given trie root and proof. 
+pub fn verify_single_value_proof( + root: Hashing::Out, + proof: &[u8], + key: Key, + value: Value, +) -> Result<(), DispatchError> +where + Hashing: sp_core::Hasher, + Hashing::Out: Decode, + Key: Encode + Decode, + Value: Encode + Decode, +{ + let decoded_proof: MerkleProof> = + Decode::decode(&mut &proof[..]).map_err(|_| TrieError::IncompleteProof)?; + if root != decoded_proof.root { + return Err(TrieError::RootMismatch.into()); + } + + if (&key, &value).encode() != decoded_proof.leaf { + return Err(TrieError::ValueMismatch.into()); + } + + if verify_proof::( + &decoded_proof.root, + decoded_proof.proof, + decoded_proof.number_of_leaves, + decoded_proof.leaf_index, + &decoded_proof.leaf, + ) { + Ok(()) + } else { + Err(TrieError::IncompleteProof.into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::traits::BlakeTwo256; + use sp_core::H256; + use sp_std::collections::btree_map::BTreeMap; + + // A trie which simulates a trie of accounts (u32) and balances (u128). + type BalanceTrie = BasicProvingTrie; + + // The expected root hash for an empty trie. + fn empty_root() -> H256 { + let tree = BalanceTrie::generate_for(Vec::new()).unwrap(); + *tree.root() + } + + fn create_balance_trie() -> BalanceTrie { + // Create a map of users and their balances. + let mut map = BTreeMap::::new(); + for i in 0..100u32 { + map.insert(i, i.into()); + } + + // Put items into the trie. + let balance_trie = BalanceTrie::generate_for(map).unwrap(); + + // Root is changed. + let root = *balance_trie.root(); + assert!(root != empty_root()); + + // Assert valid keys are queryable. + assert_eq!(balance_trie.query(6u32), Some(6u128)); + assert_eq!(balance_trie.query(9u32), Some(9u128)); + assert_eq!(balance_trie.query(69u32), Some(69u128)); + + balance_trie + } + + #[test] + fn empty_trie_works() { + let empty_trie = BalanceTrie::generate_for(Vec::new()).unwrap(); + assert_eq!(*empty_trie.root(), empty_root()); + } + + #[test] + fn basic_end_to_end_single_value() { + let balance_trie = create_balance_trie(); + let root = *balance_trie.root(); + + // Create a proof for a valid key. + let proof = balance_trie.create_single_value_proof(6u32).unwrap(); + + // Assert key is provable, all other keys are invalid. + for i in 0..200u32 { + if i == 6 { + assert_eq!( + verify_single_value_proof::(root, &proof, i, u128::from(i)), + Ok(()) + ); + // Wrong value is invalid. + assert_eq!( + verify_single_value_proof::( + root, + &proof, + i, + u128::from(i + 1) + ), + Err(TrieError::ValueMismatch.into()) + ); + } else { + assert!(verify_single_value_proof::( + root, + &proof, + i, + u128::from(i) + ) + .is_err()); + } + } + } + + #[test] + fn proof_fails_with_bad_data() { + let balance_trie = create_balance_trie(); + let root = *balance_trie.root(); + + // Create a proof for a valid key. 
+ let proof = balance_trie.create_single_value_proof(6u32).unwrap(); + + // Correct data verifies successfully + assert_eq!( + verify_single_value_proof::(root, &proof, 6u32, 6u128), + Ok(()) + ); + + // Fail to verify proof with wrong root + assert_eq!( + verify_single_value_proof::(Default::default(), &proof, 6u32, 6u128), + Err(TrieError::RootMismatch.into()) + ); + + // Fail to verify proof with wrong data + assert_eq!( + verify_single_value_proof::(root, &[], 6u32, 6u128), + Err(TrieError::IncompleteProof.into()) + ); + } +} diff --git a/substrate/primitives/runtime/src/proving_trie/mod.rs b/substrate/primitives/runtime/src/proving_trie/mod.rs new file mode 100644 index 000000000000..60f11645a48f --- /dev/null +++ b/substrate/primitives/runtime/src/proving_trie/mod.rs @@ -0,0 +1,113 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types for merkle tries compatible with the runtime. + +pub mod base16; +pub mod base2; + +use crate::{Decode, Encode, MaxEncodedLen, TypeInfo}; +#[cfg(feature = "serde")] +use crate::{Deserialize, Serialize}; +use sp_trie::{trie_types::TrieError as SpTrieError, VerifyError}; + +/// A runtime friendly error type for tries. +#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum TrieError { + /* From TrieError */ + /// Attempted to create a trie with a state root not in the DB. + InvalidStateRoot, + /// Trie item not found in the database, + IncompleteDatabase, + /// A value was found in the trie with a nibble key that was not byte-aligned. + ValueAtIncompleteKey, + /// Corrupt Trie item. + DecoderError, + /// Hash is not value. + InvalidHash, + /* From VerifyError */ + /// The statement being verified contains multiple key-value pairs with the same key. + DuplicateKey, + /// The proof contains at least one extraneous node. + ExtraneousNode, + /// The proof contains at least one extraneous value which should have been omitted from the + /// proof. + ExtraneousValue, + /// The proof contains at least one extraneous hash reference the should have been omitted. + ExtraneousHashReference, + /// The proof contains an invalid child reference that exceeds the hash length. + InvalidChildReference, + /// The proof indicates that an expected value was not found in the trie. + ValueMismatch, + /// The proof is missing trie nodes required to verify. + IncompleteProof, + /// The root hash computed from the proof is incorrect. + RootMismatch, + /// One of the proof nodes could not be decoded. + DecodeError, +} + +impl From> for TrieError { + fn from(error: SpTrieError) -> Self { + match error { + SpTrieError::InvalidStateRoot(..) => Self::InvalidStateRoot, + SpTrieError::IncompleteDatabase(..) => Self::IncompleteDatabase, + SpTrieError::ValueAtIncompleteKey(..) => Self::ValueAtIncompleteKey, + SpTrieError::DecoderError(..) 
=> Self::DecoderError, + SpTrieError::InvalidHash(..) => Self::InvalidHash, + } + } +} + +impl From> for TrieError { + fn from(error: VerifyError) -> Self { + match error { + VerifyError::DuplicateKey(..) => Self::DuplicateKey, + VerifyError::ExtraneousNode => Self::ExtraneousNode, + VerifyError::ExtraneousValue(..) => Self::ExtraneousValue, + VerifyError::ExtraneousHashReference(..) => Self::ExtraneousHashReference, + VerifyError::InvalidChildReference(..) => Self::InvalidChildReference, + VerifyError::ValueMismatch(..) => Self::ValueMismatch, + VerifyError::IncompleteProof => Self::IncompleteProof, + VerifyError::RootMismatch(..) => Self::RootMismatch, + VerifyError::DecodeError(..) => Self::DecodeError, + } + } +} + +impl From for &'static str { + fn from(e: TrieError) -> &'static str { + match e { + TrieError::InvalidStateRoot => "The state root is not in the database.", + TrieError::IncompleteDatabase => "A trie item was not found in the database.", + TrieError::ValueAtIncompleteKey => + "A value was found with a key that is not byte-aligned.", + TrieError::DecoderError => "A corrupt trie item was encountered.", + TrieError::InvalidHash => "The hash does not match the expected value.", + TrieError::DuplicateKey => "The proof contains duplicate keys.", + TrieError::ExtraneousNode => "The proof contains extraneous nodes.", + TrieError::ExtraneousValue => "The proof contains extraneous values.", + TrieError::ExtraneousHashReference => "The proof contains extraneous hash references.", + TrieError::InvalidChildReference => "The proof contains an invalid child reference.", + TrieError::ValueMismatch => "The proof indicates a value mismatch.", + TrieError::IncompleteProof => "The proof is incomplete.", + TrieError::RootMismatch => "The root hash computed from the proof is incorrect.", + TrieError::DecodeError => "One of the proof nodes could not be decoded.", + } + } +}
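The mock runtime above sets `type VerifyExistenceProof = BinaryMerkleTreeProver<...>`, but that prover's definition is not included in this excerpt. The following is a hedged sketch of how such a prover could satisfy `frame_support::traits::VerifyExistenceProof` (which, per this diff, returns `Result<Vec<u8>, ()>`) using the same `binary_merkle_tree` calls as `base2.rs`; the name `ExampleBinaryMerkleTreeProver` is hypothetical and the real `BinaryMerkleTreeProver` may differ:

```rust
use binary_merkle_tree::MerkleProof;
use codec::{Decode, Encode};
use frame_support::traits::VerifyExistenceProof;
use sp_core::Hasher;

/// Hypothetical stand-in for the `BinaryMerkleTreeProver` referenced by the mock runtime.
pub struct ExampleBinaryMerkleTreeProver<H>(core::marker::PhantomData<H>);

impl<H: Hasher> VerifyExistenceProof for ExampleBinaryMerkleTreeProver<H>
where
    H::Out: Decode + Encode,
{
    // The proof carries the root, the leaf and its position, as produced by `merkle_proof`.
    type Proof = MerkleProof<H::Out, Vec<u8>>;
    type Hash = H::Out;

    fn verify_proof(proof: Self::Proof, root: &Self::Hash) -> Result<Vec<u8>, ()> {
        // The root embedded in the proof must match the root the distribution was minted with.
        if proof.root != *root {
            return Err(());
        }

        // Check the merkle path from the leaf up to the root.
        if binary_merkle_tree::verify_proof::<H, _, _>(
            &proof.root,
            proof.proof,
            proof.number_of_leaves,
            proof.leaf_index,
            &proof.leaf,
        ) {
            // Hand the raw leaf back to the caller; `do_claim_distribution` decodes it.
            Ok(proof.leaf)
        } else {
            Err(())
        }
    }
}
```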