From 8d29cd71aa0ee81ceeed6a01ad056afbe8b119cd Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Thu, 25 Dec 2025 00:51:53 -0600 Subject: [PATCH 01/13] chore: Release hypercore version 0.15.0 --- CHANGELOG.md | 13 ++++++++++++- Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ad12e7dc..c09999ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +### Changed + +### Removed + + + +## [0.15.0] - 2025-12-25 + +### Added + * More impl's of `CompactEncoding`. ### Changed @@ -480,5 +490,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ``` -[Unreleased]: https://github.com/datrs/hypercore/compare/v0.14.0...HEAD +[Unreleased]: https://github.com/datrs/hypercore/compare/v0.15.0...HEAD +[0.15.0]: https://github.com/datrs/hypercore/compare/v0.14.0...v0.15.0 [0.14.0]: https://github.com/datrs/hypercore/compare/v0.14.0...v0.13.0 diff --git a/Cargo.toml b/Cargo.toml index a47ed08f..60675d45 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hypercore" -version = "0.14.0" +version = "0.15.0" license = "MIT OR Apache-2.0" description = "Secure, distributed, append-only log" documentation = "https://docs.rs/hypercore" From ffb969be58893cae03858dd1cf6a3bfcda805735 Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 16:12:42 -0500 Subject: [PATCH 02/13] 2024 edition --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 60675d45..61991325 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ categories = [ "data-structures", "encoding", ] -edition = "2021" +edition = "2024" [dependencies] blake2 = "0.10" From 8d203bf1d6044189e4414ff6d8dbe406fbd24352 Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 16:13:34 -0500 Subject: [PATCH 03/13] get_random_access -> 
get_random_access_mut --- src/storage/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/storage/mod.rs b/src/storage/mod.rs index ad4b68ac..23b65574 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -127,12 +127,12 @@ impl Storage { return Ok(vec![]); } let mut current_store: Store = info_instructions[0].store.clone(); - let mut storage = self.get_random_access(¤t_store); + let mut storage = self.get_random_access_mut(¤t_store); let mut infos: Vec = Vec::with_capacity(info_instructions.len()); for instruction in info_instructions.iter() { if instruction.store != current_store { current_store = instruction.store.clone(); - storage = self.get_random_access(¤t_store); + storage = self.get_random_access_mut(¤t_store); } match instruction.info_type { StoreInfoType::Content => { @@ -193,11 +193,11 @@ impl Storage { return Ok(()); } let mut current_store: Store = infos[0].store.clone(); - let mut storage = self.get_random_access(¤t_store); + let mut storage = self.get_random_access_mut(¤t_store); for info in infos.iter() { if info.store != current_store { current_store = info.store.clone(); - storage = self.get_random_access(¤t_store); + storage = self.get_random_access_mut(¤t_store); } match info.info_type { StoreInfoType::Content => { @@ -233,7 +233,7 @@ impl Storage { Ok(()) } - fn get_random_access(&mut self, store: &Store) -> &mut Box { + fn get_random_access_mut(&mut self, store: &Store) -> &mut Box { match store { Store::Tree => &mut self.tree, Store::Data => &mut self.data, From bad48fc5aa88f48e6c5caba71303bf21800a2eed Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 16:13:59 -0500 Subject: [PATCH 04/13] unneeded mut --- src/tree/merkle_tree.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tree/merkle_tree.rs b/src/tree/merkle_tree.rs index 87cb22be..d76bd685 100644 --- a/src/tree/merkle_tree.rs +++ b/src/tree/merkle_tree.rs @@ -333,7 +333,7 @@ impl MerkleTree { /// 
https://github.com/holepunchto/hypercore/blob/9ce03363cb8938dbab53baba7d7cc9dde0508a7e/lib/merkle-tree.js#L1181 /// The implementation should be rewritten to make it clearer. pub(crate) fn create_valueless_proof( - &mut self, + &self, block: Option<&RequestBlock>, hash: Option<&RequestBlock>, seek: Option<&RequestSeek>, @@ -1297,7 +1297,7 @@ impl MerkleTree { } fn infos_to_nodes( - &mut self, + &self, infos: Option<&[StoreInfo]>, ) -> Result>, HypercoreError> { match infos { From f8447e378b34f344f6e3023de7ea06d1d193c3ea Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 16:14:19 -0500 Subject: [PATCH 05/13] cargo fmt --- benches/disk.rs | 2 +- benches/memory.rs | 2 +- examples/replication.rs | 10 ++++++---- src/bitfield/dynamic.rs | 4 ++-- src/builder.rs | 2 +- src/core.rs | 20 +++++++++++--------- src/crypto/hash.rs | 4 ++-- src/crypto/mod.rs | 6 +++--- src/encoding.rs | 6 +++--- src/lib.rs | 6 +++--- src/oplog/entry.rs | 6 +++--- src/oplog/header.rs | 12 ++++++------ src/oplog/mod.rs | 8 +++++--- src/replication/events.rs | 4 ++-- src/storage/mod.rs | 7 ++----- src/tree/merkle_tree.rs | 10 +++++----- src/tree/merkle_tree_changeset.rs | 5 +++-- tests/common/mod.rs | 2 +- tests/core.rs | 14 ++++++++------ tests/model.rs | 6 +----- 20 files changed, 69 insertions(+), 67 deletions(-) diff --git a/benches/disk.rs b/benches/disk.rs index e465dfc8..3292df03 100644 --- a/benches/disk.rs +++ b/benches/disk.rs @@ -2,7 +2,7 @@ use std::time::{Duration, Instant}; #[cfg(feature = "async-std")] use criterion::async_executor::AsyncStdExecutor; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; use hypercore::{Hypercore, HypercoreBuilder, HypercoreError, Storage}; use tempfile::Builder as TempfileBuilder; diff --git a/benches/memory.rs b/benches/memory.rs index 7c916fb8..ac8015a5 100644 --- a/benches/memory.rs +++ b/benches/memory.rs @@ -2,7 +2,7 @@ use 
std::time::{Duration, Instant}; #[cfg(feature = "async-std")] use criterion::async_executor::AsyncStdExecutor; -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; use hypercore::{Hypercore, HypercoreBuilder, HypercoreError, Storage}; use random_access_memory::RandomAccessMemory; diff --git a/examples/replication.rs b/examples/replication.rs index 19b393e0..3d058551 100644 --- a/examples/replication.rs +++ b/examples/replication.rs @@ -99,10 +99,12 @@ async fn replicate_index( .expect("Creating proof error") .expect("Could not get proof"); // Then the proof is verified and applied to the replicated party. - assert!(replicated_hypercore - .verify_and_apply_proof(&proof) - .await - .expect("Verifying and applying proof failed")); + assert!( + replicated_hypercore + .verify_and_apply_proof(&proof) + .await + .expect("Verifying and applying proof failed") + ); } fn format_res(res: Result>, HypercoreError>) -> String { diff --git a/src/bitfield/dynamic.rs b/src/bitfield/dynamic.rs index 6c827c47..5012af00 100644 --- a/src/bitfield/dynamic.rs +++ b/src/bitfield/dynamic.rs @@ -1,7 +1,7 @@ -use super::fixed::{FixedBitfield, FIXED_BITFIELD_BITS_LENGTH, FIXED_BITFIELD_LENGTH}; +use super::fixed::{FIXED_BITFIELD_BITS_LENGTH, FIXED_BITFIELD_LENGTH, FixedBitfield}; use crate::{ - common::{BitfieldUpdate, StoreInfo, StoreInfoInstruction, StoreInfoType}, Store, + common::{BitfieldUpdate, StoreInfo, StoreInfoInstruction, StoreInfoType}, }; use futures::future::Either; use std::{cell::RefCell, convert::TryInto}; diff --git a/src/builder.rs b/src/builder.rs index 37af78e7..07a75601 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -5,7 +5,7 @@ use tracing::instrument; #[cfg(feature = "cache")] use crate::common::cache::CacheOptions; -use crate::{core::HypercoreOptions, Hypercore, HypercoreError, PartialKeypair, Storage}; +use crate::{Hypercore, HypercoreError, PartialKeypair, Storage, 
core::HypercoreOptions}; /// Build CacheOptions. #[cfg(feature = "cache")] diff --git a/src/core.rs b/src/core.rs index cf820491..d0f430e5 100644 --- a/src/core.rs +++ b/src/core.rs @@ -8,14 +8,14 @@ use tracing::instrument; #[cfg(feature = "cache")] use crate::common::cache::CacheOptions; use crate::{ + RequestBlock, RequestSeek, RequestUpgrade, bitfield::Bitfield, common::{BitfieldUpdate, HypercoreError, NodeByteRange, Proof, StoreInfo, ValuelessProof}, - crypto::{generate_signing_key, PartialKeypair}, + crypto::{PartialKeypair, generate_signing_key}, data::BlockStore, - oplog::{Header, Oplog, MAX_OPLOG_ENTRIES_BYTE_SIZE}, + oplog::{Header, MAX_OPLOG_ENTRIES_BYTE_SIZE, Oplog}, storage::Storage, tree::{MerkleTree, MerkleTreeChangeset}, - RequestBlock, RequestSeek, RequestUpgrade, }; #[derive(Debug)] @@ -884,8 +884,8 @@ pub(crate) mod tests { } #[async_std::test] - async fn core_create_proof_block_and_upgrade_from_existing_state_with_additional( - ) -> Result<(), HypercoreError> { + async fn core_create_proof_block_and_upgrade_from_existing_state_with_additional() + -> Result<(), HypercoreError> { let mut hypercore = create_hypercore_with_data(10).await?; let proof = hypercore .create_proof( @@ -1084,10 +1084,12 @@ pub(crate) mod tests { ) .await? 
.unwrap(); - assert!(hypercore_clone - .verify_and_apply_proof(&proof) - .await - .is_err()); + assert!( + hypercore_clone + .verify_and_apply_proof(&proof) + .await + .is_err() + ); Ok(()) } diff --git a/src/crypto/hash.rs b/src/crypto/hash.rs index 26533617..cc09ee8c 100644 --- a/src/crypto/hash.rs +++ b/src/crypto/hash.rs @@ -1,9 +1,9 @@ use blake2::{ - digest::{generic_array::GenericArray, typenum::U32, FixedOutput}, Blake2b, Blake2bMac, Digest, + digest::{FixedOutput, generic_array::GenericArray, typenum::U32}, }; use byteorder::{BigEndian, WriteBytesExt}; -use compact_encoding::{as_array, to_encoded_bytes, EncodingError, FixedWidthEncoding}; +use compact_encoding::{EncodingError, FixedWidthEncoding, as_array, to_encoded_bytes}; use ed25519_dalek::VerifyingKey; use merkle_tree_stream::Node as NodeTrait; use std::convert::AsRef; diff --git a/src/crypto/mod.rs b/src/crypto/mod.rs index 1bf2ab5b..6ec1ffe8 100644 --- a/src/crypto/mod.rs +++ b/src/crypto/mod.rs @@ -4,6 +4,6 @@ mod hash; mod key_pair; mod manifest; -pub(crate) use hash::{signable_tree, Hash}; -pub use key_pair::{generate as generate_signing_key, sign, verify, PartialKeypair}; -pub(crate) use manifest::{default_signer_manifest, Manifest, ManifestSigner}; +pub(crate) use hash::{Hash, signable_tree}; +pub use key_pair::{PartialKeypair, generate as generate_signing_key, sign, verify}; +pub(crate) use manifest::{Manifest, ManifestSigner, default_signer_manifest}; diff --git a/src/encoding.rs b/src/encoding.rs index b54fd057..d9571335 100644 --- a/src/encoding.rs +++ b/src/encoding.rs @@ -1,11 +1,11 @@ //! 
Hypercore-specific compact encodings use crate::{ - crypto::{Manifest, ManifestSigner}, DataBlock, DataHash, DataSeek, DataUpgrade, Node, RequestBlock, RequestSeek, RequestUpgrade, + crypto::{Manifest, ManifestSigner}, }; use compact_encoding::{ - as_array, encode_bytes_fixed, encoded_size_usize, map_decode, map_encode, sum_encoded_size, - take_array, write_slice, CompactEncoding, EncodingError, EncodingErrorKind, VecEncodable, + CompactEncoding, EncodingError, EncodingErrorKind, VecEncodable, as_array, encode_bytes_fixed, + encoded_size_usize, map_decode, map_encode, sum_encoded_size, take_array, write_slice, }; impl CompactEncoding for Node { diff --git a/src/lib.rs b/src/lib.rs index eae3b218..2930c144 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -95,9 +95,9 @@ pub use crate::common::{ RequestSeek, RequestUpgrade, Store, }; pub use crate::core::{AppendOutcome, Hypercore, Info}; -pub use crate::crypto::{generate_signing_key, sign, verify, PartialKeypair}; +pub use crate::crypto::{PartialKeypair, generate_signing_key, sign, verify}; pub use crate::storage::{Storage, StorageTraits}; pub use ed25519_dalek::{ - SecretKey, Signature, SigningKey, VerifyingKey, KEYPAIR_LENGTH, PUBLIC_KEY_LENGTH, - SECRET_KEY_LENGTH, + KEYPAIR_LENGTH, PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH, SecretKey, Signature, SigningKey, + VerifyingKey, }; diff --git a/src/oplog/entry.rs b/src/oplog/entry.rs index c8eeac41..0c6ba426 100644 --- a/src/oplog/entry.rs +++ b/src/oplog/entry.rs @@ -1,9 +1,9 @@ use compact_encoding::{ - map_decode, map_encode, sum_encoded_size, take_array, take_array_mut, write_array, - CompactEncoding, EncodingError, + CompactEncoding, EncodingError, map_decode, map_encode, sum_encoded_size, take_array, + take_array_mut, write_array, }; -use crate::{common::BitfieldUpdate, Node}; +use crate::{Node, common::BitfieldUpdate}; /// Entry tree upgrade #[derive(Debug)] diff --git a/src/oplog/header.rs b/src/oplog/header.rs index c7ad6c2d..48b2aa35 100644 --- a/src/oplog/header.rs +++ 
b/src/oplog/header.rs @@ -1,13 +1,13 @@ use compact_encoding::{ - decode_usize, map_decode, take_array, write_array, CompactEncoding, EncodingError, + CompactEncoding, EncodingError, decode_usize, map_decode, take_array, write_array, }; use compact_encoding::{map_encode, sum_encoded_size}; -use ed25519_dalek::{SigningKey, PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH}; +use ed25519_dalek::{PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH, SigningKey}; -use crate::crypto::default_signer_manifest; -use crate::crypto::Manifest; use crate::PartialKeypair; use crate::VerifyingKey; +use crate::crypto::Manifest; +use crate::crypto::default_signer_manifest; /// Oplog header. #[derive(Debug, Clone)] @@ -165,7 +165,7 @@ impl CompactEncoding for PartialKeypair { len => { return Err(EncodingError::invalid_data(&format!( "Incorrect public key length while decoding. length = [{len}] expected [{PUBLIC_KEY_LENGTH}]" - ))) + ))); } }; let (sk_len, rest) = decode_usize(rest)?; @@ -180,7 +180,7 @@ impl CompactEncoding for PartialKeypair { len => { return Err(EncodingError::invalid_data(&format!( "Incorrect secret key length while decoding. 
length = [{len}] expected [{FULL_SIGNING_KEY_LENGTH}]" - ))) + ))); } }; Ok((PartialKeypair { public, secret }, rest)) diff --git a/src/oplog/mod.rs b/src/oplog/mod.rs index eb64cfb9..36d83ff8 100644 --- a/src/oplog/mod.rs +++ b/src/oplog/mod.rs @@ -1,6 +1,6 @@ use compact_encoding::{ - as_array_mut, get_slices_checked, get_slices_mut_checked, map_decode, take_array_mut, - CompactEncoding, FixedWidthEncoding, FixedWidthU32, + CompactEncoding, FixedWidthEncoding, FixedWidthU32, as_array_mut, get_slices_checked, + get_slices_mut_checked, map_decode, take_array_mut, }; use futures::future::Either; use std::convert::{TryFrom, TryInto}; @@ -412,7 +412,9 @@ impl Oplog { let calculated_checksum = crc32fast::hash(to_hash); if calculated_checksum != stored_checksum { return Err(HypercoreError::InvalidChecksum { - context: format!("Calculated signature [{calculated_checksum}] does not match oplog signature [{stored_checksum}]"), + context: format!( + "Calculated signature [{calculated_checksum}] does not match oplog signature [{stored_checksum}]" + ), }); }; Ok(Some(ValidateLeaderOutcome { diff --git a/src/replication/events.rs b/src/replication/events.rs index b9c07df2..09f8320f 100644 --- a/src/replication/events.rs +++ b/src/replication/events.rs @@ -1,6 +1,6 @@ //! 
events related to replication -use crate::{common::BitfieldUpdate, HypercoreError}; -use async_broadcast::{broadcast, InactiveReceiver, Receiver, Sender}; +use crate::{HypercoreError, common::BitfieldUpdate}; +use async_broadcast::{InactiveReceiver, Receiver, Sender, broadcast}; static MAX_EVENT_QUEUE_CAPACITY: usize = 32; diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 23b65574..749192b9 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -11,8 +11,8 @@ use std::path::PathBuf; use tracing::instrument; use crate::{ - common::{Store, StoreInfo, StoreInfoInstruction, StoreInfoType}, HypercoreError, + common::{Store, StoreInfo, StoreInfoInstruction, StoreInfoType}, }; /// Supertrait for Storage @@ -157,10 +157,7 @@ impl Storage { Err(HypercoreError::InvalidOperation { context: format!( "Could not read from store {}, index {} / length {} is out of bounds for store length {}", - current_store, - instruction.index, - read_length, - length + current_store, instruction.index, read_length, length ), }) } diff --git a/src/tree/merkle_tree.rs b/src/tree/merkle_tree.rs index d76bd685..a7f7424a 100644 --- a/src/tree/merkle_tree.rs +++ b/src/tree/merkle_tree.rs @@ -1,5 +1,5 @@ use compact_encoding::{ - as_array, map_decode, to_encoded_bytes, EncodingError, FixedWidthEncoding, FixedWidthU64, + EncodingError, FixedWidthEncoding, FixedWidthU64, as_array, map_decode, to_encoded_bytes, }; use ed25519_dalek::Signature; use futures::future::Either; @@ -14,11 +14,11 @@ use crate::common::{HypercoreError, NodeByteRange, Proof, ValuelessProof}; use crate::crypto::Hash; use crate::oplog::HeaderTree; use crate::{ - common::{StoreInfo, StoreInfoInstruction}, - Node, VerifyingKey, + DataBlock, DataHash, DataSeek, DataUpgrade, RequestBlock, RequestSeek, RequestUpgrade, Store, }; use crate::{ - DataBlock, DataHash, DataSeek, DataUpgrade, RequestBlock, RequestSeek, RequestUpgrade, Store, + Node, VerifyingKey, + common::{StoreInfo, StoreInfoInstruction}, }; use 
super::MerkleTreeChangeset; @@ -1474,7 +1474,7 @@ fn index_from_info(info: &StoreInfo) -> u64 { fn node_from_bytes(index: &u64, data: &[u8]) -> Result { let len_buf = &data[..8]; let hash = &data[8..]; - let len = map_decode!(len_buf, [FixedWidthU64<'_>]).0 .0; + let len = map_decode!(len_buf, [FixedWidthU64<'_>]).0.0; Ok(Node::new(*index, hash.to_vec(), len)) } diff --git a/src/tree/merkle_tree_changeset.rs b/src/tree/merkle_tree_changeset.rs index 93053028..aea217f3 100644 --- a/src/tree/merkle_tree_changeset.rs +++ b/src/tree/merkle_tree_changeset.rs @@ -2,8 +2,9 @@ use ed25519_dalek::{Signature, SigningKey, VerifyingKey}; use std::convert::TryFrom; use crate::{ - crypto::{signable_tree, verify, Hash}, - sign, HypercoreError, Node, + HypercoreError, Node, + crypto::{Hash, signable_tree, verify}, + sign, }; /// Changeset for a `MerkleTree`. This allows to incrementally change a `MerkleTree` in two steps: diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 247b13c2..02422e71 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use ed25519_dalek::{SigningKey, VerifyingKey, PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH}; +use ed25519_dalek::{PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH, SigningKey, VerifyingKey}; use sha2::{Digest, Sha256}; use std::io::prelude::*; use std::path::Path; diff --git a/tests/core.rs b/tests/core.rs index f3e8d2ec..7b39a76b 100644 --- a/tests/core.rs +++ b/tests/core.rs @@ -33,12 +33,14 @@ async fn hypercore_new_with_key_pair() -> Result<()> { async fn hypercore_open_with_key_pair_error() -> Result<()> { let storage = Storage::new_memory().await?; let key_pair = get_test_key_pair(); - assert!(HypercoreBuilder::new(storage) - .key_pair(key_pair) - .open(true) - .build() - .await - .is_err()); + assert!( + HypercoreBuilder::new(storage) + .key_pair(key_pair) + .open(true) + .build() + .await + .is_err() + ); Ok(()) } diff --git a/tests/model.rs b/tests/model.rs index e6a52fed..86a74657 100644 --- 
a/tests/model.rs +++ b/tests/model.rs @@ -99,11 +99,7 @@ async fn assert_implementation_matches_model(ops: Vec) -> bool { let start = { let result = model.len() as u64 / len_divisor_for_start as u64; if result == model.len() as u64 { - if !model.is_empty() { - result - 1 - } else { - 0 - } + if !model.is_empty() { result - 1 } else { 0 } } else { result } From 4f125211566400191b7f0e908f999637b5d87f6a Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 16:14:45 -0500 Subject: [PATCH 06/13] cargo clippy --fix --- src/bitfield/dynamic.rs | 20 ++++++++------------ src/tree/merkle_tree.rs | 25 ++++++++++--------------- 2 files changed, 18 insertions(+), 27 deletions(-) diff --git a/src/bitfield/dynamic.rs b/src/bitfield/dynamic.rs index 5012af00..da85e04a 100644 --- a/src/bitfield/dynamic.rs +++ b/src/bitfield/dynamic.rs @@ -176,22 +176,20 @@ impl DynamicBitfield { // not pages that don't exist, as they can't possibly contain the value. // To keep the common case fast, first try the same page as the position - if let Some(p) = self.pages.get(first_page) { - if let Some(index) = p.borrow().index_of(value, first_index as u32) { + if let Some(p) = self.pages.get(first_page) + && let Some(index) = p.borrow().index_of(value, first_index as u32) { return Some(first_page * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); }; - } // It wasn't found on the first page, now get the keys that are bigger // than the given index and sort them. 
let mut keys: Vec<&u64> = self.pages.keys().filter(|key| **key > first_page).collect(); keys.sort(); for key in keys { - if let Some(p) = self.pages.get(*key) { - if let Some(index) = p.borrow().index_of(value, 0) { + if let Some(p) = self.pages.get(*key) + && let Some(index) = p.borrow().index_of(value, 0) { return Some(key * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); }; - } } } else { // Searching for the false value is easier as it is automatically hit on @@ -223,11 +221,10 @@ impl DynamicBitfield { // not pages that don't exist, as they can't possibly contain the value. // To keep the common case fast, first try the same page as the position - if let Some(p) = self.pages.get(last_page) { - if let Some(index) = p.borrow().last_index_of(value, last_index as u32) { + if let Some(p) = self.pages.get(last_page) + && let Some(index) = p.borrow().last_index_of(value, last_index as u32) { return Some(last_page * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); }; - } // It wasn't found on the last page, now get the keys that are smaller // than the given index and sort them. 
@@ -236,14 +233,13 @@ impl DynamicBitfield { keys.reverse(); for key in keys { - if let Some(p) = self.pages.get(*key) { - if let Some(index) = p + if let Some(p) = self.pages.get(*key) + && let Some(index) = p .borrow() .last_index_of(value, FIXED_BITFIELD_BITS_LENGTH as u32 - 1) { return Some(key * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); }; - } } } else { // Searching for the false value is easier as it is automatically hit on diff --git a/src/tree/merkle_tree.rs b/src/tree/merkle_tree.rs index a7f7424a..934c54df 100644 --- a/src/tree/merkle_tree.rs +++ b/src/tree/merkle_tree.rs @@ -241,11 +241,10 @@ impl MerkleTree { let mut parent: Option = None; for node in &changeset.nodes { if node.index == iter.index() { - if is_right { - if let Some(parent) = parent { + if is_right + && let Some(parent) = parent { tree_offset += node.length - parent.length; } - } parent = Some(node.clone()); is_right = iter.is_right(); iter.parent(); @@ -410,8 +409,8 @@ impl MerkleTree { sub_tree = indexed.index; } } - if !untrusted_sub_tree { - if let Some(seek) = seek.as_ref() { + if !untrusted_sub_tree + && let Some(seek) = seek.as_ref() { let index_or_instructions = self.seek_from_head(to, seek.bytes, &nodes)?; sub_tree = match index_or_instructions { Either::Left(new_instructions) => { @@ -421,7 +420,6 @@ impl MerkleTree { Either::Right(index) => index, }; } - } if upgrade.is_some() { if let Either::Left(new_instructions) = self.upgrade_proof( @@ -436,13 +434,12 @@ impl MerkleTree { instructions.extend(new_instructions); } - if head > to { - if let Either::Left(new_instructions) = + if head > to + && let Either::Left(new_instructions) = self.additional_upgrade_proof(to, head, &mut p, &nodes)? 
{ instructions.extend(new_instructions); } - } } if instructions.is_empty() { @@ -520,8 +517,8 @@ impl MerkleTree { proof.seek.as_ref(), &mut changeset, )?; - if let Some(upgrade) = proof.upgrade.as_ref() { - if verify_upgrade( + if let Some(upgrade) = proof.upgrade.as_ref() + && verify_upgrade( proof.fork, upgrade, unverified_block_root_node.as_ref(), @@ -530,7 +527,6 @@ impl MerkleTree { )? { unverified_block_root_node = None; } - } if let Some(unverified_block_root_node) = unverified_block_root_node { let node_or_instruction = @@ -1354,8 +1350,8 @@ fn verify_tree( let mut root: Option = None; - if let Some(seek) = seek { - if !seek.nodes.is_empty() { + if let Some(seek) = seek + && !seek.nodes.is_empty() { let mut iter = flat_tree::Iterator::new(seek.nodes[0].index); let mut q = NodeQueue::new(seek.nodes.clone(), None); let node = q.shift(iter.index())?; @@ -1370,7 +1366,6 @@ fn verify_tree( } root = Some(current_root); } - } if let Some(untrusted_node) = untrusted_node { let mut iter = flat_tree::Iterator::new(untrusted_node.index); From 4eaafb8bb7bb6a13e5089ecc50824e8f3b72e020 Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 16:16:30 -0500 Subject: [PATCH 07/13] Add .clippy.toml Add allow breaking api changes. We want to fix these preemptively before deploying them. --- .clippy.toml | 1 + 1 file changed, 1 insertion(+) create mode 100644 .clippy.toml diff --git a/.clippy.toml b/.clippy.toml new file mode 100644 index 00000000..cda8d17e --- /dev/null +++ b/.clippy.toml @@ -0,0 +1 @@ +avoid-breaking-exported-api = false From 730b1b22be62d80cd5e34a68ffb960c3efcfa61f Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 16:17:07 -0500 Subject: [PATCH 08/13] new lints! 
--- src/lib.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 2930c144..3140c5d7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,10 +1,21 @@ -#![forbid(unsafe_code, future_incompatible)] -#![forbid(rust_2018_idioms, rust_2018_compatibility)] -#![forbid(missing_debug_implementations)] -#![forbid(missing_docs)] -#![warn(unreachable_pub)] +#![forbid( + unsafe_code, + future_incompatible, + rust_2018_idioms, + rust_2018_compatibility, + missing_debug_implementations, + missing_docs +)] #![cfg_attr(test, deny(warnings))] #![doc(test(attr(deny(warnings))))] +#![warn( + unreachable_pub, + redundant_lifetimes, + non_local_definitions, + clippy::needless_pass_by_value, + clippy::needless_pass_by_ref_mut, + clippy::enum_glob_use +)] //! ## Introduction //! From 9e4befc77663da1bab590709b41fc5cd5422cc9b Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 16:19:15 -0500 Subject: [PATCH 09/13] rm needless mut --- src/data/mod.rs | 2 +- src/oplog/mod.rs | 2 +- src/tree/merkle_tree.rs | 82 ++++++++++++++++++++--------------------- 3 files changed, 43 insertions(+), 43 deletions(-) diff --git a/src/data/mod.rs b/src/data/mod.rs index fa70a904..4c9d75f7 100644 --- a/src/data/mod.rs +++ b/src/data/mod.rs @@ -40,7 +40,7 @@ impl BlockStore { } /// Clears a segment, returns infos to write to storage. 
- pub(crate) fn clear(&mut self, start: u64, length: u64) -> StoreInfo { + pub(crate) fn clear(&self, start: u64, length: u64) -> StoreInfo { StoreInfo::new_delete(Store::Data, start, length) } } diff --git a/src/oplog/mod.rs b/src/oplog/mod.rs index 36d83ff8..3fb975c8 100644 --- a/src/oplog/mod.rs +++ b/src/oplog/mod.rs @@ -206,7 +206,7 @@ impl Oplog { } pub(crate) fn update_header_with_changeset( - &mut self, + &self, changeset: &MerkleTreeChangeset, bitfield_update: Option, header: &mut Header, diff --git a/src/tree/merkle_tree.rs b/src/tree/merkle_tree.rs index 934c54df..608a5853 100644 --- a/src/tree/merkle_tree.rs +++ b/src/tree/merkle_tree.rs @@ -168,7 +168,7 @@ impl MerkleTree { /// Get storage byte range of given hypercore index pub(crate) fn byte_range( - &mut self, + &self, hypercore_index: u64, infos: Option<&[StoreInfo]>, ) -> Result, NodeByteRange>, HypercoreError> { @@ -216,7 +216,7 @@ impl MerkleTree { /// Get the byte offset given hypercore index pub(crate) fn byte_offset( - &mut self, + &self, hypercore_index: u64, infos: Option<&[StoreInfo]>, ) -> Result, u64>, HypercoreError> { @@ -226,7 +226,7 @@ impl MerkleTree { /// Get the byte offset of hypercore index in a changeset pub(crate) fn byte_offset_in_changeset( - &mut self, + &self, hypercore_index: u64, changeset: &MerkleTreeChangeset, infos: Option<&[StoreInfo]>, @@ -241,10 +241,9 @@ impl MerkleTree { let mut parent: Option = None; for node in &changeset.nodes { if node.index == iter.index() { - if is_right - && let Some(parent) = parent { - tree_offset += node.length - parent.length; - } + if is_right && let Some(parent) = parent { + tree_offset += node.length - parent.length; + } parent = Some(node.clone()); is_right = iter.is_right(); iter.parent(); @@ -278,7 +277,7 @@ impl MerkleTree { } pub(crate) fn truncate( - &mut self, + &self, length: u64, fork: u64, infos: Option<&[StoreInfo]>, @@ -409,17 +408,16 @@ impl MerkleTree { sub_tree = indexed.index; } } - if !untrusted_sub_tree - && let 
Some(seek) = seek.as_ref() { - let index_or_instructions = self.seek_from_head(to, seek.bytes, &nodes)?; - sub_tree = match index_or_instructions { - Either::Left(new_instructions) => { - instructions.extend(new_instructions); - return Ok(Either::Left(instructions.into_boxed_slice())); - } - Either::Right(index) => index, - }; - } + if !untrusted_sub_tree && let Some(seek) = seek.as_ref() { + let index_or_instructions = self.seek_from_head(to, seek.bytes, &nodes)?; + sub_tree = match index_or_instructions { + Either::Left(new_instructions) => { + instructions.extend(new_instructions); + return Ok(Either::Left(instructions.into_boxed_slice())); + } + Either::Right(index) => index, + }; + } if upgrade.is_some() { if let Either::Left(new_instructions) = self.upgrade_proof( @@ -437,9 +435,9 @@ impl MerkleTree { if head > to && let Either::Left(new_instructions) = self.additional_upgrade_proof(to, head, &mut p, &nodes)? - { - instructions.extend(new_instructions); - } + { + instructions.extend(new_instructions); + } } if instructions.is_empty() { @@ -502,7 +500,7 @@ impl MerkleTree { /// Verifies a proof received from a peer. pub(crate) fn verify_proof( - &mut self, + &self, proof: &Proof, public_key: &VerifyingKey, infos: Option<&[StoreInfo]>, @@ -524,9 +522,10 @@ impl MerkleTree { unverified_block_root_node.as_ref(), public_key, &mut changeset, - )? { - unverified_block_root_node = None; - } + )? + { + unverified_block_root_node = None; + } if let Some(unverified_block_root_node) = unverified_block_root_node { let node_or_instruction = @@ -558,7 +557,7 @@ impl MerkleTree { /// Attempts to get missing nodes from given index. NB: must be called in a loop. 
pub(crate) fn missing_nodes( - &mut self, + &self, index: u64, infos: Option<&[StoreInfo]>, ) -> Result, u64>, HypercoreError> { @@ -690,7 +689,7 @@ impl MerkleTree { } fn byte_offset_from_index( - &mut self, + &self, index: u64, infos: Option<&[StoreInfo]>, ) -> Result, u64>, HypercoreError> { @@ -1351,21 +1350,22 @@ fn verify_tree( let mut root: Option = None; if let Some(seek) = seek - && !seek.nodes.is_empty() { - let mut iter = flat_tree::Iterator::new(seek.nodes[0].index); - let mut q = NodeQueue::new(seek.nodes.clone(), None); - let node = q.shift(iter.index())?; - let mut current_root: Node = node.clone(); + && !seek.nodes.is_empty() + { + let mut iter = flat_tree::Iterator::new(seek.nodes[0].index); + let mut q = NodeQueue::new(seek.nodes.clone(), None); + let node = q.shift(iter.index())?; + let mut current_root: Node = node.clone(); + changeset.nodes.push(node); + while q.length > 0 { + let node = q.shift(iter.sibling())?; + let parent_node = parent_node(iter.parent(), ¤t_root, &node); + current_root = parent_node.clone(); changeset.nodes.push(node); - while q.length > 0 { - let node = q.shift(iter.sibling())?; - let parent_node = parent_node(iter.parent(), ¤t_root, &node); - current_root = parent_node.clone(); - changeset.nodes.push(node); - changeset.nodes.push(parent_node); - } - root = Some(current_root); + changeset.nodes.push(parent_node); } + root = Some(current_root); + } if let Some(untrusted_node) = untrusted_node { let mut iter = flat_tree::Iterator::new(untrusted_node.index); From 72b86aecf5428b1b07436f5f5d673e46b16f4044 Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 16:53:13 -0500 Subject: [PATCH 10/13] clippy fixes --- Cargo.toml | 2 +- src/replication/shared_core.rs | 92 +++++++++++----------------------- src/tree/merkle_tree.rs | 10 ++-- 3 files changed, 35 insertions(+), 69 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 61991325..6566ecca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,7 +61,7 @@ test-log = 
{ version = "0.2.11", default-features = false, features = ["trace"] tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] } [features] -default = ["tokio", "sparse", "replication"] +default = ["tokio", "sparse", "replication", "cache", "shared-core"] replication = ["dep:async-broadcast"] shared-core = ["replication", "dep:async-lock"] sparse = ["random-access-disk/sparse"] diff --git a/src/replication/shared_core.rs b/src/replication/shared_core.rs index f30de479..c2db5827 100644 --- a/src/replication/shared_core.rs +++ b/src/replication/shared_core.rs @@ -6,7 +6,7 @@ use crate::{ }; use async_broadcast::Receiver; use async_lock::Mutex; -use std::{future::Future, sync::Arc}; +use std::sync::Arc; use super::{ CoreInfo, CoreMethods, CoreMethodsError, Event, ReplicationMethods, ReplicationMethodsError, @@ -29,95 +29,63 @@ impl SharedCore { } impl CoreInfo for SharedCore { - fn info(&self) -> impl Future + Send { - async move { - let core = &self.0.lock().await; - core.info() - } + async fn info(&self) -> Info { + let core = &self.0.lock().await; + core.info() } - fn key_pair(&self) -> impl Future + Send { - async move { - let core = &self.0.lock().await; - core.key_pair().clone() - } + async fn key_pair(&self) -> PartialKeypair { + let core = &self.0.lock().await; + core.key_pair().clone() } } impl ReplicationMethods for SharedCore { - fn verify_and_apply_proof( - &self, - proof: &Proof, - ) -> impl Future> { - async move { - let mut core = self.0.lock().await; - Ok(core.verify_and_apply_proof(proof).await?) - } + async fn verify_and_apply_proof(&self, proof: &Proof) -> Result { + Ok(self.0.lock().await.verify_and_apply_proof(proof).await?) } - fn missing_nodes( - &self, - index: u64, - ) -> impl Future> { - async move { - let mut core = self.0.lock().await; - Ok(core.missing_nodes(index).await?) - } + async fn missing_nodes(&self, index: u64) -> Result { + Ok(self.0.lock().await.missing_nodes(index).await?) 
} - fn create_proof( + async fn create_proof( &self, block: Option, hash: Option, seek: Option, upgrade: Option, - ) -> impl Future, ReplicationMethodsError>> { - async move { - let mut core = self.0.lock().await; - Ok(core.create_proof(block, hash, seek, upgrade).await?) - } + ) -> Result, ReplicationMethodsError> { + Ok(self + .0 + .lock() + .await + .create_proof(block, hash, seek, upgrade) + .await?) } - fn event_subscribe(&self) -> impl Future> { - async move { self.0.lock().await.event_subscribe() } + async fn event_subscribe(&self) -> Receiver { + self.0.lock().await.event_subscribe() } } impl CoreMethods for SharedCore { - fn has(&self, index: u64) -> impl Future + Send { - async move { - let core = self.0.lock().await; - core.has(index) - } + async fn has(&self, index: u64) -> bool { + self.0.lock().await.has(index) } - fn get( - &self, - index: u64, - ) -> impl Future>, CoreMethodsError>> + Send { - async move { - let mut core = self.0.lock().await; - Ok(core.get(index).await?) - } + async fn get(&self, index: u64) -> Result>, CoreMethodsError> { + Ok(self.0.lock().await.get(index).await?) } - fn append( - &self, - data: &[u8], - ) -> impl Future> + Send { - async move { - let mut core = self.0.lock().await; - Ok(core.append(data).await?) - } + async fn append(&self, data: &[u8]) -> Result { + Ok(self.0.lock().await.append(data).await?) } - fn append_batch, B: AsRef<[A]> + Send>( + async fn append_batch, B: AsRef<[A]> + Send>( &self, batch: B, - ) -> impl Future> + Send { - async move { - let mut core = self.0.lock().await; - Ok(core.append_batch(batch).await?) - } + ) -> Result { + Ok(self.0.lock().await.append_batch(batch).await?) 
} } diff --git a/src/tree/merkle_tree.rs b/src/tree/merkle_tree.rs index 608a5853..ec133109 100644 --- a/src/tree/merkle_tree.rs +++ b/src/tree/merkle_tree.rs @@ -793,11 +793,10 @@ impl MerkleTree { ) -> Result>, HypercoreError> { // First check the cache #[cfg(feature = "cache")] - if let Some(node_cache) = &self.node_cache { - if let Some(node) = node_cache.get(&index) { + if let Some(node_cache) = &self.node_cache + && let Some(node) = node_cache.get(&index) { return Ok(Either::Right(Some(node))); } - } // Then check if unflushed has the node if let Some(node) = self.unflushed.get(index) { @@ -1303,11 +1302,10 @@ impl MerkleTree { if !info.miss { let node = node_from_bytes(&index, info.data.as_ref().unwrap())?; #[cfg(feature = "cache")] - if !node.blank { - if let Some(node_cache) = &self.node_cache { + if !node.blank + && let Some(node_cache) = &self.node_cache { node_cache.insert(node.index, node.clone()) } - } nodes.insert(index, Some(node)); } else { nodes.insert(index, None); From 59e8effe3d6eadfc72dc8f42b37268328dfaf63e Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 18:48:17 -0500 Subject: [PATCH 11/13] lint --- src/bitfield/dynamic.rs | 27 +++++++++++++++------------ src/crypto/hash.rs | 8 ++++---- src/replication/mod.rs | 2 +- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/src/bitfield/dynamic.rs b/src/bitfield/dynamic.rs index da85e04a..c2266f6d 100644 --- a/src/bitfield/dynamic.rs +++ b/src/bitfield/dynamic.rs @@ -177,9 +177,10 @@ impl DynamicBitfield { // To keep the common case fast, first try the same page as the position if let Some(p) = self.pages.get(first_page) - && let Some(index) = p.borrow().index_of(value, first_index as u32) { - return Some(first_page * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); - }; + && let Some(index) = p.borrow().index_of(value, first_index as u32) + { + return Some(first_page * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); + }; // It wasn't found on the first page, now 
get the keys that are bigger // than the given index and sort them. @@ -187,9 +188,10 @@ impl DynamicBitfield { keys.sort(); for key in keys { if let Some(p) = self.pages.get(*key) - && let Some(index) = p.borrow().index_of(value, 0) { - return Some(key * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); - }; + && let Some(index) = p.borrow().index_of(value, 0) + { + return Some(key * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); + }; } } else { // Searching for the false value is easier as it is automatically hit on @@ -222,9 +224,10 @@ impl DynamicBitfield { // To keep the common case fast, first try the same page as the position if let Some(p) = self.pages.get(last_page) - && let Some(index) = p.borrow().last_index_of(value, last_index as u32) { - return Some(last_page * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); - }; + && let Some(index) = p.borrow().last_index_of(value, last_index as u32) + { + return Some(last_page * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); + }; // It wasn't found on the last page, now get the keys that are smaller // than the given index and sort them. @@ -237,9 +240,9 @@ impl DynamicBitfield { && let Some(index) = p .borrow() .last_index_of(value, FIXED_BITFIELD_BITS_LENGTH as u32 - 1) - { - return Some(key * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); - }; + { + return Some(key * DYNAMIC_BITFIELD_PAGE_SIZE as u64 + index as u64); + }; } } else { // Searching for the false value is easier as it is automatically hit on diff --git a/src/crypto/hash.rs b/src/crypto/hash.rs index cc09ee8c..e12ff571 100644 --- a/src/crypto/hash.rs +++ b/src/crypto/hash.rs @@ -46,7 +46,7 @@ pub(crate) struct Hash { impl Hash { /// Hash a `Leaf` node. - #[allow(dead_code)] + #[expect(dead_code)] pub(crate) fn from_leaf(data: &[u8]) -> Self { let size = u64_as_be(data.len() as u64); @@ -61,7 +61,7 @@ impl Hash { } /// Hash two `Leaf` nodes hashes together to form a `Parent` hash. 
- #[allow(dead_code)] + #[expect(dead_code)] pub(crate) fn from_hashes(left: &Node, right: &Node) -> Self { let (node1, node2) = if left.index <= right.index { (left, right) @@ -84,7 +84,7 @@ impl Hash { /// Hash a public key. Useful to find the key you're looking for on a public /// network without leaking the key itself. - #[allow(dead_code)] + #[expect(dead_code)] pub(crate) fn for_discovery_key(public_key: VerifyingKey) -> Self { let mut hasher = Blake2bMac::::new_with_salt_and_personal(public_key.as_bytes(), &[], &[]).unwrap(); @@ -96,7 +96,7 @@ impl Hash { /// Hash a vector of `Root` nodes. // Called `crypto.tree()` in the JS implementation. - #[allow(dead_code)] + #[expect(dead_code)] pub(crate) fn from_roots(roots: &[impl AsRef]) -> Self { let mut hasher = Blake2b256::new(); hasher.update(ROOT_TYPE); diff --git a/src/replication/mod.rs b/src/replication/mod.rs index 166cb302..bc7b2e4d 100644 --- a/src/replication/mod.rs +++ b/src/replication/mod.rs @@ -1,4 +1,4 @@ -//! External interface for replication +//! 
Hypercore to Hypercore replication pub mod events; #[cfg(feature = "shared-core")] pub mod shared_core; From 71f48bca74120bb48e69ba787b249531ebcf1c57 Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Fri, 6 Feb 2026 19:19:47 -0500 Subject: [PATCH 12/13] Move code shared with HC protocol into crate The crate is called hypercore_schema --- Cargo.toml | 5 +- examples/replication.rs | 6 +- src/common/cache.rs | 2 +- src/common/mod.rs | 4 - src/common/node.rs | 149 -------------- src/common/peer.rs | 86 +------- src/core.rs | 11 +- src/crypto/hash.rs | 324 ------------------------------ src/crypto/mod.rs | 2 +- src/encoding.rs | 198 +----------------- src/lib.rs | 6 +- src/oplog/entry.rs | 3 +- src/oplog/mod.rs | 4 +- src/replication/mod.rs | 7 +- src/replication/shared_core.rs | 6 +- src/tree/merkle_tree.rs | 27 +-- src/tree/merkle_tree_changeset.rs | 5 +- 17 files changed, 49 insertions(+), 796 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6566ecca..4886876c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,9 @@ moka = { version = "0.12", optional = true, features = ["sync"] } async-broadcast = { version = "0.7.1", optional = true } async-lock = {version = "3.4.0", optional = true } +[dependencies.hypercore_schema] +version = "0.2.0" + [target.'cfg(not(target_arch = "wasm32"))'.dependencies] random-access-disk = { version = "3", default-features = false } @@ -61,7 +64,7 @@ test-log = { version = "0.2.11", default-features = false, features = ["trace"] tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt"] } [features] -default = ["tokio", "sparse", "replication", "cache", "shared-core"] +default = ["tokio", "sparse", "replication", "cache"] replication = ["dep:async-broadcast"] shared-core = ["replication", "dep:async-lock"] sparse = ["random-access-disk/sparse"] diff --git a/examples/replication.rs b/examples/replication.rs index 3d058551..f2943796 100644 --- a/examples/replication.rs +++ b/examples/replication.rs @@ -1,9 +1,7 @@ 
#[cfg(feature = "async-std")] use async_std::main as async_main; -use hypercore::{ - Hypercore, HypercoreBuilder, HypercoreError, PartialKeypair, RequestBlock, RequestUpgrade, - Storage, -}; +use hypercore::{Hypercore, HypercoreBuilder, HypercoreError, PartialKeypair, Storage}; +use hypercore_schema::{RequestBlock, RequestUpgrade}; use tempfile::Builder; #[cfg(feature = "tokio")] use tokio::main as async_main; diff --git a/src/common/cache.rs b/src/common/cache.rs index fc6a4961..4181ed1b 100644 --- a/src/common/cache.rs +++ b/src/common/cache.rs @@ -1,7 +1,7 @@ use moka::sync::Cache; use std::time::Duration; -use crate::Node; +use hypercore_schema::Node; // Default to 1 year of cache const DEFAULT_CACHE_TTL_SEC: u64 = 31556952; diff --git a/src/common/mod.rs b/src/common/mod.rs index 9df64303..cb350618 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -6,12 +6,8 @@ mod peer; mod store; pub use self::error::HypercoreError; -pub use self::node::Node; pub(crate) use self::node::NodeByteRange; pub(crate) use self::peer::ValuelessProof; -pub use self::peer::{ - DataBlock, DataHash, DataSeek, DataUpgrade, Proof, RequestBlock, RequestSeek, RequestUpgrade, -}; pub use self::store::Store; pub(crate) use self::store::{StoreInfo, StoreInfoInstruction, StoreInfoType}; diff --git a/src/common/node.rs b/src/common/node.rs index 3ebf8f39..463a2b9a 100644 --- a/src/common/node.rs +++ b/src/common/node.rs @@ -1,155 +1,6 @@ -use merkle_tree_stream::Node as NodeTrait; -use merkle_tree_stream::{NodeKind, NodeParts}; -use pretty_hash::fmt as pretty_fmt; -use std::cmp::Ordering; -use std::convert::AsRef; -use std::fmt::{self, Display}; - -use crate::crypto::Hash; - /// Node byte range #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct NodeByteRange { pub(crate) index: u64, pub(crate) length: u64, } - -/// Nodes of the Merkle Tree that are persisted to disk. -// TODO: replace `hash: Vec` with `hash: Hash`. 
This requires patching / -// rewriting the Blake2b crate to support `.from_bytes()` to serialize from -// disk. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Node { - /// This node's index in the Merkle tree - pub(crate) index: u64, - /// Hash of the data in this node - // TODO make this [u8; 32] like: - // https://github.com/holepunchto/hypercore/blob/d21ebdeca1b27eb4c2232f8af17d5ae939ee97f2/lib/messages.js#L246 - pub(crate) hash: Vec, - /// Number of bytes in this [`Node::data`] - pub(crate) length: u64, - /// Index of this nodes parent - pub(crate) parent: u64, - /// Hypercore's data. Can be receieved after the rest of the node, so it's optional. - pub(crate) data: Option>, - pub(crate) blank: bool, -} - -impl Node { - /// Create a new instance. - // TODO: ensure sizes are correct. - pub fn new(index: u64, hash: Vec, length: u64) -> Self { - let mut blank = true; - for byte in &hash { - if *byte != 0 { - blank = false; - break; - } - } - Self { - index, - hash, - length, - parent: flat_tree::parent(index), - data: Some(Vec::with_capacity(0)), - blank, - } - } - - /// Creates a new blank node - pub fn new_blank(index: u64) -> Self { - Self { - index, - hash: vec![0, 32], - length: 0, - parent: 0, - data: None, - blank: true, - } - } -} - -impl NodeTrait for Node { - #[inline] - fn index(&self) -> u64 { - self.index - } - - #[inline] - fn hash(&self) -> &[u8] { - &self.hash - } - - #[inline] - fn len(&self) -> u64 { - self.length - } - - #[inline] - fn is_empty(&self) -> bool { - self.length == 0 - } - - #[inline] - fn parent(&self) -> u64 { - self.parent - } -} - -impl AsRef for Node { - #[inline] - fn as_ref(&self) -> &Self { - self - } -} - -impl Display for Node { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Node {{ index: {}, hash: {}, length: {} }}", - self.index, - pretty_fmt(&self.hash).unwrap(), - self.length - ) - } -} - -impl PartialOrd for Node { - fn partial_cmp(&self, other: &Self) -> Option { - 
Some(self.cmp(other)) - } -} - -impl Ord for Node { - fn cmp(&self, other: &Self) -> Ordering { - self.index.cmp(&other.index) - } -} - -impl From> for Node { - fn from(parts: NodeParts) -> Self { - let partial = parts.node(); - let data = match partial.data() { - NodeKind::Leaf(data) => Some(data.clone()), - NodeKind::Parent => None, - }; - let hash: Vec = parts.hash().as_bytes().into(); - let mut blank = true; - for byte in &hash { - if *byte != 0 { - blank = false; - break; - } - } - - Node { - index: partial.index(), - parent: partial.parent, - length: partial.len(), - hash, - data, - blank, - } - } -} diff --git a/src/common/peer.rs b/src/common/peer.rs index b4203176..41542b95 100644 --- a/src/common/peer.rs +++ b/src/common/peer.rs @@ -2,47 +2,7 @@ //! hypercore-protocol-rs uses these types and wraps them //! into wire messages. -use crate::Node; - -#[derive(Debug, Clone, PartialEq)] -/// Request of a DataBlock or DataHash from peer -pub struct RequestBlock { - /// Hypercore index - pub index: u64, - /// TODO: document - pub nodes: u64, -} - -#[derive(Debug, Clone, PartialEq)] -/// Request of a DataSeek from peer -pub struct RequestSeek { - /// TODO: document - pub bytes: u64, -} - -#[derive(Debug, Clone, PartialEq)] -/// Request for a DataUpgrade from peer -pub struct RequestUpgrade { - /// Hypercore start index - pub start: u64, - /// Length of elements - pub length: u64, -} - -#[derive(Debug, Clone, PartialEq)] -/// Proof generated from corresponding requests -pub struct Proof { - /// Fork - pub fork: u64, - /// Data block. 
- pub block: Option, - /// Data hash - pub hash: Option, - /// Data seek - pub seek: Option, - /// Data updrade - pub upgrade: Option, -} +use hypercore_schema::{DataBlock, DataHash, DataSeek, DataUpgrade, Proof}; #[derive(Debug, Clone, PartialEq)] /// Valueless proof generated from corresponding requests @@ -72,47 +32,3 @@ impl ValuelessProof { } } } - -#[derive(Debug, Clone, PartialEq)] -/// Block of data to peer -pub struct DataBlock { - /// Hypercore index - pub index: u64, - /// Data block value in bytes - pub value: Vec, - /// Nodes of the merkle tree - pub nodes: Vec, -} - -#[derive(Debug, Clone, PartialEq)] -/// Data hash to peer -pub struct DataHash { - /// Hypercore index - pub index: u64, - /// TODO: document - pub nodes: Vec, -} - -#[derive(Debug, Clone, PartialEq)] -/// TODO: Document -pub struct DataSeek { - /// TODO: Document - pub bytes: u64, - /// TODO: Document - pub nodes: Vec, -} - -#[derive(Debug, Clone, PartialEq)] -/// TODO: Document -pub struct DataUpgrade { - /// Starting block of this upgrade response - pub start: u64, - /// Number of blocks in this upgrade response - pub length: u64, - /// The nodes of the merkle tree - pub nodes: Vec, - /// TODO: Document - pub additional_nodes: Vec, - /// TODO: Document - pub signature: Vec, -} diff --git a/src/core.rs b/src/core.rs index d0f430e5..3adc9a50 100644 --- a/src/core.rs +++ b/src/core.rs @@ -8,9 +8,8 @@ use tracing::instrument; #[cfg(feature = "cache")] use crate::common::cache::CacheOptions; use crate::{ - RequestBlock, RequestSeek, RequestUpgrade, bitfield::Bitfield, - common::{BitfieldUpdate, HypercoreError, NodeByteRange, Proof, StoreInfo, ValuelessProof}, + common::{BitfieldUpdate, HypercoreError, NodeByteRange, StoreInfo, ValuelessProof}, crypto::{PartialKeypair, generate_signing_key}, data::BlockStore, oplog::{Header, MAX_OPLOG_ENTRIES_BYTE_SIZE, Oplog}, @@ -18,6 +17,8 @@ use crate::{ tree::{MerkleTree, MerkleTreeChangeset}, }; +use hypercore_schema::{Proof, RequestBlock, RequestSeek, 
RequestUpgrade}; + #[derive(Debug)] pub(crate) struct HypercoreOptions { pub(crate) key_pair: Option, @@ -328,6 +329,9 @@ impl Hypercore { #[cfg(feature = "replication")] { + use tracing::trace; + + trace!(bitfield_update = ?bitfield_update, "Hypercore.append_batch emit DataUpgrade & Have"); let _ = self.events.send(crate::replication::events::DataUpgrade {}); let _ = self .events @@ -361,6 +365,9 @@ impl Hypercore { #[cfg(feature = "replication")] // if not in this core, emit Event::Get(index) { + use tracing::trace; + + trace!(index = index, "Hypercore emit 'get' event"); self.events.send_on_get(index); } return Ok(None); diff --git a/src/crypto/hash.rs b/src/crypto/hash.rs index e12ff571..58cd52d6 100644 --- a/src/crypto/hash.rs +++ b/src/crypto/hash.rs @@ -1,22 +1,4 @@ -use blake2::{ - Blake2b, Blake2bMac, Digest, - digest::{FixedOutput, generic_array::GenericArray, typenum::U32}, -}; -use byteorder::{BigEndian, WriteBytesExt}; use compact_encoding::{EncodingError, FixedWidthEncoding, as_array, to_encoded_bytes}; -use ed25519_dalek::VerifyingKey; -use merkle_tree_stream::Node as NodeTrait; -use std::convert::AsRef; -use std::mem; -use std::ops::{Deref, DerefMut}; - -use crate::common::Node; - -// https://en.wikipedia.org/wiki/Merkle_tree#Second_preimage_attack -const LEAF_TYPE: [u8; 1] = [0x00]; -const PARENT_TYPE: [u8; 1] = [0x01]; -const ROOT_TYPE: [u8; 1] = [0x02]; -const HYPERCORE: [u8; 9] = *b"hypercore"; // These the output of, see `hash_namespace` test below for how they are produced // https://github.com/holepunchto/hypercore/blob/cf08b72f14ed7d9ef6d497ebb3071ee0ae20967e/lib/caps.js#L16 @@ -35,179 +17,6 @@ const TREE: [u8; 32] = [ // 0x8B, 0x15, 0xB8, 0x2E, 0xC5, 0xED, 0x78, 0xC4, 0xEC, 0x59, 0x7B, 0x03, 0x6E, 0x2A, 0x14, 0x98, // ]; -pub(crate) type Blake2bResult = GenericArray; -type Blake2b256 = Blake2b; - -/// `BLAKE2b` hash. 
-#[derive(Debug, Clone, PartialEq)] -pub(crate) struct Hash { - hash: Blake2bResult, -} - -impl Hash { - /// Hash a `Leaf` node. - #[expect(dead_code)] - pub(crate) fn from_leaf(data: &[u8]) -> Self { - let size = u64_as_be(data.len() as u64); - - let mut hasher = Blake2b256::new(); - hasher.update(LEAF_TYPE); - hasher.update(size); - hasher.update(data); - - Self { - hash: hasher.finalize(), - } - } - - /// Hash two `Leaf` nodes hashes together to form a `Parent` hash. - #[expect(dead_code)] - pub(crate) fn from_hashes(left: &Node, right: &Node) -> Self { - let (node1, node2) = if left.index <= right.index { - (left, right) - } else { - (right, left) - }; - - let size = u64_as_be(node1.length + node2.length); - - let mut hasher = Blake2b256::new(); - hasher.update(PARENT_TYPE); - hasher.update(size); - hasher.update(node1.hash()); - hasher.update(node2.hash()); - - Self { - hash: hasher.finalize(), - } - } - - /// Hash a public key. Useful to find the key you're looking for on a public - /// network without leaking the key itself. - #[expect(dead_code)] - pub(crate) fn for_discovery_key(public_key: VerifyingKey) -> Self { - let mut hasher = - Blake2bMac::::new_with_salt_and_personal(public_key.as_bytes(), &[], &[]).unwrap(); - blake2::digest::Update::update(&mut hasher, &HYPERCORE); - Self { - hash: hasher.finalize_fixed(), - } - } - - /// Hash a vector of `Root` nodes. - // Called `crypto.tree()` in the JS implementation. - #[expect(dead_code)] - pub(crate) fn from_roots(roots: &[impl AsRef]) -> Self { - let mut hasher = Blake2b256::new(); - hasher.update(ROOT_TYPE); - - for node in roots { - let node = node.as_ref(); - hasher.update(node.hash()); - hasher.update(u64_as_be(node.index())); - hasher.update(u64_as_be(node.len())); - } - - Self { - hash: hasher.finalize(), - } - } - - /// Returns a byte slice of this `Hash`'s contents. 
- pub(crate) fn as_bytes(&self) -> &[u8] { - self.hash.as_slice() - } - - // NB: The following methods mirror Javascript naming in - // https://github.com/mafintosh/hypercore-crypto/blob/master/index.js - // for v10 that use LE bytes. - - /// Hash data - pub(crate) fn data(data: &[u8]) -> Self { - let size = - (|| Ok::<_, EncodingError>(to_encoded_bytes!((data.len() as u64).as_fixed_width())))() - .expect("Encoding u64 should not fail"); - - let mut hasher = Blake2b256::new(); - hasher.update(LEAF_TYPE); - hasher.update(&size); - hasher.update(data); - - Self { - hash: hasher.finalize(), - } - } - - /// Hash a parent - pub(crate) fn parent(left: &Node, right: &Node) -> Self { - let (node1, node2) = if left.index <= right.index { - (left, right) - } else { - (right, left) - }; - - let len = node1.length + node2.length; - let size: Box<[u8]> = - (|| Ok::<_, EncodingError>(to_encoded_bytes!(len.as_fixed_width())))() - .expect("Encoding u64 should not fail"); - - let mut hasher = Blake2b256::new(); - hasher.update(PARENT_TYPE); - hasher.update(&size); - hasher.update(node1.hash()); - hasher.update(node2.hash()); - - Self { - hash: hasher.finalize(), - } - } - - /// Hash a tree - pub(crate) fn tree(roots: &[impl AsRef]) -> Self { - let mut hasher = Blake2b256::new(); - hasher.update(ROOT_TYPE); - - for node in roots { - let node = node.as_ref(); - let buffer = (|| { - Ok::<_, EncodingError>(to_encoded_bytes!( - node.index().as_fixed_width(), - node.len().as_fixed_width() - )) - })() - .expect("Encoding u64 should not fail"); - - hasher.update(node.hash()); - hasher.update(&buffer[..8]); - hasher.update(&buffer[8..]); - } - - Self { - hash: hasher.finalize(), - } - } -} - -fn u64_as_be(n: u64) -> [u8; 8] { - let mut size = [0u8; mem::size_of::()]; - size.as_mut().write_u64::(n).unwrap(); - size -} - -impl Deref for Hash { - type Target = Blake2bResult; - - fn deref(&self) -> &Self::Target { - &self.hash - } -} - -impl DerefMut for Hash { - fn deref_mut(&mut self) -> &mut 
Self::Target { - &mut self.hash - } -} - /// Create a signable buffer for tree. This is treeSignable in Javascript. /// See https://github.com/hypercore-protocol/hypercore/blob/70b271643c4e4b1e5ecae5bb579966dfe6361ff3/lib/caps.js#L17 pub(crate) fn signable_tree(hash: &[u8], length: u64, fork: u64) -> Box<[u8]> { @@ -221,136 +30,3 @@ pub(crate) fn signable_tree(hash: &[u8], length: u64, fork: u64) -> Box<[u8]> { })() .expect("Encoding should not fail") } - -#[cfg(test)] -mod tests { - use super::*; - - use self::data_encoding::HEXLOWER; - use data_encoding; - - fn hash_with_extra_byte(data: &[u8], byte: u8) -> Box<[u8]> { - let mut hasher = Blake2b256::new(); - hasher.update(data); - hasher.update([byte]); - let hash = hasher.finalize(); - hash.as_slice().into() - } - - fn hex_bytes(hex: &str) -> Vec { - HEXLOWER.decode(hex.as_bytes()).unwrap() - } - - fn check_hash(hash: Hash, hex: &str) { - assert_eq!(hash.as_bytes(), &hex_bytes(hex)[..]); - } - - #[test] - fn leaf_hash() { - check_hash( - Hash::from_leaf(&[]), - "5187b7a8021bf4f2c004ea3a54cfece1754f11c7624d2363c7f4cf4fddd1441e", - ); - check_hash( - Hash::from_leaf(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), - "e1001bb0bb9322b6b202b2f737dc12181b11727168d33ca48ffe361c66cd1abe", - ); - } - - #[test] - fn parent_hash() { - let d1: &[u8] = &[0, 1, 2, 3, 4]; - let d2: &[u8] = &[42, 43, 44, 45, 46, 47, 48]; - let node1 = Node::new(0, Hash::from_leaf(d1).as_bytes().to_vec(), d1.len() as u64); - let node2 = Node::new(1, Hash::from_leaf(d2).as_bytes().to_vec(), d2.len() as u64); - check_hash( - Hash::from_hashes(&node1, &node2), - "6fac58578fa385f25a54c0637adaca71fdfddcea885d561f33d80c4487149a14", - ); - check_hash( - Hash::from_hashes(&node2, &node1), - "6fac58578fa385f25a54c0637adaca71fdfddcea885d561f33d80c4487149a14", - ); - } - - #[test] - fn root_hash() { - let d1: &[u8] = &[0, 1, 2, 3, 4]; - let d2: &[u8] = &[42, 43, 44, 45, 46, 47, 48]; - let node1 = Node::new(0, Hash::from_leaf(d1).as_bytes().to_vec(), d1.len() as u64); - 
let node2 = Node::new(1, Hash::from_leaf(d2).as_bytes().to_vec(), d2.len() as u64); - check_hash( - Hash::from_roots(&[&node1, &node2]), - "2d117e0bb15c6e5236b6ce764649baed1c41890da901a015341503146cc20bcd", - ); - check_hash( - Hash::from_roots(&[&node2, &node1]), - "9826c8c2d28fc309cce73a4b6208e83e5e4b0433d2369bfbf8858272153849f1", - ); - } - - #[test] - fn discovery_key_hashing() -> Result<(), ed25519_dalek::SignatureError> { - let public_key = VerifyingKey::from_bytes(&[ - 119, 143, 141, 149, 81, 117, 201, 46, 76, 237, 94, 79, 85, 99, 246, 155, 254, 192, 200, - 108, 198, 246, 112, 53, 44, 69, 121, 67, 102, 111, 230, 57, - ])?; - - let expected = &[ - 37, 167, 138, 168, 22, 21, 132, 126, 186, 0, 153, 93, 242, 157, 212, 29, 126, 227, 15, - 59, 1, 248, 146, 32, 159, 121, 183, 90, 87, 217, 137, 225, - ]; - - assert_eq!(Hash::for_discovery_key(public_key).as_bytes(), expected); - - Ok(()) - } - - // The following uses test data from - // https://github.com/mafintosh/hypercore-crypto/blob/master/test.js - - #[test] - fn hash_leaf() { - let data = b"hello world"; - check_hash( - Hash::data(data), - "9f1b578fd57a4df015493d2886aec9600eef913c3bb009768c7f0fb875996308", - ); - } - - #[test] - fn hash_parent() { - let data = b"hello world"; - let len = data.len() as u64; - let node1 = Node::new(0, Hash::data(data).as_bytes().to_vec(), len); - let node2 = Node::new(1, Hash::data(data).as_bytes().to_vec(), len); - check_hash( - Hash::parent(&node1, &node2), - "3ad0c9b58b771d1b7707e1430f37c23a23dd46e0c7c3ab9c16f79d25f7c36804", - ); - } - - #[test] - fn hash_tree() { - let hash: [u8; 32] = [0; 32]; - let node1 = Node::new(3, hash.to_vec(), 11); - let node2 = Node::new(9, hash.to_vec(), 2); - check_hash( - Hash::tree(&[&node1, &node2]), - "0e576a56b478cddb6ffebab8c494532b6de009466b2e9f7af9143fc54b9eaa36", - ); - } - - // This is the rust version from - // https://github.com/hypercore-protocol/hypercore/blob/70b271643c4e4b1e5ecae5bb579966dfe6361ff3/lib/caps.js - // and validates 
that our arrays match - #[test] - fn hash_namespace() { - let mut hasher = Blake2b256::new(); - hasher.update(HYPERCORE); - let hash = hasher.finalize(); - let ns = hash.as_slice(); - let tree: Box<[u8]> = { hash_with_extra_byte(ns, 0) }; - assert_eq!(tree, TREE.into()); - } -} diff --git a/src/crypto/mod.rs b/src/crypto/mod.rs index 6ec1ffe8..98b307e8 100644 --- a/src/crypto/mod.rs +++ b/src/crypto/mod.rs @@ -4,6 +4,6 @@ mod hash; mod key_pair; mod manifest; -pub(crate) use hash::{Hash, signable_tree}; +pub(crate) use hash::signable_tree; pub use key_pair::{PartialKeypair, generate as generate_signing_key, sign, verify}; pub(crate) use manifest::{Manifest, ManifestSigner, default_signer_manifest}; diff --git a/src/encoding.rs b/src/encoding.rs index d9571335..76405df7 100644 --- a/src/encoding.rs +++ b/src/encoding.rs @@ -1,203 +1,9 @@ //! Hypercore-specific compact encodings -use crate::{ - DataBlock, DataHash, DataSeek, DataUpgrade, Node, RequestBlock, RequestSeek, RequestUpgrade, - crypto::{Manifest, ManifestSigner}, -}; +use crate::crypto::{Manifest, ManifestSigner}; use compact_encoding::{ - CompactEncoding, EncodingError, EncodingErrorKind, VecEncodable, as_array, encode_bytes_fixed, - encoded_size_usize, map_decode, map_encode, sum_encoded_size, take_array, write_slice, + CompactEncoding, EncodingError, EncodingErrorKind, encode_bytes_fixed, take_array, write_slice, }; -impl CompactEncoding for Node { - fn encoded_size(&self) -> Result { - Ok(sum_encoded_size!(self.index, self.length) + 32) - } - - fn encode<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a mut [u8], EncodingError> { - let hash = as_array::<32>(&self.hash)?; - Ok(map_encode!(buffer, self.index, self.length, hash)) - } - - fn decode(buffer: &[u8]) -> Result<(Self, &[u8]), EncodingError> - where - Self: Sized, - { - let ((index, length, hash), rest) = map_decode!(buffer, [u64, u64, [u8; 32]]); - Ok((Node::new(index, hash.to_vec(), length), rest)) - } -} - -impl VecEncodable for Node { - fn 
vec_encoded_size(vec: &[Self]) -> Result - where - Self: Sized, - { - let mut out = encoded_size_usize(vec.len()); - for x in vec { - out += x.encoded_size()?; - } - Ok(out) - } -} - -impl CompactEncoding for RequestBlock { - fn encoded_size(&self) -> Result { - Ok(sum_encoded_size!(self.index, self.nodes)) - } - - fn encode<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a mut [u8], EncodingError> { - Ok(map_encode!(buffer, self.index, self.nodes)) - } - - fn decode(buffer: &[u8]) -> Result<(Self, &[u8]), EncodingError> - where - Self: Sized, - { - let ((index, nodes), rest) = map_decode!(buffer, [u64, u64]); - Ok((RequestBlock { index, nodes }, rest)) - } -} - -impl CompactEncoding for RequestSeek { - fn encoded_size(&self) -> Result { - self.bytes.encoded_size() - } - - fn encode<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a mut [u8], EncodingError> { - self.bytes.encode(buffer) - } - - fn decode(buffer: &[u8]) -> Result<(Self, &[u8]), EncodingError> - where - Self: Sized, - { - let (bytes, rest) = u64::decode(buffer)?; - Ok((RequestSeek { bytes }, rest)) - } -} - -impl CompactEncoding for RequestUpgrade { - fn encoded_size(&self) -> Result { - Ok(sum_encoded_size!(self.start, self.length)) - } - - fn encode<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a mut [u8], EncodingError> { - Ok(map_encode!(buffer, self.start, self.length)) - } - - fn decode(buffer: &[u8]) -> Result<(Self, &[u8]), EncodingError> - where - Self: Sized, - { - let ((start, length), rest) = map_decode!(buffer, [u64, u64]); - Ok((RequestUpgrade { start, length }, rest)) - } -} - -impl CompactEncoding for DataBlock { - fn encoded_size(&self) -> Result { - Ok(sum_encoded_size!(self.index, self.value, self.nodes)) - } - - fn encode<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a mut [u8], EncodingError> { - Ok(map_encode!(buffer, self.index, self.value, self.nodes)) - } - - fn decode(buffer: &[u8]) -> Result<(Self, &[u8]), EncodingError> - where - Self: Sized, - { - let ((index, value, nodes), rest) 
= map_decode!(buffer, [u64, Vec, Vec]); - Ok(( - DataBlock { - index, - value, - nodes, - }, - rest, - )) - } -} - -impl CompactEncoding for DataHash { - fn encoded_size(&self) -> Result { - Ok(sum_encoded_size!(self.index, self.nodes)) - } - - fn encode<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a mut [u8], EncodingError> { - Ok(map_encode!(buffer, self.index, self.nodes)) - } - - fn decode(buffer: &[u8]) -> Result<(Self, &[u8]), EncodingError> - where - Self: Sized, - { - let ((index, nodes), rest) = map_decode!(buffer, [u64, Vec]); - Ok((DataHash { index, nodes }, rest)) - } -} - -impl CompactEncoding for DataSeek { - fn encoded_size(&self) -> Result { - Ok(sum_encoded_size!(self.bytes, self.nodes)) - } - - fn encode<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a mut [u8], EncodingError> { - Ok(map_encode!(buffer, self.bytes, self.nodes)) - } - - fn decode(buffer: &[u8]) -> Result<(Self, &[u8]), EncodingError> - where - Self: Sized, - { - let ((bytes, nodes), rest) = map_decode!(buffer, [u64, Vec]); - Ok((DataSeek { bytes, nodes }, rest)) - } -} - -// from: -// https://github.com/holepunchto/hypercore/blob/d21ebdeca1b27eb4c2232f8af17d5ae939ee97f2/lib/messages.js#L394 -impl CompactEncoding for DataUpgrade { - fn encoded_size(&self) -> Result { - Ok(sum_encoded_size!( - self.start, - self.length, - self.nodes, - self.additional_nodes, - self.signature - )) - } - - fn encode<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a mut [u8], EncodingError> { - Ok(map_encode!( - buffer, - self.start, - self.length, - self.nodes, - self.additional_nodes, - self.signature - )) - } - - fn decode(buffer: &[u8]) -> Result<(Self, &[u8]), EncodingError> - where - Self: Sized, - { - let ((start, length, nodes, additional_nodes, signature), rest) = - map_decode!(buffer, [u64, u64, Vec, Vec, Vec]); - Ok(( - DataUpgrade { - start, - length, - nodes, - additional_nodes, - signature, - }, - rest, - )) - } -} - impl CompactEncoding for ManifestSigner { fn encoded_size(&self) -> Result { 
Ok( diff --git a/src/lib.rs b/src/lib.rs index 3140c5d7..c2a4f4d0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,7 +6,6 @@ missing_debug_implementations, missing_docs )] -#![cfg_attr(test, deny(warnings))] #![doc(test(attr(deny(warnings))))] #![warn( unreachable_pub, @@ -101,10 +100,7 @@ mod tree; #[cfg(feature = "cache")] pub use crate::builder::CacheOptionsBuilder; pub use crate::builder::HypercoreBuilder; -pub use crate::common::{ - DataBlock, DataHash, DataSeek, DataUpgrade, HypercoreError, Node, Proof, RequestBlock, - RequestSeek, RequestUpgrade, Store, -}; +pub use crate::common::{HypercoreError, Store}; pub use crate::core::{AppendOutcome, Hypercore, Info}; pub use crate::crypto::{PartialKeypair, generate_signing_key, sign, verify}; pub use crate::storage::{Storage, StorageTraits}; diff --git a/src/oplog/entry.rs b/src/oplog/entry.rs index 0c6ba426..787e4c93 100644 --- a/src/oplog/entry.rs +++ b/src/oplog/entry.rs @@ -3,7 +3,8 @@ use compact_encoding::{ take_array_mut, write_array, }; -use crate::{Node, common::BitfieldUpdate}; +use crate::common::BitfieldUpdate; +use hypercore_schema::Node; /// Entry tree upgrade #[derive(Debug)] diff --git a/src/oplog/mod.rs b/src/oplog/mod.rs index 3fb975c8..3a3153e6 100644 --- a/src/oplog/mod.rs +++ b/src/oplog/mod.rs @@ -7,7 +7,9 @@ use std::convert::{TryFrom, TryInto}; use crate::common::{BitfieldUpdate, Store, StoreInfo, StoreInfoInstruction}; use crate::tree::MerkleTreeChangeset; -use crate::{HypercoreError, Node, PartialKeypair}; +use crate::{HypercoreError, PartialKeypair}; + +use hypercore_schema::Node; pub(crate) mod entry; mod header; diff --git a/src/replication/mod.rs b/src/replication/mod.rs index bc7b2e4d..35843736 100644 --- a/src/replication/mod.rs +++ b/src/replication/mod.rs @@ -6,10 +6,9 @@ pub mod shared_core; #[cfg(feature = "shared-core")] pub use shared_core::SharedCore; -use crate::{ - AppendOutcome, HypercoreError, Info, PartialKeypair, Proof, RequestBlock, RequestSeek, - RequestUpgrade, -}; +use 
crate::{AppendOutcome, HypercoreError, Info, PartialKeypair}; + +use hypercore_schema::{Proof, RequestBlock, RequestSeek, RequestUpgrade}; pub use events::Event; diff --git a/src/replication/shared_core.rs b/src/replication/shared_core.rs index c2db5827..80418062 100644 --- a/src/replication/shared_core.rs +++ b/src/replication/shared_core.rs @@ -1,11 +1,9 @@ //! Implementation of a Hypercore that can have multiple owners. Along with implementations of all //! the hypercore traits. -use crate::{ - AppendOutcome, Hypercore, Info, PartialKeypair, Proof, RequestBlock, RequestSeek, - RequestUpgrade, -}; +use crate::{AppendOutcome, Hypercore, Info, PartialKeypair}; use async_broadcast::Receiver; use async_lock::Mutex; +use hypercore_schema::{Proof, RequestBlock, RequestSeek, RequestUpgrade}; use std::sync::Arc; use super::{ diff --git a/src/tree/merkle_tree.rs b/src/tree/merkle_tree.rs index ec133109..cb374a40 100644 --- a/src/tree/merkle_tree.rs +++ b/src/tree/merkle_tree.rs @@ -8,18 +8,19 @@ use intmap::IntMap; use moka::sync::Cache; use std::convert::TryFrom; +use crate::Store; #[cfg(feature = "cache")] use crate::common::cache::CacheOptions; -use crate::common::{HypercoreError, NodeByteRange, Proof, ValuelessProof}; -use crate::crypto::Hash; +use crate::common::{HypercoreError, NodeByteRange, ValuelessProof}; use crate::oplog::HeaderTree; use crate::{ - DataBlock, DataHash, DataSeek, DataUpgrade, RequestBlock, RequestSeek, RequestUpgrade, Store, -}; -use crate::{ - Node, VerifyingKey, + VerifyingKey, common::{StoreInfo, StoreInfoInstruction}, }; +use hypercore_schema::{ + DataBlock, DataHash, DataSeek, DataUpgrade, Hash, Node, Proof, RequestBlock, RequestSeek, + RequestUpgrade, +}; use super::MerkleTreeChangeset; @@ -794,9 +795,10 @@ impl MerkleTree { // First check the cache #[cfg(feature = "cache")] if let Some(node_cache) = &self.node_cache - && let Some(node) = node_cache.get(&index) { - return Ok(Either::Right(Some(node))); - } + && let Some(node) = 
node_cache.get(&index) + { + return Ok(Either::Right(Some(node))); + } // Then check if unflushed has the node if let Some(node) = self.unflushed.get(index) { @@ -1303,9 +1305,10 @@ impl MerkleTree { let node = node_from_bytes(&index, info.data.as_ref().unwrap())?; #[cfg(feature = "cache")] if !node.blank - && let Some(node_cache) = &self.node_cache { - node_cache.insert(node.index, node.clone()) - } + && let Some(node_cache) = &self.node_cache + { + node_cache.insert(node.index, node.clone()) + } nodes.insert(index, Some(node)); } else { nodes.insert(index, None); diff --git a/src/tree/merkle_tree_changeset.rs b/src/tree/merkle_tree_changeset.rs index aea217f3..7b78e4fd 100644 --- a/src/tree/merkle_tree_changeset.rs +++ b/src/tree/merkle_tree_changeset.rs @@ -2,10 +2,11 @@ use ed25519_dalek::{Signature, SigningKey, VerifyingKey}; use std::convert::TryFrom; use crate::{ - HypercoreError, Node, - crypto::{Hash, signable_tree, verify}, + HypercoreError, + crypto::{signable_tree, verify}, sign, }; +use hypercore_schema::{Hash, Node}; /// Changeset for a `MerkleTree`. This allows to incrementally change a `MerkleTree` in two steps: /// first create the changes to this changeset, get out information from this to put to the oplog, From fc4a63ba22ea237884a4553325a7a82a56f3af85 Mon Sep 17 00:00:00 2001 From: Blake Griffith Date: Wed, 18 Feb 2026 17:52:20 -0500 Subject: [PATCH 13/13] Update CHANGELOG.md --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c09999ec..62db2695 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,8 +11,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +* Added some new clippy lints to enforce. + ### Changed +* `get_random_access` was renamed to `get_random_access_mut`. +* Bumped to 2024 edition. +* Structs that were shared with `hypercore-protocol` have been moved into a separate crate: `hypercore_schema`. 
+* Dependencies update +* Some function signatures were changed that were pass-by-value or pass-by-ref that didn't need to be. + ### Removed