fix clippy and check warnings (#452)

Al Liu 2023-10-06 15:08:52 +08:00 committed by GitHub
parent 309e5a29e9
commit dbac7d7597
10 changed files with 25 additions and 28 deletions

View File

@@ -4,7 +4,7 @@ use bls_signatures::{PrivateKey, PublicKey, Serialize, Signature};
use rand::{seq::SliceRandom, SeedableRng};
use serde::{Deserialize, Serialize as SerdeSerialize};
use sha2::{Digest, Sha256};
-use std::ops::Deref;
use thiserror::Error;
use super::LeaderSelection;
@@ -96,7 +96,7 @@ fn view_to_bytes(view: View) -> Box<[u8]> {
// for now, just use something that works
fn choice(state: &RandomBeaconState, nodes: &[NodeId]) -> NodeId {
    let mut seed = [0; 32];
-    seed.copy_from_slice(&state.entropy().deref()[..32]);
+    seed.copy_from_slice(&state.entropy()[..32]);
    let mut rng = rand_chacha::ChaChaRng::from_seed(seed);
    *nodes.choose(&mut rng).unwrap()
}
@@ -110,7 +110,7 @@ impl LeaderSelection for RandomBeaconState {
impl CommitteeMembership for RandomBeaconState {
    fn reshape_committees(&self, nodes: &mut [NodeId]) {
        let mut seed = [0; 32];
-        seed.copy_from_slice(&self.entropy().deref()[..32]);
+        seed.copy_from_slice(&self.entropy()[..32]);
        FisherYatesShuffle::shuffle(nodes, seed);
    }
}
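Both call sites work without the explicit `.deref()` because slicing already goes through deref coercion, which is also why the `use std::ops::Deref;` import could be dropped. A minimal standalone sketch of the same pattern, using a hypothetical `Entropy` wrapper (not a type from this codebase):

    use std::ops::Deref;

    // Hypothetical wrapper resembling the value returned by `entropy()`.
    struct Entropy(Box<[u8]>);

    impl Deref for Entropy {
        type Target = [u8];
        fn deref(&self) -> &[u8] {
            &self.0
        }
    }

    fn seed_from(entropy: &Entropy) -> [u8; 32] {
        let mut seed = [0u8; 32];
        // `entropy[..32]` auto-derefs to `[u8]`; writing `entropy.deref()[..32]`
        // compiles too, but clippy flags the explicit call as unnecessary.
        seed.copy_from_slice(&entropy[..32]);
        seed
    }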

View File

@@ -154,7 +154,7 @@ impl ReferenceStateMachine for RefState {
            }
            Transition::ApproveNewViewWithLatestTimeoutQc(timeout_qc, _) => {
                let new_view = RefState::new_view_from(timeout_qc);
-                state.chain.entry(new_view).or_insert(ViewEntry::new());
+                state.chain.entry(new_view).or_default();
                state.highest_voted_view = new_view;
            }
        }
@@ -407,10 +407,6 @@ impl RefState {
}

impl ViewEntry {
-    fn new() -> ViewEntry {
-        Default::default()
-    }
-
    fn is_empty(&self) -> bool {
        self.blocks.is_empty() && self.timeout_qcs.is_empty()
    }
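`Entry::or_default()` is equivalent to `or_insert(ViewEntry::new())` here and lets the hand-written `new()` constructor go away entirely, since `ViewEntry` already implements `Default`. A minimal sketch of the pattern with a stand-in `ViewEntry` (field types are assumptions, not the real ones):

    use std::collections::HashMap;

    #[derive(Default)]
    struct ViewEntry {
        blocks: Vec<u64>,      // stand-in field types
        timeout_qcs: Vec<u64>,
    }

    fn main() {
        let mut chain: HashMap<u64, ViewEntry> = HashMap::new();
        // clippy prefers `entry(..).or_default()` over `or_insert(ViewEntry::new())`
        // whenever the value type implements `Default`.
        chain.entry(7).or_default().blocks.push(1);
        assert!(chain[&7].timeout_qcs.is_empty());
        assert!(!chain[&7].blocks.is_empty());
    }

The same rewrite shows up again further down in the mock network backend, where `.or_insert_with(Vec::new)` becomes `.or_default()`.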

View File

@@ -7,6 +7,6 @@ edition = "2021"
[dependencies]
blake2 = { version = "0.10" }
-bytes = { versino = "1.3", features = ["serde"] }
+bytes = { version = "1.3", features = ["serde"] }
nomos-core = { path = "../../nomos-core" }
serde = { version = "1.0", features = ["derive"] }

View File

@@ -88,10 +88,12 @@ impl NetworkAdapter for MockAdapter {
        self.send(message, &Committee::default()).await
    }

+    #[allow(clippy::diverging_sub_expression)]
    async fn timeout_stream(&self, _committee: &Committee, _view: View) -> BoxedStream<TimeoutMsg> {
        todo!()
    }

+    #[allow(clippy::diverging_sub_expression)]
    async fn timeout_qc_stream(&self, _view: View) -> BoxedStream<TimeoutQcMsg> {
        todo!()
    }
@@ -122,6 +124,7 @@ impl NetworkAdapter for MockAdapter {
        )))
    }

+    #[allow(clippy::diverging_sub_expression)]
    async fn new_view_stream(&self, _: &Committee, _view: View) -> BoxedStream<NewViewMsg> {
        todo!()
    }
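These `#[allow]`s silence clippy's `diverging_sub_expression` lint rather than implement the stubs: `todo!()` expands to a diverging expression, and once the async methods are expanded (presumably by async-trait-style machinery; the diff doesn't show it) that expression ends up as a sub-expression of a larger generated one. A contrived sketch that triggers the same lint directly:

    // `todo!()` diverges, so using it as an operand leaves the surrounding
    // expression unreachable; clippy reports `diverging_sub_expression` for it
    // unless the allow below is present.
    #[allow(clippy::diverging_sub_expression)]
    fn stubbed_len(v: &[u8]) -> usize {
        v.len() + todo!()
    }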

View File

@@ -98,8 +98,8 @@ where
    async fn run(self) -> Result<(), DynError> {
        let Self {
            mut service_state,
-            mut backend,
-            mut da,
+            backend,
+            da,
            network_relay,
        } = self;
@@ -113,12 +113,12 @@ where
        loop {
            tokio::select! {
                Some(blob) = network_blobs.next() => {
-                    if let Err(e) = handle_new_blob(&mut da, &mut backend, &adapter, blob).await {
+                    if let Err(e) = handle_new_blob(&da, &backend, &adapter, blob).await {
                        tracing::debug!("Failed to add a new received blob: {e:?}");
                    }
                }
                Some(msg) = service_state.inbound_relay.recv() => {
-                    if let Err(e) = handle_da_msg(&mut backend, msg).await {
+                    if let Err(e) = handle_da_msg(&backend, msg).await {
                        tracing::debug!("Failed to handle da msg: {e:?}");
                    }
                }
@@ -132,8 +132,8 @@ async fn handle_new_blob<
    Backend: DaBackend<Blob = Protocol::Blob>,
    A: NetworkAdapter<Blob = Protocol::Blob, Attestation = Protocol::Attestation>,
>(
-    da: &mut Protocol,
-    backend: &mut Backend,
+    da: &Protocol,
+    backend: &Backend,
    adapter: &A,
    blob: Protocol::Blob,
) -> Result<(), DaError> {
@@ -151,7 +151,7 @@ async fn handle_new_blob<
        .map_err(DaError::Dyn)
}

-async fn handle_da_msg<B: DaBackend>(backend: &mut B, msg: DaMsg<B::Blob>) -> Result<(), DaError>
+async fn handle_da_msg<B: DaBackend>(backend: &B, msg: DaMsg<B::Blob>) -> Result<(), DaError>
where
    <B::Blob as Blob>::Hash: Debug,
{
@@ -163,7 +163,6 @@ where
            }
        }
        DaMsg::RemoveBlobs { blobs } => {
-            let backend = &*backend;
            futures::stream::iter(blobs)
                .for_each_concurrent(None, |blob| async move {
                    if let Err(e) = backend.remove_blob(&blob).await {
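Dropping `mut` from these borrows matches what the handlers actually need (shared access only; the warning is most likely clippy's `needless_pass_by_ref_mut`, though the diff doesn't name it), and it also makes the `let backend = &*backend;` reborrow unnecessary: a `&B` is `Copy`, so each `async move` block in `for_each_concurrent` can capture its own copy. A minimal sketch with a stand-in backend type (not the real `DaBackend` trait):

    use futures::StreamExt;

    // Stand-in backend; the real one presumably relies on interior
    // synchronization, which is what makes `&self` methods sufficient.
    struct Backend;

    impl Backend {
        async fn remove_blob(&self, _blob: &u8) -> Result<(), ()> {
            Ok(())
        }
    }

    async fn remove_all(backend: &Backend, blobs: Vec<u8>) {
        // `backend` is a shared reference and therefore `Copy`: every `async move`
        // block copies it, so no `let backend = &*backend;` downgrade is needed
        // and the blocks can run concurrently.
        futures::stream::iter(blobs)
            .for_each_concurrent(None, |blob| async move {
                let _ = backend.remove_blob(&blob).await;
            })
            .await;
    }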

View File

@@ -108,7 +108,7 @@ impl<Backend: MetricsBackend> ServiceData for MetricsService<Backend> {
impl<Backend: MetricsBackend> MetricsService<Backend> {
    async fn handle_load(
-        backend: &mut Backend,
+        backend: &Backend,
        service_id: &OwnedServiceId,
        reply_channel: tokio::sync::oneshot::Sender<Option<Backend::MetricsData>>,
    ) {
@@ -153,7 +153,7 @@ impl<Backend: MetricsBackend + Send + Sync + 'static> ServiceCore for MetricsSer
                    service_id,
                    reply_channel,
                } => {
-                    MetricsService::handle_load(&mut backend, &service_id, reply_channel).await;
+                    MetricsService::handle_load(&backend, &service_id, reply_channel).await;
                }
                MetricsMessage::Update { service_id, data } => {
                    MetricsService::handle_update(&mut backend, &service_id, data).await;

View File

@@ -266,7 +266,7 @@ impl NetworkBackend for Mock {
            .lock()
            .unwrap()
            .entry(topic)
-            .or_insert_with(Vec::new)
+            .or_default()
            .push(msg.clone());
        let _ = self.message_event.send(NetworkEvent::RawMessage(msg));
    }

View File

@@ -17,6 +17,7 @@ tracing = "0.1"
[dev-dependencies]
tokio = { version = "1", features = ["sync", "macros", "time"] }
+tempfile = "3"

[features]
default = []

View File

@@ -123,21 +123,19 @@ mod test {
        let sled_settings = SledBackendSettings {
            db_path: temp_path.path().to_path_buf(),
        };
-        let key = "foo";
-        let value = "bar";
        let mut sled_db: SledBackend<NoStorageSerde> = SledBackend::new(sled_settings)?;
        let result = sled_db
            .execute(Box::new(move |tx| {
-                let key = key;
-                let value = value;
+                let key = "foo";
+                let value = "bar";
                tx.insert(key, value)?;
                let result = tx.get(key)?;
                tx.remove(key)?;
                Ok(result.map(|ivec| ivec.to_vec().into()))
            }))
            .await??;
-        assert_eq!(result, Some(value.as_bytes().into()));
+        assert_eq!(result, Some("bar".as_bytes().into()));
        Ok(())
    }
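The outer `let key`/`let value` bindings existed only so the `move` closure could capture them, and the `let key = key;` rebinding inside the closure is what newer clippy releases warn about (presumably `clippy::redundant_locals`, roughly contemporary with this commit; the diff doesn't name the lint). Inlining the literals removes both the rebinding and the now-unused outer bindings. A minimal sketch of the rebinding pattern:

    fn run<F: FnOnce() -> usize>(f: F) -> usize {
        f()
    }

    fn main() {
        let key = "foo";
        let n = run(move || {
            // `let key = key;` here would merely rebind the already-captured value,
            // which newer clippy versions flag as redundant; using the capture
            // directly (or inlining the literal, as the test now does) avoids it.
            key.len()
        });
        assert_eq!(n, 3);
    }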

View File

@@ -75,9 +75,9 @@ async fn run_nodes_and_destination_client() -> (
        ..Default::default()
    };

-    let mixnode1 = MixnetNode::new(config1.clone());
-    let mixnode2 = MixnetNode::new(config2.clone());
-    let mixnode3 = MixnetNode::new(config3.clone());
+    let mixnode1 = MixnetNode::new(config1);
+    let mixnode2 = MixnetNode::new(config2);
+    let mixnode3 = MixnetNode::new(config3);

    let topology = MixnetTopology {
        layers: vec![
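Each config was cloned and then never used again, so the clone was pure overhead (the kind of thing clippy's `redundant_clone` reports when it is enabled; the diff doesn't say which warning fired here). Passing the value by move is enough. A minimal sketch with hypothetical `Config`/`Node` types:

    #[derive(Clone)]
    struct Config {
        port: u16,
    }

    struct Node {
        config: Config,
    }

    impl Node {
        fn new(config: Config) -> Self {
            Node { config }
        }
    }

    fn main() {
        let config = Config { port: 7777 };
        // `Node::new(config.clone())` compiles too, but the clone is wasted when
        // `config` is never used again afterwards; moving it in is enough.
        let node = Node::new(config);
        assert_eq!(node.config.port, 7777);
    }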