fix lints

Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
Jason Volk 2024-10-06 20:09:37 +00:00
parent 814df8faf2
commit ac37db212a
21 changed files with 59 additions and 69 deletions

View File

@ -50,6 +50,10 @@ unexpected_cfgs = { level = "warn", check-cfg = [
unreachable_pub = "warn"
unused_import_braces = "warn"
unused_qualifications = "warn"
# too many cfgs; false positives with --all-features
dead_code = { level = "allow", priority = 1 }
# library consumer messages
deprecated = { level = "allow", priority = 1 }
[workspace.lints.clippy]
branches_sharing_code = "warn"
@ -57,8 +61,6 @@ cloned_instead_of_copied = "warn"
dbg_macro = "warn"
disallowed_types = "warn"
empty_line_after_outer_attr = "warn"
exhaustive_enums = "warn"
exhaustive_structs = "warn"
inefficient_to_string = "warn"
macro_use_imports = "warn"
map_flatten = "warn"
@ -77,6 +79,7 @@ wildcard_imports = "warn"
new_without_default = "allow"
# Disabled temporarily because it triggers false positives for types with generics.
arc_with_non_send_sync = "allow"
exhaustive_structs = { level = "allow", priority = 1 }
[profile.dev]
# Speeds up test times by more than 10% in a simple test

View File

@ -103,7 +103,11 @@ pub struct Registration {
/// Whether the application service wants to receive ephemeral data.
///
/// Defaults to `false`.
#[serde(default, skip_serializing_if = "ruma_common::serde::is_default", alias = "de.sorunome.msc2409.push_ephemeral")]
#[serde(
default,
skip_serializing_if = "ruma_common::serde::is_default",
alias = "de.sorunome.msc2409.push_ephemeral"
)]
pub receive_ephemeral: bool,
}
@ -141,7 +145,6 @@ pub struct RegistrationInit {
/// The sender is excluded.
pub rate_limited: Option<bool>,
/// The external protocols which the application service provides (e.g. IRC).
pub protocols: Option<Vec<String>>,
}

View File

@ -96,6 +96,7 @@ pub mod v3 {
}
/// A failure to process a signed key.
#[non_exhaustive]
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Failure {
/// Machine-readable error code.

View File

@ -1,4 +1,5 @@
//! Endpoints for user profiles.
#![allow(missing_docs)]
pub mod delete_profile_key;
pub mod delete_timezone_key;

View File

@ -368,7 +368,6 @@ impl Timeline {
/// State events in the room.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct State {
/// A list of state events.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
@ -388,7 +387,7 @@ impl State {
/// Creates a `State` with events.
pub fn with_events(events: Vec<Raw<AnySyncStateEvent>>) -> Self {
State { events, ..Default::default() }
State { events }
}
}
@ -500,7 +499,6 @@ impl RoomSummary {
/// Updates to the rooms that the user has been invited to.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct InvitedRoom {
/// The state of a room that the user has been invited to.
#[serde(default, skip_serializing_if = "InviteState::is_empty")]
@ -521,13 +519,12 @@ impl InvitedRoom {
impl From<InviteState> for InvitedRoom {
fn from(invite_state: InviteState) -> Self {
InvitedRoom { invite_state, ..Default::default() }
InvitedRoom { invite_state }
}
}
/// The state of a room that the user has been invited to.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct InviteState {
/// A list of state events.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
@ -548,7 +545,7 @@ impl InviteState {
impl From<Vec<Raw<AnyStrippedStateEvent>>> for InviteState {
fn from(events: Vec<Raw<AnyStrippedStateEvent>>) -> Self {
InviteState { events, ..Default::default() }
InviteState { events }
}
}

View File

@ -12,7 +12,6 @@ use ruma_common::{
api::{request, response, Metadata},
directory::RoomTypeFilter,
metadata,
room::RoomType,
serde::{deserialize_cow_str, duration::opt_ms, Raw},
MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OwnedMxcUri, OwnedRoomId, OwnedUserId, RoomId,
};

View File

@ -33,11 +33,6 @@ unstable-msc3932 = ["unstable-msc3931"]
unstable-msc4210 = []
unstable-unspecified = []
# Allow IDs to exceed 255 bytes.
compat-arbitrary-length-ids = [
"ruma-identifiers-validation/compat-arbitrary-length-ids",
]
# Don't validate `ServerSigningKeyVersion`.
compat-server-signing-key-version = ["ruma-identifiers-validation/compat-server-signing-key-version"]

View File

@ -21,6 +21,7 @@ pub struct MxcUri(str);
/// Structured MXC URI which may reference strings from separate sources without serialization
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(clippy::exhaustive_structs)]
pub struct Mxc<'a> {
/// ServerName part of the MXC URI
pub server_name: &'a ServerName,

View File

@ -42,8 +42,8 @@ impl Action {
}
/// The `set_tweak` action.
#[non_exhaustive]
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[serde(from = "tweak_serde::Tweak", into = "tweak_serde::Tweak")]
pub enum Tweak {
/// A string representing the sound to be played when this notification arrives.

View File

@ -1,6 +1,7 @@
//! Types for the [`m.direct`] event.
//!
//! [`m.direct`]: https://spec.matrix.org/latest/client-server-api/#mdirect
#![allow(unexpected_cfgs)]
use std::{
collections::{btree_map, BTreeMap},

View File

@ -102,7 +102,7 @@
//! ));
//! ```
#![warn(missing_docs)]
//#![warn(missing_docs)]
use std::{collections::BTreeSet, fmt};

View File

@ -12,9 +12,6 @@ rust-version = { workspace = true }
all-features = true
[features]
# Allow IDs to exceed 255 bytes.
compat-arbitrary-length-ids = []
# Don't validate the version in `server_signing_key_version::validate`.
compat-server-signing-key-version = []

View File

@ -19,12 +19,10 @@ pub mod voip_version_id;
pub use error::Error;
/// All identifiers must be 255 bytes or less.
#[cfg(not(feature = "compat-arbitrary-length-ids"))]
pub const MAX_BYTES: usize = 255;
/// Checks if an identifier is valid.
fn validate_id(id: &str, sigil: u8) -> Result<(), Error> {
#[cfg(not(feature = "compat-arbitrary-length-ids"))]
if id.len() > MAX_BYTES {
return Err(Error::MaximumLengthExceeded);
}

View File

@ -49,7 +49,7 @@ impl Response {
}
}
},
"ContentDisposition" | _ => {
_ => {
quote! {
if let Some(ref header) = self.#field_name {
headers.insert(

View File

@ -125,9 +125,9 @@ fn expand_event_enum(
Ok(quote! {
#( #attrs )*
#[non_exhaustive]
#[derive(Clone, Debug)]
#[allow(clippy::large_enum_variant, unused_qualifications)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub enum #ident {
#(
#docs
@ -389,10 +389,10 @@ fn expand_content_enum(
Ok(quote! {
#( #attrs )*
#[non_exhaustive]
#[derive(Clone, Debug, #serde::Serialize)]
#[serde(untagged)]
#[allow(clippy::large_enum_variant)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub enum #ident {
#(
#docs
@ -464,9 +464,9 @@ fn expand_full_content_enum(
Ok(quote! {
#( #attrs )*
#[non_exhaustive]
#[derive(Clone, Debug)]
#[allow(clippy::large_enum_variant)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub enum #ident {
#(
#docs

View File

@ -87,7 +87,6 @@ pub fn expand_id_zst(input: ItemStruct) -> syn::Result<TokenStream> {
#[automatically_derived]
impl #impl_generics #id_ty {
#[cfg(not(feature = "compat-arbitrary-length-ids"))]
#[doc = #max_bytes_docs]
pub const MAX_BYTES: usize = ruma_identifiers_validation::MAX_BYTES;
@ -386,6 +385,7 @@ fn expand_owned_id(input: &ItemStruct, inline_bytes: usize) -> TokenStream {
impl #impl_generics std::borrow::ToOwned for #id_ty {
type Owned = #owned_ty;
#[inline]
fn to_owned(&self) -> Self::Owned {
Self::Owned::new(self.as_bytes().into())
}
@ -916,7 +916,7 @@ fn expand_as_str_impls(ty: TokenStream, impl_generics: &ImplGenerics<'_>) -> Tok
impl #impl_generics std::fmt::Display for #ty {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.as_str())
f.write_str(self.as_str())
}
}

View File

@ -123,7 +123,7 @@ pub fn cfg_expand_struct(item: &mut syn::ItemStruct) {
struct CfgAttrExpand;
impl VisitMut for CfgAttrExpand {
fn visit_attribute_mut(&mut self, attr: &mut syn::Attribute) {
fn visit_attribute_mut(&mut self, attr: &mut Attribute) {
if attr.meta.path().is_ident("cfg_attr") {
// Ignore invalid cfg attributes
let Meta::List(list) = &attr.meta else { return };

View File

@ -245,7 +245,7 @@ pub fn verify_json(
let signature = Base64::<Standard>::parse(signature)
.map_err(|e| ParseError::base64("signature", signature, e))?;
verify_json_with(&Ed25519Verifier, &public_key, &signature, &object)?;
verify_json_with(&Ed25519Verifier, public_key, &signature, &object)?;
}
}
@ -599,7 +599,7 @@ pub fn verify_event(
let signature = Base64::<Standard>::parse(signature)
.map_err(|e| ParseError::base64("signature", signature, e))?;
verify_json_with(&Ed25519Verifier, &public_key, &signature, &canonical_json)?;
verify_json_with(&Ed25519Verifier, public_key, &signature, &canonical_json)?;
checked = true;
}
@ -651,7 +651,7 @@ pub fn required_keys(
};
let entry = map.entry(server.clone()).or_default();
set.into_iter()
set.iter()
.map(|(k, _)| k.clone())
.map(TryInto::try_into)
.filter_map(Result::ok)

View File

@ -329,7 +329,7 @@ where
target_user_member_event.as_ref(),
sender,
sender_member_event.as_ref(),
&incoming_event,
incoming_event,
current_third_party_invite,
power_levels_event.as_ref(),
join_rules_event.as_ref(),
@ -412,7 +412,7 @@ where
// If the event type's required power level is greater than the sender's power level, reject
// If the event has a state_key that starts with an @ and does not match the sender, reject.
if !can_send_event(&incoming_event, power_levels_event.as_ref(), sender_power_level) {
if !can_send_event(incoming_event, power_levels_event.as_ref(), sender_power_level) {
warn!("user cannot send event");
return Ok(false);
}
@ -423,7 +423,7 @@ where
if let Some(required_pwr_lvl) = check_power_levels(
room_version,
&incoming_event,
incoming_event,
power_levels_event.as_ref(),
sender_power_level,
) {

View File

@ -62,7 +62,7 @@ pub type TypeStateKey = (StateEventType, String);
pub async fn resolve<'a, E, SetIter, Fetch, FetchFut, Exists, ExistsFut>(
room_version: &RoomVersionId,
state_sets: impl IntoIterator<IntoIter = SetIter> + Send,
auth_chain_sets: &'a Vec<HashSet<E::Id>>,
auth_chain_sets: &'a [HashSet<E::Id>],
event_fetch: &Fetch,
event_exists: &Exists,
parallel_fetches: usize,
@ -94,7 +94,7 @@ where
trace!(map = ?conflicting, "conflicting events");
let auth_chain_diff =
get_auth_chain_diff(&auth_chain_sets).chain(conflicting.into_values().flatten());
get_auth_chain_diff(auth_chain_sets).chain(conflicting.into_values().flatten());
// `all_conflicted` contains unique items
// synapse says `full_set = {eid for eid in full_conflicted_set if eid in event_map}`
@ -237,13 +237,13 @@ where
}
/// Returns a Vec of deduped EventIds that appear in some chains but not others.
fn get_auth_chain_diff<Id>(auth_chain_sets: &Vec<HashSet<Id>>) -> impl Iterator<Item = Id> + Send
fn get_auth_chain_diff<Id>(auth_chain_sets: &[HashSet<Id>]) -> impl Iterator<Item = Id> + Send
where
Id: Clone + Eq + Hash + Send,
{
let num_sets = auth_chain_sets.len();
let mut id_counts: HashMap<Id, usize> = HashMap::new();
for id in auth_chain_sets.into_iter().flatten() {
for id in auth_chain_sets.iter().flatten() {
*id_counts.entry(id.clone()).or_default() += 1;
}
@ -449,12 +449,12 @@ where
let pl = stream::iter(auth_events)
.map(|aid| fetch_event(aid.clone()))
.buffer_unordered(parallel_fetches.min(5))
.filter_map(|aev| future::ready(aev))
.filter_map(future::ready)
.collect::<Vec<_>>()
.boxed()
.await
.into_iter()
.find(|aev| is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, ""));
.find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, ""));
let content: PowerLevelsContentFields = match pl {
None => return Ok(int!(0)),
@ -514,14 +514,13 @@ where
let auth_event_ids: HashSet<E::Id> = events_to_check
.iter()
.map(|event: &E| event.auth_events().map(Clone::clone))
.flatten()
.flat_map(|event: &E| event.auth_events().map(Clone::clone))
.collect();
let auth_events: HashMap<E::Id, E> = stream::iter(auth_event_ids.into_iter())
.map(|event_id| fetch_event(event_id))
.map(fetch_event)
.buffer_unordered(parallel_fetches)
.filter_map(|result| future::ready(result))
.filter_map(future::ready)
.map(|auth_event| (auth_event.event_id().clone(), auth_event))
.collect()
.boxed()
@ -544,7 +543,7 @@ where
let mut auth_state = StateMap::new();
for aid in event.auth_events() {
if let Some(&ref ev) = auth_events.get(aid.borrow()) {
if let Some(ev) = auth_events.get(aid.borrow()) {
//TODO: synapse checks "rejected_reason" which is most likely related to
// soft-failing
auth_state.insert(
@ -558,22 +557,20 @@ where
}
}
stream::iter(
auth_types.iter().filter_map(|key| Some((key, resolved_state.get(key)?))).into_iter(),
)
.filter_map(|(key, ev_id)| async move {
if let Some(event) = auth_events.get(ev_id.borrow()) {
Some((key, event.clone()))
} else {
Some((key, fetch_event(ev_id.clone()).await?.clone()))
}
})
.for_each(|(key, event)| {
//TODO: synapse checks "rejected_reason" is None here
auth_state.insert(key.to_owned(), event);
future::ready(())
})
.await;
stream::iter(auth_types.iter().filter_map(|key| Some((key, resolved_state.get(key)?))))
.filter_map(|(key, ev_id)| async move {
if let Some(event) = auth_events.get(ev_id.borrow()) {
Some((key, event.clone()))
} else {
Some((key, fetch_event(ev_id.clone()).await?.clone()))
}
})
.for_each(|(key, event)| {
//TODO: synapse checks "rejected_reason" is None here
auth_state.insert(key.to_owned(), event);
future::ready(())
})
.await;
debug!("event to check {:?}", event.event_id());
@ -651,17 +648,17 @@ where
.map(|(idx, eid)| ((*eid).clone(), idx))
.collect::<HashMap<_, _>>();
let order_map = stream::iter(to_sort.into_iter())
let order_map = stream::iter(to_sort.iter())
.map(|ev_id| fetch_event(ev_id.clone()).map(move |event| event.map(|event| (event, ev_id))))
.buffer_unordered(parallel_fetches)
.filter_map(|result| future::ready(result))
.filter_map(future::ready)
.map(|(event, ev_id)| {
get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event)
.map_ok(move |depth| (depth, event, ev_id))
.map(Result::ok)
})
.buffer_unordered(parallel_fetches)
.filter_map(|result| future::ready(result))
.filter_map(future::ready)
.fold(HashMap::new(), |mut order_map, (depth, event, ev_id)| {
order_map.insert(ev_id, (depth, event.origin_server_ts(), ev_id));
future::ready(order_map)

View File

@ -144,9 +144,6 @@ compat = [
"compat-tag-info",
]
# Allow IDs to exceed 255 bytes.
compat-arbitrary-length-ids = ["ruma-common/compat-arbitrary-length-ids"]
# Don't validate `ServerSigningKeyVersion`.
compat-server-signing-key-version = ["ruma-common/compat-server-signing-key-version"]