clean up some clippy warnings

Scott Lamb 2023-01-28 11:59:21 -08:00
parent 3965cbc547
commit 284a59b05e
18 changed files with 82 additions and 41 deletions

View File

@@ -3,7 +3,7 @@ name = "moonfire-base"
 version = "0.0.1"
 authors = ["Scott Lamb <slamb@slamb.org>"]
 readme = "../README.md"
-edition = "2018"
+edition = "2021"
 license-file = "../../LICENSE.txt"
 
 [features]

View File

@@ -71,7 +71,7 @@ impl RealClocks {
             let mut ts = mem::MaybeUninit::uninit();
             assert_eq!(0, libc::clock_gettime(clock, ts.as_mut_ptr()));
             let ts = ts.assume_init();
-            Timespec::new(ts.tv_sec as i64, ts.tv_nsec as i32)
+            Timespec::new(ts.tv_sec, ts.tv_nsec as i32)
         }
     }
 }
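
Note: the dropped `as i64` is clippy's `unnecessary_cast` lint. On 64-bit Linux targets, `timespec::tv_sec` is already `i64`, so the cast was a no-op. A minimal sketch of the lint (values illustrative):

// Sketch of clippy::unnecessary_cast: casting a value to its own type is a no-op.
fn main() {
    let tv_sec: i64 = 1_675_000_000; // on 64-bit Linux, tv_sec is already i64
    let before = tv_sec as i64; // clippy: casting to the same type is unnecessary
    let after = tv_sec; // the fix: drop the cast
    assert_eq!(before, after);
}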

View File

@@ -3,7 +3,7 @@ name = "moonfire-db"
 version = "0.7.5"
 authors = ["Scott Lamb <slamb@slamb.org>"]
 readme = "../README.md"
-edition = "2018"
+edition = "2021"
 license-file = "../../LICENSE.txt"
 
 [features]

View File

@@ -307,7 +307,7 @@ pub struct SessionHash(pub [u8; 24]);
 impl SessionHash {
     pub fn encode_base64(&self, output: &mut [u8; 32]) {
-        ::base64::encode_config_slice(&self.0, ::base64::STANDARD_NO_PAD, output);
+        ::base64::encode_config_slice(self.0, ::base64::STANDARD_NO_PAD, output);
     }
 
     pub fn decode_base64(input: &[u8]) -> Result<Self, Error> {
@@ -629,6 +629,7 @@ impl State {
         )
     }
 
+    #[allow(clippy::too_many_arguments)]
     fn make_session_int<'s>(
         rand: &SystemRandom,
         conn: &Connection,
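
Note: `#[allow(clippy::too_many_arguments)]` opts a single function out of the lint that fires once a signature exceeds clippy's default threshold of seven parameters; here the signature is kept as-is rather than bundling arguments into a struct. A minimal sketch with a made-up function, not this crate's API:

// Sketch of clippy::too_many_arguments; `configure` is hypothetical.
#[allow(clippy::too_many_arguments)]
fn configure(a: u32, b: u32, c: u32, d: u32, e: u32, f: u32, g: u32, h: u32) -> u32 {
    a + b + c + d + e + f + g + h
}

fn main() {
    println!("{}", configure(1, 2, 3, 4, 5, 6, 7, 8));
}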

View File

@@ -6,7 +6,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     Ok(protobuf_codegen::Codegen::new()
         .pure()
         .out_dir(std::env::var("OUT_DIR")?)
-        .inputs(&["proto/schema.proto"])
+        .inputs(["proto/schema.proto"])
         .include("proto")
         .customize(protobuf_codegen::Customize::default().gen_mod_rs(true))
         .run()?)
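
Note: dropping the `&` is clippy's `needless_borrow`. `inputs` only needs something iterable whose items can be viewed as paths, so the array can be passed by value. A sketch with a hypothetical function whose signature has the same shape:

use std::path::Path;

// `print_all` is illustrative, not protobuf_codegen's API.
fn print_all(inputs: impl IntoIterator<Item = impl AsRef<Path>>) {
    for p in inputs {
        println!("{}", p.as_ref().display());
    }
}

fn main() {
    print_all(["proto/schema.proto"]); // array by value; the `&` added nothing
}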

View File

@@ -656,7 +656,16 @@ impl ::std::fmt::Display for CompositeId {
 /// structs.
 struct StreamStateChanger {
     sids: [Option<i32>; NUM_STREAM_TYPES],
-    streams: Vec<(i32, Option<(i32, StreamType, StreamChange)>)>,
+    /// For each stream to change, a (stream_id, upsert or `None` to delete) tuple.
+    streams: Vec<(i32, Option<StreamStateChangerUpsert>)>,
 }
 
+/// Upsert state used internally within [`StreamStateChanger`].
+struct StreamStateChangerUpsert {
+    camera_id: i32,
+    type_: StreamType,
+    sc: StreamChange,
+}
+
 impl StreamStateChanger {
@@ -722,8 +731,14 @@
                     bail!("missing stream {}", sid);
                 }
                 sids[i] = Some(sid);
-                let sc = mem::take(*sc);
-                streams.push((sid, Some((camera_id, type_, sc))));
+                streams.push((
+                    sid,
+                    Some(StreamStateChangerUpsert {
+                        camera_id,
+                        type_,
+                        sc: mem::take(*sc),
+                    }),
+                ));
             }
         } else {
             if sc.config.is_empty() && sc.sample_file_dir_id.is_none() {
@@ -747,8 +762,14 @@
                 })?;
                 let id = tx.last_insert_rowid() as i32;
                 sids[i] = Some(id);
-                let sc = mem::take(*sc);
-                streams.push((id, Some((camera_id, type_, sc))));
+                streams.push((
+                    id,
+                    Some(StreamStateChangerUpsert {
+                        camera_id,
+                        type_,
+                        sc: mem::take(*sc),
+                    }),
+                ));
             }
         }
         Ok(StreamStateChanger { sids, streams })
@@ -763,7 +784,14 @@
         for (id, stream) in self.streams.drain(..) {
             use ::std::collections::btree_map::Entry;
             match (streams_by_id.entry(id), stream) {
-                (Entry::Vacant(e), Some((camera_id, type_, sc))) => {
+                (
+                    Entry::Vacant(e),
+                    Some(StreamStateChangerUpsert {
+                        camera_id,
+                        type_,
+                        sc,
+                    }),
+                ) => {
                     e.insert(Stream {
                         id,
                         type_,
@@ -789,7 +817,7 @@
                     });
                 }
                 (Entry::Vacant(_), None) => {}
-                (Entry::Occupied(e), Some((_, _, sc))) => {
+                (Entry::Occupied(e), Some(StreamStateChangerUpsert { sc, .. })) => {
                     let e = e.into_mut();
                     e.sample_file_dir_id = sc.sample_file_dir_id;
                     e.config = sc.config;
@@ -1006,7 +1034,7 @@
             // oldest recordings for the stream.
             let start = CompositeId::new(stream_id, 0);
             let end = CompositeId(l.id.0 + 1);
-            let n = raw::delete_recordings(&tx, dir, start..end)? as usize;
+            let n = raw::delete_recordings(&tx, dir, start..end)?;
             if n != s.to_delete.len() {
                 bail!(
                     "Found {} rows in {} .. {}, expected {}: {:?}",
@@ -2395,13 +2423,13 @@ impl<'db, C: Clocks + Clone> DatabaseGuard<'db, C> {
 impl<'db, C: Clocks + Clone> ::std::ops::Deref for DatabaseGuard<'db, C> {
     type Target = LockedDatabase;
     fn deref(&self) -> &LockedDatabase {
-        &*self.db
+        &self.db
     }
 }
 
 impl<'db, C: Clocks + Clone> ::std::ops::DerefMut for DatabaseGuard<'db, C> {
     fn deref_mut(&mut self) -> &mut LockedDatabase {
-        &mut *self.db
+        &mut self.db
     }
 }
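
Note: the main refactor in this file replaces the anonymous `(i32, StreamType, StreamChange)` tuple with the named `StreamStateChangerUpsert` struct, so match arms can bind fields by name and ignore the rest with `..` instead of `(_, _, sc)`. A minimal sketch of the pattern with stand-in types:

// Stand-in types; the real code uses StreamType and StreamChange.
struct Upsert {
    camera_id: i32,
    name: String,
}

fn describe(action: Option<Upsert>) -> String {
    match action {
        // Named fields read better than `.0`/`.1`, and `..` skips the rest.
        Some(Upsert { name, .. }) => format!("upsert {name}"),
        None => "delete".to_owned(),
    }
}

fn main() {
    let u = Upsert { camera_id: 1, name: "driveway".to_owned() };
    println!("camera {}: {}", u.camera_id, describe(Some(u)));
}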

View File

@@ -93,7 +93,7 @@ pub struct FileStream {
     reader: Reader,
 }
 
-type ReadReceiver = tokio::sync::oneshot::Receiver<Result<(Option<OpenFile>, Vec<u8>), Error>>;
+type ReadReceiver = tokio::sync::oneshot::Receiver<Result<SuccessfulRead, Error>>;
 
 enum FileStreamState {
     Idle(OpenFile),
@@ -120,11 +120,14 @@ impl FileStream {
                 self.state = FileStreamState::Invalid;
                 Poll::Ready(Some(Err(e)))
             }
-            Poll::Ready(Ok(Ok((Some(file), chunk)))) => {
+            Poll::Ready(Ok(Ok(SuccessfulRead {
+                chunk,
+                file: Some(file),
+            }))) => {
                 self.state = FileStreamState::Idle(file);
                 Poll::Ready(Some(Ok(chunk)))
             }
-            Poll::Ready(Ok(Ok((None, chunk)))) => {
+            Poll::Ready(Ok(Ok(SuccessfulRead { chunk, file: None }))) => {
                 self.state = FileStreamState::Invalid;
                 Poll::Ready(Some(Ok(chunk)))
             }
@@ -207,18 +210,25 @@ impl Drop for OpenFile {
     }
 }
 
+struct SuccessfulRead {
+    chunk: Vec<u8>,
+    /// If this is not the final requested chunk, the `OpenFile` for next time.
+    file: Option<OpenFile>,
+}
+
 enum ReaderCommand {
     /// Opens a file and reads the first chunk.
     OpenFile {
         composite_id: CompositeId,
         range: std::ops::Range<u64>,
-        tx: tokio::sync::oneshot::Sender<Result<(Option<OpenFile>, Vec<u8>), Error>>,
+        tx: tokio::sync::oneshot::Sender<Result<SuccessfulRead, Error>>,
     },
 
     /// Reads the next chunk of the file.
     ReadNextChunk {
         file: OpenFile,
-        tx: tokio::sync::oneshot::Sender<Result<(Option<OpenFile>, Vec<u8>), Error>>,
+        tx: tokio::sync::oneshot::Sender<Result<SuccessfulRead, Error>>,
     },
 
     /// Closes the file early, as when the [FileStream] is dropped before completing.
@@ -267,11 +277,7 @@ impl ReaderInt {
         }
     }
 
-    fn open(
-        &self,
-        composite_id: CompositeId,
-        range: Range<u64>,
-    ) -> Result<(Option<OpenFile>, Vec<u8>), Error> {
+    fn open(&self, composite_id: CompositeId, range: Range<u64>) -> Result<SuccessfulRead, Error> {
         let p = super::CompositeIdPath::from(composite_id);
 
         // Reader::open_file checks for an empty range, but check again right
@@ -362,7 +368,7 @@
         }))
     }
 
-    fn chunk(&self, mut file: OpenFile) -> (Option<OpenFile>, Vec<u8>) {
+    fn chunk(&self, mut file: OpenFile) -> SuccessfulRead {
         // Read a chunk that's large enough to minimize thread handoffs but
         // short enough to keep memory usage under control. It's hopefully
        // unnecessary to worry about disk seeks; the madvise call should cause
@@ -393,7 +399,7 @@
             file.map_pos = end;
             Some(file)
         };
-        (file, chunk)
+        SuccessfulRead { chunk, file }
     }
 }
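
Note: same refactor as in db.rs, applied to the reader's oneshot channel: the `(Option<OpenFile>, Vec<u8>)` tuple becomes the named `SuccessfulRead`, where `file: Some(...)` hands the still-open file back to the caller for the next read. A sketch of that ownership round-trip, with a std channel standing in for tokio's oneshot (names illustrative):

use std::sync::mpsc;

struct OpenFile; // stand-in for the real file handle

struct SuccessfulRead {
    chunk: Vec<u8>,
    file: Option<OpenFile>, // `Some` returns ownership for the next request
}

fn main() {
    let (tx, rx) = mpsc::channel();
    // A reader thread would send each chunk together with the still-open file:
    tx.send(SuccessfulRead { chunk: vec![1, 2, 3], file: Some(OpenFile) }).unwrap();
    let r = rx.recv().unwrap();
    assert_eq!(r.chunk.len(), 3);
    assert!(r.file.is_some()); // keep it to request the next chunk
}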

View File

@@ -429,7 +429,7 @@ pub(crate) fn list_garbage(
     let mut garbage = FnvHashSet::default();
     let mut stmt =
         conn.prepare_cached("select composite_id from garbage where sample_file_dir_id = ?")?;
-    let mut rows = stmt.query(&[&dir_id])?;
+    let mut rows = stmt.query([&dir_id])?;
     while let Some(row) = rows.next()? {
         garbage.insert(CompositeId(row.get(0)?));
     }
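
Note: another `needless_borrow` fix; rusqlite accepts a fixed-size array as positional parameters, so the slice borrow in `query(&[&dir_id])` was redundant. A sketch assuming the rusqlite crate is available (schema and values are made up):

use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch("CREATE TABLE garbage (composite_id INTEGER, dir_id INTEGER);")?;
    conn.execute("INSERT INTO garbage VALUES (1, 7)", [])?;
    let mut stmt = conn.prepare("SELECT composite_id FROM garbage WHERE dir_id = ?")?;
    let dir_id = 7i64;
    let mut rows = stmt.query([&dir_id])?; // array literal, not `&[...]`
    while let Some(row) = rows.next()? {
        let id: i64 = row.get(0)?;
        println!("{id}");
    }
    Ok(())
}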

View File

@@ -108,7 +108,7 @@ impl SampleIndexIterator {
             true => (self.bytes, self.bytes_other),
             false => (self.bytes_other, self.bytes),
         };
-        self.i_and_is_key = (i2 as u32) | (((raw1 & 1) as u32) << 31);
+        self.i_and_is_key = (i2 as u32) | ((raw1 & 1) << 31);
         let bytes_delta = unzigzag32(raw2);
         if self.is_key() {
             self.bytes = prev_bytes_key + bytes_delta;
@@ -257,7 +257,7 @@ impl Segment {
             recording
         );
         db.with_recording_playback(self_.id, &mut |playback| {
-            let mut begin = Box::new(SampleIndexIterator::default());
+            let mut begin = Box::<SampleIndexIterator>::default();
             let data = &playback.video_index;
             let mut it = SampleIndexIterator::default();
             if !it.next(data)? {
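
Note: `Box::<SampleIndexIterator>::default()` is clippy's `box_default` suggestion, which avoids spelling the type's default construction twice. A minimal sketch with a stand-in type:

// Sketch of clippy::box_default.
#[derive(Default)]
struct SampleIndex {
    i_and_is_key: u32,
}

fn main() {
    let a = Box::new(SampleIndex::default()); // clippy: box_default
    let b = Box::<SampleIndex>::default(); // suggested replacement
    println!("{} {}", a.i_and_is_key, b.i_and_is_key);
}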

View File

@@ -380,7 +380,7 @@
     {
         {
             let mut db = self.db.lock();
-            delete_recordings(&mut *db)?;
+            delete_recordings(&mut db)?;
             db.flush("synchronous deletion")?;
         }
         let mut garbage: Vec<_> = {
@@ -618,6 +618,11 @@ pub struct Writer<'a, C: Clocks + Clone, D: DirWriter> {
     state: WriterState<D::File>,
 }
 
+// clippy points out that the `Open` variant is significantly larger and
+// suggests boxing it. There's no benefit to this given that we don't have a lot
+// of `WriterState`s active at once, and they should cycle between `Open` and
+// `Closed`.
+#[allow(clippy::large_enum_variant)]
 enum WriterState<F: FileWriter> {
     Unopened,
     Open(InnerWriter<F>),
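
Note: the comment above documents a deliberate opt-out of `large_enum_variant`. A sketch of what the lint measures, with made-up sizes rather than Moonfire's types: an enum is as large as its biggest variant, so a big inline payload inflates every value, and boxing it trades that space for an allocation per open/close cycle.

#[allow(clippy::large_enum_variant)]
enum WriterState {
    Unopened,
    Open([u8; 4096]), // boxing this would shrink the enum...
    Closed(u64),      // ...at the cost of an allocation each cycle
}

fn main() {
    println!("size: {} bytes", std::mem::size_of::<WriterState>());
    let state = WriterState::Open([0; 4096]);
    match state {
        WriterState::Unopened => println!("not yet open"),
        WriterState::Open(buf) => println!("open, {}-byte buffer", buf.len()),
        WriterState::Closed(n) => println!("closed after {n} bytes"),
    }
}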

View File

@@ -65,7 +65,7 @@ impl hyper::body::Buf for Chunk {
         self.0.len()
     }
     fn chunk(&self) -> &[u8] {
-        &*self.0
+        &self.0
     }
     fn advance(&mut self, cnt: usize) {
         self.0 = ::std::mem::replace(&mut self.0, ARefss::new(&[][..])).map(|b| &b[cnt..]);

View File

@@ -58,7 +58,7 @@ fn update_limits(model: &Model, siv: &mut Cursive) {
 fn edit_limit(model: &RefCell<Model>, siv: &mut Cursive, id: i32, content: &str) {
     debug!("on_edit called for id {}", id);
     let mut model = model.borrow_mut();
-    let model: &mut Model = &mut *model;
+    let model: &mut Model = &mut model;
     let stream = model.streams.get_mut(&id).unwrap();
     let new_value = decode_size(content).ok();
     let delta = new_value.unwrap_or(0) - stream.retain.unwrap_or(0);
@@ -95,7 +95,7 @@ fn edit_limit(model: &RefCell<Model>, siv: &mut Cursive, id: i32, content: &str)
 fn edit_record(model: &RefCell<Model>, id: i32, record: bool) {
     let mut model = model.borrow_mut();
-    let model: &mut Model = &mut *model;
+    let model: &mut Model = &mut model;
     let stream = model.streams.get_mut(&id).unwrap();
     stream.record = record;
 }
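
Note: like the `Deref` impls and the `delete_recordings(&mut *db)` hunk earlier, these edits follow clippy's `explicit_auto_deref`: once the target type is annotated, deref coercion supplies the `*` automatically. A minimal sketch with a stand-in `Model`:

use std::cell::RefCell;

struct Model {
    retain: i64,
}

fn main() {
    let model = RefCell::new(Model { retain: 0 });
    let mut guard = model.borrow_mut();
    // clippy suggested dropping the `*` in `&mut *guard`:
    let m: &mut Model = &mut guard; // coerces RefMut<Model> -> &mut Model
    m.retain = 42;
    drop(guard);
    println!("{}", model.borrow().retain);
}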

View File

@@ -80,7 +80,7 @@ pub fn run(args: Args) -> Result<i32, Error> {
         permissions,
     )?;
     let mut encoded = [0u8; 64];
-    base64::encode_config_slice(&sid, base64::STANDARD_NO_PAD, &mut encoded);
+    base64::encode_config_slice(sid, base64::STANDARD_NO_PAD, &mut encoded);
     let encoded = std::str::from_utf8(&encoded[..]).expect("base64 is valid UTF-8");
 
     if let Some(ref p) = args.curl_cookie_jar {

View File

@@ -221,7 +221,7 @@ fn make_listener(addr: &config::AddressConfig) -> Result<Listener, Error> {
     // Go through std::net::TcpListener to avoid needing async. That's there for DNS resolution,
     // but it's unnecessary when starting from a SocketAddr.
-    let listener = std::net::TcpListener::bind(&sa)
+    let listener = std::net::TcpListener::bind(sa)
         .with_context(|_| format!("unable to bind TCP socket {}", &sa))?;
     listener.set_nonblocking(true)?;
     Ok(Listener::Tcp(tokio::net::TcpListener::from_std(listener)?))
@@ -375,7 +375,7 @@
             .map(db::Permissions::from),
         trust_forward_hdrs: b.trust_forward_headers,
         time_zone_name: time_zone_name.clone(),
-        privileged_unix_uid: b.own_uid_is_privileged.then(|| own_euid),
+        privileged_unix_uid: b.own_uid_is_privileged.then_some(own_euid),
     })?);
     let make_svc = make_service_fn(move |conn: &crate::web::accept::Conn| {
         let conn_data = *conn.data();
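
Note: `then_some` fixes clippy's `unnecessary_lazy_evaluations`: `own_euid` is already computed, so the closure in `then(|| own_euid)` buys no laziness. A minimal sketch (values illustrative):

// `then(|| v)` defers computing v; `then_some(v)` takes an existing value.
fn main() {
    let own_euid: u32 = 1000; // already computed before this point
    let privileged = true;
    let lazy: Option<u32> = privileged.then(|| own_euid); // clippy flags the closure
    let eager: Option<u32> = privileged.then_some(own_euid); // the fix
    assert_eq!(lazy, eager);
    println!("{eager:?}");
}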

View File

@@ -1630,7 +1630,7 @@ impl FileBuilder {
             self.body
                 .buf
                 .extend_from_slice(b"stsc\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01");
-            self.body.append_u32(self.num_subtitle_samples as u32);
+            self.body.append_u32(self.num_subtitle_samples);
             self.body.append_u32(1);
         })
     }
@@ -1665,7 +1665,7 @@
             self.body.buf.extend_from_slice(b"stsz\x00\x00\x00\x00");
             self.body
                 .append_u32((mem::size_of::<u16>() + SUBTITLE_LENGTH) as u32);
-            self.body.append_u32(self.num_subtitle_samples as u32);
+            self.body.append_u32(self.num_subtitle_samples);
         })
     }

View File

@@ -52,6 +52,7 @@ impl<'a, C> Streamer<'a, C>
 where
     C: 'a + Clocks + Clone,
 {
+    #[allow(clippy::too_many_arguments)]
     pub fn new<'tmp>(
         env: &Environment<'a, 'tmp, C>,
         dir: Arc<dir::SampleFileDir>,

View File

@@ -200,7 +200,7 @@ impl Service {
     pub fn new(config: Config) -> Result<Self, Error> {
         let mut ui_dir = None;
         if let Some(d) = config.ui_dir {
-            match FsDir::builder().for_path(&d) {
+            match FsDir::builder().for_path(d) {
                 Err(e) => {
                     warn!(
                         "Unable to load ui dir {}; will serve no static files: {}",

View File

@@ -121,7 +121,7 @@ impl Service {
 fn encode_sid(sid: db::RawSessionId, flags: i32) -> String {
     let mut cookie = String::with_capacity(128);
     cookie.push_str("s=");
-    base64::encode_config_buf(&sid, base64::STANDARD_NO_PAD, &mut cookie);
+    base64::encode_config_buf(sid, base64::STANDARD_NO_PAD, &mut cookie);
     use auth::SessionFlag;
     if (flags & SessionFlag::HttpOnly as i32) != 0 {
         cookie.push_str("; HttpOnly");