clean up some clippy warnings

This commit is contained in:
Scott Lamb
2023-01-28 11:59:21 -08:00
parent 3965cbc547
commit 284a59b05e
18 changed files with 82 additions and 41 deletions

View File

@@ -3,7 +3,7 @@ name = "moonfire-db"
version = "0.7.5"
authors = ["Scott Lamb <slamb@slamb.org>"]
readme = "../README.md"
edition = "2018"
edition = "2021"
license-file = "../../LICENSE.txt"
[features]

View File

@@ -307,7 +307,7 @@ pub struct SessionHash(pub [u8; 24]);
impl SessionHash {
pub fn encode_base64(&self, output: &mut [u8; 32]) {
::base64::encode_config_slice(&self.0, ::base64::STANDARD_NO_PAD, output);
::base64::encode_config_slice(self.0, ::base64::STANDARD_NO_PAD, output);
}
pub fn decode_base64(input: &[u8]) -> Result<Self, Error> {
@@ -629,6 +629,7 @@ impl State {
)
}
#[allow(clippy::too_many_arguments)]
fn make_session_int<'s>(
rand: &SystemRandom,
conn: &Connection,

View File

@@ -6,7 +6,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
Ok(protobuf_codegen::Codegen::new()
.pure()
.out_dir(std::env::var("OUT_DIR")?)
.inputs(&["proto/schema.proto"])
.inputs(["proto/schema.proto"])
.include("proto")
.customize(protobuf_codegen::Customize::default().gen_mod_rs(true))
.run()?)

View File

@@ -656,7 +656,16 @@ impl ::std::fmt::Display for CompositeId {
/// structs.
struct StreamStateChanger {
sids: [Option<i32>; NUM_STREAM_TYPES],
streams: Vec<(i32, Option<(i32, StreamType, StreamChange)>)>,
/// For each stream to change, a (stream_id, upsert or `None` to delete) tuple.
streams: Vec<(i32, Option<StreamStateChangerUpsert>)>,
}
/// Upsert state used internally within [`StreamStateChanger`].
///
/// Carried in `StreamStateChanger::streams` as the `Some` side of the
/// (stream id, upsert-or-delete) tuple; `None` in that tuple means delete.
struct StreamStateChangerUpsert {
    /// Id of the camera this stream belongs to.
    camera_id: i32,
    /// The stream's type; stored into the resulting `Stream`'s `type_` field
    /// on insert. Presumably distinguishes streams of one camera — TODO confirm.
    type_: StreamType,
    /// The requested change to apply when this upsert is committed
    /// (e.g. `sample_file_dir_id` and `config` are copied into the stream).
    sc: StreamChange,
}
impl StreamStateChanger {
@@ -722,8 +731,14 @@ impl StreamStateChanger {
bail!("missing stream {}", sid);
}
sids[i] = Some(sid);
let sc = mem::take(*sc);
streams.push((sid, Some((camera_id, type_, sc))));
streams.push((
sid,
Some(StreamStateChangerUpsert {
camera_id,
type_,
sc: mem::take(*sc),
}),
));
}
} else {
if sc.config.is_empty() && sc.sample_file_dir_id.is_none() {
@@ -747,8 +762,14 @@ impl StreamStateChanger {
})?;
let id = tx.last_insert_rowid() as i32;
sids[i] = Some(id);
let sc = mem::take(*sc);
streams.push((id, Some((camera_id, type_, sc))));
streams.push((
id,
Some(StreamStateChangerUpsert {
camera_id,
type_,
sc: mem::take(*sc),
}),
));
}
}
Ok(StreamStateChanger { sids, streams })
@@ -763,7 +784,14 @@ impl StreamStateChanger {
for (id, stream) in self.streams.drain(..) {
use ::std::collections::btree_map::Entry;
match (streams_by_id.entry(id), stream) {
(Entry::Vacant(e), Some((camera_id, type_, sc))) => {
(
Entry::Vacant(e),
Some(StreamStateChangerUpsert {
camera_id,
type_,
sc,
}),
) => {
e.insert(Stream {
id,
type_,
@@ -789,7 +817,7 @@ impl StreamStateChanger {
});
}
(Entry::Vacant(_), None) => {}
(Entry::Occupied(e), Some((_, _, sc))) => {
(Entry::Occupied(e), Some(StreamStateChangerUpsert { sc, .. })) => {
let e = e.into_mut();
e.sample_file_dir_id = sc.sample_file_dir_id;
e.config = sc.config;
@@ -1006,7 +1034,7 @@ impl LockedDatabase {
// oldest recordings for the stream.
let start = CompositeId::new(stream_id, 0);
let end = CompositeId(l.id.0 + 1);
let n = raw::delete_recordings(&tx, dir, start..end)? as usize;
let n = raw::delete_recordings(&tx, dir, start..end)?;
if n != s.to_delete.len() {
bail!(
"Found {} rows in {} .. {}, expected {}: {:?}",
@@ -2395,13 +2423,13 @@ impl<'db, C: Clocks + Clone> DatabaseGuard<'db, C> {
impl<'db, C: Clocks + Clone> ::std::ops::Deref for DatabaseGuard<'db, C> {
type Target = LockedDatabase;
fn deref(&self) -> &LockedDatabase {
&*self.db
&self.db
}
}
impl<'db, C: Clocks + Clone> ::std::ops::DerefMut for DatabaseGuard<'db, C> {
fn deref_mut(&mut self) -> &mut LockedDatabase {
&mut *self.db
&mut self.db
}
}

View File

@@ -93,7 +93,7 @@ pub struct FileStream {
reader: Reader,
}
type ReadReceiver = tokio::sync::oneshot::Receiver<Result<(Option<OpenFile>, Vec<u8>), Error>>;
type ReadReceiver = tokio::sync::oneshot::Receiver<Result<SuccessfulRead, Error>>;
enum FileStreamState {
Idle(OpenFile),
@@ -120,11 +120,14 @@ impl FileStream {
self.state = FileStreamState::Invalid;
Poll::Ready(Some(Err(e)))
}
Poll::Ready(Ok(Ok((Some(file), chunk)))) => {
Poll::Ready(Ok(Ok(SuccessfulRead {
chunk,
file: Some(file),
}))) => {
self.state = FileStreamState::Idle(file);
Poll::Ready(Some(Ok(chunk)))
}
Poll::Ready(Ok(Ok((None, chunk)))) => {
Poll::Ready(Ok(Ok(SuccessfulRead { chunk, file: None }))) => {
self.state = FileStreamState::Invalid;
Poll::Ready(Some(Ok(chunk)))
}
@@ -207,18 +210,25 @@ impl Drop for OpenFile {
}
}
/// Successful result of one chunk read, sent back over the oneshot channel
/// for `ReaderCommand::OpenFile` / `ReaderCommand::ReadNextChunk`.
struct SuccessfulRead {
    /// The bytes read for this chunk.
    chunk: Vec<u8>,
    /// If this is not the final requested chunk, the `OpenFile` for next time.
    file: Option<OpenFile>,
}
enum ReaderCommand {
/// Opens a file and reads the first chunk.
OpenFile {
composite_id: CompositeId,
range: std::ops::Range<u64>,
tx: tokio::sync::oneshot::Sender<Result<(Option<OpenFile>, Vec<u8>), Error>>,
tx: tokio::sync::oneshot::Sender<Result<SuccessfulRead, Error>>,
},
/// Reads the next chunk of the file.
ReadNextChunk {
file: OpenFile,
tx: tokio::sync::oneshot::Sender<Result<(Option<OpenFile>, Vec<u8>), Error>>,
tx: tokio::sync::oneshot::Sender<Result<SuccessfulRead, Error>>,
},
/// Closes the file early, as when the [FileStream] is dropped before completing.
@@ -267,11 +277,7 @@ impl ReaderInt {
}
}
fn open(
&self,
composite_id: CompositeId,
range: Range<u64>,
) -> Result<(Option<OpenFile>, Vec<u8>), Error> {
fn open(&self, composite_id: CompositeId, range: Range<u64>) -> Result<SuccessfulRead, Error> {
let p = super::CompositeIdPath::from(composite_id);
// Reader::open_file checks for an empty range, but check again right
@@ -362,7 +368,7 @@ impl ReaderInt {
}))
}
fn chunk(&self, mut file: OpenFile) -> (Option<OpenFile>, Vec<u8>) {
fn chunk(&self, mut file: OpenFile) -> SuccessfulRead {
// Read a chunk that's large enough to minimize thread handoffs but
// short enough to keep memory usage under control. It's hopefully
// unnecessary to worry about disk seeks; the madvise call should cause
@@ -393,7 +399,7 @@ impl ReaderInt {
file.map_pos = end;
Some(file)
};
(file, chunk)
SuccessfulRead { chunk, file }
}
}

View File

@@ -429,7 +429,7 @@ pub(crate) fn list_garbage(
let mut garbage = FnvHashSet::default();
let mut stmt =
conn.prepare_cached("select composite_id from garbage where sample_file_dir_id = ?")?;
let mut rows = stmt.query(&[&dir_id])?;
let mut rows = stmt.query([&dir_id])?;
while let Some(row) = rows.next()? {
garbage.insert(CompositeId(row.get(0)?));
}

View File

@@ -108,7 +108,7 @@ impl SampleIndexIterator {
true => (self.bytes, self.bytes_other),
false => (self.bytes_other, self.bytes),
};
self.i_and_is_key = (i2 as u32) | (((raw1 & 1) as u32) << 31);
self.i_and_is_key = (i2 as u32) | ((raw1 & 1) << 31);
let bytes_delta = unzigzag32(raw2);
if self.is_key() {
self.bytes = prev_bytes_key + bytes_delta;
@@ -257,7 +257,7 @@ impl Segment {
recording
);
db.with_recording_playback(self_.id, &mut |playback| {
let mut begin = Box::new(SampleIndexIterator::default());
let mut begin = Box::<SampleIndexIterator>::default();
let data = &playback.video_index;
let mut it = SampleIndexIterator::default();
if !it.next(data)? {

View File

@@ -380,7 +380,7 @@ impl<C: Clocks + Clone> Syncer<C, Arc<dir::SampleFileDir>> {
{
{
let mut db = self.db.lock();
delete_recordings(&mut *db)?;
delete_recordings(&mut db)?;
db.flush("synchronous deletion")?;
}
let mut garbage: Vec<_> = {
@@ -618,6 +618,11 @@ pub struct Writer<'a, C: Clocks + Clone, D: DirWriter> {
state: WriterState<D::File>,
}
// clippy points out that the `Open` variant is significantly larger and
// suggests boxing it. There's no benefit to this given that we don't have a lot
// of `WriterState`s active at once, and they should cycle between `Open` and
// `Closed`.
#[allow(clippy::large_enum_variant)]
enum WriterState<F: FileWriter> {
Unopened,
Open(InnerWriter<F>),