Repository: https://github.com/scottlamb/moonfire-nvr.git
Commit: use jiff crate
@@ -25,6 +25,7 @@ futures = "0.3"
 h264-reader = { workspace = true }
 hashlink = "0.9.1"
 itertools = { workspace = true }
+jiff = { workspace = true }
 libc = "0.2"
 nix = { workspace = true, features = ["dir", "feature", "fs", "mman"] }
 num-rational = { version = "0.4.0", default-features = false, features = ["std"] }
@@ -38,7 +39,6 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 smallvec = "1.0"
 tempfile = "3.2.0"
-time = "0.1"
 tokio = { version = "1.24", features = ["macros", "rt-multi-thread", "sync"] }
 tracing = { workspace = true }
 ulid = "1.0.0"
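The rest of the diff swaps the old `time` 0.1 types (`Timespec`, `Tm`, `Duration`) for jiff's `Timestamp`, `Zoned`, and `civil` types. A minimal sketch of the jiff equivalents, assuming jiff's public API; the zone name and example values below are illustrative, not from the commit:

```rust
use jiff::{tz::TimeZone, Timestamp, Zoned};

fn main() -> Result<(), jiff::Error> {
    // A fixed instant, like time 0.1's `Timespec { sec, nsec: 0 }`.
    let ts = Timestamp::from_second(1_700_000_000)?;

    // Pair it with a time zone to get a calendar-aware value,
    // roughly what `time::at(...)` produced as a `Tm`.
    let zoned = Zoned::new(ts, TimeZone::get("America/Los_Angeles")?);

    // Calendar fields and the underlying instant are both available.
    println!("{} -> {}", zoned.date(), zoned.timestamp().as_second());
    Ok(())
}
```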
@@ -5,7 +5,7 @@
 //! In-memory indexes by calendar day.
 
 use base::time::{Duration, Time, TIME_UNITS_PER_SEC};
-use base::Error;
+use base::{err, Error};
 use smallvec::SmallVec;
 use std::cmp;
 use std::collections::BTreeMap;
@@ -20,28 +20,22 @@ use tracing::{error, trace};
 pub struct Key(pub(crate) [u8; 10]);
 
 impl Key {
-    fn new(tm: time::Tm) -> Result<Self, Error> {
+    fn new(tm: &jiff::Zoned) -> Result<Self, Error> {
         let mut s = Key([0u8; 10]);
-        write!(&mut s.0[..], "{}", tm.strftime("%Y-%m-%d"))?;
+        write!(
+            &mut s.0[..],
+            "{}",
+            tm.strftime("%Y-%m-%d")
+                .map_err(|e| err!(Internal, source(e)))?
+        )?;
         Ok(s)
     }
 
     pub fn bounds(&self) -> Range<Time> {
-        let mut my_tm = time::strptime(self.as_ref(), "%Y-%m-%d").expect("days must be parseable");
-        my_tm.tm_utcoff = 1; // to the time crate, values != 0 mean local time.
-        my_tm.tm_isdst = -1;
-        let start = Time(my_tm.to_timespec().sec * TIME_UNITS_PER_SEC);
-        my_tm.tm_hour = 0;
-        my_tm.tm_min = 0;
-        my_tm.tm_sec = 0;
-        my_tm.tm_mday += 1;
-        let end = Time(my_tm.to_timespec().sec * TIME_UNITS_PER_SEC);
-        start..end
+        let date: jiff::civil::Date = self.as_ref().parse().expect("Key should be valid date");
+        let start = date
+            .to_zoned(base::time::global_zone())
+            .expect("Key should be valid date");
+        let end = start.tomorrow().expect("Key should have valid tomorrow");
+
+        // Note day boundaries are expected to always be whole numbers of seconds.
+        Time(start.timestamp().as_second() * TIME_UNITS_PER_SEC)
+            ..Time(end.timestamp().as_second() * TIME_UNITS_PER_SEC)
     }
 }
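A self-contained sketch of the new `bounds()` logic, with a hard-coded zone standing in for moonfire's `base::time::global_zone()` (an assumption for illustration): parse the `YYYY-MM-DD` key as a civil date, resolve it in the zone, and take the next day's midnight as the exclusive end before scaling to the 90 kHz units the database stores.

```rust
use jiff::tz::TimeZone;

const TIME_UNITS_PER_SEC: i64 = 90_000; // moonfire stores times in 90 kHz units

fn day_bounds_90k(key: &str, tz: &TimeZone) -> Result<(i64, i64), jiff::Error> {
    let date: jiff::civil::Date = key.parse()?;
    let start = date.to_zoned(tz.clone())?;
    let end = start.tomorrow()?; // handles 23h/25h DST days correctly
    Ok((
        start.timestamp().as_second() * TIME_UNITS_PER_SEC,
        end.timestamp().as_second() * TIME_UNITS_PER_SEC,
    ))
}

fn main() -> Result<(), jiff::Error> {
    let tz = TimeZone::get("America/Los_Angeles")?; // stand-in for global_zone()
    let (start, end) = day_bounds_90k("2024-03-10", &tz)?; // a spring-forward day
    assert_eq!((end - start) / TIME_UNITS_PER_SEC, 23 * 3600);
    Ok(())
}
```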
@@ -60,13 +54,14 @@ impl std::fmt::Debug for Key {
 pub trait Value: std::fmt::Debug + Default {
     type Change: std::fmt::Debug;
 
-    /// Applies the given change to this value.
+    /// Applies the given change to this value; `c` may be positive or negative.
     fn apply(&mut self, c: &Self::Change);
 
     fn is_empty(&self) -> bool;
 }
 
-/// In-memory state about a particular stream on a particular day.
+/// In-memory state about a particular stream on a particular day, or a change
+/// to make via `<StreamValue as Value::apply>`.
 #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
 pub struct StreamValue {
     /// The number of recordings that overlap with this day.
@@ -81,6 +76,7 @@ pub struct StreamValue {
 impl Value for StreamValue {
     type Change = Self;
 
+    /// Applies the given change, which may have positive or negative recordings and duration.
     fn apply(&mut self, c: &StreamValue) {
         self.recordings += c.recordings;
         self.duration += c.duration;
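The map maintains per-day totals by applying signed deltas: the same `StreamValue` type doubles as the change, with negative fields used when a recording is removed. A minimal standalone sketch of that pattern (field types simplified to plain `i64`, not moonfire's `Duration` newtype):

```rust
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
struct StreamValue {
    recordings: i64,
    duration_90k: i64, // simplified stand-in for recording::Duration
}

impl StreamValue {
    /// Applies a delta whose fields may be positive (insert) or negative (delete).
    fn apply(&mut self, c: &StreamValue) {
        self.recordings += c.recordings;
        self.duration_90k += c.duration_90k;
    }
}

fn main() {
    let mut day = StreamValue::default();
    day.apply(&StreamValue { recordings: 1, duration_90k: 90_000 * 60 }); // add a 1-minute recording
    day.apply(&StreamValue { recordings: -1, duration_90k: -90_000 * 60 }); // later, delete it
    assert_eq!(day, StreamValue::default());
}
```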
@@ -198,42 +194,34 @@ impl<'a, V: Value> IntoIterator for &'a Map<V> {
 
 impl Map<StreamValue> {
     /// Adjusts `self` to reflect the range of the given recording.
     ///
     /// Note that the specified range may span two days. It will never span more because the maximum
     /// length of a recording entry is less than a day (even a 23-hour "spring forward" day).
-    ///
-    /// This function swallows/logs date formatting errors because they shouldn't happen and there's
-    /// not much that can be done about them. (The database operation has already gone through.)
+    /// See [`crate::recording::MAX_RECORDING_WALL_DURATION`].
     pub(crate) fn adjust(&mut self, r: Range<Time>, sign: i64) {
         // Find first day key.
         let sec = r.start.unix_seconds();
-        let mut my_tm = time::at(time::Timespec { sec, nsec: 0 });
-        let day = match Key::new(my_tm) {
-            Ok(d) => d,
-            Err(ref e) => {
-                error!(
-                    "Unable to fill first day key from {:?}->{:?}: {}; will ignore.",
-                    r, my_tm, e
-                );
-                return;
-            }
-        };
+        let start = jiff::Zoned::new(
+            jiff::Timestamp::from_second(sec).expect("valid timestamp"),
+            base::time::global_zone(),
+        );
+        let start_day = Key::new(&start).expect("valid key");
 
         // Determine the start of the next day.
-        // Use mytm to hold a non-normalized representation of the boundary.
-        my_tm.tm_isdst = -1;
-        my_tm.tm_hour = 0;
-        my_tm.tm_min = 0;
-        my_tm.tm_sec = 0;
-        my_tm.tm_mday += 1;
-        let boundary = my_tm.to_timespec();
-        let boundary_90k = boundary.sec * TIME_UNITS_PER_SEC;
+        let boundary = start
+            .date()
+            .tomorrow()
+            .expect("valid tomorrow")
+            .to_zoned(start.time_zone().clone())
+            .expect("valid tomorrow");
+        let boundary_90k = boundary.timestamp().as_second() * TIME_UNITS_PER_SEC;
 
         // Adjust the first day.
         let first_day_delta = StreamValue {
             recordings: sign,
             duration: Duration(sign * (cmp::min(r.end.0, boundary_90k) - r.start.0)),
         };
-        self.adjust_day(day, first_day_delta);
+        self.adjust_day(start_day, first_day_delta);
 
         if r.end.0 <= boundary_90k {
             return;
@@ -242,13 +230,12 @@ impl Map<StreamValue> {
         // Fill day with the second day. This requires a normalized representation so recalculate.
-        // (The C mktime(3) already normalized for us once, but .to_timespec() discarded that
-        // result.)
-        let my_tm = time::at(boundary);
-        let day = match Key::new(my_tm) {
+        let day = match Key::new(&boundary) {
             Ok(d) => d,
             Err(ref e) => {
                 error!(
                     "Unable to fill second day key from {:?}: {}; will ignore.",
-                    my_tm, e
+                    boundary, e
                 );
                 return;
             }
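The first-day/second-day split now hinges on one computation: take the recording's start in the configured zone and find the next civil day's midnight, expressed in 90 kHz units. A hedged sketch of that step, with a hard-coded zone standing in for `base::time::global_zone()`:

```rust
use jiff::{tz::TimeZone, Timestamp, Zoned};

const TIME_UNITS_PER_SEC: i64 = 90_000;

/// Returns the 90 kHz timestamp of the next midnight after `sec` (unix seconds) in `tz`.
fn next_day_boundary_90k(sec: i64, tz: &TimeZone) -> Result<i64, jiff::Error> {
    let start = Zoned::new(Timestamp::from_second(sec)?, tz.clone());
    let boundary = start
        .date()
        .tomorrow()?                           // next civil day
        .to_zoned(start.time_zone().clone())?; // its midnight in the same zone
    Ok(boundary.timestamp().as_second() * TIME_UNITS_PER_SEC)
}

fn main() -> Result<(), jiff::Error> {
    let tz = TimeZone::get("America/Los_Angeles")?; // stand-in for global_zone()
    // 2024-06-01T12:00:00-07:00 is unix 1717268400; the next midnight is 12h later.
    let b = next_day_boundary_90k(1_717_268_400, &tz)?;
    assert_eq!(b, (1_717_268_400 + 12 * 3600) * TIME_UNITS_PER_SEC);
    Ok(())
}
```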
@@ -263,35 +250,29 @@ impl Map<StreamValue> {
 
 impl Map<SignalValue> {
     /// Adjusts `self` to reflect the range of the given recording.
-    /// Note that the specified range may span several days (unlike StreamValue).
     ///
-    /// This function swallows/logs date formatting errors because they shouldn't happen and there's
-    /// not much that can be done about them. (The database operation has already gone through.)
+    /// Note that the specified range may span several days (unlike `StreamValue`).
     pub(crate) fn adjust(&mut self, mut r: Range<Time>, old_state: u16, new_state: u16) {
         // Find first day key.
         let sec = r.start.unix_seconds();
-        let mut my_tm = time::at(time::Timespec { sec, nsec: 0 });
-        let mut day = match Key::new(my_tm) {
-            Ok(d) => d,
-            Err(ref e) => {
-                error!(
-                    "Unable to fill first day key from {:?}->{:?}: {}; will ignore.",
-                    r, my_tm, e
-                );
-                return;
-            }
-        };
+        let mut tm = jiff::Zoned::new(
+            jiff::Timestamp::from_second(sec).expect("valid timestamp"),
+            base::time::global_zone(),
+        );
+        let mut day = Key::new(&tm).expect("valid date");
 
-        // Determine the start of the next day.
-        // Use mytm to hold a non-normalized representation of the boundary.
-        my_tm.tm_isdst = -1;
-        my_tm.tm_hour = 0;
-        my_tm.tm_min = 0;
-        my_tm.tm_sec = 0;
+        // Determine the starts of subsequent days.
+        tm = tm
+            .with()
+            .hour(0)
+            .minute(0)
+            .second(0)
+            .build()
+            .expect("midnight is valid");
 
         loop {
-            my_tm.tm_mday += 1;
-            let boundary_90k = my_tm.to_timespec().sec * TIME_UNITS_PER_SEC;
+            tm = tm.tomorrow().expect("valid tomorrow");
+            let boundary_90k = tm.timestamp().as_second() * TIME_UNITS_PER_SEC;
 
             // Adjust this day.
             let duration = Duration(cmp::min(r.end.0, boundary_90k) - r.start.0);
@@ -308,23 +289,8 @@ impl Map<SignalValue> {
                 return;
             }
 
-            // Fill day with the next day. This requires a normalized representation so
-            // recalculate. (The C mktime(3) already normalized for us once, but .to_timespec()
-            // discarded that result.)
-            let my_tm = time::at(time::Timespec {
-                sec: Time(boundary_90k).unix_seconds(),
-                nsec: 0,
-            });
-            day = match Key::new(my_tm) {
-                Ok(d) => d,
-                Err(ref e) => {
-                    error!(
-                        "Unable to fill day key from {:?}: {}; will ignore.",
-                        my_tm, e
-                    );
-                    return;
-                }
-            };
+            // Fill day with the next day.
+            day = Key::new(&tm).expect("valid date");
             r.start.0 = boundary_90k;
         }
     }
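For signals, the range can cover many days, so the code snaps the start to midnight once and then walks forward one civil day at a time with `tomorrow()`, which keeps DST transitions correct. A standalone sketch of that iteration (the zone and the 90 kHz constant are illustrative stand-ins):

```rust
use jiff::{tz::TimeZone, Timestamp, Zoned};

const TIME_UNITS_PER_SEC: i64 = 90_000;

/// Returns the `YYYY-MM-DD` key of each day touched by the half-open 90 kHz range.
fn days_touched(start_90k: i64, end_90k: i64, tz: &TimeZone) -> Result<Vec<String>, jiff::Error> {
    let start = Zoned::new(Timestamp::from_second(start_90k / TIME_UNITS_PER_SEC)?, tz.clone());
    // Snap to midnight of the first day, then advance one civil day per iteration.
    let mut tm = start.with().hour(0).minute(0).second(0).build()?;
    let mut out = Vec::new();
    loop {
        out.push(tm.date().to_string());
        tm = tm.tomorrow()?;
        let boundary_90k = tm.timestamp().as_second() * TIME_UNITS_PER_SEC;
        if end_90k <= boundary_90k {
            return Ok(out);
        }
    }
}

fn main() -> Result<(), jiff::Error> {
    let tz = TimeZone::get("America/Los_Angeles")?; // stand-in for global_zone()
    // A range starting 2024-06-01T23:00-07:00 and lasting two hours spans two days.
    let start = 1_717_308_000 * TIME_UNITS_PER_SEC;
    let end = start + 2 * 3600 * TIME_UNITS_PER_SEC;
    assert_eq!(days_touched(start, end, &tz)?, vec!["2024-06-01", "2024-06-02"]);
    Ok(())
}
```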
@@ -621,7 +621,7 @@ pub struct LockedDatabase {
 
     /// The monotonic time when the database was opened (whether in read-write mode or read-only
     /// mode).
-    open_monotonic: recording::Time,
+    open_monotonic: base::clock::Instant,
 
     auth: auth::State,
     signal: signal::State,
@@ -1076,8 +1076,10 @@ impl LockedDatabase {
             r"update open set duration_90k = ?, end_time_90k = ? where id = ?",
         )?;
         let rows = stmt.execute(params![
-            (recording::Time::new(clocks.monotonic()) - self.open_monotonic).0,
-            recording::Time::new(clocks.realtime()).0,
+            recording::Duration::try_from(clocks.monotonic() - self.open_monotonic)
+                .expect("valid duration")
+                .0,
+            recording::Time::from(clocks.realtime()).0,
             o.id,
         ])?;
         if rows != 1 {
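`open_monotonic` is now stored as a monotonic `base::clock::Instant` rather than being wrapped in the wall-clock `recording::Time`, so the open's duration is computed as `now - open_monotonic` and only then converted into 90 kHz units. The same shape with the standard library's monotonic clock, shown here only as an analogy (moonfire's `base::clock` wrappers are not reproduced):

```rust
use std::time::Instant;

const TIME_UNITS_PER_SEC: u128 = 90_000;

fn main() {
    let open_monotonic = Instant::now(); // captured when the database is opened

    // ... the process runs for a while ...

    // At flush time: elapsed monotonic duration, converted to 90 kHz units for
    // the `open.duration_90k` column.
    let elapsed = Instant::now().duration_since(open_monotonic);
    let duration_90k = elapsed.as_nanos() * TIME_UNITS_PER_SEC / 1_000_000_000;
    println!("open for {duration_90k} (90 kHz units)");
}
```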
@@ -2346,9 +2348,9 @@ impl<C: Clocks + Clone> Database<C> {
         // Note: the meta check comes after the version check to improve the error message when
         // trying to open a version 0 or version 1 database (which lacked the meta table).
         let (db_uuid, config) = raw::read_meta(&conn)?;
-        let open_monotonic = recording::Time::new(clocks.monotonic());
+        let open_monotonic = clocks.monotonic();
         let open = if read_write {
-            let real = recording::Time::new(clocks.realtime());
+            let real = recording::Time::from(clocks.realtime());
             let mut stmt = conn
                 .prepare(" insert into open (uuid, start_time_90k, boot_uuid) values (?, ?, ?)")?;
             let open_uuid = SqlUuid(Uuid::new_v4());
@@ -10,7 +10,6 @@ use crate::dir;
 use crate::writer;
 use base::clock::Clocks;
 use base::FastHashMap;
-use std::env;
 use std::sync::Arc;
 use std::thread;
 use tempfile::TempDir;
@@ -33,14 +32,13 @@ pub const TEST_VIDEO_SAMPLE_ENTRY_DATA: &[u8] =
 /// Performs global initialization for tests.
 /// * set up logging. (Note the output can be confusing unless `RUST_TEST_THREADS=1` is set in
 ///   the program's environment prior to running.)
-/// * set `TZ=America/Los_Angeles` so that tests that care about calendar time get the expected
-///   results regardless of machine setup.)
+/// * set time zone `America/Los_Angeles` so that tests that care about
+///   calendar time get the expected results regardless of machine setup.)
 /// * use a fast but insecure password hashing format.
 pub fn init() {
     INIT.call_once(|| {
         base::tracing_setup::install_for_tests();
-        env::set_var("TZ", "America/Los_Angeles");
-        time::tzset();
+        base::time::testutil::init_zone();
         crate::auth::set_test_config();
     });
 }
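Test setup no longer mutates the process-global `TZ` environment variable (racy in multithreaded test runs); instead a fixed zone is installed through the project's `base::time::testutil::init_zone()`. A hedged illustration of the underlying idea with jiff, resolving an explicit zone instead of relying on the system setting:

```rust
use jiff::tz::TimeZone;
use jiff::{Timestamp, Zoned};

fn main() -> Result<(), jiff::Error> {
    // Resolve the zone once and pass it around, instead of setting TZ globally.
    let tz = TimeZone::get("America/Los_Angeles")?;

    // Any calendar-dependent assertion now uses that explicit zone.
    let ts = Timestamp::from_second(0)?; // 1970-01-01T00:00:00Z
    let zoned = Zoned::new(ts, tz);
    assert_eq!(zoned.date().to_string(), "1969-12-31"); // UTC-8 at the epoch
    Ok(())
}
```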
@@ -19,8 +19,6 @@ use std::path::PathBuf;
 use std::sync::Mutex;
 use std::sync::{mpsc, Arc};
 use std::thread;
-use std::time::Duration as StdDuration;
-use time::{Duration, Timespec};
 use tracing::{debug, trace, warn};
 
 /// Trait to allow mocking out [crate::dir::SampleFileDir] in syncer tests.
@@ -103,7 +101,7 @@ struct Syncer<C: Clocks + Clone, D: DirWriter> {
 /// A plan to flush at a given instant due to a recently-saved recording's `flush_if_sec` parameter.
 struct PlannedFlush {
     /// Monotonic time at which this flush should happen.
-    when: Timespec,
+    when: base::clock::Instant,
 
     /// Recording which prompts this flush. If this recording is already flushed at the planned
     /// time, it can be skipped.
@@ -440,9 +438,7 @@ impl<C: Clocks + Clone, D: DirWriter> Syncer<C, D> {
         let now = self.db.clocks().monotonic();
 
         // Calculate the timeout to use, mapping negative durations to 0.
-        let timeout = (t - now)
-            .to_std()
-            .unwrap_or_else(|_| StdDuration::new(0, 0));
+        let timeout = t.saturating_sub(&now);
         match self.db.clocks().recv_timeout(cmds, timeout) {
             Err(mpsc::RecvTimeoutError::Disconnected) => return false, // cmd senders gone.
             Err(mpsc::RecvTimeoutError::Timeout) => {
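The old code subtracted two times and mapped a negative result to zero by catching the failed `to_std()` conversion; the new `saturating_sub` expresses the clamp directly. The equivalent pattern with `std::time::Instant`, which moonfire's `base::clock::Instant` resembles (an assumption, not the project's actual wrapper):

```rust
use std::time::{Duration, Instant};

fn main() {
    let now = Instant::now();
    let planned = now; // a flush planned for "now" (or already in the past)

    // `saturating_duration_since` clamps to zero instead of going negative,
    // which is exactly the behavior the timeout calculation needs.
    let timeout: Duration = planned.saturating_duration_since(now);
    assert_eq!(timeout, Duration::ZERO);
}
```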
@@ -534,8 +530,11 @@ impl<C: Clocks + Clone, D: DirWriter> Syncer<C, D> {
         let c = db.cameras_by_id().get(&s.camera_id).unwrap();
 
         // Schedule a flush.
-        let how_soon =
-            Duration::seconds(i64::from(s.config.flush_if_sec)) - wall_duration.to_tm_duration();
+        let how_soon = base::clock::Duration::from_secs(u64::from(s.config.flush_if_sec))
+            .saturating_sub(
+                base::clock::Duration::try_from(wall_duration)
+                    .expect("wall_duration is non-negative"),
+            );
         let now = self.db.clocks().monotonic();
         let when = now + how_soon;
         let reason = format!(
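The flush is scheduled `flush_if_sec` seconds after the recording started: the configured interval minus however much wall time the recording already covers, clamped at zero. A small sketch of that arithmetic with std durations (the 90 kHz conversion is simplified to whole seconds):

```rust
use std::time::Duration;

const TIME_UNITS_PER_SEC: u64 = 90_000;

/// How long to wait before flushing, given the stream's `flush_if_sec` config
/// and the recording's wall duration in 90 kHz units.
fn how_soon(flush_if_sec: u32, wall_duration_90k: u64) -> Duration {
    let configured = Duration::from_secs(u64::from(flush_if_sec));
    let already_covered = Duration::from_secs(wall_duration_90k / TIME_UNITS_PER_SEC);
    configured.saturating_sub(already_covered) // never negative
}

fn main() {
    // flush_if_sec=60 and a 61-second recording: flush immediately rather than underflow.
    assert_eq!(how_soon(60, 61 * 90_000), Duration::ZERO);
    // A 1-second recording: wait the remaining 59 seconds.
    assert_eq!(how_soon(60, 90_000), Duration::from_secs(59));
}
```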
@@ -546,7 +545,7 @@ impl<C: Clocks + Clone, D: DirWriter> Syncer<C, D> {
             s.type_.as_str(),
             id
         );
-        trace!("scheduling flush in {} because {}", how_soon, &reason);
+        trace!("scheduling flush in {:?} because {}", how_soon, &reason);
         self.planned_flushes.push(PlannedFlush {
             when,
             reason,
@@ -600,15 +599,15 @@ impl<C: Clocks + Clone, D: DirWriter> Syncer<C, D> {
             return;
         }
         if let Err(e) = l.flush(&f.reason) {
-            let d = Duration::minutes(1);
+            let d = base::clock::Duration::from_secs(60);
             warn!(
-                "flush failure on save for reason {}; will retry after {}: {:?}",
+                "flush failure on save for reason {}; will retry after {:?}: {:?}",
                 f.reason, d, e
             );
             self.planned_flushes
                 .peek_mut()
                 .expect("planned_flushes is non-empty")
-                .when = self.db.clocks().monotonic() + Duration::minutes(1);
+                .when = self.db.clocks().monotonic() + base::clock::Duration::from_secs(60);
             return;
         }
@@ -1162,7 +1161,7 @@ mod tests {
     }
 
     fn new_harness(flush_if_sec: u32) -> Harness {
-        let clocks = SimulatedClocks::new(::time::Timespec::new(0, 0));
+        let clocks = SimulatedClocks::new(base::clock::SystemTime::new(0, 0));
         let tdb = testutil::TestDb::new_with_flush_if_sec(clocks, flush_if_sec);
         let dir_id = *tdb
             .db
@@ -1653,7 +1652,7 @@ mod tests {
         let mut h = new_harness(60); // flush_if_sec=60
 
         // There's a database constraint forbidding a recording starting at t=0, so advance.
-        h.db.clocks().sleep(time::Duration::seconds(1));
+        h.db.clocks().sleep(base::clock::Duration::from_secs(1));
 
         // Setup: add a 3-byte recording.
         let video_sample_entry_id =
@@ -1700,7 +1699,7 @@ mod tests {
         h.db.lock().flush("forced").unwrap();
         assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
         assert_eq!(h.syncer.planned_flushes.len(), 1);
-        h.db.clocks().sleep(time::Duration::seconds(30));
+        h.db.clocks().sleep(base::clock::Duration::from_secs(30));
 
         // Then, a 1-byte recording.
         let mut w = Writer::new(&h.dir, &h.db, &h.channel, testutil::TEST_STREAM_ID);
@@ -1735,13 +1734,22 @@ mod tests {
 
         assert_eq!(h.syncer.planned_flushes.len(), 2);
         let db_flush_count_before = h.db.lock().flushes();
-        assert_eq!(h.db.clocks().monotonic(), time::Timespec::new(31, 0));
+        assert_eq!(
+            h.db.clocks().monotonic(),
+            base::clock::Instant::from_secs(31)
+        );
         assert!(h.syncer.iter(&h.syncer_rx)); // planned flush (no-op)
-        assert_eq!(h.db.clocks().monotonic(), time::Timespec::new(61, 0));
+        assert_eq!(
+            h.db.clocks().monotonic(),
+            base::clock::Instant::from_secs(61)
+        );
         assert_eq!(h.db.lock().flushes(), db_flush_count_before);
         assert_eq!(h.syncer.planned_flushes.len(), 1);
         assert!(h.syncer.iter(&h.syncer_rx)); // planned flush
-        assert_eq!(h.db.clocks().monotonic(), time::Timespec::new(91, 0));
+        assert_eq!(
+            h.db.clocks().monotonic(),
+            base::clock::Instant::from_secs(91)
+        );
         assert_eq!(h.db.lock().flushes(), db_flush_count_before + 1);
         assert_eq!(h.syncer.planned_flushes.len(), 0);
         assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
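These assertions work because the test clock is fully deterministic: `sleep` and the syncer's timed waits advance a counter instead of real time, so monotonic reads land exactly on 31 s, 61 s, and 91 s (as the jumps across the `iter()` calls suggest). A toy version of such a simulated monotonic clock, illustrating the idea rather than moonfire's `SimulatedClocks`:

```rust
use std::sync::Mutex;
use std::time::Duration;

/// A monotonic clock that only moves when the test tells it to.
struct SimClock {
    elapsed: Mutex<Duration>,
}

impl SimClock {
    fn new() -> Self {
        SimClock { elapsed: Mutex::new(Duration::ZERO) }
    }
    fn monotonic(&self) -> Duration {
        *self.elapsed.lock().unwrap()
    }
    fn sleep(&self, d: Duration) {
        *self.elapsed.lock().unwrap() += d; // advance instantly; no real waiting
    }
}

fn main() {
    let clock = SimClock::new();
    clock.sleep(Duration::from_secs(1));
    clock.sleep(Duration::from_secs(30));
    assert_eq!(clock.monotonic(), Duration::from_secs(31)); // matches the test's first checkpoint
}
```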