include all recordings in days map (fixes #57)

This is a quick fix for a problem that gives a confusing, poor initial
experience, as described in this thread:
https://groups.google.com/g/moonfire-nvr-users/c/WB-TIW3bBZI/m/Gqh-L6I9BgAJ

I don't think it's a permanent solution. In particular, when we
implement an event stream (#40), I don't want to have a separate event
for every frame, so having the days map change that often won't work.
The client side will then likely manipulate the days map to include a
special entry for a growing recording, representing "from this time to
now".
Scott Lamb 2020-07-18 11:57:17 -07:00
parent 5515db0513
commit 459615a616
3 changed files with 33 additions and 17 deletions


@@ -447,8 +447,9 @@ pub struct Stream {
     /// gaps and overlap.
     pub duration: recording::Duration,

-    /// Mapping of calendar day (in the server's time zone) to a summary of recordings on that day.
-    pub days: BTreeMap<StreamDayKey, StreamDayValue>,
+    /// Mapping of calendar day (in the server's time zone) to a summary of committed recordings on
+    /// that day.
+    pub committed_days: BTreeMap<StreamDayKey, StreamDayValue>,
     pub record: bool,

     /// The `next_recording_id` currently committed to the database.
@@ -588,7 +589,18 @@ impl Stream {
         self.duration += r.end - r.start;
         self.sample_file_bytes += sample_file_bytes as i64;
         self.fs_bytes += round_up(i64::from(sample_file_bytes));
-        adjust_days(r, 1, &mut self.days);
+        adjust_days(r, 1, &mut self.committed_days);
     }
+
+    /// Returns a days map including unflushed recordings.
+    pub fn days(&self) -> BTreeMap<StreamDayKey, StreamDayValue> {
+        let mut days = self.committed_days.clone();
+        for u in &self.uncommitted {
+            let l = u.lock();
+            adjust_days(l.start .. l.start + recording::Duration(i64::from(l.duration_90k)),
+                        1, &mut days);
+        }
+        days
+    }
 }
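
The new `days()` method relies on `adjust_days` (unchanged, so not shown
in this diff) to credit a recording's time range to each calendar day it
touches. A simplified, self-contained sketch of that splitting logic,
assuming fixed-length days counted from the epoch rather than the
server's time zone, with plain integers standing in for the db crate's
time types:

```rust
use std::collections::BTreeMap;
use std::ops::Range;

/// 90 kHz ticks in one day.
const TIME_UNITS_PER_DAY: i64 = 90_000 * 60 * 60 * 24;

#[derive(Default)]
struct DayValue {
    recordings: i64,
    duration_90k: i64,
}

/// Credits (`sign = 1`) or removes (`sign = -1`) a recording's time
/// range to/from every day it touches, splitting at day boundaries.
/// Days are keyed by index since the epoch here; the real code keys by
/// local-time `YYYY-mm-dd`.
fn adjust_days(r: Range<i64>, sign: i64, days: &mut BTreeMap<i64, DayValue>) {
    let mut start = r.start;
    while start < r.end {
        let day = start / TIME_UNITS_PER_DAY;
        // Stop at the end of this day or of the recording, whichever is first.
        let end = r.end.min((day + 1) * TIME_UNITS_PER_DAY);
        let e = days.entry(day).or_default();
        e.recordings += sign;
        e.duration_90k += sign * (end - start);
        start = end;
    }
}

fn main() {
    let mut days = BTreeMap::new();
    // A recording straddling the boundary between day 0 and day 1.
    adjust_days(TIME_UNITS_PER_DAY - 90_000 .. TIME_UNITS_PER_DAY + 90_000,
                1, &mut days);
    assert_eq!(days[&0].duration_90k, 90_000); // one second on each side
    assert_eq!(days[&1].duration_90k, 90_000);
}
```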
@@ -789,7 +801,7 @@ impl StreamStateChanger {
                 bytes_to_add: 0,
                 fs_bytes_to_add: 0,
                 duration: recording::Duration(0),
-                days: BTreeMap::new(),
+                committed_days: BTreeMap::new(),
                 record: sc.record,
                 next_recording_id: 1,
                 uncommitted: VecDeque::new(),
@@ -1031,7 +1043,7 @@ impl LockedDatabase {
             dir.garbage_needs_unlink.insert(row.id);
             let d = recording::Duration(row.duration as i64);
             s.duration -= d;
-            adjust_days(row.start .. row.start + d, -1, &mut s.days);
+            adjust_days(row.start .. row.start + d, -1, &mut s.committed_days);
         }

         // Process add_recordings.
@@ -1530,7 +1542,7 @@ impl LockedDatabase {
                 bytes_to_add: 0,
                 fs_bytes_to_add: 0,
                 duration: recording::Duration(0),
-                days: BTreeMap::new(),
+                committed_days: BTreeMap::new(),
                 next_recording_id: row.get(7)?,
                 record: row.get(8)?,
                 uncommitted: VecDeque::new(),


@@ -96,8 +96,11 @@ The `application/json` response will have a dict as follows:
       filesystem block allocated to each file.
    *  `days`: (only included if request parameter `days` is true)
       dictionary representing calendar days (in the server's time zone)
-      with non-zero total duration of recordings for that day. The keys
-      are of the form `YYYY-mm-dd`; the values are objects with the
+      with non-zero total duration of recordings for that day. Currently
+      this includes uncommitted and growing recordings; this is likely
+      to change in a future release as part of
+      [#40](https://github.com/scottlamb/moonfire-nvr/issues/40). The
+      keys are of the form `YYYY-mm-dd`; the values are objects with the
       following attributes:
       *  `totalDuration90k` is the total duration recorded during that
          day. If a recording spans a day boundary, some portion of it
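
To make the 90 kHz arithmetic concrete (hypothetical numbers, not from
the document): one hour of recording adds 60 × 60 × 90,000 =
324,000,000 units to `totalDuration90k`, and a recording from 23:30 to
00:30 credits half that to each of the two days it touches:

```rust
fn main() {
    // The API's time base is 90,000 units per second.
    let units_per_second: i64 = 90_000;
    let one_hour = 60 * 60 * units_per_second;
    assert_eq!(one_hour, 324_000_000);
    // A 23:30-00:30 recording splits evenly across the day boundary.
    assert_eq!(one_hour / 2, 162_000_000);
}
```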


@@ -87,7 +87,7 @@ pub struct Camera<'a> {
     pub config: Option<CameraConfig<'a>>,

     #[serde(serialize_with = "Camera::serialize_streams")]
-    pub streams: [Option<Stream<'a>>; 2],
+    pub streams: [Option<Stream>; 2],
 }

 #[derive(Debug, Serialize)]
@@ -100,7 +100,7 @@ pub struct CameraConfig<'a> {

 #[derive(Debug, Serialize)]
 #[serde(rename_all="camelCase")]
-pub struct Stream<'a> {
+pub struct Stream {
     pub retain_bytes: i64,
     pub min_start_time_90k: Option<i64>,
     pub max_end_time_90k: Option<i64>,
@@ -110,7 +110,7 @@ pub struct Stream<'a> {

     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(serialize_with = "Stream::serialize_days")]
-    pub days: Option<&'a BTreeMap<db::StreamDayKey, db::StreamDayValue>>,
+    pub days: Option<BTreeMap<db::StreamDayKey, db::StreamDayValue>>,
 }

 #[derive(Serialize)]
@@ -210,7 +210,7 @@ impl<'a> Camera<'a> {
         })
     }

-    fn serialize_streams<S>(streams: &[Option<Stream<'a>>; 2], serializer: S) -> Result<S::Ok, S::Error>
+    fn serialize_streams<S>(streams: &[Option<Stream>; 2], serializer: S) -> Result<S::Ok, S::Error>
     where S: Serializer {
         let mut map = serializer.serialize_map(Some(streams.len()))?;
         for (i, s) in streams.iter().enumerate() {
@@ -223,8 +223,9 @@ impl<'a> Camera<'a> {
     }
 }

-impl<'a> Stream<'a> {
-    fn wrap(db: &'a db::LockedDatabase, id: Option<i32>, include_days: bool) -> Result<Option<Self>, Error> {
+impl Stream {
+    fn wrap(db: &db::LockedDatabase, id: Option<i32>, include_days: bool)
+            -> Result<Option<Self>, Error> {
         let id = match id {
             Some(id) => id,
             None => return Ok(None),
@@ -237,14 +238,14 @@ impl<'a> Stream<'a> {
             total_duration_90k: s.duration.0,
             total_sample_file_bytes: s.sample_file_bytes,
             fs_bytes: s.fs_bytes,
-            days: if include_days { Some(&s.days) } else { None },
+            days: if include_days { Some(s.days()) } else { None },
         }))
     }

-    fn serialize_days<S>(days: &Option<&BTreeMap<db::StreamDayKey, db::StreamDayValue>>,
+    fn serialize_days<S>(days: &Option<BTreeMap<db::StreamDayKey, db::StreamDayValue>>,
                          serializer: S) -> Result<S::Ok, S::Error>
     where S: Serializer {
-        let days = match *days {
+        let days = match days.as_ref() {
             Some(d) => d,
             None => return serializer.serialize_none(),
         };
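
The hunk ends before the code that actually emits the map. A minimal
sketch of how the remainder plausibly looks, with generic stand-ins so
it compiles on its own, assuming `db::StreamDayKey` exposes its
`YYYY-mm-dd` text via `AsRef<str>`:

```rust
use std::collections::BTreeMap;

use serde::ser::{Serialize, SerializeMap, Serializer};

/// Sketch of the tail of `serialize_days`: emit the map with string day
/// keys. `K` stands in for `db::StreamDayKey` and `V` for
/// `db::StreamDayValue`.
fn serialize_days_map<K, V, S>(
    days: &BTreeMap<K, V>,
    serializer: S,
) -> Result<S::Ok, S::Error>
where
    K: AsRef<str>,
    V: Serialize,
    S: Serializer,
{
    let mut map = serializer.serialize_map(Some(days.len()))?;
    for (k, v) in days {
        map.serialize_entry(k.as_ref(), v)?;
    }
    map.end()
}
```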