// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2021 The Moonfire NVR Authors; see AUTHORS and LICENSE.txt.
// SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception.

//! Database access logic for the Moonfire NVR SQLite schema.
//!
//! The SQLite schema includes everything except the actual video samples (see the `dir` module
//! for management of those). See `schema.sql` for a more detailed description.
//!
//! The [`Database`] struct caches data in RAM, making the assumption that only one process is
//! accessing the database at a time. Performance and efficiency notes:
//!
//! * several query operations here feature row callbacks. The callback is invoked while the
//!   database lock is held, so it shouldn't perform long-running operations.
//!
//! * startup may be slow, as it scans the entire index for the recording table. This seems
//!   acceptable.
//!
//! * the operations used for web file serving should return results with acceptable latency.
//!
//! * however, the database lock may be held for longer than is acceptable for
//!   the critical path of recording frames. The caller should preallocate sample file uuids
//!   and such to avoid database operations in these paths.
//!
//! * adding and removing recordings during normal operation use a batch interface.
//!   A list of mutations is built up in-memory and occasionally flushed to reduce SSD write
//!   cycles.

use crate::auth;
use crate::days;
use crate::dir;
use crate::json::SampleFileDirConfig;
use crate::raw;
use crate::recording;
use crate::schema;
use crate::signal;
use base::bail_t;
use base::clock::{self, Clocks};
use base::strutil::encode_size;
use failure::{bail, format_err, Error, ResultExt};
use fnv::{FnvHashMap, FnvHashSet};
use hashlink::LinkedHashMap;
use itertools::Itertools;
use rusqlite::{named_params, params};
use smallvec::SmallVec;
use std::cell::RefCell;
use std::cmp;
use std::collections::{BTreeMap, VecDeque};
use std::convert::TryInto;
use std::fmt::Write as _;
use std::mem;
use std::ops::Range;
use std::path::PathBuf;
use std::str;
use std::string::String;
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard};
use std::vec::Vec;
use tracing::warn;
use tracing::{error, info, trace};
use uuid::Uuid;

/// Expected schema version. See `guide/schema.md` for more information.
pub const EXPECTED_VERSION: i32 = 7;

/// Length of the video index cache.
/// The actual data structure is one bigger than this because we insert before we remove.
/// Make it one less than a power of two so that the data structure's size is efficient.
const VIDEO_INDEX_CACHE_LEN: usize = 1023;

const GET_RECORDING_PLAYBACK_SQL: &str = r#"
    select
      video_index
    from
      recording_playback
    where
      composite_id = :composite_id
"#;

const INSERT_VIDEO_SAMPLE_ENTRY_SQL: &str = r#"
    insert into video_sample_entry (width, height, pasp_h_spacing, pasp_v_spacing,
                                    rfc6381_codec, data)
                            values (:width, :height, :pasp_h_spacing, :pasp_v_spacing,
                                    :rfc6381_codec, :data)
"#;

const UPDATE_STREAM_COUNTERS_SQL: &str = r#"
    update stream
    set cum_recordings = :cum_recordings,
        cum_media_duration_90k = :cum_media_duration_90k,
        cum_runs = :cum_runs
    where id = :stream_id
"#;

/// The size of a filesystem block, to use in disk space accounting.
/// This should really be obtained by a stat call on the sample file directory in question,
/// but that requires some refactoring. See
/// [#89](https://github.com/scottlamb/moonfire-nvr/issues/89). We might be able to get away with
/// this hardcoded value for a while.
const ASSUMED_BLOCK_SIZE_BYTES: i64 = 4096;

/// Rounds a file size up to the next multiple of the block size.
/// This is useful in representing the actual amount of filesystem space used.
pub(crate) fn round_up(bytes: i64) -> i64 {
    let blk = ASSUMED_BLOCK_SIZE_BYTES;
    (bytes + blk - 1) / blk * blk
}

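// A minimal self-check of `round_up`'s arithmetic under the assumed 4096-byte block size above:
// sizes are rounded up to the next block boundary, and exact multiples (including zero) are left
// unchanged.
#[cfg(test)]
mod round_up_example {
    use super::round_up;

    #[test]
    fn rounds_to_next_block_boundary() {
        assert_eq!(round_up(0), 0);
        assert_eq!(round_up(1), 4096);
        assert_eq!(round_up(4096), 4096);
        assert_eq!(round_up(4097), 8192);
    }
}
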
/// A wrapper around `Uuid` which implements `FromSql` and `ToSql`.
pub struct SqlUuid(pub Uuid);

impl rusqlite::types::FromSql for SqlUuid {
    fn column_result(value: rusqlite::types::ValueRef) -> rusqlite::types::FromSqlResult<Self> {
        let uuid = Uuid::from_slice(value.as_blob()?)
            .map_err(|e| rusqlite::types::FromSqlError::Other(Box::new(e)))?;
        Ok(SqlUuid(uuid))
    }
}

impl rusqlite::types::ToSql for SqlUuid {
    fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
        Ok(self.0.as_bytes()[..].into())
    }
}

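// A small sketch of the round trip these impls enable, assuming an in-memory SQLite connection
// and a throwaway table named `example` (both are illustrative only, not part of the schema).
#[cfg(test)]
mod sql_uuid_example {
    use super::*;
    use rusqlite::params;

    #[test]
    fn round_trip() {
        let conn = rusqlite::Connection::open_in_memory().unwrap();
        conn.execute("create table example (u blob not null)", params![])
            .unwrap();
        let u = Uuid::new_v4();
        conn.execute("insert into example (u) values (?)", params![SqlUuid(u)])
            .unwrap();
        let got: SqlUuid = conn
            .query_row("select u from example", params![], |row| row.get(0))
            .unwrap();
        assert_eq!(got.0, u);
    }
}
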
struct VideoIndex(Box<[u8]>);

impl rusqlite::types::FromSql for VideoIndex {
    fn column_result(value: rusqlite::types::ValueRef) -> rusqlite::types::FromSqlResult<Self> {
        Ok(VideoIndex(value.as_blob()?.to_vec().into_boxed_slice()))
    }
}

/// A concrete box derived from an ISO/IEC 14496-12 section 8.5.2 VisualSampleEntry box. Describes
/// the codec, width, height, etc.
#[derive(Debug)]
pub struct VideoSampleEntry {
    pub id: i32,

    // Fields matching VideoSampleEntryToInsert below.
    pub data: Vec<u8>,
    pub rfc6381_codec: String,
    pub width: u16,
    pub height: u16,
    pub pasp_h_spacing: u16,
    pub pasp_v_spacing: u16,
}

impl VideoSampleEntry {
    /// Returns the aspect ratio as a minimized ratio.
    pub fn aspect(&self) -> num_rational::Ratio<u32> {
        num_rational::Ratio::new(
            u32::from(self.width) * u32::from(self.pasp_h_spacing),
            u32::from(self.height) * u32::from(self.pasp_v_spacing),
        )
    }
}

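// A brief sketch of `aspect()` in action: a square-pixel (1:1 pasp) 1920x1080 entry reduces to
// 16:9. The `data` and `rfc6381_codec` values below are placeholders.
#[cfg(test)]
mod aspect_example {
    use super::*;

    #[test]
    fn square_pixel_1080p_is_16_9() {
        let e = VideoSampleEntry {
            id: 1,
            data: Vec::new(),
            rfc6381_codec: "avc1.640028".to_owned(),
            width: 1920,
            height: 1080,
            pasp_h_spacing: 1,
            pasp_v_spacing: 1,
        };
        assert_eq!(e.aspect(), num_rational::Ratio::new(16, 9));
    }
}
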
#[derive(Clone, PartialEq, Eq)]
pub struct VideoSampleEntryToInsert {
    pub data: Vec<u8>,
    pub rfc6381_codec: String,
    pub width: u16,
    pub height: u16,
    pub pasp_h_spacing: u16,
    pub pasp_v_spacing: u16,
}

impl std::fmt::Debug for VideoSampleEntryToInsert {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        use pretty_hex::PrettyHex;
        f.debug_struct("VideoSampleEntryToInsert")
            .field("data", &self.data.hex_dump())
            .field("rfc6381_codec", &self.rfc6381_codec)
            .field("width", &self.width)
            .field("height", &self.height)
            .field("pasp_h_spacing", &self.pasp_h_spacing)
            .field("pasp_v_spacing", &self.pasp_v_spacing)
            .finish()
    }
}

/// A row used in `list_recordings_by_time` and `list_recordings_by_id`.
#[derive(Copy, Clone, Debug)]
pub struct ListRecordingsRow {
    pub start: recording::Time,
    pub video_sample_entry_id: i32,

    pub id: CompositeId,

    /// This is a recording::Duration, but a single recording's duration fits into an i32.
    pub wall_duration_90k: i32,
    pub media_duration_90k: i32,
    pub video_samples: i32,
    pub video_sync_samples: i32,
    pub sample_file_bytes: i32,
    pub run_offset: i32,
    pub open_id: u32,
    pub flags: i32,

    /// This is populated by `list_recordings_by_id` but not `list_recordings_by_time`.
    /// (It's not included in the `recording_cover` index, so adding it to
    /// `list_recordings_by_time` would be inefficient.)
    pub prev_media_duration_and_runs: Option<(recording::Duration, i32)>,
}

/// A row used in `list_aggregated_recordings`.
#[derive(Clone, Debug)]
pub struct ListAggregatedRecordingsRow {
    pub time: Range<recording::Time>,
    pub ids: Range<i32>,
    pub video_samples: i64,
    pub video_sync_samples: i64,
    pub sample_file_bytes: i64,
    pub video_sample_entry_id: i32,
    pub stream_id: i32,
    pub run_start_id: i32,
    pub open_id: u32,
    pub first_uncommitted: Option<i32>,
    pub growing: bool,
    pub has_trailing_zero: bool,
}

impl ListAggregatedRecordingsRow {
    fn from(row: ListRecordingsRow) -> Self {
        let recording_id = row.id.recording();
        let uncommitted = (row.flags & RecordingFlags::Uncommitted as i32) != 0;
        let growing = (row.flags & RecordingFlags::Growing as i32) != 0;
        ListAggregatedRecordingsRow {
            time: row.start..recording::Time(row.start.0 + row.wall_duration_90k as i64),
            ids: recording_id..recording_id + 1,
            video_samples: row.video_samples as i64,
            video_sync_samples: row.video_sync_samples as i64,
            sample_file_bytes: row.sample_file_bytes as i64,
            video_sample_entry_id: row.video_sample_entry_id,
            stream_id: row.id.stream(),
            run_start_id: recording_id - row.run_offset,
            open_id: row.open_id,
            first_uncommitted: if uncommitted {
                Some(recording_id)
            } else {
                None
            },
            growing,
            has_trailing_zero: (row.flags & RecordingFlags::TrailingZero as i32) != 0,
        }
    }
}

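// An illustrative sketch of how a single `ListRecordingsRow` maps into the aggregated form. It
// assumes `CompositeId::new(stream_id, recording_id)` as referenced in the `uncommitted` field
// docs below; the field values are arbitrary.
#[cfg(test)]
mod aggregated_row_example {
    use super::*;

    #[test]
    fn single_row_aggregation() {
        let row = ListRecordingsRow {
            start: recording::Time(90_000),
            video_sample_entry_id: 1,
            id: CompositeId::new(5, 42),
            wall_duration_90k: 90_000,
            media_duration_90k: 90_000,
            video_samples: 30,
            video_sync_samples: 1,
            sample_file_bytes: 4_000,
            run_offset: 2,
            open_id: 7,
            flags: RecordingFlags::Uncommitted as i32,
            prev_media_duration_and_runs: None,
        };
        let agg = ListAggregatedRecordingsRow::from(row);
        assert_eq!(agg.stream_id, 5);
        assert_eq!(agg.ids, 42..43);
        assert_eq!(agg.run_start_id, 40);
        assert_eq!(agg.first_uncommitted, Some(42));
    }
}
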
/// Select fields from the `recording_playback` table. Retrieve with `with_recording_playback`.
#[derive(Debug)]
pub struct RecordingPlayback<'a> {
    pub video_index: &'a [u8],
}

/// Bitmask in the `flags` field in the `recording` table; see `schema.sql`.
#[repr(u32)]
pub enum RecordingFlags {
    TrailingZero = 1,

    // These values (starting from high bit on down) are never written to the database.
    Growing = 1 << 30,
    Uncommitted = 1 << 31,
}

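// A short sketch of how these bits combine and are tested, mirroring the checks in
// `ListAggregatedRecordingsRow::from` above.
#[cfg(test)]
mod recording_flags_example {
    use super::*;

    #[test]
    fn flags_are_independent_bits() {
        let flags = RecordingFlags::TrailingZero as i32 | RecordingFlags::Growing as i32;
        assert!((flags & RecordingFlags::TrailingZero as i32) != 0);
        assert!((flags & RecordingFlags::Growing as i32) != 0);
        assert!((flags & RecordingFlags::Uncommitted as i32) == 0);
    }
}
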
/// A recording to pass to `LockedDatabase::add_recording` and `raw::insert_recording`.
#[derive(Clone, Debug, Default)]
pub struct RecordingToInsert {
    pub run_offset: i32,
    pub flags: i32,
    pub sample_file_bytes: i32,
    pub start: recording::Time,

    /// Filled in by `add_recording`.
    pub prev_media_duration: recording::Duration,

    /// Filled in by `add_recording`.
    pub prev_runs: i32,

    pub wall_duration_90k: i32, // a recording::Duration, but guaranteed to fit in i32.
    pub media_duration_90k: i32,
    pub local_time_delta: recording::Duration,
    pub video_samples: i32,
    pub video_sync_samples: i32,
    pub video_sample_entry_id: i32,
    pub video_index: Vec<u8>,
    pub sample_file_blake3: Option<[u8; 32]>,
    pub end_reason: Option<String>,
}

impl RecordingToInsert {
    fn to_list_row(&self, id: CompositeId, open_id: u32) -> ListRecordingsRow {
        ListRecordingsRow {
            start: self.start,
            video_sample_entry_id: self.video_sample_entry_id,
            id,
            wall_duration_90k: self.wall_duration_90k,
            media_duration_90k: self.media_duration_90k,
            video_samples: self.video_samples,
            video_sync_samples: self.video_sync_samples,
            sample_file_bytes: self.sample_file_bytes,
            run_offset: self.run_offset,
            open_id,
            flags: self.flags | RecordingFlags::Uncommitted as i32,
            prev_media_duration_and_runs: Some((self.prev_media_duration, self.prev_runs)),
        }
    }
}

/// A row used in `raw::list_oldest_recordings` and `db::delete_oldest_recordings`.
#[derive(Copy, Clone, Debug)]
pub(crate) struct ListOldestRecordingsRow {
    pub id: CompositeId,
    pub start: recording::Time,
    pub wall_duration_90k: i32,
    pub sample_file_bytes: i32,
}

#[derive(Debug)]
pub struct SampleFileDir {
    pub id: i32,
    pub path: PathBuf,
    pub uuid: Uuid,
    dir: Option<Arc<dir::SampleFileDir>>,
    last_complete_open: Option<Open>,

    /// ids which are in the `garbage` database table (rather than `recording`) as of last commit
    /// but may still exist on disk. These can't be safely removed from the database yet.
    pub(crate) garbage_needs_unlink: FnvHashSet<CompositeId>,

    /// ids which are in the `garbage` database table and are guaranteed to no longer exist on
    /// disk (have been unlinked and the dir has been synced). These may be removed from the
    /// database on next flush. Mutually exclusive with `garbage_needs_unlink`.
    pub(crate) garbage_unlinked: Vec<CompositeId>,
}

impl SampleFileDir {
    /// Returns a cloned copy of the directory, or Err if closed.
    ///
    /// Use `LockedDatabase::open_sample_file_dirs` prior to calling this method.
    pub fn get(&self) -> Result<Arc<dir::SampleFileDir>, Error> {
        Ok(self
            .dir
            .as_ref()
            .ok_or_else(|| format_err!("sample file dir {} is closed", self.id))?
            .clone())
    }

    /// Returns expected existing metadata when opening this directory.
    fn expected_meta(&self, db_uuid: &Uuid) -> schema::DirMeta {
        let mut meta = schema::DirMeta::default();
        meta.db_uuid.extend_from_slice(&db_uuid.as_bytes()[..]);
        meta.dir_uuid.extend_from_slice(&self.uuid.as_bytes()[..]);
        if let Some(o) = self.last_complete_open {
            let open = meta.last_complete_open.mut_or_insert_default();
            open.id = o.id;
            open.uuid.extend_from_slice(&o.uuid.as_bytes()[..]);
        }
        meta
    }
}

pub use crate::auth::RawSessionId;
pub use crate::auth::Request;
pub use crate::auth::Session;
pub use crate::auth::User;
pub use crate::auth::UserChange;

/// In-memory state about a camera.
#[derive(Debug)]
pub struct Camera {
    pub id: i32,
    pub uuid: Uuid,
    pub short_name: String,
    pub config: crate::json::CameraConfig,
    pub streams: [Option<i32>; NUM_STREAM_TYPES],
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum StreamType {
    Main,
    Sub,
    Ext,
}

pub const NUM_STREAM_TYPES: usize = 3;

impl StreamType {
    pub fn from_index(i: usize) -> Option<Self> {
        match i {
            0 => Some(StreamType::Main),
            1 => Some(StreamType::Sub),
            2 => Some(StreamType::Ext),
            _ => None,
        }
    }

    pub fn index(self) -> usize {
        match self {
            StreamType::Main => 0,
            StreamType::Sub => 1,
            StreamType::Ext => 2,
        }
    }

    pub fn as_str(self) -> &'static str {
        match self {
            StreamType::Main => "main",
            StreamType::Sub => "sub",
            StreamType::Ext => "ext",
        }
    }

    pub fn parse(type_: &str) -> Option<Self> {
        match type_ {
            "main" => Some(StreamType::Main),
            "sub" => Some(StreamType::Sub),
            "ext" => Some(StreamType::Ext),
            _ => None,
        }
    }
}

impl ::std::fmt::Display for StreamType {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
        f.write_str(self.as_str())
    }
}

pub const ALL_STREAM_TYPES: [StreamType; NUM_STREAM_TYPES] =
    [StreamType::Main, StreamType::Sub, StreamType::Ext];

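// A quick sketch of the round trips between `StreamType`'s enum, index, and string forms.
#[cfg(test)]
mod stream_type_example {
    use super::*;

    #[test]
    fn round_trips() {
        for &t in &ALL_STREAM_TYPES {
            assert_eq!(StreamType::parse(t.as_str()), Some(t));
            assert_eq!(StreamType::from_index(t.index()), Some(t));
        }
        assert_eq!(StreamType::parse("bogus"), None);
    }
}
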
pub struct Stream {
    pub id: i32,
    pub camera_id: i32,
    pub sample_file_dir_id: Option<i32>,
    pub type_: StreamType,
    pub config: crate::json::StreamConfig,

    /// The time range of recorded data associated with this stream (minimum start time and
    /// maximum end time). `None` iff there are no recordings for this stream.
    pub range: Option<Range<recording::Time>>,

    /// The total bytes of flushed sample files. This doesn't include disk space wasted in the
    /// last filesystem block allocated to each file ("internal fragmentation").
    pub sample_file_bytes: i64,

    /// The total bytes on the filesystem used by this stream. This is slightly more than
    /// `sample_file_bytes` because it includes the wasted space in the last filesystem block.
    pub fs_bytes: i64,

    /// On flush, delete the following recordings (move them to the `garbage` table, to be
    /// collected later). Note they must be the oldest recordings. The later collection involves
    /// the syncer unlinking the files on disk and syncing the directory, then enqueueing their
    /// removal from the `garbage` table for a following flush.
    to_delete: Vec<ListOldestRecordingsRow>,

    /// The total bytes to delete with the next flush.
    pub bytes_to_delete: i64,
    pub fs_bytes_to_delete: i64,

    /// The total bytes to add with the next flush. (`mark_synced` has already been called on these
    /// recordings.)
    pub bytes_to_add: i64,
    pub fs_bytes_to_add: i64,

    /// The total duration of undeleted recorded data. This may not be `range.end - range.start`
    /// due to gaps and overlap.
    pub duration: recording::Duration,

    /// Mapping of calendar day (in the server's time zone) to a summary of committed recordings on
    /// that day.
    pub committed_days: days::Map<days::StreamValue>,

    /// The `cum_recordings` currently committed to the database.
    pub(crate) cum_recordings: i32,

    /// The `cum_media_duration_90k` currently committed to the database.
    cum_media_duration: recording::Duration,

    /// The `cum_runs` currently committed to the database.
    cum_runs: i32,

    /// The recordings which have been added via `LockedDatabase::add_recording` but have yet to
    /// be committed to the database.
    ///
    /// `uncommitted[i]` uses sample filename `CompositeId::new(id, cum_recordings + i)`;
    /// `cum_recordings` should be advanced when one is committed to maintain this invariant.
    ///
    /// TODO: alter the serving path to show these just as if they were already committed.
    uncommitted: VecDeque<Arc<Mutex<RecordingToInsert>>>,

    /// The number of recordings in `uncommitted` which are synced and ready to commit.
    synced_recordings: usize,

    on_live_segment: Vec<Box<dyn FnMut(LiveSegment) -> bool + Send>>,
}

/// Bounds of a live view segment. Currently this is a single frame of video.
/// This is used for live stream recordings. The stream id should already be known to the
/// subscriber. Note this doesn't actually contain the video, just a reference that can be
/// looked up within the database.
#[derive(Clone, Debug)]
pub struct LiveSegment {
    pub recording: i32,

    /// If the segment's one frame is a key frame.
    pub is_key: bool,

    /// The pts, relative to the start of the recording, of the start and end of this live segment,
    /// in 90kHz units.
    pub media_off_90k: Range<i32>,
}

#[derive(Clone, Debug, Default)]
pub struct StreamChange {
    pub sample_file_dir_id: Option<i32>,
    pub config: crate::json::StreamConfig,
}

/// Information about a camera, used by `add_camera` and `update_camera`.
#[derive(Clone, Debug, Default)]
pub struct CameraChange {
    pub short_name: String,
    pub config: crate::json::CameraConfig,

    /// `StreamType t` is represented by `streams[t.index()]`. A default StreamChange will
    /// correspond to no stream in the database, provided there are no existing recordings for that
    /// stream.
    pub streams: [StreamChange; NUM_STREAM_TYPES],
}

impl Stream {
|
2019-09-26 09:09:27 -04:00
|
|
|
/// Adds a single fully committed recording with the given properties to the in-memory state.
|
2016-11-25 17:34:00 -05:00
|
|
|
fn add_recording(&mut self, r: Range<recording::Time>, sample_file_bytes: i32) {
|
|
|
|
self.range = Some(match self.range {
|
2021-02-17 01:15:54 -05:00
|
|
|
Some(ref e) => cmp::min(e.start, r.start)..cmp::max(e.end, r.end),
|
|
|
|
None => r.start..r.end,
|
2016-11-25 17:34:00 -05:00
|
|
|
});
|
|
|
|
self.duration += r.end - r.start;
|
2021-09-10 19:31:03 -04:00
|
|
|
self.sample_file_bytes += i64::from(sample_file_bytes);
|
2020-07-12 19:51:39 -04:00
|
|
|
self.fs_bytes += round_up(i64::from(sample_file_bytes));
|
2021-03-23 12:40:52 -04:00
|
|
|
self.committed_days.adjust(r, 1);
|
2020-07-18 14:57:17 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns a days map including unflushed recordings.
|
2021-03-23 12:40:52 -04:00
|
|
|
pub fn days(&self) -> days::Map<days::StreamValue> {
|
2020-07-18 14:57:17 -04:00
|
|
|
let mut days = self.committed_days.clone();
|
|
|
|
for u in &self.uncommitted {
|
2022-09-29 01:19:35 -04:00
|
|
|
let l = u.lock().unwrap();
|
2021-03-23 12:40:52 -04:00
|
|
|
days.adjust(
|
2021-02-17 01:15:54 -05:00
|
|
|
l.start..l.start + recording::Duration(i64::from(l.wall_duration_90k)),
|
|
|
|
1,
|
|
|
|
);
|
2020-07-18 14:57:17 -04:00
|
|
|
}
|
|
|
|
days
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
|
|
|
}
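// Standalone sketch (not in the original source) of the range-merging rule
// `add_recording` applies above: a stream's overall range grows to the union
// of its old range and the newly added recording's range.
#[allow(dead_code)]
fn merge_range(
    existing: Option<Range<recording::Time>>,
    new: Range<recording::Time>,
) -> Range<recording::Time> {
    match existing {
        Some(e) => cmp::min(e.start, new.start)..cmp::max(e.end, new.end),
        None => new,
    }
}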
|
|
|
|
|
|
|
|
/// Initializes the in-memory recording state for the given camera's stream.
|
2021-02-17 01:15:54 -05:00
|
|
|
fn init_recordings(
|
|
|
|
conn: &mut rusqlite::Connection,
|
|
|
|
stream_id: i32,
|
|
|
|
camera: &Camera,
|
|
|
|
stream: &mut Stream,
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
info!(
|
|
|
|
"Loading recordings for camera {} stream {:?}",
|
|
|
|
camera.short_name, stream.type_
|
|
|
|
);
|
|
|
|
let mut stmt = conn.prepare(
|
|
|
|
r#"
|
2016-11-25 17:34:00 -05:00
|
|
|
select
|
|
|
|
recording.start_time_90k,
|
2020-08-05 00:44:01 -04:00
|
|
|
recording.wall_duration_90k,
|
2016-11-25 17:34:00 -05:00
|
|
|
recording.sample_file_bytes
|
|
|
|
from
|
|
|
|
recording
|
|
|
|
where
|
2018-01-23 14:05:07 -05:00
|
|
|
stream_id = :stream_id
|
2021-02-17 01:15:54 -05:00
|
|
|
"#,
|
|
|
|
)?;
|
2021-05-17 13:50:12 -04:00
|
|
|
let mut rows = stmt.query(named_params! {":stream_id": stream_id})?;
|
2016-11-25 17:34:00 -05:00
|
|
|
let mut i = 0;
|
2019-05-31 19:19:04 -04:00
|
|
|
while let Some(row) = rows.next()? {
|
|
|
|
let start = recording::Time(row.get(0)?);
|
|
|
|
let duration = recording::Duration(row.get(1)?);
|
|
|
|
let bytes = row.get(2)?;
|
2021-02-17 01:15:54 -05:00
|
|
|
stream.add_recording(start..start + duration, bytes);
|
2016-11-25 17:34:00 -05:00
|
|
|
i += 1;
|
|
|
|
}
|
2021-02-17 01:15:54 -05:00
|
|
|
info!(
|
|
|
|
"Loaded {} recordings for camera {} stream {:?}",
|
|
|
|
i, camera.short_name, stream.type_
|
|
|
|
);
|
2016-11-25 17:34:00 -05:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
pub struct LockedDatabase {
|
|
|
|
conn: rusqlite::Connection,
|
2018-02-15 02:10:10 -05:00
|
|
|
uuid: Uuid,
|
2019-01-04 19:11:58 -05:00
|
|
|
flush_count: usize,
|
2018-02-15 02:10:10 -05:00
|
|
|
|
|
|
|
/// If the database is open in read-write mode, the information about the current Open row.
|
2019-01-21 18:58:52 -05:00
|
|
|
pub open: Option<Open>,
|
2018-03-09 20:41:53 -05:00
|
|
|
|
|
|
|
/// The monotonic time when the database was opened (whether in read-write mode or read-only
|
|
|
|
/// mode).
|
|
|
|
open_monotonic: recording::Time,
|
|
|
|
|
2018-11-02 02:25:06 -04:00
|
|
|
auth: auth::State,
|
2019-06-06 19:18:13 -04:00
|
|
|
signal: signal::State,
|
2018-11-02 02:25:06 -04:00
|
|
|
|
2018-02-12 01:45:51 -05:00
|
|
|
sample_file_dirs_by_id: BTreeMap<i32, SampleFileDir>,
|
2016-11-25 17:34:00 -05:00
|
|
|
cameras_by_id: BTreeMap<i32, Camera>,
|
2018-01-23 14:05:07 -05:00
|
|
|
streams_by_id: BTreeMap<i32, Stream>,
|
2021-02-17 01:15:54 -05:00
|
|
|
cameras_by_uuid: BTreeMap<Uuid, i32>, // values are ids.
|
2018-03-01 23:59:05 -05:00
|
|
|
video_sample_entries_by_id: BTreeMap<i32, Arc<VideoSampleEntry>>,
|
2020-11-22 20:37:55 -05:00
|
|
|
video_index_cache: RefCell<LinkedHashMap<i64, Box<[u8]>, fnv::FnvBuildHasher>>,
|
2019-06-14 11:47:11 -04:00
|
|
|
on_flush: Vec<Box<dyn Fn() + Send>>,
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
|
|
|
|
2018-02-23 00:46:41 -05:00
|
|
|
/// Represents a row of the `open` database table.
|
2018-02-15 02:10:10 -05:00
|
|
|
#[derive(Copy, Clone, Debug)]
|
2019-01-21 18:58:52 -05:00
|
|
|
pub struct Open {
|
|
|
|
pub id: u32,
|
2018-02-23 00:46:41 -05:00
|
|
|
pub(crate) uuid: Uuid,
|
2018-02-15 02:10:10 -05:00
|
|
|
}
|
|
|
|
|
2021-04-10 20:34:52 -04:00
|
|
|
/// A combination of a stream id and recording id into a single 64-bit int.
|
|
|
|
/// This is used as a primary key in the SQLite `recording` table (see `schema.sql`)
|
|
|
|
/// and the sample file's name on disk (see `dir.rs`).
|
2018-02-22 19:35:34 -05:00
|
|
|
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
|
2018-02-20 13:11:10 -05:00
|
|
|
pub struct CompositeId(pub i64);
|
|
|
|
|
|
|
|
impl CompositeId {
|
|
|
|
pub fn new(stream_id: i32, recording_id: i32) -> Self {
|
|
|
|
CompositeId((stream_id as i64) << 32 | recording_id as i64)
|
|
|
|
}
|
|
|
|
|
2021-02-17 01:15:54 -05:00
|
|
|
pub fn stream(self) -> i32 {
|
|
|
|
(self.0 >> 32) as i32
|
|
|
|
}
|
|
|
|
pub fn recording(self) -> i32 {
|
|
|
|
self.0 as i32
|
|
|
|
}
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
|
|
|
|
2018-02-20 13:11:10 -05:00
|
|
|
impl ::std::fmt::Display for CompositeId {
|
|
|
|
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
|
|
|
|
write!(f, "{}/{}", self.stream(), self.recording())
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
2018-02-20 13:11:10 -05:00
|
|
|
}
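// A minimal test-style sketch (not in the original source) showing how
// `CompositeId` packs and round-trips a stream id and recording id; the
// numbers are illustrative only.
#[cfg(test)]
mod composite_id_example {
    use super::CompositeId;

    #[test]
    fn round_trips_stream_and_recording() {
        let id = CompositeId::new(5, 42);
        assert_eq!(id.stream(), 5);
        assert_eq!(id.recording(), 42);
        assert_eq!(id.to_string(), "5/42");
    }
}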
|
2016-11-25 17:34:00 -05:00
|
|
|
|
2018-02-12 01:45:51 -05:00
|
|
|
/// Inserts, updates, or removes streams in the in-memory state to match a set of `StreamChange`
|
|
|
|
/// structs.
|
|
|
|
struct StreamStateChanger {
|
2021-09-10 19:31:03 -04:00
|
|
|
sids: [Option<i32>; NUM_STREAM_TYPES],
|
2023-01-28 14:59:21 -05:00
|
|
|
|
|
|
|
/// For each stream to change, a (stream_id, upsert or `None` to delete) tuple.
|
|
|
|
streams: Vec<(i32, Option<StreamStateChangerUpsert>)>,
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Upsert state used internally within [`StreamStateChanger`].
|
|
|
|
struct StreamStateChangerUpsert {
|
|
|
|
camera_id: i32,
|
|
|
|
type_: StreamType,
|
|
|
|
sc: StreamChange,
|
2018-01-23 14:05:07 -05:00
|
|
|
}
|
|
|
|
|
2018-02-12 01:45:51 -05:00
|
|
|
impl StreamStateChanger {
|
|
|
|
/// Performs the database updates (guarded by the given transaction) and returns the state
|
|
|
|
/// change to be applied on successful commit.
|
2021-02-17 01:15:54 -05:00
|
|
|
fn new(
|
|
|
|
tx: &rusqlite::Transaction,
|
|
|
|
camera_id: i32,
|
|
|
|
existing: Option<&Camera>,
|
|
|
|
streams_by_id: &BTreeMap<i32, Stream>,
|
|
|
|
change: &mut CameraChange,
|
|
|
|
) -> Result<Self, Error> {
|
2021-09-10 19:31:03 -04:00
|
|
|
let mut sids = [None; NUM_STREAM_TYPES];
|
|
|
|
let mut streams = Vec::with_capacity(NUM_STREAM_TYPES);
|
2018-02-12 01:45:51 -05:00
|
|
|
let existing_streams = existing.map(|e| e.streams).unwrap_or_default();
|
|
|
|
for (i, ref mut sc) in change.streams.iter_mut().enumerate() {
|
2019-01-21 18:58:52 -05:00
|
|
|
let type_ = StreamType::from_index(i).unwrap();
|
2018-02-12 01:45:51 -05:00
|
|
|
let mut have_data = false;
|
|
|
|
if let Some(sid) = existing_streams[i] {
|
|
|
|
let s = streams_by_id.get(&sid).unwrap();
|
|
|
|
if s.range.is_some() {
|
|
|
|
have_data = true;
|
2021-02-17 01:15:54 -05:00
|
|
|
if let (Some(d), false) = (
|
|
|
|
s.sample_file_dir_id,
|
|
|
|
s.sample_file_dir_id == sc.sample_file_dir_id,
|
|
|
|
) {
|
|
|
|
bail!(
|
|
|
|
"can't change sample_file_dir_id {:?}->{:?} for non-empty stream {}",
|
|
|
|
d,
|
|
|
|
sc.sample_file_dir_id,
|
|
|
|
sid
|
|
|
|
);
|
2018-02-12 01:45:51 -05:00
|
|
|
}
|
|
|
|
}
|
2021-09-10 19:31:03 -04:00
|
|
|
if !have_data && sc.config.is_empty() && sc.sample_file_dir_id.is_none() {
|
2018-02-12 01:45:51 -05:00
|
|
|
// Delete stream.
|
2021-02-17 01:15:54 -05:00
|
|
|
let mut stmt = tx.prepare_cached(
|
|
|
|
r#"
|
2018-02-12 01:45:51 -05:00
|
|
|
delete from stream where id = ?
|
2021-02-17 01:15:54 -05:00
|
|
|
"#,
|
|
|
|
)?;
|
2020-03-19 23:46:25 -04:00
|
|
|
if stmt.execute(params![sid])? != 1 {
|
2018-02-21 01:46:14 -05:00
|
|
|
bail!("missing stream {}", sid);
|
2018-02-12 01:45:51 -05:00
|
|
|
}
|
|
|
|
streams.push((sid, None));
|
|
|
|
} else {
|
|
|
|
// Update stream.
|
2021-02-17 01:15:54 -05:00
|
|
|
let mut stmt = tx.prepare_cached(
|
|
|
|
r#"
|
2018-02-12 01:45:51 -05:00
|
|
|
update stream set
|
2021-09-10 19:31:03 -04:00
|
|
|
config = :config,
|
2018-02-12 01:45:51 -05:00
|
|
|
sample_file_dir_id = :sample_file_dir_id
|
|
|
|
where
|
|
|
|
id = :id
|
2021-02-17 01:15:54 -05:00
|
|
|
"#,
|
|
|
|
)?;
|
2021-05-17 13:50:12 -04:00
|
|
|
let rows = stmt.execute(named_params! {
|
2021-09-10 19:31:03 -04:00
|
|
|
":config": &sc.config,
|
2020-03-19 23:46:25 -04:00
|
|
|
":sample_file_dir_id": sc.sample_file_dir_id,
|
|
|
|
":id": sid,
|
|
|
|
})?;
|
2018-02-12 01:45:51 -05:00
|
|
|
if rows != 1 {
|
2018-02-21 01:46:14 -05:00
|
|
|
bail!("missing stream {}", sid);
|
2018-02-12 01:45:51 -05:00
|
|
|
}
|
|
|
|
sids[i] = Some(sid);
|
2023-01-28 14:59:21 -05:00
|
|
|
streams.push((
|
|
|
|
sid,
|
|
|
|
Some(StreamStateChangerUpsert {
|
|
|
|
camera_id,
|
|
|
|
type_,
|
|
|
|
sc: mem::take(*sc),
|
|
|
|
}),
|
|
|
|
));
|
2018-02-12 01:45:51 -05:00
|
|
|
}
|
|
|
|
} else {
|
2021-09-10 19:31:03 -04:00
|
|
|
if sc.config.is_empty() && sc.sample_file_dir_id.is_none() {
|
2018-02-12 01:45:51 -05:00
|
|
|
// Do nothing; there is no record and we want to keep it that way.
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// Insert stream.
|
2021-02-17 01:15:54 -05:00
|
|
|
let mut stmt = tx.prepare_cached(
|
|
|
|
r#"
|
2021-09-10 19:31:03 -04:00
|
|
|
insert into stream (camera_id, sample_file_dir_id, type, config,
|
|
|
|
cum_recordings, cum_media_duration_90k, cum_runs)
|
|
|
|
values (:camera_id, :sample_file_dir_id, :type, :config,
|
|
|
|
0, 0, 0)
|
2021-02-17 01:15:54 -05:00
|
|
|
"#,
|
|
|
|
)?;
|
2021-05-17 13:50:12 -04:00
|
|
|
stmt.execute(named_params! {
|
2020-03-19 23:46:25 -04:00
|
|
|
":camera_id": camera_id,
|
|
|
|
":sample_file_dir_id": sc.sample_file_dir_id,
|
|
|
|
":type": type_.as_str(),
|
2021-09-10 19:31:03 -04:00
|
|
|
":config": &sc.config,
|
2020-03-19 23:46:25 -04:00
|
|
|
})?;
|
2018-02-12 01:45:51 -05:00
|
|
|
let id = tx.last_insert_rowid() as i32;
|
|
|
|
sids[i] = Some(id);
|
2023-01-28 14:59:21 -05:00
|
|
|
streams.push((
|
|
|
|
id,
|
|
|
|
Some(StreamStateChangerUpsert {
|
|
|
|
camera_id,
|
|
|
|
type_,
|
|
|
|
sc: mem::take(*sc),
|
|
|
|
}),
|
|
|
|
));
|
2018-02-12 01:45:51 -05:00
|
|
|
}
|
|
|
|
}
|
2021-02-17 01:15:54 -05:00
|
|
|
Ok(StreamStateChanger { sids, streams })
|
2018-01-23 14:05:07 -05:00
|
|
|
}
|
|
|
|
|
2018-02-12 01:45:51 -05:00
|
|
|
/// Applies the change to the given `streams_by_id`. The caller is expected to set
|
|
|
|
/// `Camera::streams` to the return value.
|
2021-09-10 19:31:03 -04:00
|
|
|
fn apply(
|
|
|
|
mut self,
|
|
|
|
streams_by_id: &mut BTreeMap<i32, Stream>,
|
|
|
|
) -> [Option<i32>; NUM_STREAM_TYPES] {
|
2018-12-28 13:21:49 -05:00
|
|
|
for (id, stream) in self.streams.drain(..) {
|
2018-02-12 01:45:51 -05:00
|
|
|
use ::std::collections::btree_map::Entry;
|
|
|
|
match (streams_by_id.entry(id), stream) {
|
2023-01-28 14:59:21 -05:00
|
|
|
(
|
|
|
|
Entry::Vacant(e),
|
|
|
|
Some(StreamStateChangerUpsert {
|
|
|
|
camera_id,
|
|
|
|
type_,
|
|
|
|
sc,
|
|
|
|
}),
|
|
|
|
) => {
|
2019-01-21 18:58:52 -05:00
|
|
|
e.insert(Stream {
|
|
|
|
id,
|
|
|
|
type_,
|
|
|
|
camera_id,
|
|
|
|
sample_file_dir_id: sc.sample_file_dir_id,
|
2021-09-10 19:31:03 -04:00
|
|
|
config: sc.config,
|
2019-01-21 18:58:52 -05:00
|
|
|
range: None,
|
|
|
|
sample_file_bytes: 0,
|
2020-07-12 19:51:39 -04:00
|
|
|
fs_bytes: 0,
|
2019-01-21 18:58:52 -05:00
|
|
|
to_delete: Vec::new(),
|
|
|
|
bytes_to_delete: 0,
|
2020-07-12 19:51:39 -04:00
|
|
|
fs_bytes_to_delete: 0,
|
2019-01-21 18:58:52 -05:00
|
|
|
bytes_to_add: 0,
|
2020-07-12 19:51:39 -04:00
|
|
|
fs_bytes_to_add: 0,
|
2019-01-21 18:58:52 -05:00
|
|
|
duration: recording::Duration(0),
|
2021-05-17 17:31:50 -04:00
|
|
|
committed_days: days::Map::default(),
|
2020-06-09 19:17:32 -04:00
|
|
|
cum_recordings: 0,
|
2020-08-05 00:44:01 -04:00
|
|
|
cum_media_duration: recording::Duration(0),
|
2020-06-09 19:17:32 -04:00
|
|
|
cum_runs: 0,
|
2019-01-21 18:58:52 -05:00
|
|
|
uncommitted: VecDeque::new(),
|
|
|
|
synced_recordings: 0,
|
|
|
|
on_live_segment: Vec::new(),
|
|
|
|
});
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
|
|
|
(Entry::Vacant(_), None) => {}
|
2023-01-28 14:59:21 -05:00
|
|
|
(Entry::Occupied(e), Some(StreamStateChangerUpsert { sc, .. })) => {
|
2019-01-21 18:58:52 -05:00
|
|
|
let e = e.into_mut();
|
|
|
|
e.sample_file_dir_id = sc.sample_file_dir_id;
|
2021-09-10 19:31:03 -04:00
|
|
|
e.config = sc.config;
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
|
|
|
(Entry::Occupied(e), None) => {
|
|
|
|
e.remove();
|
|
|
|
}
|
2018-02-12 01:45:51 -05:00
|
|
|
};
|
|
|
|
}
|
|
|
|
self.sids
|
2018-01-23 14:05:07 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-22 19:35:34 -05:00
|
|
|
/// A retention change as expected by `LockedDatabase::update_retention`.
|
|
|
|
pub struct RetentionChange {
|
|
|
|
pub stream_id: i32,
|
|
|
|
pub new_record: bool,
|
|
|
|
pub new_limit: i64,
|
|
|
|
}
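// Hypothetical usage sketch (not in the original source): ask
// `update_retention` to keep recording stream 1 with a 10 GiB disk budget.
#[allow(dead_code)]
fn example_retention_change() -> RetentionChange {
    RetentionChange {
        stream_id: 1,
        new_record: true,
        new_limit: 10 << 30, // bytes
    }
}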
|
|
|
|
|
2016-11-25 17:34:00 -05:00
|
|
|
impl LockedDatabase {
|
|
|
|
/// Returns an immutable view of the cameras by id.
|
2021-02-17 01:15:54 -05:00
|
|
|
pub fn cameras_by_id(&self) -> &BTreeMap<i32, Camera> {
|
|
|
|
&self.cameras_by_id
|
|
|
|
}
|
2018-02-12 01:45:51 -05:00
|
|
|
pub fn sample_file_dirs_by_id(&self) -> &BTreeMap<i32, SampleFileDir> {
|
2018-02-22 19:35:34 -05:00
|
|
|
&self.sample_file_dirs_by_id
|
|
|
|
}
|
|
|
|
|
2019-01-04 19:11:58 -05:00
|
|
|
/// Returns the number of completed database flushes since startup.
|
2021-02-17 01:15:54 -05:00
|
|
|
pub fn flushes(&self) -> usize {
|
|
|
|
self.flush_count
|
|
|
|
}
|
2019-01-04 19:11:58 -05:00
|
|
|
|
2018-03-01 16:50:59 -05:00
|
|
|
/// Adds a placeholder for an uncommitted recording.
|
2020-06-09 19:17:32 -04:00
|
|
|
///
|
2018-03-02 18:40:32 -05:00
|
|
|
/// The caller should write samples and fill the returned `RecordingToInsert` as it goes
|
|
|
|
/// (noting that while holding the lock, it should not perform I/O or acquire the database
|
|
|
|
/// lock). Then it should sync to permanent storage and call `mark_synced`. The data will
|
|
|
|
/// be written to the database on the next `flush`.
|
2020-06-09 19:17:32 -04:00
|
|
|
///
|
|
|
|
/// A call to `add_recording` is also a promise that previous recordings (even if not yet
|
|
|
|
/// synced and committed) won't change.
|
|
|
|
///
|
2020-08-05 00:44:01 -04:00
|
|
|
/// This fills the `prev_media_duration` and `prev_runs` fields.
|
2021-02-17 01:15:54 -05:00
|
|
|
pub(crate) fn add_recording(
|
|
|
|
&mut self,
|
|
|
|
stream_id: i32,
|
|
|
|
mut r: RecordingToInsert,
|
|
|
|
) -> Result<(CompositeId, Arc<Mutex<RecordingToInsert>>), Error> {
|
2018-02-22 19:35:34 -05:00
|
|
|
let stream = match self.streams_by_id.get_mut(&stream_id) {
|
|
|
|
None => bail!("no such stream {}", stream_id),
|
|
|
|
Some(s) => s,
|
|
|
|
};
|
2021-02-17 01:15:54 -05:00
|
|
|
let id = CompositeId::new(
|
|
|
|
stream_id,
|
|
|
|
stream.cum_recordings + (stream.uncommitted.len() as i32),
|
|
|
|
);
|
2020-06-09 19:17:32 -04:00
|
|
|
match stream.uncommitted.back() {
|
|
|
|
Some(s) => {
|
2022-09-29 01:19:35 -04:00
|
|
|
let l = s.lock().unwrap();
|
2020-08-05 00:44:01 -04:00
|
|
|
r.prev_media_duration =
|
2021-03-26 01:09:29 -04:00
|
|
|
l.prev_media_duration + recording::Duration(l.media_duration_90k.into());
|
2020-06-09 19:17:32 -04:00
|
|
|
r.prev_runs = l.prev_runs + if l.run_offset == 0 { 1 } else { 0 };
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
2020-06-09 19:17:32 -04:00
|
|
|
None => {
|
2020-08-05 00:44:01 -04:00
|
|
|
r.prev_media_duration = stream.cum_media_duration;
|
2020-06-09 19:17:32 -04:00
|
|
|
r.prev_runs = stream.cum_runs;
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
2020-06-09 19:17:32 -04:00
|
|
|
};
|
2018-03-02 18:40:32 -05:00
|
|
|
let recording = Arc::new(Mutex::new(r));
|
2018-02-22 19:35:34 -05:00
|
|
|
stream.uncommitted.push_back(Arc::clone(&recording));
|
|
|
|
Ok((id, recording))
|
|
|
|
}
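    // Hedged flow sketch, not part of the original API: the intended caller
    // sequence for `add_recording` above and `mark_synced` below. In real use
    // the sample-file writing and fsync happen in `dir.rs` with the database
    // lock released; they are elided to comments here.
    #[allow(dead_code)]
    fn example_recording_flow(
        &mut self,
        stream_id: i32,
        r: RecordingToInsert,
    ) -> Result<(), Error> {
        let (id, recording) = self.add_recording(stream_id, r)?;
        // ... write sample data to the file named by `id`, updating the fields
        //     of `recording.lock().unwrap()` as frames are appended ...
        drop(recording);
        // ... fsync the sample file and its directory ...
        self.mark_synced(id)?; // the row reaches SQLite on the next `flush()`
        Ok(())
    }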
|
|
|
|
|
2018-03-01 16:50:59 -05:00
|
|
|
/// Marks the given uncommitted recording as synced and ready to flush.
|
|
|
|
/// This must be the next unsynced recording.
|
|
|
|
pub(crate) fn mark_synced(&mut self, id: CompositeId) -> Result<(), Error> {
|
|
|
|
let stream = match self.streams_by_id.get_mut(&id.stream()) {
|
|
|
|
None => bail!("no stream for recording {}", id),
|
|
|
|
Some(s) => s,
|
|
|
|
};
|
2020-06-09 19:17:32 -04:00
|
|
|
let next_unsynced = stream.cum_recordings + (stream.synced_recordings as i32);
|
2018-03-01 16:50:59 -05:00
|
|
|
if id.recording() != next_unsynced {
|
2021-02-17 01:15:54 -05:00
|
|
|
bail!(
|
|
|
|
"can't sync {} when next unsynced recording is {} (next unflushed is {})",
|
|
|
|
id,
|
|
|
|
next_unsynced,
|
|
|
|
stream.cum_recordings
|
|
|
|
);
|
2018-03-01 16:50:59 -05:00
|
|
|
}
|
|
|
|
if stream.synced_recordings == stream.uncommitted.len() {
|
|
|
|
bail!("can't sync un-added recording {}", id);
|
|
|
|
}
|
2022-09-29 01:19:35 -04:00
|
|
|
let l = stream.uncommitted[stream.synced_recordings].lock().unwrap();
|
2020-07-12 19:51:39 -04:00
|
|
|
let bytes = i64::from(l.sample_file_bytes);
|
|
|
|
stream.bytes_to_add += bytes;
|
|
|
|
stream.fs_bytes_to_add += round_up(bytes);
|
2018-03-01 16:50:59 -05:00
|
|
|
stream.synced_recordings += 1;
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-02-17 01:15:54 -05:00
|
|
|
pub(crate) fn delete_garbage(
|
|
|
|
&mut self,
|
|
|
|
dir_id: i32,
|
|
|
|
ids: &mut Vec<CompositeId>,
|
|
|
|
) -> Result<(), Error> {
|
2018-02-22 19:35:34 -05:00
|
|
|
let dir = match self.sample_file_dirs_by_id.get_mut(&dir_id) {
|
|
|
|
None => bail!("no such dir {}", dir_id),
|
|
|
|
Some(d) => d,
|
|
|
|
};
|
2018-12-01 03:03:43 -05:00
|
|
|
dir.garbage_unlinked.reserve(ids.len());
|
|
|
|
ids.retain(|id| {
|
|
|
|
if !dir.garbage_needs_unlink.remove(id) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
dir.garbage_unlinked.push(*id);
|
|
|
|
false
|
|
|
|
});
|
|
|
|
if !ids.is_empty() {
|
|
|
|
bail!("delete_garbage with non-garbage ids {:?}", &ids[..]);
|
|
|
|
}
|
2018-02-22 19:35:34 -05:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2019-01-21 18:58:52 -05:00
|
|
|
/// Registers a callback to run on every live segment immediately after it's recorded.
|
|
|
|
/// The callback is run with the database lock held, so it must not call back into the database
|
|
|
|
/// or block. The callback should return false to unregister.
|
2021-02-17 01:15:54 -05:00
|
|
|
pub fn watch_live(
|
|
|
|
&mut self,
|
|
|
|
stream_id: i32,
|
|
|
|
cb: Box<dyn FnMut(LiveSegment) -> bool + Send>,
|
|
|
|
) -> Result<(), Error> {
|
2019-01-21 18:58:52 -05:00
|
|
|
let s = match self.streams_by_id.get_mut(&stream_id) {
|
|
|
|
None => bail!("no such stream {}", stream_id),
|
|
|
|
Some(s) => s,
|
|
|
|
};
|
|
|
|
s.on_live_segment.push(cb);
|
|
|
|
Ok(())
|
|
|
|
}
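    // Hedged usage sketch (not part of the original API): register a watcher
    // that counts down and unregisters itself by returning false. The callback
    // runs with the database lock held, so it must not block or re-enter the
    // database.
    #[allow(dead_code)]
    fn example_watch(&mut self, stream_id: i32) -> Result<(), Error> {
        let mut remaining = 10usize; // purely illustrative
        self.watch_live(
            stream_id,
            Box::new(move |_l: LiveSegment| {
                remaining -= 1;
                remaining > 0 // returning false unregisters this callback
            }),
        )
    }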
|
|
|
|
|
|
|
|
/// Clears all watches on all streams.
|
|
|
|
/// Normally watches are self-cleaning: when a segment is sent, the callback returns false if
|
|
|
|
/// it is no longer interested (typically because hyper has just noticed the client is no
|
|
|
|
/// longer connected). This doesn't work when the system is shutting down and nothing more is
|
|
|
|
/// sent, though.
|
|
|
|
pub fn clear_watches(&mut self) {
|
2021-05-17 17:31:50 -04:00
|
|
|
for s in self.streams_by_id.values_mut() {
|
2019-01-21 18:58:52 -05:00
|
|
|
s.on_live_segment.clear();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub(crate) fn send_live_segment(&mut self, stream: i32, l: LiveSegment) -> Result<(), Error> {
|
|
|
|
let s = match self.streams_by_id.get_mut(&stream) {
|
|
|
|
None => bail!("no such stream {}", stream),
|
|
|
|
Some(s) => s,
|
|
|
|
};
|
2022-03-11 15:01:35 -05:00
|
|
|
|
|
|
|
// TODO: use std's retain_mut after it's available in our minimum supported Rust version.
|
|
|
|
// <https://github.com/rust-lang/rust/issues/48919>
|
|
|
|
odds::vec::VecExt::retain_mut(&mut s.on_live_segment, |cb| cb(l.clone()));
|
2019-01-21 18:58:52 -05:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2018-03-23 16:31:23 -04:00
|
|
|
/// Helper for `DatabaseGuard::flush()` and `Database::drop()`.
|
2018-02-22 19:35:34 -05:00
|
|
|
///
|
2018-03-23 16:31:23 -04:00
|
|
|
/// The public API is in `DatabaseGuard::flush()`; it supplies the `Clocks` to this function.
|
|
|
|
fn flush<C: Clocks>(&mut self, clocks: &C, reason: &str) -> Result<(), Error> {
|
2023-02-16 02:14:54 -05:00
|
|
|
let span = tracing::info_span!("flush", flush_count = self.flush_count, reason);
|
|
|
|
let _enter = span.enter();
|
2018-02-23 00:46:41 -05:00
|
|
|
let o = match self.open.as_ref() {
|
|
|
|
None => bail!("database is read-only"),
|
|
|
|
Some(o) => o,
|
|
|
|
};
|
2018-02-22 19:35:34 -05:00
|
|
|
let tx = self.conn.transaction()?;
|
2021-02-17 01:15:54 -05:00
|
|
|
let mut new_ranges =
|
|
|
|
FnvHashMap::with_capacity_and_hasher(self.streams_by_id.len(), Default::default());
|
2018-02-22 19:35:34 -05:00
|
|
|
{
|
2020-06-09 19:17:32 -04:00
|
|
|
let mut stmt = tx.prepare_cached(UPDATE_STREAM_COUNTERS_SQL)?;
|
2018-02-22 19:35:34 -05:00
|
|
|
for (&stream_id, s) in &self.streams_by_id {
|
2018-02-23 16:35:25 -05:00
|
|
|
// Process additions.
|
2020-06-09 19:17:32 -04:00
|
|
|
let mut new_duration = 0;
|
|
|
|
let mut new_runs = 0;
|
2018-03-01 16:50:59 -05:00
|
|
|
for i in 0..s.synced_recordings {
|
2022-09-29 01:19:35 -04:00
|
|
|
let l = s.uncommitted[i].lock().unwrap();
|
2018-03-01 16:50:59 -05:00
|
|
|
raw::insert_recording(
|
2021-02-17 01:15:54 -05:00
|
|
|
&tx,
|
|
|
|
o,
|
|
|
|
CompositeId::new(stream_id, s.cum_recordings + i as i32),
|
|
|
|
&l,
|
|
|
|
)?;
|
2020-08-05 00:44:01 -04:00
|
|
|
new_duration += i64::from(l.wall_duration_90k);
|
2020-06-09 19:17:32 -04:00
|
|
|
new_runs += if l.run_offset == 0 { 1 } else { 0 };
|
2018-02-22 19:35:34 -05:00
|
|
|
}
|
2018-03-01 16:50:59 -05:00
|
|
|
if s.synced_recordings > 0 {
|
|
|
|
new_ranges.entry(stream_id).or_insert(None);
|
2021-05-17 13:50:12 -04:00
|
|
|
stmt.execute(named_params! {
|
2020-03-19 23:46:25 -04:00
|
|
|
":stream_id": stream_id,
|
2020-06-09 19:17:32 -04:00
|
|
|
":cum_recordings": s.cum_recordings + s.synced_recordings as i32,
|
2020-08-05 00:44:01 -04:00
|
|
|
":cum_media_duration_90k": s.cum_media_duration.0 + new_duration,
|
2020-06-09 19:17:32 -04:00
|
|
|
":cum_runs": s.cum_runs + new_runs,
|
2020-03-19 23:46:25 -04:00
|
|
|
})?;
|
2018-02-22 19:35:34 -05:00
|
|
|
}
|
2018-02-23 16:35:25 -05:00
|
|
|
|
|
|
|
// Process deletions.
|
|
|
|
if let Some(l) = s.to_delete.last() {
|
2018-03-01 16:50:59 -05:00
|
|
|
new_ranges.entry(stream_id).or_insert(None);
|
2018-02-23 16:35:25 -05:00
|
|
|
let dir = match s.sample_file_dir_id {
|
|
|
|
None => bail!("stream {} has no directory!", stream_id),
|
|
|
|
Some(d) => d,
|
|
|
|
};
|
2018-12-01 03:03:43 -05:00
|
|
|
|
|
|
|
// raw::delete_recordings does a bulk transfer of a range from recording to
|
|
|
|
// garbage, rather than operating on each element of to_delete. This is
|
|
|
|
// guaranteed to give the same result because to_delete is guaranteed to be the
|
|
|
|
// oldest recordings for the stream.
|
2018-02-23 17:05:07 -05:00
|
|
|
let start = CompositeId::new(stream_id, 0);
|
2018-02-23 16:35:25 -05:00
|
|
|
let end = CompositeId(l.id.0 + 1);
|
2023-01-28 14:59:21 -05:00
|
|
|
let n = raw::delete_recordings(&tx, dir, start..end)?;
|
2018-02-23 17:05:07 -05:00
|
|
|
if n != s.to_delete.len() {
|
2021-02-17 01:15:54 -05:00
|
|
|
bail!(
|
|
|
|
"Found {} rows in {} .. {}, expected {}: {:?}",
|
|
|
|
n,
|
|
|
|
start,
|
|
|
|
end,
|
|
|
|
s.to_delete.len(),
|
|
|
|
&s.to_delete
|
|
|
|
);
|
2018-02-23 17:05:07 -05:00
|
|
|
}
|
2018-02-23 16:35:25 -05:00
|
|
|
}
|
2018-02-22 19:35:34 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
for dir in self.sample_file_dirs_by_id.values() {
|
2018-12-01 03:03:43 -05:00
|
|
|
raw::mark_sample_files_deleted(&tx, &dir.garbage_unlinked)?;
|
2018-02-22 19:35:34 -05:00
|
|
|
}
|
2018-12-28 13:21:49 -05:00
|
|
|
for (&stream_id, r) in &mut new_ranges {
|
2018-03-01 16:50:59 -05:00
|
|
|
*r = raw::get_range(&tx, stream_id)?;
|
2018-02-22 19:35:34 -05:00
|
|
|
}
|
2018-03-09 20:41:53 -05:00
|
|
|
{
|
|
|
|
let mut stmt = tx.prepare_cached(
|
2021-02-17 01:15:54 -05:00
|
|
|
r"update open set duration_90k = ?, end_time_90k = ? where id = ?",
|
|
|
|
)?;
|
2020-03-19 23:46:25 -04:00
|
|
|
let rows = stmt.execute(params![
|
|
|
|
(recording::Time::new(clocks.monotonic()) - self.open_monotonic).0,
|
|
|
|
recording::Time::new(clocks.realtime()).0,
|
|
|
|
o.id,
|
2018-03-09 20:41:53 -05:00
|
|
|
])?;
|
|
|
|
if rows != 1 {
|
|
|
|
bail!("unable to find current open {}", o.id);
|
|
|
|
}
|
|
|
|
}
|
2018-11-02 02:25:06 -04:00
|
|
|
self.auth.flush(&tx)?;
|
2019-06-14 00:55:15 -04:00
|
|
|
self.signal.flush(&tx)?;
|
2018-02-22 19:35:34 -05:00
|
|
|
tx.commit()?;
|
|
|
|
|
2019-09-26 09:09:27 -04:00
|
|
|
#[derive(Default)]
|
|
|
|
struct DirLog {
|
2021-02-17 01:15:54 -05:00
|
|
|
added: SmallVec<[CompositeId; 32]>,
|
|
|
|
deleted: SmallVec<[CompositeId; 32]>,
|
|
|
|
gced: SmallVec<[CompositeId; 32]>,
|
2019-09-26 09:09:27 -04:00
|
|
|
added_bytes: i64,
|
|
|
|
deleted_bytes: i64,
|
|
|
|
}
|
|
|
|
let mut dir_logs: FnvHashMap<i32, DirLog> = FnvHashMap::default();
|
|
|
|
|
2018-02-22 19:35:34 -05:00
|
|
|
// Process delete_garbage.
|
2019-09-26 09:09:27 -04:00
|
|
|
for (&id, dir) in &mut self.sample_file_dirs_by_id {
|
|
|
|
if !dir.garbage_unlinked.is_empty() {
|
2021-02-17 01:15:54 -05:00
|
|
|
dir_logs
|
|
|
|
.entry(id)
|
|
|
|
.or_default()
|
|
|
|
.gced
|
|
|
|
.extend(dir.garbage_unlinked.drain(..));
|
2019-09-26 09:09:27 -04:00
|
|
|
}
|
2018-02-22 19:35:34 -05:00
|
|
|
}
|
|
|
|
|
2018-03-01 16:50:59 -05:00
|
|
|
for (stream_id, new_range) in new_ranges.drain() {
|
2018-02-22 19:35:34 -05:00
|
|
|
let s = self.streams_by_id.get_mut(&stream_id).unwrap();
|
2019-09-26 09:09:27 -04:00
|
|
|
let dir_id = s.sample_file_dir_id.unwrap();
|
2020-07-12 19:51:39 -04:00
|
|
|
let dir = self.sample_file_dirs_by_id.get_mut(&dir_id).unwrap();
|
2019-09-26 09:09:27 -04:00
|
|
|
let log = dir_logs.entry(dir_id).or_default();
|
2018-02-23 16:35:25 -05:00
|
|
|
|
|
|
|
// Process delete_oldest_recordings.
|
|
|
|
s.sample_file_bytes -= s.bytes_to_delete;
|
2020-07-12 19:51:39 -04:00
|
|
|
s.fs_bytes -= s.fs_bytes_to_delete;
|
2019-09-26 09:09:27 -04:00
|
|
|
log.deleted_bytes += s.bytes_to_delete;
|
2018-02-23 16:35:25 -05:00
|
|
|
s.bytes_to_delete = 0;
|
2020-07-12 19:51:39 -04:00
|
|
|
s.fs_bytes_to_delete = 0;
|
2019-09-26 09:09:27 -04:00
|
|
|
log.deleted.reserve(s.to_delete.len());
|
2018-02-23 16:35:25 -05:00
|
|
|
for row in s.to_delete.drain(..) {
|
2019-09-26 09:09:27 -04:00
|
|
|
log.deleted.push(row.id);
|
2020-07-12 19:51:39 -04:00
|
|
|
dir.garbage_needs_unlink.insert(row.id);
|
2020-08-05 00:44:01 -04:00
|
|
|
let d = recording::Duration(i64::from(row.wall_duration_90k));
|
2018-02-23 16:35:25 -05:00
|
|
|
s.duration -= d;
|
2021-03-23 12:40:52 -04:00
|
|
|
s.committed_days.adjust(row.start..row.start + d, -1);
|
2018-02-23 16:35:25 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// Process add_recordings.
|
2019-09-26 09:09:27 -04:00
|
|
|
log.added_bytes += s.bytes_to_add;
|
2018-03-01 16:50:59 -05:00
|
|
|
s.bytes_to_add = 0;
|
2020-07-12 19:51:39 -04:00
|
|
|
s.fs_bytes_to_add = 0;
|
2019-09-26 09:09:27 -04:00
|
|
|
log.added.reserve(s.synced_recordings);
|
2018-03-01 16:50:59 -05:00
|
|
|
for _ in 0..s.synced_recordings {
|
2018-02-22 19:35:34 -05:00
|
|
|
let u = s.uncommitted.pop_front().unwrap();
|
2021-02-17 01:15:54 -05:00
|
|
|
log.added
|
|
|
|
.push(CompositeId::new(stream_id, s.cum_recordings));
|
2022-09-29 01:19:35 -04:00
|
|
|
let l = u.lock().unwrap();
|
2020-06-09 19:17:32 -04:00
|
|
|
s.cum_recordings += 1;
|
2020-08-05 00:44:01 -04:00
|
|
|
let wall_dur = recording::Duration(l.wall_duration_90k.into());
|
|
|
|
let media_dur = recording::Duration(l.media_duration_90k.into());
|
|
|
|
s.cum_media_duration += media_dur;
|
2020-06-09 19:17:32 -04:00
|
|
|
s.cum_runs += if l.run_offset == 0 { 1 } else { 0 };
|
2020-08-05 00:44:01 -04:00
|
|
|
let end = l.start + wall_dur;
|
2021-02-17 01:15:54 -05:00
|
|
|
s.add_recording(l.start..end, l.sample_file_bytes);
|
2018-02-22 19:35:34 -05:00
|
|
|
}
|
2018-03-01 16:50:59 -05:00
|
|
|
s.synced_recordings = 0;
|
2018-02-23 16:35:25 -05:00
|
|
|
|
|
|
|
// Fix the range.
|
2018-03-01 16:50:59 -05:00
|
|
|
s.range = new_range;
|
2018-02-22 19:35:34 -05:00
|
|
|
}
|
2018-11-02 02:25:06 -04:00
|
|
|
self.auth.post_flush();
|
2019-06-14 00:55:15 -04:00
|
|
|
self.signal.post_flush();
|
2019-01-04 19:11:58 -05:00
|
|
|
self.flush_count += 1;
|
2019-09-26 09:09:27 -04:00
|
|
|
let mut log_msg = String::with_capacity(256);
|
|
|
|
for (&dir_id, log) in &dir_logs {
|
|
|
|
let dir = self.sample_file_dirs_by_id.get(&dir_id).unwrap();
|
2021-02-17 01:15:54 -05:00
|
|
|
write!(
|
|
|
|
&mut log_msg,
|
|
|
|
"\n{}: added {}B in {} recordings ({}), deleted {}B in {} ({}), \
|
2019-09-26 09:09:27 -04:00
|
|
|
GCed {} recordings ({}).",
|
2021-10-26 14:47:13 -04:00
|
|
|
dir.path.display(),
|
2021-02-17 01:15:54 -05:00
|
|
|
&encode_size(log.added_bytes),
|
|
|
|
log.added.len(),
|
|
|
|
log.added.iter().join(", "),
|
|
|
|
&encode_size(log.deleted_bytes),
|
|
|
|
log.deleted.len(),
|
|
|
|
log.deleted.iter().join(", "),
|
|
|
|
log.gced.len(),
|
|
|
|
log.gced.iter().join(", ")
|
|
|
|
)
|
|
|
|
.unwrap();
|
2019-09-26 09:09:27 -04:00
|
|
|
}
|
|
|
|
if log_msg.is_empty() {
|
|
|
|
log_msg.push_str(" no recording changes");
|
|
|
|
}
|
2023-02-16 02:14:54 -05:00
|
|
|
info!("flush complete: {log_msg}");
|
2018-02-22 19:35:34 -05:00
|
|
|
for cb in &self.on_flush {
|
|
|
|
cb();
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Sets a watcher which will receive an (empty) event on successful flush.
|
|
|
|
/// The lock will be held while this is run, so it should not do any I/O.
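///
/// A hypothetical usage sketch (assumes `db` is the locked database and the
/// receiver end lives on another thread): notify a channel, which is cheap
/// and does no I/O under the lock.
///
/// ```ignore
/// let (tx, rx) = std::sync::mpsc::channel();
/// db.on_flush(Box::new(move || {
///     let _ = tx.send(()); // ignore errors if the receiver is gone
/// }));
/// // ... elsewhere, a watcher thread blocks on `rx.recv()`.
/// ```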
|
2019-06-14 11:47:11 -04:00
|
|
|
pub(crate) fn on_flush(&mut self, run: Box<dyn Fn() + Send>) {
|
2018-02-22 19:35:34 -05:00
|
|
|
self.on_flush.push(run);
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: find a cleaner way to do this. Seems weird for src/cmds/run.rs to clear the on flush
|
|
|
|
// handlers given that it didn't add them.
|
|
|
|
pub fn clear_on_flush(&mut self) {
|
|
|
|
self.on_flush.clear();
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
2018-02-15 02:10:10 -05:00
|
|
|
|
|
|
|
/// Opens the given sample file directories.
|
|
|
|
///
|
|
|
|
/// `ids` is implicitly de-duplicated.
|
|
|
|
///
|
|
|
|
/// When the database is in read-only mode, this simply opens all the directories after
|
|
|
|
/// locking and verifying their metadata matches the database state. In read-write mode, it
|
|
|
|
/// performs a single database transaction to update metadata for all dirs, then performs a similar
|
|
|
|
/// update to the directories' on-disk metadata.
|
|
|
|
///
|
|
|
|
/// Note this violates the principle of never accessing disk while holding the database lock.
|
|
|
|
/// Currently this only happens at startup (or during configuration), so this isn't a problem
|
|
|
|
/// in practice.
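///
/// A hypothetical usage sketch (assumes `db` is the locked database and
/// `dir_id` refers to an existing sample file directory):
///
/// ```ignore
/// db.open_sample_file_dirs(&[dir_id])?;
/// ```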
|
|
|
|
pub fn open_sample_file_dirs(&mut self, ids: &[i32]) -> Result<(), Error> {
|
|
|
|
let mut in_progress = FnvHashMap::with_capacity_and_hasher(ids.len(), Default::default());
|
|
|
|
for &id in ids {
|
|
|
|
let e = in_progress.entry(id);
|
|
|
|
use ::std::collections::hash_map::Entry;
|
|
|
|
let e = match e {
|
2021-02-17 01:15:54 -05:00
|
|
|
Entry::Occupied(_) => continue, // suppress duplicate.
|
2018-02-15 02:10:10 -05:00
|
|
|
Entry::Vacant(e) => e,
|
|
|
|
};
|
2021-02-17 01:15:54 -05:00
|
|
|
let dir = self
|
|
|
|
.sample_file_dirs_by_id
|
|
|
|
.get_mut(&id)
|
|
|
|
.ok_or_else(|| format_err!("no such dir {}", id))?;
|
|
|
|
if dir.dir.is_some() {
|
|
|
|
continue;
|
|
|
|
}
|
2021-09-22 15:39:02 -04:00
|
|
|
let mut expected_meta = dir.expected_meta(&self.uuid);
|
2018-03-01 15:24:32 -05:00
|
|
|
if let Some(o) = self.open.as_ref() {
|
2022-05-02 14:22:14 -04:00
|
|
|
let open = expected_meta.in_progress_open.mut_or_insert_default();
|
2018-02-15 02:10:10 -05:00
|
|
|
open.id = o.id;
|
|
|
|
open.uuid.extend_from_slice(&o.uuid.as_bytes()[..]);
|
|
|
|
}
|
2021-09-22 15:39:02 -04:00
|
|
|
let d = dir::SampleFileDir::open(&dir.path, &expected_meta)
|
2021-10-26 14:47:13 -04:00
|
|
|
.map_err(|e| e.context(format!("Failed to open dir {}", dir.path.display())))?;
|
2021-02-17 01:15:54 -05:00
|
|
|
if self.open.is_none() {
|
|
|
|
// read-only mode; it's already fully opened.
|
2018-02-15 02:10:10 -05:00
|
|
|
dir.dir = Some(d);
|
2021-02-17 01:15:54 -05:00
|
|
|
} else {
|
|
|
|
// read-write mode; there are more steps to do.
|
2021-09-22 15:39:02 -04:00
|
|
|
e.insert((expected_meta, d));
|
2018-02-15 02:10:10 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-01 15:24:32 -05:00
|
|
|
let o = match self.open.as_ref() {
|
2021-02-17 01:15:54 -05:00
|
|
|
None => return Ok(()), // read-only mode; all done.
|
2018-02-15 02:10:10 -05:00
|
|
|
Some(o) => o,
|
|
|
|
};
|
|
|
|
|
|
|
|
let tx = self.conn.transaction()?;
|
|
|
|
{
|
2021-02-17 01:15:54 -05:00
|
|
|
let mut stmt = tx.prepare_cached(
|
|
|
|
r#"
|
2018-02-15 02:10:10 -05:00
|
|
|
update sample_file_dir set last_complete_open_id = ? where id = ?
|
2021-02-17 01:15:54 -05:00
|
|
|
"#,
|
|
|
|
)?;
|
2018-02-15 02:10:10 -05:00
|
|
|
for &id in in_progress.keys() {
|
2020-03-19 23:46:25 -04:00
|
|
|
if stmt.execute(params![o.id, id])? != 1 {
|
2018-02-21 01:46:14 -05:00
|
|
|
bail!("unable to update dir {}", id);
|
2018-02-15 02:10:10 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
tx.commit()?;
|
|
|
|
|
|
|
|
for (id, (mut meta, d)) in in_progress.drain() {
|
2018-02-22 19:35:34 -05:00
|
|
|
let dir = self.sample_file_dirs_by_id.get_mut(&id).unwrap();
|
2021-09-22 15:39:02 -04:00
|
|
|
meta.last_complete_open = meta.in_progress_open.take().into();
|
2018-02-15 02:10:10 -05:00
|
|
|
d.write_meta(&meta)?;
|
|
|
|
dir.dir = Some(d);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-02-17 01:15:54 -05:00
|
|
|
pub fn streams_by_id(&self) -> &BTreeMap<i32, Stream> {
|
|
|
|
&self.streams_by_id
|
|
|
|
}
|
2016-11-25 17:34:00 -05:00
|
|
|
|
2017-10-01 18:29:22 -04:00
|
|
|
/// Returns an immutable view of the video sample entries.
|
2018-03-01 23:59:05 -05:00
|
|
|
pub fn video_sample_entries_by_id(&self) -> &BTreeMap<i32, Arc<VideoSampleEntry>> {
|
|
|
|
&self.video_sample_entries_by_id
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Gets a given camera by uuid.
|
|
|
|
pub fn get_camera(&self, uuid: Uuid) -> Option<&Camera> {
|
2021-05-17 17:31:50 -04:00
|
|
|
self.cameras_by_uuid.get(&uuid).map(|id| {
|
|
|
|
self.cameras_by_id
|
|
|
|
.get(id)
|
|
|
|
.expect("uuid->id requires id->cam")
|
|
|
|
})
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
|
|
|
|
2018-03-02 14:38:11 -05:00
|
|
|
/// Lists the specified recordings, passing them to a supplied function. Given that the
|
|
|
|
/// function is called with the database lock held, it should be quick.
|
|
|
|
///
|
|
|
|
/// Note that at present, the returned recordings are _not_ completely ordered by start time.
|
|
|
|
/// Uncommitted recordings are returned in id order after the others.
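///
/// A hypothetical usage sketch (assumes `db`, `stream_id`, and a
/// `recording::Time` range are in scope); callers that need ordering should
/// sort the collected rows themselves:
///
/// ```ignore
/// let mut ids = Vec::new();
/// db.list_recordings_by_time(stream_id, start..end, &mut |row| {
///     ids.push(row.id);
///     Ok(())
/// })?;
/// ```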
|
2018-02-23 12:19:42 -05:00
|
|
|
pub fn list_recordings_by_time(
|
2021-02-17 01:15:54 -05:00
|
|
|
&self,
|
|
|
|
stream_id: i32,
|
|
|
|
desired_time: Range<recording::Time>,
|
2021-10-21 13:25:37 -04:00
|
|
|
f: &mut dyn FnMut(ListRecordingsRow) -> Result<(), base::Error>,
|
|
|
|
) -> Result<(), base::Error> {
|
2018-03-02 14:38:11 -05:00
|
|
|
let s = match self.streams_by_id.get(&stream_id) {
|
2021-10-21 13:25:37 -04:00
|
|
|
None => bail_t!(NotFound, "no such stream {}", stream_id),
|
2018-03-02 14:38:11 -05:00
|
|
|
Some(s) => s,
|
|
|
|
};
|
|
|
|
raw::list_recordings_by_time(&self.conn, stream_id, desired_time.clone(), f)?;
|
2018-03-02 18:40:32 -05:00
|
|
|
for (i, u) in s.uncommitted.iter().enumerate() {
|
2018-03-02 14:38:11 -05:00
|
|
|
let row = {
|
2022-09-29 01:19:35 -04:00
|
|
|
let l = u.lock().unwrap();
|
2018-03-02 18:40:32 -05:00
|
|
|
if l.video_samples > 0 {
|
2020-08-05 00:44:01 -04:00
|
|
|
let end = l.start + recording::Duration(l.wall_duration_90k as i64);
|
2018-03-02 18:40:32 -05:00
|
|
|
if l.start > desired_time.end || end < desired_time.start {
|
2021-02-17 01:15:54 -05:00
|
|
|
continue; // there's no overlap with the requested range.
|
2018-03-02 14:38:11 -05:00
|
|
|
}
|
2021-02-17 01:15:54 -05:00
|
|
|
l.to_list_row(
|
|
|
|
CompositeId::new(stream_id, s.cum_recordings + i as i32),
|
|
|
|
self.open.unwrap().id,
|
|
|
|
)
|
2018-03-02 14:38:11 -05:00
|
|
|
} else {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
f(row)?;
|
|
|
|
}
|
|
|
|
Ok(())
|
2016-12-21 01:08:18 -05:00
|
|
|
}
|
|
|
|
|
2018-03-01 23:59:05 -05:00
|
|
|
/// Lists the specified recordings in ascending order by id.
|
2018-02-23 12:19:42 -05:00
|
|
|
pub fn list_recordings_by_id(
|
2021-02-17 01:15:54 -05:00
|
|
|
&self,
|
|
|
|
stream_id: i32,
|
|
|
|
desired_ids: Range<i32>,
|
2021-10-21 13:25:37 -04:00
|
|
|
f: &mut dyn FnMut(ListRecordingsRow) -> Result<(), base::Error>,
|
|
|
|
) -> Result<(), base::Error> {
|
2018-03-02 14:38:11 -05:00
|
|
|
let s = match self.streams_by_id.get(&stream_id) {
|
2021-10-21 13:25:37 -04:00
|
|
|
None => bail_t!(NotFound, "no such stream {}", stream_id),
|
2018-03-02 14:38:11 -05:00
|
|
|
Some(s) => s,
|
|
|
|
};
|
2020-06-09 19:17:32 -04:00
|
|
|
if desired_ids.start < s.cum_recordings {
|
2018-03-02 14:38:11 -05:00
|
|
|
raw::list_recordings_by_id(&self.conn, stream_id, desired_ids.clone(), f)?;
|
|
|
|
}
|
2020-06-09 19:17:32 -04:00
|
|
|
if desired_ids.end > s.cum_recordings {
|
|
|
|
let start = cmp::max(0, desired_ids.start - s.cum_recordings) as usize;
|
2021-02-17 01:15:54 -05:00
|
|
|
let end = cmp::min(
|
|
|
|
(desired_ids.end - s.cum_recordings) as usize,
|
|
|
|
s.uncommitted.len(),
|
|
|
|
);
|
|
|
|
for i in start..end {
|
2018-03-02 14:38:11 -05:00
|
|
|
let row = {
|
2022-09-29 01:19:35 -04:00
|
|
|
let l = s.uncommitted[i].lock().unwrap();
|
2018-03-02 18:40:32 -05:00
|
|
|
if l.video_samples > 0 {
|
2021-02-17 01:15:54 -05:00
|
|
|
l.to_list_row(
|
|
|
|
CompositeId::new(stream_id, s.cum_recordings + i as i32),
|
|
|
|
self.open.unwrap().id,
|
|
|
|
)
|
2018-03-02 14:38:11 -05:00
|
|
|
} else {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
f(row)?;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(())
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
|
|
|
|
2016-12-21 01:08:18 -05:00
|
|
|
/// Calls `list_recordings_by_time` and aggregates consecutive recordings.
|
|
|
|
/// Rows are given to the callback in arbitrary order. Callers which care about ordering
|
|
|
|
/// should do their own sorting.
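///
/// A hypothetical usage sketch (assumes `db`, `stream_id`, and a
/// `recording::Time` range are in scope), splitting aggregates that exceed
/// roughly an hour of wall time:
///
/// ```ignore
/// let forced_split = recording::Duration(60 * 60 * 90_000);
/// db.list_aggregated_recordings(stream_id, start..end, forced_split, &mut |agg| {
///     println!("ids {:?}: {} bytes", agg.ids, agg.sample_file_bytes);
///     Ok(())
/// })?;
/// ```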
|
2018-02-23 12:19:42 -05:00
|
|
|
pub fn list_aggregated_recordings(
|
2021-02-17 01:15:54 -05:00
|
|
|
&self,
|
|
|
|
stream_id: i32,
|
|
|
|
desired_time: Range<recording::Time>,
|
2018-02-23 12:19:42 -05:00
|
|
|
forced_split: recording::Duration,
|
2021-10-21 13:25:37 -04:00
|
|
|
f: &mut dyn FnMut(&ListAggregatedRecordingsRow) -> Result<(), base::Error>,
|
|
|
|
) -> Result<(), base::Error> {
|
2016-12-21 01:08:18 -05:00
|
|
|
// Iterate, maintaining a map from a recording_id to the aggregated row for the latest
|
|
|
|
// batch of recordings from the run starting at that id. Runs can be split into multiple
|
|
|
|
// batches for a few reasons:
|
|
|
|
//
|
|
|
|
// * forced split (when exceeding a duration limit)
|
|
|
|
// * a missing id (one that was deleted out of order)
|
|
|
|
// * video_sample_entry mismatch (if the parameters changed during a RTSP session)
|
|
|
|
//
|
|
|
|
// This iteration works because in a run, the start_time+duration of recording id r
|
|
|
|
// is equal to the start_time of recording id r+1. Thus ascending times guarantees
|
|
|
|
// ascending ids within a run. (Different runs, however, can be arbitrarily interleaved if
|
|
|
|
// their timestamps overlap. Tracking all active runs prevents that interleaving from
|
2018-03-02 14:38:11 -05:00
|
|
|
// causing problems.) list_recordings_by_time also returns uncommitted recordings in
|
|
|
|
// ascending order by id, and after any committed recordings with lower ids.
|
2016-12-21 01:08:18 -05:00
|
|
|
let mut aggs: BTreeMap<i32, ListAggregatedRecordingsRow> = BTreeMap::new();
|
2018-02-23 12:19:42 -05:00
|
|
|
self.list_recordings_by_time(stream_id, desired_time, &mut |row| {
|
2018-02-20 13:11:10 -05:00
|
|
|
let recording_id = row.id.recording();
|
|
|
|
let run_start_id = recording_id - row.run_offset;
|
2018-03-02 14:38:11 -05:00
|
|
|
let uncommitted = (row.flags & RecordingFlags::Uncommitted as i32) != 0;
|
2018-03-02 18:40:32 -05:00
|
|
|
let growing = (row.flags & RecordingFlags::Growing as i32) != 0;
|
2021-10-10 19:13:57 -04:00
|
|
|
let has_trailing_zero = (row.flags & RecordingFlags::TrailingZero as i32) != 0;
|
2018-03-02 18:40:32 -05:00
|
|
|
use std::collections::btree_map::Entry;
|
|
|
|
match aggs.entry(run_start_id) {
|
|
|
|
Entry::Occupied(mut e) => {
|
|
|
|
let a = e.get_mut();
|
2021-02-17 01:15:54 -05:00
|
|
|
let new_dur = a.time.end - a.time.start
|
|
|
|
+ recording::Duration(row.wall_duration_90k as i64);
|
|
|
|
let needs_flush = a.ids.end != recording_id
|
|
|
|
|| row.video_sample_entry_id != a.video_sample_entry_id
|
|
|
|
|| new_dur >= forced_split;
|
|
|
|
if needs_flush {
|
|
|
|
// flush then start a new entry.
|
2018-12-28 16:06:32 -05:00
|
|
|
f(a)?;
|
|
|
|
*a = ListAggregatedRecordingsRow::from(row);
|
2021-02-17 01:15:54 -05:00
|
|
|
} else {
|
|
|
|
// append.
|
2018-12-28 16:06:32 -05:00
|
|
|
if a.time.end != row.start {
|
2021-10-21 13:25:37 -04:00
|
|
|
bail_t!(
|
|
|
|
Internal,
|
2021-02-17 01:15:54 -05:00
|
|
|
"stream {} recording {} ends at {} but {} starts at {}",
|
|
|
|
stream_id,
|
|
|
|
a.ids.end - 1,
|
|
|
|
a.time.end,
|
|
|
|
row.id,
|
|
|
|
row.start
|
|
|
|
);
|
2018-12-28 16:06:32 -05:00
|
|
|
}
|
|
|
|
if a.open_id != row.open_id {
|
2021-10-21 13:25:37 -04:00
|
|
|
bail_t!(
|
|
|
|
Internal,
|
2021-02-17 01:15:54 -05:00
|
|
|
"stream {} recording {} has open id {} but {} has {}",
|
|
|
|
stream_id,
|
|
|
|
a.ids.end - 1,
|
|
|
|
a.open_id,
|
|
|
|
row.id,
|
|
|
|
row.open_id
|
|
|
|
);
|
2018-12-28 16:06:32 -05:00
|
|
|
}
|
2020-08-05 00:44:01 -04:00
|
|
|
a.time.end.0 += row.wall_duration_90k as i64;
|
2018-12-28 16:06:32 -05:00
|
|
|
a.ids.end = recording_id + 1;
|
|
|
|
a.video_samples += row.video_samples as i64;
|
|
|
|
a.video_sync_samples += row.video_sync_samples as i64;
|
|
|
|
a.sample_file_bytes += row.sample_file_bytes as i64;
|
|
|
|
if uncommitted {
|
|
|
|
a.first_uncommitted = a.first_uncommitted.or(Some(recording_id));
|
|
|
|
}
|
|
|
|
a.growing = growing;
|
2021-10-10 19:13:57 -04:00
|
|
|
a.has_trailing_zero = has_trailing_zero;
|
2018-03-02 18:40:32 -05:00
|
|
|
}
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
|
|
|
Entry::Vacant(e) => {
|
|
|
|
e.insert(ListAggregatedRecordingsRow::from(row));
|
|
|
|
}
|
2018-12-28 16:06:32 -05:00
|
|
|
}
|
2016-11-25 17:34:00 -05:00
|
|
|
Ok(())
|
|
|
|
})?;
|
2016-12-21 01:08:18 -05:00
|
|
|
for a in aggs.values() {
|
2016-11-25 17:34:00 -05:00
|
|
|
f(a)?;
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2017-03-01 02:28:25 -05:00
|
|
|
/// Calls `f` with a single `recording_playback` row.
|
|
|
|
/// Note the lock is held for the duration of `f`.
|
2016-11-25 17:34:00 -05:00
|
|
|
/// This uses an LRU cache to reduce the number of retrievals from the database.
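///
/// A hypothetical usage sketch (assumes `id` names an existing recording):
/// compute a value from the row without copying the whole index out.
///
/// ```ignore
/// let index_len = db.with_recording_playback(id, &mut |p| Ok(p.video_index.len()))?;
/// ```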
|
2021-02-17 01:15:54 -05:00
|
|
|
pub fn with_recording_playback<R>(
|
|
|
|
&self,
|
|
|
|
id: CompositeId,
|
|
|
|
f: &mut dyn FnMut(&RecordingPlayback) -> Result<R, Error>,
|
|
|
|
) -> Result<R, Error> {
|
2018-03-02 14:38:11 -05:00
|
|
|
// Check for uncommitted path.
|
2021-02-17 01:15:54 -05:00
|
|
|
let s = self
|
|
|
|
.streams_by_id
|
|
|
|
.get(&id.stream())
|
|
|
|
.ok_or_else(|| format_err!("no stream for {}", id))?;
|
2020-06-09 19:17:32 -04:00
|
|
|
if s.cum_recordings <= id.recording() {
|
|
|
|
let i = id.recording() - s.cum_recordings;
|
2018-03-02 14:38:11 -05:00
|
|
|
if i as usize >= s.uncommitted.len() {
|
2021-02-17 01:15:54 -05:00
|
|
|
bail!(
|
|
|
|
"no such recording {}; latest committed is {}, latest is {}",
|
|
|
|
id,
|
|
|
|
s.cum_recordings,
|
|
|
|
s.cum_recordings + s.uncommitted.len() as i32
|
|
|
|
);
|
2018-03-02 14:38:11 -05:00
|
|
|
}
|
2022-09-29 01:19:35 -04:00
|
|
|
let l = s.uncommitted[i as usize].lock().unwrap();
|
2021-02-17 01:15:54 -05:00
|
|
|
return f(&RecordingPlayback {
|
|
|
|
video_index: &l.video_index,
|
|
|
|
});
|
2018-03-02 14:38:11 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// Committed path.
|
2018-02-22 19:35:34 -05:00
|
|
|
let mut cache = self.video_index_cache.borrow_mut();
|
2020-11-22 20:37:55 -05:00
|
|
|
use hashlink::linked_hash_map::RawEntryMut;
|
|
|
|
match cache.raw_entry_mut().from_key(&id.0) {
|
|
|
|
RawEntryMut::Occupied(mut occupied) => {
|
|
|
|
trace!("cache hit for recording {}", id);
|
|
|
|
occupied.to_back();
|
|
|
|
let video_index = occupied.get();
|
2021-05-17 17:31:50 -04:00
|
|
|
f(&RecordingPlayback { video_index })
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
2020-11-22 20:37:55 -05:00
|
|
|
RawEntryMut::Vacant(vacant) => {
|
|
|
|
trace!("cache miss for recording {}", id);
|
|
|
|
let mut stmt = self.conn.prepare_cached(GET_RECORDING_PLAYBACK_SQL)?;
|
2021-05-17 13:50:12 -04:00
|
|
|
let mut rows = stmt.query(named_params! {":composite_id": id.0})?;
|
2020-11-22 20:37:55 -05:00
|
|
|
if let Some(row) = rows.next()? {
|
|
|
|
let video_index: VideoIndex = row.get(0)?;
|
2021-02-17 01:15:54 -05:00
|
|
|
let result = f(&RecordingPlayback {
|
|
|
|
video_index: &video_index.0[..],
|
|
|
|
});
|
2020-11-22 20:37:55 -05:00
|
|
|
vacant.insert(id.0, video_index.0);
|
|
|
|
if cache.len() > VIDEO_INDEX_CACHE_LEN {
|
|
|
|
cache.pop_front();
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
Err(format_err!("no such recording {}", id))
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-12 19:51:39 -04:00
|
|
|
/// Queues for deletion the oldest recordings that aren't already queued.
|
2018-02-23 16:35:25 -05:00
|
|
|
/// `f` should return true for each row that should be deleted.
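///
/// A hypothetical usage sketch: keep accepting rows until roughly a gigabyte
/// has been queued for deletion (the byte target is illustrative only).
///
/// ```ignore
/// let mut remaining: i64 = 1 << 30;
/// db.delete_oldest_recordings(stream_id, &mut |row| {
///     if remaining <= 0 {
///         return false;
///     }
///     remaining -= i64::from(row.sample_file_bytes);
///     true
/// })?;
/// ```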
|
|
|
|
pub(crate) fn delete_oldest_recordings(
|
2021-02-17 01:15:54 -05:00
|
|
|
&mut self,
|
|
|
|
stream_id: i32,
|
|
|
|
f: &mut dyn FnMut(&ListOldestRecordingsRow) -> bool,
|
|
|
|
) -> Result<(), Error> {
|
2018-02-23 16:35:25 -05:00
|
|
|
let s = match self.streams_by_id.get_mut(&stream_id) {
|
2018-02-22 19:35:34 -05:00
|
|
|
None => bail!("no stream {}", stream_id),
|
|
|
|
Some(s) => s,
|
|
|
|
};
|
2018-02-23 16:35:25 -05:00
|
|
|
let end = match s.to_delete.last() {
|
|
|
|
None => 0,
|
|
|
|
Some(row) => row.id.recording() + 1,
|
2018-02-22 19:35:34 -05:00
|
|
|
};
|
2018-02-23 16:35:25 -05:00
|
|
|
raw::list_oldest_recordings(&self.conn, CompositeId::new(stream_id, end), &mut |r| {
|
|
|
|
if f(&r) {
|
|
|
|
s.to_delete.push(r);
|
2020-07-12 19:51:39 -04:00
|
|
|
let bytes = i64::from(r.sample_file_bytes);
|
|
|
|
s.bytes_to_delete += bytes;
|
|
|
|
s.fs_bytes_to_delete += round_up(bytes);
|
2018-02-23 16:35:25 -05:00
|
|
|
return true;
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
2018-02-23 16:35:25 -05:00
|
|
|
false
|
|
|
|
})
|
2016-11-25 17:34:00 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Initializes the video_sample_entries. To be called during construction.
|
|
|
|
fn init_video_sample_entries(&mut self) -> Result<(), Error> {
|
|
|
|
info!("Loading video sample entries");
|
2021-02-17 01:15:54 -05:00
|
|
|
let mut stmt = self.conn.prepare(
|
|
|
|
r#"
|
2016-11-25 17:34:00 -05:00
|
|
|
select
|
|
|
|
id,
|
|
|
|
width,
|
|
|
|
height,
|
2020-03-20 00:35:42 -04:00
|
|
|
pasp_h_spacing,
|
|
|
|
pasp_v_spacing,
|
2018-02-05 14:57:59 -05:00
|
|
|
rfc6381_codec,
|
2016-11-25 17:34:00 -05:00
|
|
|
data
|
|
|
|
from
|
|
|
|
video_sample_entry
|
2021-02-17 01:15:54 -05:00
|
|
|
"#,
|
|
|
|
)?;
|
2020-03-19 23:46:25 -04:00
|
|
|
let mut rows = stmt.query(params![])?;
|
2019-05-31 19:19:04 -04:00
|
|
|
while let Some(row) = rows.next()? {
|
|
|
|
let id = row.get(0)?;
|
2020-03-20 23:52:30 -04:00
|
|
|
let data: Vec<u8> = row.get(6)?;
|
2017-10-04 02:25:58 -04:00
|
|
|
|
2021-02-17 01:15:54 -05:00
|
|
|
self.video_sample_entries_by_id.insert(
|
2020-03-20 00:35:42 -04:00
|
|
|
id,
|
2021-02-17 01:15:54 -05:00
|
|
|
Arc::new(VideoSampleEntry {
|
|
|
|
id,
|
|
|
|
width: row.get::<_, i32>(1)?.try_into()?,
|
|
|
|
height: row.get::<_, i32>(2)?.try_into()?,
|
|
|
|
pasp_h_spacing: row.get::<_, i32>(3)?.try_into()?,
|
|
|
|
pasp_v_spacing: row.get::<_, i32>(4)?.try_into()?,
|
|
|
|
data,
|
|
|
|
rfc6381_codec: row.get(5)?,
|
|
|
|
}),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
info!(
|
|
|
|
"Loaded {} video sample entries",
|
|
|
|
self.video_sample_entries_by_id.len()
|
|
|
|
);
|
2016-11-25 17:34:00 -05:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2018-02-12 01:45:51 -05:00
|
|
|
    /// Initializes the sample file dirs.
    /// To be called during construction.
    fn init_sample_file_dirs(&mut self) -> Result<(), Error> {
        info!("Loading sample file dirs");
        let mut stmt = self.conn.prepare(
            r#"
            select
                d.id,
                d.config,
                d.uuid,
                d.last_complete_open_id,
                o.uuid
            from
                sample_file_dir d left join open o on (d.last_complete_open_id = o.id);
            "#,
        )?;
        let mut rows = stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            let id = row.get(0)?;
            let config: SampleFileDirConfig = row.get(1)?;
            let dir_uuid: SqlUuid = row.get(2)?;
            let open_id: Option<u32> = row.get(3)?;
            let open_uuid: Option<SqlUuid> = row.get(4)?;
            let last_complete_open = match (open_id, open_uuid) {
                (Some(id), Some(uuid)) => Some(Open { id, uuid: uuid.0 }),
                (None, None) => None,
                _ => bail!("open table missing id {}", id),
            };
            self.sample_file_dirs_by_id.insert(
                id,
                SampleFileDir {
                    id,
                    uuid: dir_uuid.0,
                    path: config.path,
                    dir: None,
                    last_complete_open,
                    garbage_needs_unlink: raw::list_garbage(&self.conn, id)?,
                    garbage_unlinked: Vec::new(),
                },
            );
        }
        info!(
            "Loaded {} sample file dirs",
            self.sample_file_dirs_by_id.len()
        );
        Ok(())
    }

    /// Initializes the cameras, but not their matching recordings.
    /// To be called during construction.
    fn init_cameras(&mut self) -> Result<(), Error> {
        info!("Loading cameras");
        let mut stmt = self.conn.prepare(
            r#"
            select
                id,
                uuid,
                short_name,
                config
            from
                camera;
            "#,
        )?;
        let mut rows = stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            let id = row.get(0)?;
            let uuid: SqlUuid = row.get(1)?;
            self.cameras_by_id.insert(
                id,
                Camera {
                    id,
                    uuid: uuid.0,
                    short_name: row.get(2)?,
                    config: row.get(3)?,
                    streams: Default::default(),
                },
            );
            self.cameras_by_uuid.insert(uuid.0, id);
        }
        info!("Loaded {} cameras", self.cameras_by_id.len());
        Ok(())
    }

    /// Initializes the streams, but not their matching recordings.
    /// To be called during construction.
    fn init_streams(&mut self) -> Result<(), Error> {
        info!("Loading streams");
        let mut stmt = self.conn.prepare(
            r#"
            select
                id,
                type,
                camera_id,
                sample_file_dir_id,
                config,
                cum_recordings,
                cum_media_duration_90k,
                cum_runs
            from
                stream;
            "#,
        )?;
        let mut rows = stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            let id = row.get(0)?;
            let type_: String = row.get(1)?;
            let type_ = StreamType::parse(&type_)
                .ok_or_else(|| format_err!("no such stream type {}", type_))?;
            let camera_id = row.get(2)?;
            let c = self
                .cameras_by_id
                .get_mut(&camera_id)
                .ok_or_else(|| format_err!("missing camera {} for stream {}", camera_id, id))?;
            self.streams_by_id.insert(
                id,
                Stream {
                    id,
                    type_,
                    camera_id,
                    sample_file_dir_id: row.get(3)?,
                    config: row.get(4)?,
                    range: None,
                    sample_file_bytes: 0,
                    fs_bytes: 0,
                    to_delete: Vec::new(),
                    bytes_to_delete: 0,
                    fs_bytes_to_delete: 0,
                    bytes_to_add: 0,
                    fs_bytes_to_add: 0,
                    duration: recording::Duration(0),
                    committed_days: days::Map::default(),
                    cum_recordings: row.get(5)?,
                    cum_media_duration: recording::Duration(row.get(6)?),
                    cum_runs: row.get(7)?,
                    uncommitted: VecDeque::new(),
                    synced_recordings: 0,
                    on_live_segment: Vec::new(),
                },
            );
            c.streams[type_.index()] = Some(id);
        }
        info!("Loaded {} streams", self.streams_by_id.len());
        Ok(())
    }

    /// Inserts the specified video sample entry if absent.
    /// On success, returns the id of a new or existing row.
    pub fn insert_video_sample_entry(
        &mut self,
        entry: VideoSampleEntryToInsert,
    ) -> Result<i32, Error> {
        // Check if it already exists.
        // There shouldn't be too many entries, so it's fine to enumerate everything.
        for (&id, v) in &self.video_sample_entries_by_id {
            if v.data == entry.data {
                // The other fields are derived from data, so differences indicate a bug.
                if v.width != entry.width
                    || v.height != entry.height
                    || v.pasp_h_spacing != entry.pasp_h_spacing
                    || v.pasp_v_spacing != entry.pasp_v_spacing
                {
                    bail!(
                        "video_sample_entry id {}: existing entry {:?}, new {:?}",
                        id,
                        v,
                        &entry
                    );
                }
                return Ok(id);
            }
        }

        let mut stmt = self.conn.prepare_cached(INSERT_VIDEO_SAMPLE_ENTRY_SQL)?;
        stmt.execute(named_params! {
            ":width": i32::from(entry.width),
            ":height": i32::from(entry.height),
            ":pasp_h_spacing": i32::from(entry.pasp_h_spacing),
            ":pasp_v_spacing": i32::from(entry.pasp_v_spacing),
            ":rfc6381_codec": &entry.rfc6381_codec,
            ":data": &entry.data,
        })
        .map_err(|e| Error::from(e).context(format!("Unable to insert {:#?}", &entry)))?;

        let id = self.conn.last_insert_rowid() as i32;
        self.video_sample_entries_by_id.insert(
            id,
            Arc::new(VideoSampleEntry {
                id,
                width: entry.width,
                height: entry.height,
                pasp_h_spacing: entry.pasp_h_spacing,
                pasp_v_spacing: entry.pasp_v_spacing,
                data: entry.data,
                rfc6381_codec: entry.rfc6381_codec,
            }),
        );

        Ok(id)
    }

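    // Hypothetical usage sketch (not part of the original file; the variable names and
    // values below are made up). Insertion is effectively idempotent on `data`:
    //
    //     let id = db.insert_video_sample_entry(VideoSampleEntryToInsert {
    //         width: 1920,
    //         height: 1080,
    //         pasp_h_spacing: 1,
    //         pasp_v_spacing: 1,
    //         data: avc_decoder_config_bytes,            // hypothetical variable
    //         rfc6381_codec: "avc1.4d0029".to_owned(),
    //     })?;
    //     // A second call with identical `data` returns the same id rather than
    //     // inserting a duplicate row.
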
2021-10-26 14:47:13 -04:00
|
|
|
pub fn add_sample_file_dir(&mut self, path: PathBuf) -> Result<i32, Error> {
|
2018-02-15 02:10:10 -05:00
|
|
|
let mut meta = schema::DirMeta::default();
|
2018-02-12 01:45:51 -05:00
|
|
|
let uuid = Uuid::new_v4();
|
|
|
|
let uuid_bytes = &uuid.as_bytes()[..];
|
2021-02-17 01:15:54 -05:00
|
|
|
let o = self
|
|
|
|
.open
|
|
|
|
.as_ref()
|
|
|
|
.ok_or_else(|| format_err!("database is read-only"))?;
|
2018-02-15 02:10:10 -05:00
|
|
|
|
|
|
|
// Populate meta.
|
2018-02-12 01:45:51 -05:00
|
|
|
{
|
2018-02-22 19:35:34 -05:00
|
|
|
meta.db_uuid.extend_from_slice(&self.uuid.as_bytes()[..]);
|
2018-02-15 02:10:10 -05:00
|
|
|
meta.dir_uuid.extend_from_slice(uuid_bytes);
|
2022-05-02 14:22:14 -04:00
|
|
|
let open = meta.in_progress_open.mut_or_insert_default();
|
2018-02-15 02:10:10 -05:00
|
|
|
open.id = o.id;
|
|
|
|
open.uuid.extend_from_slice(&o.uuid.as_bytes()[..]);
|
2018-02-12 01:45:51 -05:00
|
|
|
}
|
2018-02-15 02:10:10 -05:00
|
|
|
|
|
|
|
let dir = dir::SampleFileDir::create(&path, &meta)?;
|
2021-10-26 14:47:13 -04:00
|
|
|
let config = SampleFileDirConfig {
|
|
|
|
path: path.clone(),
|
|
|
|
..Default::default()
|
|
|
|
};
|
2021-02-17 01:15:54 -05:00
|
|
|
self.conn.execute(
|
|
|
|
r#"
|
2021-10-26 14:47:13 -04:00
|
|
|
insert into sample_file_dir (config, uuid, last_complete_open_id)
|
|
|
|
values (?, ?, ?)
|
2021-02-17 01:15:54 -05:00
|
|
|
"#,
|
2021-10-26 14:47:13 -04:00
|
|
|
params![&config, uuid_bytes, o.id],
|
2021-02-17 01:15:54 -05:00
|
|
|
)?;
|
2018-02-15 02:10:10 -05:00
|
|
|
let id = self.conn.last_insert_rowid() as i32;
|
|
|
|
use ::std::collections::btree_map::Entry;
|
2018-02-22 19:35:34 -05:00
|
|
|
let e = self.sample_file_dirs_by_id.entry(id);
|
2018-02-15 02:10:10 -05:00
|
|
|
let d = match e {
|
|
|
|
Entry::Vacant(e) => e.insert(SampleFileDir {
|
|
|
|
id,
|
|
|
|
path,
|
|
|
|
uuid,
|
|
|
|
dir: Some(dir),
|
2021-09-22 15:39:02 -04:00
|
|
|
last_complete_open: Some(*o),
|
2018-12-01 03:03:43 -05:00
|
|
|
garbage_needs_unlink: FnvHashSet::default(),
|
|
|
|
garbage_unlinked: Vec::new(),
|
2018-02-15 02:10:10 -05:00
|
|
|
}),
|
2021-05-17 17:31:50 -04:00
|
|
|
Entry::Occupied(_) => bail!("duplicate sample file dir id {}", id),
|
2018-02-15 02:10:10 -05:00
|
|
|
};
|
2021-09-22 15:39:02 -04:00
|
|
|
meta.last_complete_open = meta.in_progress_open.take().into();
|
2018-02-15 02:10:10 -05:00
|
|
|
d.dir.as_ref().unwrap().write_meta(&meta)?;
|
2018-02-12 01:45:51 -05:00
|
|
|
Ok(id)
|
|
|
|
}
|
|
|
|
|
|
|
|
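    // Hypothetical usage sketch (not part of the original file; the path is made up).
    // A read-write open is required because the directory metadata records the current
    // `open` row:
    //
    //     let dir_id = db.lock().add_sample_file_dir(PathBuf::from("/media/nvr/sample"))?;
    //     // The new dir starts with no garbage and an open `dir::SampleFileDir` handle.
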
    pub fn delete_sample_file_dir(&mut self, dir_id: i32) -> Result<(), Error> {
        for (&id, s) in self.streams_by_id.iter() {
            if s.sample_file_dir_id == Some(dir_id) {
                bail!("can't delete dir referenced by stream {}", id);
            }
        }
        let mut d = match self.sample_file_dirs_by_id.entry(dir_id) {
            ::std::collections::btree_map::Entry::Occupied(e) => e,
            _ => bail!("no such dir {} to remove", dir_id),
        };
        if !d.get().garbage_needs_unlink.is_empty() || !d.get().garbage_unlinked.is_empty() {
            bail!(
                "must collect garbage before deleting directory {}",
                d.get().path.display()
            );
        }
        let dir = match d.get_mut().dir.take() {
            None => dir::SampleFileDir::open(&d.get().path, &d.get().expected_meta(&self.uuid))?,
            Some(arc) => match Arc::strong_count(&arc) {
                1 => arc, // LockedDatabase is only reference
                c => {
                    // a writer::Syncer also has a reference.
                    d.get_mut().dir = Some(arc); // put it back.
                    bail!(
                        "can't delete directory {} with active syncer (refcnt {})",
                        dir_id,
                        c
                    );
                }
            },
        };
        if !dir.is_empty()? {
            bail!(
                "Can't delete sample file directory {} which still has files",
                &d.get().path.display()
            );
        }
        let mut meta = d.get().expected_meta(&self.uuid);
        meta.in_progress_open = meta.last_complete_open.take().into();
        dir.write_meta(&meta)?;
        if self
            .conn
            .execute("delete from sample_file_dir where id = ?", params![dir_id])?
            != 1
        {
            bail!("missing database row for dir {}", dir_id);
        }
        d.remove_entry();
        Ok(())
    }

    /// Adds a camera.
    pub fn add_camera(&mut self, mut camera: CameraChange) -> Result<i32, Error> {
        let uuid = Uuid::new_v4();
        let uuid_bytes = &uuid.as_bytes()[..];
        let tx = self.conn.transaction()?;
        let streams;
        let camera_id;
        {
            let mut stmt = tx.prepare_cached(
                r#"
                insert into camera (uuid, short_name, config)
                            values (:uuid, :short_name, :config)
                "#,
            )?;
            stmt.execute(named_params! {
                ":uuid": uuid_bytes,
                ":short_name": &camera.short_name,
                ":config": &camera.config,
            })?;
            camera_id = tx.last_insert_rowid() as i32;
            streams =
                StreamStateChanger::new(&tx, camera_id, None, &self.streams_by_id, &mut camera)?;
        }
        tx.commit()?;
        let streams = streams.apply(&mut self.streams_by_id);
        self.cameras_by_id.insert(
            camera_id,
            Camera {
                id: camera_id,
                uuid,
                short_name: camera.short_name,
                config: camera.config,
                streams,
            },
        );
        self.cameras_by_uuid.insert(uuid, camera_id);
        Ok(camera_id)
    }

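    // Hypothetical usage sketch (not part of the original file; the camera name is made
    // up). The camera row and any stream rows are created in one transaction:
    //
    //     let camera_id = db.lock().add_camera(CameraChange {
    //         short_name: "driveway".to_owned(),
    //         config: crate::json::CameraConfig::default(),
    //         streams: Default::default(),   // no streams configured yet
    //     })?;
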
    /// Returns a `CameraChange` for the given camera which does nothing.
    ///
    /// The caller can modify it to taste then pass it to `update_camera`; see the usage
    /// sketch below.
    /// TODO: consider renaming this to `update_camera` and creating a bulk
    /// `apply_camera_changes`.
    pub fn null_camera_change(&mut self, camera_id: i32) -> Result<CameraChange, Error> {
        let camera = self
            .cameras_by_id
            .get(&camera_id)
            .ok_or_else(|| format_err!("no such camera {}", camera_id))?;
        let mut change = CameraChange {
            short_name: camera.short_name.clone(),
            config: camera.config.clone(),
            streams: Default::default(),
        };
        for i in 0..NUM_STREAM_TYPES {
            if let Some(stream_id) = camera.streams[i] {
                let s = self
                    .streams_by_id
                    .get(&stream_id)
                    .expect("cameras reference valid streams");
                change.streams[i] = StreamChange {
                    sample_file_dir_id: s.sample_file_dir_id,
                    config: s.config.clone(),
                };
            }
        }
        Ok(change)
    }

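    // Hypothetical usage sketch (not part of the original file; the description text is
    // made up): fetch a no-op change, tweak a field, and write it back.
    //
    //     let mut change = db.null_camera_change(camera_id)?;
    //     change.config.description = "front door, reinstalled".to_owned();
    //     db.update_camera(camera_id, change)?;
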
    /// Updates a camera.
    pub fn update_camera(&mut self, camera_id: i32, mut camera: CameraChange) -> Result<(), Error> {
        let tx = self.conn.transaction()?;
        let streams;
        let c = self
            .cameras_by_id
            .get_mut(&camera_id)
            .ok_or_else(|| format_err!("no such camera {}", camera_id))?;
        {
            streams =
                StreamStateChanger::new(&tx, camera_id, Some(c), &self.streams_by_id, &mut camera)?;
            let mut stmt = tx.prepare_cached(
                r#"
                update camera set
                    short_name = :short_name,
                    config = :config
                where
                    id = :id
                "#,
            )?;
            let rows = stmt.execute(named_params! {
                ":id": camera_id,
                ":short_name": &camera.short_name,
                ":config": &camera.config,
            })?;
            if rows != 1 {
                bail!("Camera {} missing from database", camera_id);
            }
        }
        tx.commit()?;
        c.short_name = camera.short_name;
        c.config = camera.config;
        c.streams = streams.apply(&mut self.streams_by_id);
        Ok(())
    }

    /// Deletes a camera and its streams. The camera must have no recordings.
    pub fn delete_camera(&mut self, id: i32) -> Result<(), Error> {
        // TODO: also verify there are no uncommitted recordings.
        let uuid = self
            .cameras_by_id
            .get(&id)
            .map(|c| c.uuid)
            .ok_or_else(|| format_err!("No such camera {} to remove", id))?;
        let mut streams_to_delete = Vec::new();
        let tx = self.conn.transaction()?;
        {
            let mut stream_stmt = tx.prepare_cached(r"delete from stream where id = :id")?;
            for (stream_id, stream) in &self.streams_by_id {
                if stream.camera_id != id {
                    continue;
                };
                if stream.range.is_some() {
                    bail!("Can't remove camera {}; has recordings.", id);
                }
                let rows = stream_stmt.execute(named_params! {":id": stream_id})?;
                if rows != 1 {
                    bail!("Stream {} missing from database", stream_id);
                }
                streams_to_delete.push(*stream_id);
            }
            let mut cam_stmt = tx.prepare_cached(r"delete from camera where id = :id")?;
            let rows = cam_stmt.execute(named_params! {":id": id})?;
            if rows != 1 {
                bail!("Camera {} missing from database", id);
            }
        }
        tx.commit()?;
        for id in streams_to_delete {
            self.streams_by_id.remove(&id);
        }
        self.cameras_by_id.remove(&id);
        self.cameras_by_uuid.remove(&uuid);
        Ok(())
    }

    // TODO: it'd make more sense to have a bulk camera/stream edit API than
    // this specific one.
    pub fn update_retention(&mut self, changes: &[RetentionChange]) -> Result<(), Error> {
        // TODO: should validate there's only one change per id.
        let tx = self.conn.transaction()?;
        {
            let mut stmt = tx.prepare_cached(
                r#"
                update stream
                set
                    config = :config
                where
                    id = :id
                "#,
            )?;
            for c in changes {
                let stream = self
                    .streams_by_id
                    .get(&c.stream_id)
                    .ok_or_else(|| format_err!("no such stream id {}", c.stream_id))?;
                let mut new_config = stream.config.clone();
                new_config.mode = (if c.new_record { "record" } else { "" }).into();
                new_config.retain_bytes = c.new_limit;
                let rows = stmt.execute(named_params! {
                    ":config": &new_config,
                    ":id": c.stream_id,
                })?;
                assert_eq!(rows, 1, "missing stream {}", c.stream_id);
            }
        }
        tx.commit()?;
        for c in changes {
            let s = self
                .streams_by_id
                .get_mut(&c.stream_id)
                .expect("stream in db but not state");
            s.config.mode = (if c.new_record { "record" } else { "" }).into();
            s.config.retain_bytes = c.new_limit;
        }
        Ok(())
    }

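    // Hypothetical usage sketch (not part of the original file; the limit is made up):
    // enable recording on one stream and cap its disk usage. The change is applied
    // transactionally, then the in-memory `Stream` config is updated to match.
    //
    //     db.update_retention(&[RetentionChange {
    //         stream_id,
    //         new_record: true,
    //         new_limit: 50 << 30, // ~50 GiB
    //     }])?;
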
    // ---- auth ----

    pub fn users_by_id(&self) -> &BTreeMap<i32, User> {
        self.auth.users_by_id()
    }

    pub fn get_user_by_id_mut(&mut self, id: i32) -> Option<&mut User> {
        self.auth.get_user_by_id_mut(id)
    }

    pub fn apply_user_change(&mut self, change: UserChange) -> Result<&User, base::Error> {
        self.auth.apply(&self.conn, change)
    }

    pub fn delete_user(&mut self, id: i32) -> Result<(), base::Error> {
        self.auth.delete_user(&mut self.conn, id)
    }

    pub fn get_user(&self, username: &str) -> Option<&User> {
        self.auth.get_user(username)
    }

    pub fn login_by_password(
        &mut self,
        req: auth::Request,
        username: &str,
        password: String,
        domain: Option<Vec<u8>>,
        session_flags: i32,
    ) -> Result<(RawSessionId, &Session), Error> {
        self.auth
            .login_by_password(&self.conn, req, username, password, domain, session_flags)
    }

    pub fn make_session(
        &mut self,
        creation: Request,
        uid: i32,
        domain: Option<Vec<u8>>,
        flags: i32,
        permissions: schema::Permissions,
    ) -> Result<(RawSessionId, &Session), Error> {
        self.auth
            .make_session(&self.conn, creation, uid, domain, flags, permissions)
    }

    pub fn authenticate_session(
        &mut self,
        req: auth::Request,
        sid: &auth::SessionHash,
    ) -> Result<(&auth::Session, &User), base::Error> {
        self.auth.authenticate_session(&self.conn, req, sid)
    }

    pub fn revoke_session(
        &mut self,
        reason: auth::RevocationReason,
        detail: Option<String>,
        req: auth::Request,
        hash: &auth::SessionHash,
    ) -> Result<(), Error> {
        self.auth
            .revoke_session(&self.conn, reason, detail, req, hash)
    }

    // ---- signal ----

    pub fn signals_by_id(&self) -> &BTreeMap<u32, signal::Signal> {
        self.signal.signals_by_id()
    }
    pub fn signal_types_by_uuid(&self) -> &FnvHashMap<Uuid, signal::Type> {
        self.signal.types_by_uuid()
    }
    pub fn list_changes_by_time(
        &self,
        desired_time: Range<recording::Time>,
        f: &mut dyn FnMut(&signal::ListStateChangesRow),
    ) {
        self.signal.list_changes_by_time(desired_time, f)
    }
    pub fn update_signals(
        &mut self,
        when: Range<recording::Time>,
        signals: &[u32],
        states: &[u16],
    ) -> Result<(), base::Error> {
        self.signal.update_signals(when, signals, states)
    }
}

/// Pragmas for full database integrity.
///
/// These are `pub` so that the `moonfire-nvr sql` command can pass them to the SQLite3 binary
/// with `-cmd`.
pub static INTEGRITY_PRAGMAS: [&str; 3] = [
    // Enforce foreign keys. This is on by default with --features=bundled (as rusqlite
    // compiles the SQLite3 amalgamation with -DSQLITE_DEFAULT_FOREIGN_KEYS=1). Ensure it's
    // always on. Note that our foreign keys are immediate rather than deferred, so we have to
    // be careful about the order of operations during the upgrade.
    "pragma foreign_keys = on",
    // Make the database actually durable.
    "pragma fullfsync = on",
    "pragma synchronous = 3",
];

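// Hypothetical sketch of how a caller might forward these pragmas to the `sqlite3` CLI via
// `-cmd`, as the doc comment above describes. The argument layout and database path are
// assumptions for illustration, not the actual `moonfire-nvr sql` implementation:
//
//     let mut cmd = std::process::Command::new("sqlite3");
//     for pragma in &INTEGRITY_PRAGMAS {
//         cmd.arg("-cmd").arg(pragma); // run each pragma before the interactive session
//     }
//     cmd.arg("/path/to/db"); // made-up path
//     let status = cmd.status()?;
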
/// Sets pragmas for full database integrity.
pub(crate) fn set_integrity_pragmas(conn: &mut rusqlite::Connection) -> Result<(), Error> {
    for pragma in INTEGRITY_PRAGMAS {
        conn.execute(pragma, params![])?;
    }
    Ok(())
}

pub(crate) fn check_sqlite_version() -> Result<(), Error> {
    // SQLite version 3.8.2 introduced the "without rowid" syntax used in the schema.
    // https://www.sqlite.org/withoutrowid.html
    if rusqlite::version_number() < 3008002 {
        bail!(
            "SQLite version {} is too old; need at least 3.8.2",
            rusqlite::version()
        );
    }
    Ok(())
}

/// Initializes a database.
/// Note this doesn't set journal options, so that it can be used on in-memory databases for
/// test code.
pub fn init(conn: &mut rusqlite::Connection) -> Result<(), Error> {
    check_sqlite_version()?;
    set_integrity_pragmas(conn)?;
    let tx = conn.transaction()?;
    tx.execute_batch(include_str!("schema.sql"))
        .context("unable to create database schema")?;
    {
        let uuid = ::uuid::Uuid::new_v4();
        let uuid_bytes = &uuid.as_bytes()[..];
        tx.execute("insert into meta (uuid) values (?)", params![uuid_bytes])?;
    }
    tx.commit()?;
    Ok(())
}

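// Hypothetical usage sketch (not part of the original file): `init` also works on an
// in-memory connection, which is how the tests below build a fresh schema.
//
//     let mut conn = rusqlite::Connection::open_in_memory()?;
//     init(&mut conn)?;
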
/// Gets the schema version from the given database connection.
/// A fully initialized database will return `Ok(Some(version))` where `version` is an integer that
/// can be compared to `EXPECTED_VERSION`. An empty database will return `Ok(None)`. A partially
/// initialized database (in particular, one without a version row) will return some error.
pub fn get_schema_version(conn: &rusqlite::Connection) -> Result<Option<i32>, Error> {
    let ver_tables: i32 = conn.query_row_and_then(
        "select count(*) from sqlite_master where name = 'version'",
        params![],
        |row| row.get(0),
    )?;
    if ver_tables == 0 {
        return Ok(None);
    }
    Ok(Some(conn.query_row_and_then(
        "select max(id) from version",
        params![],
        |row| row.get(0),
    )?))
}

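// Hypothetical usage sketch (not part of the original file): a caller can branch on the
// three documented outcomes (fresh database, known version, or error via `?`).
//
//     match get_schema_version(&conn)? {
//         None => println!("empty database; run `nvr init`"),
//         Some(v) if v == EXPECTED_VERSION => println!("schema is current"),
//         Some(v) => println!("schema version {v} needs an upgrade or a newer binary"),
//     }
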
/// Returns the UUID associated with the current system boot, if available.
fn get_boot_uuid() -> Result<Option<Uuid>, Error> {
    if cfg!(target_os = "linux") {
        let boot_id = std::fs::read_to_string("/proc/sys/kernel/random/boot_id")?;
        Ok(Some(Uuid::parse_str(boot_id.trim_end())?))
    } else {
        Ok(None) // don't complain about lack of platform support; just return None.
    }
}

/// Checks that the schema version in the given database is as expected.
pub(crate) fn check_schema_version(conn: &rusqlite::Connection) -> Result<(), Error> {
    let ver = get_schema_version(conn)?.ok_or_else(|| {
        format_err!(
            "no such table: version.\n\n\
            If you have created an empty database by hand, delete it and use `nvr init` \
            instead, as noted in the installation instructions: \
            <https://github.com/scottlamb/moonfire-nvr/blob/master/guide/install.md>\n\n\
            If you are starting from a database that predates schema versioning, see \
            <https://github.com/scottlamb/moonfire-nvr/blob/master/guide/schema.md>."
        )
    })?;
    match ver.cmp(&EXPECTED_VERSION) {
        std::cmp::Ordering::Less => bail!(
            "Database schema version {} is too old (expected {}); \
            see upgrade instructions in \
            <https://github.com/scottlamb/moonfire-nvr/blob/master/guide/upgrade.md>.",
            ver,
            EXPECTED_VERSION
        ),
        std::cmp::Ordering::Equal => Ok(()),
        std::cmp::Ordering::Greater => bail!(
            "Database schema version {} is too new (expected {}); \
            must use a newer binary to match.",
            ver,
            EXPECTED_VERSION
        ),
    }
}

/// The recording database. Abstracts away SQLite queries. Also maintains in-memory state
/// (loaded on startup, and updated on successful commit) to avoid expensive scans over the
/// recording table on common queries.
pub struct Database<C: Clocks + Clone = clock::RealClocks> {
    /// This is wrapped in an `Option` to allow the `Drop` implementation and `close` to coexist.
    db: Option<Mutex<LockedDatabase>>,

    /// This is kept separately from the `LockedDatabase` to allow the `lock()` operation itself to
    /// access it. It doesn't need a `Mutex` anyway; it's `Sync`, and all operations work on
    /// `&self`.
    clocks: C,
}

impl<C: Clocks + Clone> Drop for Database<C> {
    fn drop(&mut self) {
        if ::std::thread::panicking() {
            return; // don't flush while panicking.
        }
        if let Some(m) = self.db.take() {
            if let Err(e) = m.into_inner().unwrap().flush(&self.clocks, "drop") {
                error!("Final database flush failed: {}", e);
            }
        }
    }
}

// Helpers for Database::lock(). Closures don't implement Fn.
fn acquisition() -> &'static str {
    "database lock acquisition"
}
fn operation() -> &'static str {
    "database operation"
}

impl<C: Clocks + Clone> Database<C> {
    /// Creates the database from a caller-supplied SQLite connection.
    pub fn new(
        clocks: C,
        mut conn: rusqlite::Connection,
        read_write: bool,
    ) -> Result<Database<C>, Error> {
        check_sqlite_version()?;
        set_integrity_pragmas(&mut conn)?;
        check_schema_version(&conn)?;

        // Note: the meta check comes after the version check to improve the error message when
        // trying to open a version 0 or version 1 database (which lacked the meta table).
        let (db_uuid, config) = raw::read_meta(&conn)?;
        let open_monotonic = recording::Time::new(clocks.monotonic());
        let open = if read_write {
            let real = recording::Time::new(clocks.realtime());
            let mut stmt = conn
                .prepare(" insert into open (uuid, start_time_90k, boot_uuid) values (?, ?, ?)")?;
            let open_uuid = SqlUuid(Uuid::new_v4());
            let boot_uuid = match get_boot_uuid() {
                Err(e) => {
                    warn!("Unable to get boot uuid: {}", e);
                    None
                }
                Ok(id) => id.map(SqlUuid),
            };
            stmt.execute(params![open_uuid, real.0, boot_uuid])?;
            let id = conn.last_insert_rowid() as u32;
            Some(Open {
                id,
                uuid: open_uuid.0,
            })
        } else {
            None
        };
        let auth = auth::State::init(&conn)?;
        let signal = signal::State::init(&conn, &config)?;
        let db = Database {
            db: Some(Mutex::new(LockedDatabase {
                conn,
                uuid: db_uuid,
                flush_count: 0,
                open,
                open_monotonic,
                auth,
                signal,
                sample_file_dirs_by_id: BTreeMap::new(),
                cameras_by_id: BTreeMap::new(),
                cameras_by_uuid: BTreeMap::new(),
                streams_by_id: BTreeMap::new(),
                video_sample_entries_by_id: BTreeMap::new(),
                video_index_cache: RefCell::new(LinkedHashMap::with_capacity_and_hasher(
                    VIDEO_INDEX_CACHE_LEN + 1,
                    Default::default(),
                )),
                on_flush: Vec::new(),
            })),
            clocks,
        };
        {
            let l = &mut *db.lock();
            l.init_video_sample_entries()?;
            l.init_sample_file_dirs()?;
            l.init_cameras()?;
            l.init_streams()?;
            for (&stream_id, ref mut stream) in &mut l.streams_by_id {
                // TODO: we could use one thread per stream if we had multiple db conns.
                let camera = l.cameras_by_id.get(&stream.camera_id).unwrap();
                init_recordings(&mut l.conn, stream_id, camera, stream)?;
            }
        }
        Ok(db)
    }

    #[inline(always)]
    pub fn clocks(&self) -> C {
        self.clocks.clone()
    }

    /// Locks the database; the returned reference is the only way to perform (read or write)
    /// operations.
    pub fn lock(&self) -> DatabaseGuard<C> {
        let timer = clock::TimerGuard::new(&self.clocks, acquisition);
        let db = self.db.as_ref().unwrap().lock().unwrap();
        drop(timer);
        let _timer = clock::TimerGuard::<C, &'static str, fn() -> &'static str>::new(
            &self.clocks,
            operation,
        );
        DatabaseGuard {
            clocks: &self.clocks,
            db,
            _timer,
        }
    }

    /// For testing: closes the database (without flushing) and returns the connection.
    /// This allows verification that a newly opened database is in an acceptable state.
    #[cfg(test)]
    fn close(mut self) -> rusqlite::Connection {
        self.db.take().unwrap().into_inner().unwrap().conn
    }
}

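// Hypothetical usage sketch (not part of the original file; the path is made up): open an
// existing database read-write and hold the lock only briefly, per the module-level guidance.
//
//     let conn = rusqlite::Connection::open("/var/lib/moonfire-nvr/db/db")?;
//     let db = Database::new(clock::RealClocks {}, conn, true)?;
//     {
//         let l = db.lock();
//         println!("{} cameras", l.cameras_by_id().len());
//     } // guard dropped here; release the lock before long-running work
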
/// Reference to a locked database returned by [Database::lock].
pub struct DatabaseGuard<'db, C: Clocks> {
    clocks: &'db C,
    db: MutexGuard<'db, LockedDatabase>,
    _timer: clock::TimerGuard<'db, C, &'static str, fn() -> &'static str>,
}

impl<'db, C: Clocks + Clone> DatabaseGuard<'db, C> {
    /// Tries to flush unwritten changes from the stream directories.
    ///
    /// * commits any recordings added with `add_recording` that have since been marked as
    ///   synced.
    /// * moves old recordings to the garbage table as requested by `delete_oldest_recordings`.
    /// * removes entries from the garbage table as requested by `mark_sample_files_deleted`.
    ///
    /// On success, for each affected sample file directory with a flush watcher set, sends a
    /// `Flush` event.
    pub(crate) fn flush(&mut self, reason: &str) -> Result<(), Error> {
        self.db.flush(self.clocks, reason)
    }
}

impl<'db, C: Clocks + Clone> ::std::ops::Deref for DatabaseGuard<'db, C> {
    type Target = LockedDatabase;
    fn deref(&self) -> &LockedDatabase {
        &self.db
    }
}

impl<'db, C: Clocks + Clone> ::std::ops::DerefMut for DatabaseGuard<'db, C> {
    fn deref_mut(&mut self) -> &mut LockedDatabase {
        &mut self.db
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::recording::{self, TIME_UNITS_PER_SEC};
    use crate::testutil;
    use base::clock;
    use rusqlite::Connection;
    use url::Url;
    use uuid::Uuid;

    fn setup_conn() -> Connection {
        let mut conn = Connection::open_in_memory().unwrap();
        super::init(&mut conn).unwrap();
        conn
    }

    fn assert_no_recordings(db: &Database, uuid: Uuid) {
        let mut rows = 0;
        let mut camera_id = -1;
        {
            let db = db.lock();
            for row in db.cameras_by_id().values() {
                rows += 1;
                camera_id = row.id;
                assert_eq!(uuid, row.uuid);
                assert_eq!(
                    "http://test-camera/",
                    row.config.onvif_base_url.as_ref().unwrap().as_str()
                );
                assert_eq!("foo", &row.config.username);
                assert_eq!("bar", &row.config.password);
                //assert_eq!("/main", row.main_rtsp_url);
                //assert_eq!("/sub", row.sub_rtsp_url);
                //assert_eq!(42, row.retain_bytes);
                //assert_eq!(None, row.range);
                //assert_eq!(recording::Duration(0), row.duration);
                //assert_eq!(0, row.sample_file_bytes);
            }
        }
        assert_eq!(1, rows);

        let stream_id = camera_id; // TODO
        rows = 0;
        {
            let db = db.lock();
            let all_time = recording::Time(i64::min_value())..recording::Time(i64::max_value());
            db.list_recordings_by_time(stream_id, all_time, &mut |_row| {
                rows += 1;
                Ok(())
            })
            .unwrap();
        }
        assert_eq!(0, rows);
    }

    fn assert_single_recording(db: &Database, stream_id: i32, r: &RecordingToInsert) {
        {
            let db = db.lock();
            let stream = db.streams_by_id().get(&stream_id).unwrap();
            let dur = recording::Duration(r.wall_duration_90k as i64);
            assert_eq!(Some(r.start..r.start + dur), stream.range);
            assert_eq!(r.sample_file_bytes as i64, stream.sample_file_bytes);
            assert_eq!(dur, stream.duration);
            db.cameras_by_id().get(&stream.camera_id).unwrap();
        }

        // TODO(slamb): test that the days logic works correctly.

        let mut rows = 0;
        let mut recording_id = None;
        {
            let db = db.lock();
            let all_time = recording::Time(i64::min_value())..recording::Time(i64::max_value());
            db.list_recordings_by_time(stream_id, all_time, &mut |row| {
                rows += 1;
                recording_id = Some(row.id);
                assert_eq!(r.start, row.start);
                assert_eq!(r.wall_duration_90k, row.wall_duration_90k);
                assert_eq!(r.video_samples, row.video_samples);
                assert_eq!(r.video_sync_samples, row.video_sync_samples);
                assert_eq!(r.sample_file_bytes, row.sample_file_bytes);
                let vse = db
                    .video_sample_entries_by_id()
                    .get(&row.video_sample_entry_id)
                    .unwrap();
                assert_eq!(vse.rfc6381_codec, "avc1.4d0029");
                Ok(())
            })
            .unwrap();
        }
        assert_eq!(1, rows);

        rows = 0;
        raw::list_oldest_recordings(
            &db.lock().conn,
            CompositeId::new(stream_id, 0),
            &mut |row| {
                rows += 1;
                assert_eq!(recording_id, Some(row.id));
                assert_eq!(r.start, row.start);
                assert_eq!(r.wall_duration_90k, row.wall_duration_90k);
                assert_eq!(r.sample_file_bytes, row.sample_file_bytes);
                true
            },
        )
        .unwrap();
        assert_eq!(1, rows);

        // TODO: list_aggregated_recordings.
        // TODO: with_recording_playback.
    }

    #[test]
    fn test_no_meta_or_version() {
        testutil::init();
        let e = Database::new(
            clock::RealClocks {},
            Connection::open_in_memory().unwrap(),
            false,
        )
        .err()
        .unwrap();
        assert!(e.to_string().starts_with("no such table"), "{}", e);
    }

    #[test]
    fn test_version_too_old() {
        testutil::init();
        let c = setup_conn();
        c.execute_batch("delete from version; insert into version values (6, 0, '');")
            .unwrap();
        let e = Database::new(clock::RealClocks {}, c, false).err().unwrap();
        assert!(
            e.to_string()
                .starts_with("Database schema version 6 is too old (expected 7)"),
            "got: {e:?}"
        );
    }

    #[test]
    fn test_version_too_new() {
        testutil::init();
        let c = setup_conn();
        c.execute_batch("delete from version; insert into version values (8, 0, '');")
            .unwrap();
        let e = Database::new(clock::RealClocks {}, c, false).err().unwrap();
        assert!(
            e.to_string()
                .starts_with("Database schema version 8 is too new (expected 7)"),
            "got: {e:?}"
        );
    }

    /// Basic test of running some queries on a fresh database.
    #[test]
    fn test_fresh_db() {
        testutil::init();
        let conn = setup_conn();
        let db = Database::new(clock::RealClocks {}, conn, true).unwrap();
        let db = db.lock();
        assert_eq!(0, db.cameras_by_id().values().count());
    }

2016-11-30 13:59:19 -05:00
|
|
|
/// Basic test of the full lifecycle of recording. Does not exercise error cases.
|
2016-11-25 17:34:00 -05:00
|
|
|
#[test]
|
|
|
|
fn test_full_lifecycle() {
|
2016-11-30 14:17:46 -05:00
|
|
|
testutil::init();
|
2016-11-25 17:34:00 -05:00
|
|
|
let conn = setup_conn();
|
2018-03-23 16:31:23 -04:00
|
|
|
let db = Database::new(clock::RealClocks {}, conn, true).unwrap();
|
2021-05-17 16:08:01 -04:00
|
|
|
let tmpdir = tempfile::Builder::new()
|
|
|
|
.prefix("moonfire-nvr-test")
|
|
|
|
.tempdir()
|
|
|
|
.unwrap();
|
2021-10-26 14:47:13 -04:00
|
|
|
let path = tmpdir.path().to_owned();
|
2018-02-22 19:35:34 -05:00
|
|
|
let sample_file_dir_id = { db.lock() }.add_sample_file_dir(path).unwrap();
|
2018-02-23 17:49:10 -05:00
|
|
|
let mut c = CameraChange {
|
2018-02-04 00:56:04 -05:00
|
|
|
short_name: "testcam".to_owned(),
|
2021-09-10 19:31:03 -04:00
|
|
|
config: crate::json::CameraConfig {
|
|
|
|
description: "".to_owned(),
|
|
|
|
onvif_base_url: Some(Url::parse("http://test-camera/").unwrap()),
|
|
|
|
username: "foo".to_owned(),
|
|
|
|
password: "bar".to_owned(),
|
|
|
|
..Default::default()
|
|
|
|
},
|
2018-02-12 01:45:51 -05:00
|
|
|
streams: [
|
2018-02-22 19:35:34 -05:00
|
|
|
StreamChange {
|
|
|
|
sample_file_dir_id: Some(sample_file_dir_id),
|
2021-09-10 19:31:03 -04:00
|
|
|
config: crate::json::StreamConfig {
|
|
|
|
url: Some(Url::parse("rtsp://test-camera/main").unwrap()),
|
|
|
|
mode: crate::json::STREAM_MODE_RECORD.to_owned(),
|
|
|
|
flush_if_sec: 1,
|
|
|
|
..Default::default()
|
|
|
|
},
|
2018-02-22 19:35:34 -05:00
|
|
|
},
|
|
|
|
StreamChange {
|
|
|
|
sample_file_dir_id: Some(sample_file_dir_id),
|
2021-09-10 19:31:03 -04:00
|
|
|
config: crate::json::StreamConfig {
|
|
|
|
url: Some(Url::parse("rtsp://test-camera/sub").unwrap()),
|
|
|
|
mode: crate::json::STREAM_MODE_RECORD.to_owned(),
|
|
|
|
flush_if_sec: 1,
|
|
|
|
..Default::default()
|
|
|
|
},
|
2018-02-22 19:35:34 -05:00
|
|
|
},
|
2021-09-10 19:31:03 -04:00
|
|
|
StreamChange::default(),
|
2018-01-23 14:05:07 -05:00
|
|
|
],
|
2018-02-23 17:49:10 -05:00
|
|
|
};
|
|
|
|
let camera_id = db.lock().add_camera(c.clone()).unwrap();
|
|
|
|
let (main_stream_id, sub_stream_id);
|
2018-02-04 00:56:04 -05:00
|
|
|
{
|
|
|
|
let mut l = db.lock();
|
2018-02-23 17:49:10 -05:00
|
|
|
{
|
|
|
|
let c = l.cameras_by_id().get(&camera_id).unwrap();
|
|
|
|
main_stream_id = c.streams[0].unwrap();
|
|
|
|
sub_stream_id = c.streams[1].unwrap();
|
|
|
|
}
|
2018-02-22 19:35:34 -05:00
|
|
|
l.update_retention(&[super::RetentionChange {
|
2018-02-23 17:49:10 -05:00
|
|
|
stream_id: main_stream_id,
|
2018-02-22 19:35:34 -05:00
|
|
|
new_record: true,
|
|
|
|
new_limit: 42,
|
2021-02-17 01:15:54 -05:00
|
|
|
}])
|
|
|
|
.unwrap();
|
2018-02-23 17:49:10 -05:00
|
|
|
{
|
|
|
|
let main = l.streams_by_id().get(&main_stream_id).unwrap();
|
2021-09-10 19:31:03 -04:00
|
|
|
assert_eq!(main.config.mode, crate::json::STREAM_MODE_RECORD);
|
|
|
|
assert_eq!(main.config.retain_bytes, 42);
|
|
|
|
assert_eq!(main.config.flush_if_sec, 1);
|
2018-02-23 17:49:10 -05:00
|
|
|
}
|
|
|
|
|
2021-02-17 01:15:54 -05:00
|
|
|
assert_eq!(
|
2021-09-10 19:31:03 -04:00
|
|
|
l.streams_by_id()
|
|
|
|
.get(&sub_stream_id)
|
|
|
|
.unwrap()
|
|
|
|
.config
|
|
|
|
.flush_if_sec,
|
2021-02-17 01:15:54 -05:00
|
|
|
1
|
|
|
|
);
|
2021-09-10 19:31:03 -04:00
|
|
|
c.streams[1].config.flush_if_sec = 2;
|
2018-02-23 17:49:10 -05:00
|
|
|
l.update_camera(camera_id, c).unwrap();
|
2021-02-17 01:15:54 -05:00
|
|
|
assert_eq!(
|
2021-09-10 19:31:03 -04:00
|
|
|
l.streams_by_id()
|
|
|
|
.get(&sub_stream_id)
|
|
|
|
.unwrap()
|
|
|
|
.config
|
|
|
|
.flush_if_sec,
|
2021-02-17 01:15:54 -05:00
|
|
|
2
|
|
|
|
);
|
2018-02-04 00:56:04 -05:00
|
|
|
}
|
|
|
|
let camera_uuid = { db.lock().cameras_by_id().get(&camera_id).unwrap().uuid };
|
|
|
|
assert_no_recordings(&db, camera_uuid);
|
2021-02-17 01:15:54 -05:00
|
|
|
assert_eq!(
|
|
|
|
db.lock()
|
|
|
|
.streams_by_id()
|
|
|
|
.get(&main_stream_id)
|
|
|
|
.unwrap()
|
|
|
|
.cum_recordings,
|
|
|
|
0
|
|
|
|
);
|
2018-02-04 00:56:04 -05:00
|
|
|
|
|
|
|
// Closing and reopening the database should present the same contents.
|
|
|
|
let conn = db.close();
|
2018-03-23 16:31:23 -04:00
|
|
|
let db = Database::new(clock::RealClocks {}, conn, true).unwrap();
|
2021-02-17 01:15:54 -05:00
|
|
|
assert_eq!(
|
|
|
|
db.lock()
|
|
|
|
.streams_by_id()
|
|
|
|
.get(&sub_stream_id)
|
|
|
|
.unwrap()
|
2021-09-10 19:31:03 -04:00
|
|
|
.config
|
2021-02-17 01:15:54 -05:00
|
|
|
.flush_if_sec,
|
|
|
|
2
|
|
|
|
);
|
2016-11-25 17:34:00 -05:00
|
|
|
assert_no_recordings(&db, camera_uuid);
|
2021-02-17 01:15:54 -05:00
|
|
|
assert_eq!(
|
|
|
|
db.lock()
|
|
|
|
.streams_by_id()
|
|
|
|
.get(&main_stream_id)
|
|
|
|
.unwrap()
|
|
|
|
.cum_recordings,
|
|
|
|
0
|
|
|
|
);
|
2016-11-25 17:34:00 -05:00
|
|
|
|
2018-02-22 19:35:34 -05:00
|
|
|
// TODO: assert_eq!(db.lock().list_garbage(sample_file_dir_id).unwrap(), &[]);
|
2016-11-25 17:34:00 -05:00
|
|
|
|
2021-02-17 01:15:54 -05:00
|
|
|
let vse_id = db
|
|
|
|
.lock()
|
|
|
|
.insert_video_sample_entry(VideoSampleEntryToInsert {
|
|
|
|
width: 1920,
|
|
|
|
height: 1080,
|
|
|
|
pasp_h_spacing: 1,
|
|
|
|
pasp_v_spacing: 1,
|
|
|
|
data: include_bytes!("testdata/avc1").to_vec(),
|
|
|
|
rfc6381_codec: "avc1.4d0029".to_owned(),
|
|
|
|
})
|
|
|
|
.unwrap();
|
2023-01-29 18:01:19 -05:00
|
|
|
assert!(vse_id > 0, "vse_id = {vse_id}");
|
2016-11-25 17:34:00 -05:00
|
|
|
|
2018-02-20 13:11:10 -05:00
|
|
|
// Inserting a recording should succeed and advance the next recording id.
|
2016-11-25 17:34:00 -05:00
|
|
|
let start = recording::Time(1430006400 * TIME_UNITS_PER_SEC);
|
2018-01-23 14:05:07 -05:00
|
|
|
let recording = RecordingToInsert {
|
2016-11-25 17:34:00 -05:00
|
|
|
sample_file_bytes: 42,
|
2016-12-21 01:08:18 -05:00
|
|
|
run_offset: 0,
|
|
|
|
flags: 0,
|
2018-03-02 18:40:32 -05:00
|
|
|
start,
|
2020-08-05 00:44:01 -04:00
|
|
|
prev_media_duration: recording::Duration(0),
|
|
|
|
            prev_runs: 0,
            wall_duration_90k: TIME_UNITS_PER_SEC.try_into().unwrap(),
            media_duration_90k: TIME_UNITS_PER_SEC.try_into().unwrap(),
            local_time_delta: recording::Duration(0),
            video_samples: 1,
            video_sync_samples: 1,
            video_sample_entry_id: vse_id,
            video_index: [0u8; 100].to_vec(),
            sample_file_blake3: None,
            end_reason: None,
        };
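        // add_recording stages the recording in memory; mark_synced and flush then commit it.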
        let id = {
            let mut db = db.lock();
            let (id, _) = db.add_recording(main_stream_id, recording.clone()).unwrap();
            db.mark_synced(id).unwrap();
            db.flush("add test").unwrap();
            id
        };
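        // The flush should advance the stream's cumulative recording count.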
        assert_eq!(
            db.lock()
                .streams_by_id()
                .get(&main_stream_id)
                .unwrap()
                .cum_recordings,
            1
        );

        // Queries should return the correct result (with caches updated on insert).
        assert_single_recording(&db, main_stream_id, &recording);

        // Queries on a fresh database should return the correct result (with caches populated from
        // existing database contents rather than built on insert).
        let conn = db.close();
        let db = Database::new(clock::RealClocks {}, conn, true).unwrap();
        assert_single_recording(&db, main_stream_id, &recording);

        // Deleting a recording should succeed, update the min/max times, and mark it as garbage.
        {
            let mut db = db.lock();
            let mut n = 0;
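            // Accept every recording offered for deletion (the callback returns true to continue).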
            db.delete_oldest_recordings(main_stream_id, &mut |_| {
                n += 1;
                true
            })
            .unwrap();
            assert_eq!(n, 1);
            {
                let s = db.streams_by_id().get(&main_stream_id).unwrap();
                assert_eq!(s.sample_file_bytes, 42);
                assert_eq!(s.bytes_to_delete, 42);
            }
            n = 0;

            // A second pass should find nothing left to delete.
            db.delete_oldest_recordings(main_stream_id, &mut |_| {
                n += 1;
                true
            })
            .unwrap();
            assert_eq!(n, 0);
            assert_eq!(
                db.streams_by_id()
                    .get(&main_stream_id)
                    .unwrap()
                    .bytes_to_delete,
                42
            );
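            // Flushing commits the deletion; the stream's byte counters drop to zero.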
            db.flush("delete test").unwrap();
            let s = db.streams_by_id().get(&main_stream_id).unwrap();
            assert_eq!(s.sample_file_bytes, 0);
            assert_eq!(s.bytes_to_delete, 0);
        }
        assert_no_recordings(&db, camera_uuid);
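        // The deleted recording's sample file should be queued for unlink, and nothing
        // should be listed as already unlinked.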
        let g: Vec<_> = db
            .lock()
            .sample_file_dirs_by_id()
            .get(&sample_file_dir_id)
            .unwrap()
            .garbage_needs_unlink
            .iter()
            .copied()
            .collect();
        assert_eq!(&g, &[id]);
        let g: Vec<_> = db
            .lock()
            .sample_file_dirs_by_id()
            .get(&sample_file_dir_id)
            .unwrap()
            .garbage_unlinked
            .iter()
            .copied()
            .collect();
        assert_eq!(&g, &[]);
    }

    #[test]
    fn round_up() {
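        // round_up rounds a byte count up to the next multiple of 4096.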
        assert_eq!(super::round_up(0), 0);
        assert_eq!(super::round_up(8_191), 8_192);
        assert_eq!(super::round_up(8_192), 8_192);
        assert_eq!(super::round_up(8_193), 12_288);
    }
}