// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2020 The Moonfire NVR Authors; see AUTHORS and LICENSE.txt.
// SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception.

//! Writing recordings and deleting old ones.

use crate::db::{self, CompositeId};
use crate::dir;
use crate::recording::{self, MAX_RECORDING_WALL_DURATION};
use base::clock::{self, Clocks};
use base::shutdown::ShutdownError;
use base::FastHashMap;
use base::{bail, err, Error};
use std::cmp::{self, Ordering};
use std::convert::TryFrom;
use std::io;
use std::mem;
use std::path::PathBuf;
use std::sync::Mutex;
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration as StdDuration;
use time::{Duration, Timespec};
use tracing::{debug, trace, warn};

/// Trait to allow mocking out [crate::dir::SampleFileDir] in syncer tests.
/// This is public because it's exposed in the [SyncerChannel] type parameters,
/// not because it's of direct use outside this module.
pub trait DirWriter: 'static + Send {
    type File: FileWriter;

    fn create_file(&self, id: CompositeId) -> Result<Self::File, nix::Error>;
    fn sync(&self) -> Result<(), nix::Error>;
    fn unlink_file(&self, id: CompositeId) -> Result<(), nix::Error>;
}

/// Trait to allow mocking out [std::fs::File] in syncer tests.
/// This is public because it's exposed in the [SyncerChannel] type parameters,
/// not because it's of direct use outside this module.
pub trait FileWriter: 'static {
    /// As in `std::fs::File::sync_all`.
    fn sync_all(&self) -> Result<(), io::Error>;

    /// As in `std::io::Write::write`.
    fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error>;
}

impl DirWriter for Arc<dir::SampleFileDir> {
    type File = ::std::fs::File;

    fn create_file(&self, id: CompositeId) -> Result<Self::File, nix::Error> {
        dir::SampleFileDir::create_file(self, id)
    }
    fn sync(&self) -> Result<(), nix::Error> {
        dir::SampleFileDir::sync(self)
    }
    fn unlink_file(&self, id: CompositeId) -> Result<(), nix::Error> {
        dir::SampleFileDir::unlink_file(self, id)
    }
}

impl FileWriter for ::std::fs::File {
    fn sync_all(&self) -> Result<(), io::Error> {
        self.sync_all()
    }
    fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
        io::Write::write(self, buf)
    }
}

/// A command sent to a [Syncer].
enum SyncerCommand<F> {
    /// Command sent by [SyncerChannel::async_save_recording].
    AsyncSaveRecording(CompositeId, recording::Duration, F),

    /// Notes that the database has been flushed and garbage collection should be attempted.
    /// [start_syncer] sets up a database callback to send this command.
    DatabaseFlushed,

    /// Command sent by [SyncerChannel::flush].
    Flush(mpsc::SyncSender<()>),
}

/// A channel which can be used to send commands to the syncer.
/// Can be cloned to allow multiple threads to send commands.
pub struct SyncerChannel<F>(mpsc::Sender<SyncerCommand<F>>);

impl<F> ::std::clone::Clone for SyncerChannel<F> {
    fn clone(&self) -> Self {
        SyncerChannel(self.0.clone())
    }
}

/// State of the worker thread created by [start_syncer].
struct Syncer<C: Clocks + Clone, D: DirWriter> {
    dir_id: i32,
    dir: D,
    db: Arc<db::Database<C>>,
    planned_flushes: std::collections::BinaryHeap<PlannedFlush>,
    shutdown_rx: base::shutdown::Receiver,
}

/// A plan to flush at a given instant due to a recently-saved recording's `flush_if_sec` parameter.
struct PlannedFlush {
    /// Monotonic time at which this flush should happen.
    when: Timespec,

    /// Recording which prompts this flush. If this recording is already flushed at the planned
    /// time, it can be skipped.
    recording: CompositeId,

    /// A human-readable reason for the flush, for logs.
    reason: String,

    /// Senders to drop when this time is reached. This is for test instrumentation; see
    /// [SyncerChannel::flush].
    senders: Vec<mpsc::SyncSender<()>>,
}

// PlannedFlush is meant for placement in a max-heap which should return the soonest flush. This
// PlannedFlush is greater than other if its when is _less_ than the other's.
impl Ord for PlannedFlush {
    fn cmp(&self, other: &Self) -> Ordering {
        other.when.cmp(&self.when)
    }
}

impl PartialOrd for PlannedFlush {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for PlannedFlush {
    fn eq(&self, other: &Self) -> bool {
        self.when == other.when
    }
}

impl Eq for PlannedFlush {}
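
// The reversed `Ord` above turns `std::collections::BinaryHeap` (a max-heap)
// into an effective min-heap: `peek`/`pop` yield the flush with the earliest
// `when`. A minimal sketch of that behavior (hypothetical test module, not
// part of the original file; assumes `CompositeId::new` as used in the tests
// below):
#[cfg(test)]
mod planned_flush_order_tests {
    use super::PlannedFlush;

    fn f(sec: i64) -> PlannedFlush {
        PlannedFlush {
            when: time::Timespec::new(sec, 0),
            recording: crate::db::CompositeId::new(1, 0),
            reason: String::new(),
            senders: Vec::new(),
        }
    }

    #[test]
    fn pops_earliest_first() {
        let mut h = std::collections::BinaryHeap::new();
        h.push(f(30));
        h.push(f(10));
        h.push(f(20));
        assert_eq!(h.pop().unwrap().when.sec, 10); // earliest, not latest.
    }
}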

/// Starts a syncer for the given sample file directory.
///
/// The lock must not be held on `db` when this is called.
///
/// There should be only one syncer per directory, or 0 if operating in read-only mode.
/// This function will perform the initial rotation synchronously, so that it is finished before
/// file writing starts. Afterward the syncing happens in a background thread.
///
/// Returns a `SyncerChannel` which can be used to send commands (and can be cloned freely) and
/// a `JoinHandle` for the syncer thread. Commands sent on the channel will be executed or retried
/// forever. (TODO: provide some manner of pushback during retry.) At program shutdown, all
/// `SyncerChannel` clones should be dropped and then the handle joined to allow all recordings to
/// be persisted.
///
/// Note that dropping all `SyncerChannel` clones currently includes calling
/// `LockedDatabase::clear_on_flush`, as this function installs a hook to watch database flushes.
/// TODO: add a join wrapper which arranges for the on flush hook to be removed automatically.
pub fn start_syncer<C>(
    db: Arc<db::Database<C>>,
    shutdown_rx: base::shutdown::Receiver,
    dir_id: i32,
) -> Result<(SyncerChannel<::std::fs::File>, thread::JoinHandle<()>), Error>
where
    C: Clocks + Clone,
{
    let db2 = db.clone();
    let (mut syncer, path) = Syncer::new(&db.lock(), shutdown_rx, db2, dir_id)?;
    let span = tracing::info_span!("syncer", path = %path.display());
    span.in_scope(|| {
        tracing::info!("initial rotation");
        syncer.initial_rotation()
    })?;
    let (snd, rcv) = mpsc::channel();
    db.lock().on_flush(Box::new({
        let snd = snd.clone();
        move || {
            if let Err(err) = snd.send(SyncerCommand::DatabaseFlushed) {
                warn!(%err, "unable to notify syncer for dir {}", dir_id);
            }
        }
    }));
    Ok((
        SyncerChannel(snd),
        thread::Builder::new()
            .name(format!("sync-{dir_id}"))
            .spawn(move || {
                span.in_scope(|| {
                    tracing::info!("starting");
                    while syncer.iter(&rcv) {}
                })
            })
            .unwrap(),
    ))
}
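
// A minimal usage sketch of the lifecycle described above (hypothetical caller
// code; `db`, `shutdown_rx`, and `dir_id` are assumed to exist):
//
//     let (channel, join) = start_syncer(db.clone(), shutdown_rx, dir_id)?;
//     // ... hand clones of `channel` to each stream's `Writer` ...
//     drop(channel); // at shutdown: drop all clones,
//     db.lock().clear_on_flush(); // remove the flush hook installed above,
//     join.join().unwrap(); // then join so pending recordings are persisted.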

/// A new retention limit for use in [lower_retention].
pub struct NewLimit {
    pub stream_id: i32,
    pub limit: i64,
}

/// Immediately deletes recordings if necessary to fit within the given new `retain_bytes` limit.
/// Note this doesn't change the limit in the database; it only deletes files.
/// Pass a limit of 0 to delete all recordings associated with a camera.
///
/// This is expected to be performed from `moonfire-nvr config` when no syncer is running.
/// It potentially flushes the database twice (before and after the actual deletion).
pub fn lower_retention(
    db: &Arc<db::Database>,
    dir_id: i32,
    limits: &[NewLimit],
) -> Result<(), Error> {
    let db2 = db.clone();
    let (_tx, rx) = base::shutdown::channel();
    let (mut syncer, _) = Syncer::new(&db.lock(), rx, db2, dir_id)?;
    syncer.do_rotation(|db| {
        for l in limits {
            let (fs_bytes_before, extra);
            {
                let Some(stream) = db.streams_by_id().get(&l.stream_id) else {
                    bail!(NotFound, msg("no such stream {}", l.stream_id));
                };
                fs_bytes_before =
                    stream.fs_bytes + stream.fs_bytes_to_add - stream.fs_bytes_to_delete;
                extra = stream.config.retain_bytes - l.limit;
            }
            if l.limit >= fs_bytes_before {
                continue;
            }
            delete_recordings(db, l.stream_id, extra)?;
        }
        Ok(())
    })
}

/// Enqueues deletion of recordings to bring a stream's disk usage within bounds.
/// The next flush will mark the recordings as garbage in the SQLite database, and then they can
/// be deleted from disk.
fn delete_recordings(
    db: &mut db::LockedDatabase,
    stream_id: i32,
    extra_bytes_needed: i64,
) -> Result<(), Error> {
    let fs_bytes_needed = {
        let stream = match db.streams_by_id().get(&stream_id) {
            None => bail!(NotFound, msg("no stream {stream_id}")),
            Some(s) => s,
        };
        stream.fs_bytes + stream.fs_bytes_to_add - stream.fs_bytes_to_delete + extra_bytes_needed
            - stream.config.retain_bytes
    };
    let mut fs_bytes_to_delete = 0;
    if fs_bytes_needed <= 0 {
        debug!(
            "{}: have remaining quota of {}",
            stream_id,
            base::strutil::encode_size(-fs_bytes_needed)
        );
        return Ok(());
    }
    let mut n = 0;
    db.delete_oldest_recordings(stream_id, &mut |row| {
        if fs_bytes_needed >= fs_bytes_to_delete {
            fs_bytes_to_delete += db::round_up(i64::from(row.sample_file_bytes));
            n += 1;
            return true;
        }
        false
    })?;
    Ok(())
}
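
// Worked example of the quota arithmetic above (illustrative numbers only):
// with fs_bytes = 1_000_000, fs_bytes_to_add = 50_000, fs_bytes_to_delete = 0,
// extra_bytes_needed = 0, and retain_bytes = 900_000:
//
//     fs_bytes_needed = 1_000_000 + 50_000 - 0 + 0 - 900_000 = 150_000
//
// so the callback keeps accepting the oldest recordings until at least
// 150,000 bytes (rounded up to filesystem block sizes by `db::round_up`)
// are queued for deletion.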

impl<F: FileWriter> SyncerChannel<F> {
    /// Asynchronously syncs the given writer, closes it, records it into the database, and
    /// starts rotation.
    fn async_save_recording(&self, id: CompositeId, wall_duration: recording::Duration, f: F) {
        self.0
            .send(SyncerCommand::AsyncSaveRecording(id, wall_duration, f))
            .unwrap();
    }

    /// For testing: flushes the syncer, waiting for all currently-queued commands to complete,
    /// including the next scheduled database flush (if any). Note this doesn't wait for any
    /// post-database flush garbage collection.
    pub fn flush(&self) {
        let (snd, rcv) = mpsc::sync_channel(0);
        self.0.send(SyncerCommand::Flush(snd)).unwrap();
        rcv.recv().unwrap_err(); // syncer should just drop the channel, closing it.
    }
}
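
// `flush` relies on a standard mpsc property: when every `SyncSender` clone is
// dropped, the receiver's `recv` returns `Err(RecvError)`. A standalone sketch
// of that pattern (hypothetical, independent of the syncer):
//
//     let (snd, rcv) = std::sync::mpsc::sync_channel::<()>(0);
//     std::thread::spawn(move || drop(snd)); // signal by dropping, not sending.
//     rcv.recv().unwrap_err(); // unblocks once the sender is gone.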

/// Lists files which should be "abandoned" (deleted without ever recording in the database)
/// on opening.
fn list_files_to_abandon(
    dir: &dir::SampleFileDir,
    streams_to_next: FastHashMap<i32, i32>,
) -> Result<Vec<CompositeId>, Error> {
    let mut v = Vec::new();
    let mut d = dir.opendir()?;
    for e in d.iter() {
        let e = e?;
        let id = match dir::parse_id(e.file_name().to_bytes()) {
            Ok(i) => i,
            Err(_) => continue,
        };
        let next = match streams_to_next.get(&id.stream()) {
            Some(n) => *n,
            None => continue, // unknown stream.
        };
        if id.recording() >= next {
            v.push(id);
        }
    }
    Ok(v)
}

impl<C: Clocks + Clone> Syncer<C, Arc<dir::SampleFileDir>> {
    fn new(
        l: &db::LockedDatabase,
        shutdown_rx: base::shutdown::Receiver,
        db: Arc<db::Database<C>>,
        dir_id: i32,
    ) -> Result<(Self, PathBuf), Error> {
        let d = l
            .sample_file_dirs_by_id()
            .get(&dir_id)
            .ok_or_else(|| err!(NotFound, msg("no dir {dir_id}")))?;
        let dir = d.get()?;

        // Abandon files.
        // First, get a list of the streams in question.
        let streams_to_next: FastHashMap<_, _> = l
            .streams_by_id()
            .iter()
            .filter_map(|(&k, v)| {
                if v.sample_file_dir_id == Some(dir_id) {
                    Some((k, v.cum_recordings))
                } else {
                    None
                }
            })
            .collect();
        let to_abandon = list_files_to_abandon(&dir, streams_to_next)?;
        let mut undeletable = 0;
        for &id in &to_abandon {
            if let Err(err) = dir.unlink_file(id) {
                if err == nix::Error::ENOENT {
                    warn!(%id, "dir: abandoned recording already deleted");
                } else {
                    warn!(%err, %id, "dir: unable to unlink abandoned recording");
                    undeletable += 1;
                }
            }
        }
        if undeletable > 0 {
            bail!(
                Unknown,
                msg("unable to delete {undeletable} abandoned recordings; see logs")
            );
        }

        Ok((
            Syncer {
                dir_id,
                shutdown_rx,
                dir,
                db,
                planned_flushes: std::collections::BinaryHeap::new(),
            },
            d.path.clone(),
        ))
    }

    /// Rotates files for all streams and deletes stale files from previous runs.
    /// Called from main thread.
    fn initial_rotation(&mut self) -> Result<(), Error> {
        self.do_rotation(|db| {
            let streams: Vec<i32> = db.streams_by_id().keys().copied().collect();
            for &stream_id in &streams {
                delete_recordings(db, stream_id, 0)?;
            }
            Ok(())
        })
    }

    /// Helper to do initial or retention-lowering rotation. Called from main thread.
    fn do_rotation<F>(&mut self, delete_recordings: F) -> Result<(), Error>
    where
        F: Fn(&mut db::LockedDatabase) -> Result<(), Error>,
    {
        {
            let mut db = self.db.lock();
            delete_recordings(&mut db)?;
            db.flush("synchronous deletion")?;
        }
        let mut garbage: Vec<_> = {
            let l = self.db.lock();
            let d = l.sample_file_dirs_by_id().get(&self.dir_id).unwrap();
            d.garbage_needs_unlink.iter().copied().collect()
        };
        if !garbage.is_empty() {
            // Try to delete files; retain ones in `garbage` that don't exist.
            let mut errors = 0;
            for &id in &garbage {
                if let Err(err) = self.dir.unlink_file(id) {
                    if err != nix::Error::ENOENT {
                        warn!(%err, "dir: unable to unlink {}", id);
                        errors += 1;
                    }
                }
            }
            if errors > 0 {
                bail!(
                    Unknown,
                    msg("unable to unlink {errors} files (see earlier warning messages for details)"),
                );
            }
            self.dir.sync()?;
            self.db.lock().delete_garbage(self.dir_id, &mut garbage)?;
            self.db.lock().flush("synchronous garbage collection")?;
        }
        Ok(())
    }
}
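
// The deletion order in `do_rotation` (and `collect_garbage` below) matters:
// rows are first marked as garbage in SQLite, then files are unlinked, the
// directory is fsynced, and only then are the garbage rows removed. A crash at
// any point leaves either a database row or a `garbage_needs_unlink` entry
// behind, so startup can finish the job; no sample file is ever orphaned
// silently. (Summary of the code above, not new behavior.)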

impl<C: Clocks + Clone, D: DirWriter> Syncer<C, D> {
    /// Processes a single command or timeout.
    ///
    /// Returns true iff the loop should continue.
    fn iter(&mut self, cmds: &mpsc::Receiver<SyncerCommand<D::File>>) -> bool {
        // Wait for a command, the next flush timeout (if specified), or channel disconnect.
        let next_flush = self.planned_flushes.peek().map(|f| f.when);
        let cmd = match next_flush {
            None => match cmds.recv() {
                Err(_) => return false, // all cmd senders are gone.
                Ok(cmd) => cmd,
            },
            Some(t) => {
                let now = self.db.clocks().monotonic();

                // Calculate the timeout to use, mapping negative durations to 0.
                let timeout = (t - now)
                    .to_std()
                    .unwrap_or_else(|_| StdDuration::new(0, 0));
                match self.db.clocks().recv_timeout(cmds, timeout) {
                    Err(mpsc::RecvTimeoutError::Disconnected) => return false, // cmd senders gone.
                    Err(mpsc::RecvTimeoutError::Timeout) => {
                        self.flush();
                        return true;
                    }
                    Ok(cmd) => cmd,
                }
            }
        };

        // Have a command; handle it.
        match cmd {
            SyncerCommand::AsyncSaveRecording(id, wall_dur, f) => {
                if self.save(id, wall_dur, f).is_err() {
                    return false;
                }
            }
            SyncerCommand::DatabaseFlushed => {
                if self.collect_garbage().is_err() {
                    return false;
                }
            }
            SyncerCommand::Flush(flush) => {
                // The caller is waiting for the supplied sender to be dropped. If there's no
                // planned flush, do so immediately; otherwise wait for that flush, then drop it.
                if let Some(mut f) = self.planned_flushes.peek_mut() {
                    f.senders.push(flush);
                }
            }
        };

        true
    }

    /// Collects garbage (without forcing a sync). Called from worker thread.
    fn collect_garbage(&mut self) -> Result<(), ShutdownError> {
        trace!("Collecting garbage");
        let mut garbage: Vec<_> = {
            let l = self.db.lock();
            let d = l.sample_file_dirs_by_id().get(&self.dir_id).unwrap();
            d.garbage_needs_unlink.iter().copied().collect()
        };
        if garbage.is_empty() {
            return Ok(());
        }
        let c = &self.db.clocks();
        for &id in &garbage {
            clock::retry(c, &self.shutdown_rx, &mut || {
                if let Err(e) = self.dir.unlink_file(id) {
                    if e == nix::Error::ENOENT {
                        warn!("dir: recording {} already deleted!", id);
                        return Ok(());
                    }
                    return Err(e);
                }
                Ok(())
            })?;
        }
        clock::retry(c, &self.shutdown_rx, &mut || self.dir.sync())?;
        clock::retry(c, &self.shutdown_rx, &mut || {
            self.db.lock().delete_garbage(self.dir_id, &mut garbage)
        })?;
        Ok(())
    }

    /// Saves the given recording and prompts rotation. Called from worker thread.
    /// Note that this doesn't flush immediately; SQLite transactions are batched to lower SSD
    /// wear. On the next flush, the old recordings will actually be marked as garbage in the
    /// database, and shortly afterward actually deleted from disk.
    fn save(
        &mut self,
        id: CompositeId,
        wall_duration: recording::Duration,
        f: D::File,
    ) -> Result<(), ShutdownError> {
        trace!("Processing save for {}", id);
        let stream_id = id.stream();

        // Free up a like number of bytes.
        clock::retry(&self.db.clocks(), &self.shutdown_rx, &mut || f.sync_all())?;
        clock::retry(&self.db.clocks(), &self.shutdown_rx, &mut || {
            self.dir.sync()
        })?;
        let mut db = self.db.lock();
        db.mark_synced(id).unwrap();
        delete_recordings(&mut db, stream_id, 0).unwrap();
        let s = db.streams_by_id().get(&stream_id).unwrap();
        let c = db.cameras_by_id().get(&s.camera_id).unwrap();

        // Schedule a flush.
        let how_soon =
            Duration::seconds(i64::from(s.config.flush_if_sec)) - wall_duration.to_tm_duration();
        let now = self.db.clocks().monotonic();
        let when = now + how_soon;
        let reason = format!(
            "{} sec after start of {} {}-{} recording {}",
            s.config.flush_if_sec,
            wall_duration,
            c.short_name,
            s.type_.as_str(),
            id
        );
        trace!("scheduling flush in {} because {}", how_soon, &reason);
        self.planned_flushes.push(PlannedFlush {
            when,
            reason,
            recording: id,
            senders: Vec::new(),
        });
        Ok(())
    }
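
    // Worked example of the scheduling above (illustrative numbers only): with
    // `flush_if_sec = 120` and a just-saved recording of 90 seconds wall
    // duration, `how_soon = 120s - 90s = 30s`, so the flush lands 120 seconds
    // after the recording *started*. A longer recording (or a negative
    // difference) schedules the flush immediately on the next `iter` timeout.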

    /// Flushes the database if necessary to honor `flush_if_sec` for some recording.
    /// Called from worker thread when one of the `planned_flushes` arrives.
    fn flush(&mut self) {
        trace!("Flushing");
        let mut l = self.db.lock();

        // Look through the planned flushes and see if any are still relevant. It's possible
        // they're not because something else (e.g., a syncer for a different sample file dir)
        // has flushed the database in the meantime.
        use std::collections::binary_heap::PeekMut;
        while let Some(f) = self.planned_flushes.peek_mut() {
            let s = match l.streams_by_id().get(&f.recording.stream()) {
                Some(s) => s,
                None => {
                    // Removing streams while running hasn't been implemented yet, so this should
                    // be impossible.
                    warn!(
                        "bug: no stream for {} which was scheduled to be flushed",
                        f.recording
                    );
                    PeekMut::pop(f);
                    continue;
                }
            };

            if s.cum_recordings <= f.recording.recording() {
                // not yet committed.
                break;
            }

            trace!("planned flush ({}) no longer needed", &f.reason);
            PeekMut::pop(f);
        }

        // If there's anything left to do now, try to flush.
        let f = match self.planned_flushes.peek() {
            None => return,
            Some(f) => f,
        };
        let now = self.db.clocks().monotonic();
        if f.when > now {
            return;
        }
        if let Err(e) = l.flush(&f.reason) {
            let d = Duration::minutes(1);
            warn!(
                "flush failure on save for reason {}; will retry after {}: {:?}",
                f.reason, d, e
            );
            self.planned_flushes
                .peek_mut()
                .expect("planned_flushes is non-empty")
                .when = self.db.clocks().monotonic() + d;
            return;
        }

        // A successful flush should take care of everything planned.
        self.planned_flushes.clear();
    }
}

/// Struct for writing a single run (of potentially several recordings) to disk and committing its
/// metadata to the database. `Writer` hands off each recording's state to the syncer when done. It
/// saves the recording to the database (if I/O errors do not prevent this), retries forever,
/// or panics (if further writing on this stream is impossible).
pub struct Writer<'a, C: Clocks + Clone, D: DirWriter> {
    dir: &'a D,
    db: &'a db::Database<C>,
    channel: &'a SyncerChannel<D::File>,
    stream_id: i32,
    state: WriterState<D::File>,
}

// clippy points out that the `Open` variant is significantly larger and
// suggests boxing it. There's no benefit to this given that we don't have a lot
// of `WriterState`s active at once, and they should cycle between `Open` and
// `Closed`.
#[allow(clippy::large_enum_variant)]
enum WriterState<F: FileWriter> {
    Unopened,
    Open(InnerWriter<F>),
    Closed(PreviousWriter),
}

/// State for writing a single recording, used within [Writer].
///
/// Note that the recording created by every `InnerWriter` must be written to the [SyncerChannel]
/// with at least one sample. The sample may have zero duration.
struct InnerWriter<F: FileWriter> {
    f: F,
    r: Arc<Mutex<db::RecordingToInsert>>,
    e: recording::SampleIndexEncoder,
    id: CompositeId,
    video_sample_entry_id: i32,

    hasher: blake3::Hasher,

    /// The start time of this recording, based solely on examining the local clock after frames in
    /// this recording were received. Frames can suffer from various kinds of delay (initial
    /// buffering, encoding, and network transmission), so this time is set to far in the future on
    /// construction, given a real value on the first packet, and decreased as less-delayed packets
    /// are discovered. See design/time.md for details.
    local_start: recording::Time,

    /// A sample which has been written to disk but not added to `index`. Index writes are one
    /// sample behind disk writes because the duration of a sample is the difference between its
    /// pts and the next sample's pts. A sample is flushed when the next sample is written, when
    /// the writer is closed cleanly (the caller supplies the next pts), or when the writer is
    /// closed uncleanly (with a zero duration, which the `.mp4` format allows only at the end).
    ///
    /// `unindexed_sample` should always be `Some`, except when a `write` call has aborted on
    /// shutdown. In that case, the close will be unable to write the full segment.
    unindexed_sample: Option<UnindexedSample>,
}

/// A sample which has been written to disk but not included in the index yet.
/// The index includes the sample's duration, which is calculated from the
/// _following_ sample's pts, so the most recent sample is always unindexed.
#[derive(Copy, Clone)]
struct UnindexedSample {
    local_time: recording::Time,
    pts_90k: i64, // relative to the start of the run, not a single recording.
    len: i32,
    is_key: bool,
}

/// State associated with a run's previous recording; used within [Writer].
#[derive(Copy, Clone)]
struct PreviousWriter {
    end: recording::Time,
    run_offset: i32,
}
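
// The one-sample lag described on `unindexed_sample` in concrete terms
// (illustrative pts values only): if samples arrive with pts 0, 3000, and
// 6000 (90 kHz units), the sample at pts 0 is indexed only when pts 3000
// arrives, with duration 3000 - 0 = 3000 (1/30 sec); the final sample's
// duration comes from the `next_pts` passed to `close`, or 0 on an unclean
// close.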

impl<'a, C: Clocks + Clone, D: DirWriter> Writer<'a, C, D> {
    /// `db` must not be locked.
    pub fn new(
        dir: &'a D,
        db: &'a db::Database<C>,
        channel: &'a SyncerChannel<D::File>,
        stream_id: i32,
    ) -> Self {
        Writer {
            dir,
            db,
            channel,
            stream_id,
            state: WriterState::Unopened,
        }
    }

    /// Opens a new recording if not already open.
    ///
    /// On successful return, `self.state` will be `WriterState::Open(w)` with `w` violating the
    /// invariant that `unindexed_sample` is `Some`. The caller (`write`) is responsible for
    /// correcting this.
    fn open(
        &mut self,
        shutdown_rx: &mut base::shutdown::Receiver,
        video_sample_entry_id: i32,
    ) -> Result<(), Error> {
        let prev = match self.state {
            WriterState::Unopened => None,
            WriterState::Open(ref o) => {
                if o.video_sample_entry_id != video_sample_entry_id {
                    bail!(Internal, msg("inconsistent video_sample_entry_id"));
                }
                return Ok(());
            }
            WriterState::Closed(prev) => Some(prev),
        };
        let (id, r) = self.db.lock().add_recording(
            self.stream_id,
            db::RecordingToInsert {
                run_offset: prev.map(|p| p.run_offset + 1).unwrap_or(0),
                start: prev
                    .map(|p| p.end)
                    .unwrap_or(recording::Time(i64::max_value())),
                video_sample_entry_id,
                flags: db::RecordingFlags::Growing as i32,
                ..Default::default()
            },
        )?;
        let f = clock::retry(&self.db.clocks(), shutdown_rx, &mut || {
            self.dir.create_file(id)
        })
        .map_err(|e| err!(Cancelled, source(e)))?;

        self.state = WriterState::Open(InnerWriter {
            f,
            r,
            e: recording::SampleIndexEncoder::default(),
            id,
            hasher: blake3::Hasher::new(),
            local_start: recording::Time(i64::max_value()),
            unindexed_sample: None,
            video_sample_entry_id,
        });
        Ok(())
    }

    pub fn previously_opened(&self) -> Result<bool, Error> {
        Ok(match self.state {
            WriterState::Unopened => false,
            WriterState::Closed(_) => true,
            WriterState::Open(_) => bail!(Internal, msg("open!")),
        })
    }

    /// Writes a new frame to this recording.
    /// `local_time` should be the local clock's time as of when this packet was received.
    pub fn write(
        &mut self,
        shutdown_rx: &mut base::shutdown::Receiver,
        pkt: &[u8],
        local_time: recording::Time,
        pts_90k: i64,
        is_key: bool,
        video_sample_entry_id: i32,
    ) -> Result<(), Error> {
        self.open(shutdown_rx, video_sample_entry_id)?;
        let w = match self.state {
            WriterState::Open(ref mut w) => w,
            _ => unreachable!(),
        };

        // Note w's invariant that `unindexed_sample` is `Some` may currently be violated.
        // We must restore it on all success or error paths.

        if let Some(unindexed) = w.unindexed_sample.take() {
            let duration = pts_90k - unindexed.pts_90k;
            if duration <= 0 {
                w.unindexed_sample = Some(unindexed); // restore invariant.
                bail!(
                    InvalidArgument,
                    msg(
                        "pts not monotonically increasing; got {} then {}",
                        unindexed.pts_90k,
                        pts_90k,
                    ),
                );
            }
            let duration = match i32::try_from(duration) {
                Ok(d) => d,
                Err(_) => {
                    w.unindexed_sample = Some(unindexed); // restore invariant.
                    bail!(
                        InvalidArgument,
                        msg(
                            "excessive pts jump from {} to {}",
                            unindexed.pts_90k,
                            pts_90k,
                        ),
                    )
                }
            };
            if let Err(e) = w.add_sample(
                duration,
                unindexed.len,
                unindexed.is_key,
                unindexed.local_time,
                self.db,
                self.stream_id,
            ) {
                w.unindexed_sample = Some(unindexed); // restore invariant.
                return Err(e);
            }
        }
        let mut remaining = pkt;
        while !remaining.is_empty() {
            let written =
                match clock::retry(&self.db.clocks(), shutdown_rx, &mut || w.f.write(remaining)) {
                    Ok(w) => w,
                    Err(e) => {
                        // close() will do nothing because unindexed_sample will be None.
                        tracing::warn!(
                            "abandoning incompletely written recording {} on shutdown",
                            w.id
                        );
                        bail!(Cancelled, source(e));
                    }
                };
            remaining = &remaining[written..];
        }
        w.unindexed_sample = Some(UnindexedSample {
            local_time,
            pts_90k,
            len: i32::try_from(pkt.len()).unwrap(),
            is_key,
        });
        w.hasher.update(pkt);
        Ok(())
    }

    /// Cleanly closes a single recording within this writer, using a supplied
    /// pts of the next sample for the last sample's duration (if known).
    ///
    /// The `Writer` may be used again, causing another recording to be created
    /// within the same run.
    ///
    /// If the `Writer` is dropped without `close`, the `Drop` trait impl will
    /// close, swallowing errors and using a zero duration for the last sample.
    pub fn close(&mut self, next_pts: Option<i64>, reason: Option<String>) -> Result<(), Error> {
        self.state = match mem::replace(&mut self.state, WriterState::Unopened) {
            WriterState::Open(w) => {
                let prev = w.close(self.channel, next_pts, self.db, self.stream_id, reason)?;
                WriterState::Closed(prev)
            }
            s => s,
        };
        Ok(())
    }
}

fn clamp(v: i64, min: i64, max: i64) -> i64 {
    std::cmp::min(std::cmp::max(v, min), max)
}
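
// (Essentially `Ord::clamp`, stabilized in Rust 1.50, except the std method
// asserts `min <= max` while this silently returns `max` in that case.)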

impl<F: FileWriter> InnerWriter<F> {
    fn add_sample<C: Clocks + Clone>(
        &mut self,
        duration_90k: i32,
        bytes: i32,
        is_key: bool,
        pkt_local_time: recording::Time,
        db: &db::Database<C>,
        stream_id: i32,
    ) -> Result<(), Error> {
        let mut l = self.r.lock().unwrap();

        // design/time.md explains these time manipulations in detail.
        let prev_media_duration_90k = l.media_duration_90k;
        let media_duration_90k = l.media_duration_90k + duration_90k;
        let local_start = cmp::min(
            self.local_start,
            pkt_local_time - recording::Duration(i64::from(media_duration_90k)),
        );
        let limit = i64::from(media_duration_90k / 2000); // 1/2000th, aka 500 ppm.
        let start = if l.run_offset == 0 {
            // Start time isn't anchored to previous recording's end; adjust.
            local_start
        } else {
            l.start
        };
        let wall_duration_90k = media_duration_90k
            + i32::try_from(clamp(local_start.0 - start.0, -limit, limit)).unwrap();
        if wall_duration_90k > i32::try_from(MAX_RECORDING_WALL_DURATION).unwrap() {
            bail!(
                OutOfRange,
                msg("Duration {wall_duration_90k} exceeds maximum {MAX_RECORDING_WALL_DURATION}"),
            );
        }
        l.wall_duration_90k = wall_duration_90k;
        l.start = start;
        self.local_start = local_start;
        self.e.add_sample(duration_90k, bytes, is_key, &mut l);
        drop(l);
        db.lock()
            .send_live_segment(
                stream_id,
                db::LiveFrame {
                    recording: self.id.recording(),
                    is_key,
                    media_off_90k: prev_media_duration_90k..media_duration_90k,
                },
            )
            .unwrap();
        Ok(())
    }
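
    // Worked example of the 500 ppm clock adjustment above (illustrative
    // numbers only): after 90,000,000 media units (1000 seconds), `limit` is
    // 90_000_000 / 2000 = 45_000 units (0.5 seconds). If the local clock
    // suggests the recording started 2 seconds earlier than the anchored
    // `start`, the correction is clamped to -45_000, so the wall duration
    // differs from the media duration by at most 500 ppm.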

    fn close<C: Clocks + Clone>(
        mut self,
        channel: &SyncerChannel<F>,
        next_pts: Option<i64>,
        db: &db::Database<C>,
        stream_id: i32,
        reason: Option<String>,
    ) -> Result<PreviousWriter, Error> {
        let unindexed = self.unindexed_sample.take().ok_or_else(|| {
            err!(
                FailedPrecondition,
                msg(
                    "unable to add recording {} to database due to aborted write",
                    self.id,
                ),
            )
        })?;
        let (last_sample_duration, flags) = match next_pts {
            None => (0, db::RecordingFlags::TrailingZero as i32),
            Some(p) => (
                i32::try_from(p - unindexed.pts_90k).map_err(|_| {
                    err!(
                        OutOfRange,
                        msg(
                            "pts {} following {} creates invalid duration",
                            p,
                            unindexed.pts_90k
                        )
                    )
                })?,
                0,
            ),
        };
        let blake3 = self.hasher.finalize();
        let (run_offset, end);
        self.add_sample(
            last_sample_duration,
            unindexed.len,
            unindexed.is_key,
            unindexed.local_time,
            db,
            stream_id,
        )?;

        // This always ends a live segment.
        let wall_duration;
        {
            let mut l = self.r.lock().unwrap();
            l.flags = flags;
            l.local_time_delta = self.local_start - l.start;
            l.sample_file_blake3 = Some(*blake3.as_bytes());
            l.end_reason = reason;
            wall_duration = recording::Duration(i64::from(l.wall_duration_90k));
            run_offset = l.run_offset;
            end = l.start + wall_duration;
        }
        drop(self.r);
        channel.async_save_recording(self.id, wall_duration, self.f);
        Ok(PreviousWriter { end, run_offset })
    }
}

impl<'a, C: Clocks + Clone, D: DirWriter> Drop for Writer<'a, C, D> {
    fn drop(&mut self) {
        if ::std::thread::panicking() {
            // This will probably panic again. Don't do it.
            return;
        }
        if let WriterState::Open(w) = mem::replace(&mut self.state, WriterState::Unopened) {
            // Swallow any error. The caller should only drop the Writer without calling close()
            // if there's already been an error. The caller should report that. No point in
            // complaining again.
            let _ = w.close(
                self.channel,
                None,
                self.db,
                self.stream_id,
                Some("drop".to_owned()),
            );
        }
    }
}

#[cfg(test)]
mod tests {
    use super::Writer;
    use crate::db::{self, CompositeId, VideoSampleEntryToInsert};
    use crate::recording;
    use crate::testutil;
    use base::clock::{Clocks, SimulatedClocks};
    use std::collections::VecDeque;
    use std::io;
    use std::sync::mpsc;
    use std::sync::Arc;
    use std::sync::Mutex;
    use tracing::{trace, warn};

    #[derive(Clone)]
    struct MockDir(Arc<Mutex<VecDeque<MockDirAction>>>);

    enum MockDirAction {
        Create(
            CompositeId,
            Box<dyn Fn(CompositeId) -> Result<MockFile, nix::Error> + Send>,
        ),
        Sync(Box<dyn Fn() -> Result<(), nix::Error> + Send>),
        Unlink(
            CompositeId,
            Box<dyn Fn(CompositeId) -> Result<(), nix::Error> + Send>,
        ),
    }

    impl MockDir {
        fn new() -> Self {
            MockDir(Arc::new(Mutex::new(VecDeque::new())))
        }
        fn expect(&self, action: MockDirAction) {
            self.0.lock().unwrap().push_back(action);
        }
        fn ensure_done(&self) {
            assert_eq!(self.0.lock().unwrap().len(), 0);
        }
    }

    impl super::DirWriter for MockDir {
        type File = MockFile;

        fn create_file(&self, id: CompositeId) -> Result<Self::File, nix::Error> {
            match self
                .0
                .lock()
                .unwrap()
                .pop_front()
                .expect("got create_file with no expectation")
            {
                MockDirAction::Create(expected_id, ref f) => {
                    assert_eq!(id, expected_id);
                    f(id)
                }
                _ => panic!("got create_file({id}), expected something else"),
            }
        }
        fn sync(&self) -> Result<(), nix::Error> {
            match self
                .0
                .lock()
                .unwrap()
                .pop_front()
                .expect("got sync with no expectation")
            {
                MockDirAction::Sync(f) => f(),
                _ => panic!("got sync, expected something else"),
            }
        }
        fn unlink_file(&self, id: CompositeId) -> Result<(), nix::Error> {
            match self
                .0
                .lock()
                .unwrap()
                .pop_front()
                .expect("got unlink_file with no expectation")
            {
                MockDirAction::Unlink(expected_id, f) => {
                    assert_eq!(id, expected_id);
                    f(id)
                }
                _ => panic!("got unlink({id}), expected something else"),
            }
        }
    }
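
    // These mocks follow a simple expectation-queue pattern: a test pushes the
    // actions it expects (in order) via `expect`, the code under test consumes
    // them, and `ensure_done`/`Drop` assert the queue is empty. An illustrative
    // setup (hypothetical values):
    //
    //     let dir = MockDir::new();
    //     dir.expect(MockDirAction::Create(
    //         CompositeId::new(1, 0),
    //         Box::new(|_id| Err(nix::Error::EIO)), // e.g., first attempt fails.
    //     ));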
|
|
|
|
|
|
|
|
impl Drop for MockDir {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
if !::std::thread::panicking() {
|
2022-09-29 01:19:35 -04:00
|
|
|
assert_eq!(self.0.lock().unwrap().len(), 0);
|
2018-03-04 15:24:24 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Clone)]
|
|
|
|
struct MockFile(Arc<Mutex<VecDeque<MockFileAction>>>);
|
|
|
|
|
|
|
|
enum MockFileAction {
|
2019-06-14 11:47:11 -04:00
|
|
|
SyncAll(Box<dyn Fn() -> Result<(), io::Error> + Send>),
|
|
|
|
Write(Box<dyn Fn(&[u8]) -> Result<usize, io::Error> + Send>),
|
2018-03-04 15:24:24 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
impl MockFile {
|
2021-02-17 01:15:54 -05:00
|
|
|
fn new() -> Self {
|
|
|
|
MockFile(Arc::new(Mutex::new(VecDeque::new())))
|
|
|
|
}
|
|
|
|
fn expect(&self, action: MockFileAction) {
|
2022-09-29 01:19:35 -04:00
|
|
|
self.0.lock().unwrap().push_back(action);
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
|
|
|
fn ensure_done(&self) {
|
2022-09-29 01:19:35 -04:00
|
|
|
assert_eq!(self.0.lock().unwrap().len(), 0);
|
2021-02-17 01:15:54 -05:00
|
|
|
}
|
2018-03-04 15:24:24 -05:00
|
|
|
}

    impl super::FileWriter for MockFile {
        fn sync_all(&self) -> Result<(), io::Error> {
            match self
                .0
                .lock()
                .unwrap()
                .pop_front()
                .expect("got sync_all with no expectation")
            {
                MockFileAction::SyncAll(f) => f(),
                _ => panic!("got sync_all, expected something else"),
            }
        }
        fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
            match self
                .0
                .lock()
                .unwrap()
                .pop_front()
                .expect("got write with no expectation")
            {
                MockFileAction::Write(f) => f(buf),
                _ => panic!("got write({buf:?}), expected something else"),
            }
        }
    }
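
    /// Everything a syncer test needs: the simulated-clock database, the mock
    /// dir, the channel for submitting work, and the syncer plus its command
    /// receiver so tests can step it by hand. The `_`-prefixed fields are held
    /// only to keep the tempdir and the shutdown channel alive.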
    struct Harness {
        db: Arc<db::Database<SimulatedClocks>>,
        dir_id: i32,
        _tmpdir: ::tempfile::TempDir,
        dir: MockDir,
        channel: super::SyncerChannel<MockFile>,
        _shutdown_tx: base::shutdown::Sender,
        shutdown_rx: base::shutdown::Receiver,
        syncer: super::Syncer<SimulatedClocks, MockDir>,
        syncer_rx: mpsc::Receiver<super::SyncerCommand<MockFile>>,
    }
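
    // Builds a harness whose syncer does not run on its own thread; tests drive
    // it one command at a time via `syncer.iter(&syncer_rx)` for deterministic
    // interleaving.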
    fn new_harness(flush_if_sec: u32) -> Harness {
        let clocks = SimulatedClocks::new(::time::Timespec::new(0, 0));
        let tdb = testutil::TestDb::new_with_flush_if_sec(clocks, flush_if_sec);
        let dir_id = *tdb
            .db
            .lock()
            .sample_file_dirs_by_id()
            .keys()
            .next()
            .unwrap();

        // This starts a real fs-backed syncer. Get rid of it.
        tdb.db.lock().clear_on_flush();
        drop(tdb.syncer_channel);
        tdb.syncer_join.join().unwrap();

        // Start a mock syncer.
        let dir = MockDir::new();
        let (shutdown_tx, shutdown_rx) = base::shutdown::channel();
        let syncer = super::Syncer {
            dir_id: *tdb
                .db
                .lock()
                .sample_file_dirs_by_id()
                .keys()
                .next()
                .unwrap(),
            dir: dir.clone(),
            db: tdb.db.clone(),
            planned_flushes: std::collections::BinaryHeap::new(),
            shutdown_rx: shutdown_rx.clone(),
        };
        let (syncer_tx, syncer_rx) = mpsc::channel();
        tdb.db.lock().on_flush(Box::new({
            let snd = syncer_tx.clone();
            move || {
                if let Err(err) = snd.send(super::SyncerCommand::DatabaseFlushed) {
                    warn!(%err, "unable to notify syncer for dir {} of flush", dir_id);
                }
            }
        }));
        Harness {
            dir_id,
            dir,
            db: tdb.db,
            _tmpdir: tdb.tmpdir,
            channel: super::SyncerChannel(syncer_tx),
            _shutdown_tx: shutdown_tx,
            shutdown_rx,
            syncer,
            syncer_rx,
        }
    }

    fn eio() -> io::Error {
        io::Error::new(io::ErrorKind::Other, "got EIO")
    }
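
    /// Tests that a `write` whose pts jumps more than `i32::MAX` time units
    /// past the previous sample fails with an "excessive pts jump" error
    /// instead of being recorded.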
    #[test]
    fn excessive_pts_jump() {
        testutil::init();
        let mut h = new_harness(0);
        let video_sample_entry_id =
            h.db.lock()
                .insert_video_sample_entry(VideoSampleEntryToInsert {
                    width: 1920,
                    height: 1080,
                    pasp_h_spacing: 1,
                    pasp_v_spacing: 1,
                    data: [0u8; 100].to_vec(),
                    rfc6381_codec: "avc1.000000".to_owned(),
                })
                .unwrap();
        let mut w = Writer::new(&h.dir, &h.db, &h.channel, testutil::TEST_STREAM_ID);
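        // The first create_file attempt fails with EIO; the writer retries the
        // same composite id and succeeds on the second attempt.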
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 0),
            Box::new(|_id| Err(nix::Error::EIO)),
        ));
        let f = MockFile::new();
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 0),
            Box::new({
                let f = f.clone();
                move |_id| Ok(f.clone())
            }),
        ));
        f.expect(MockFileAction::Write(Box::new(|_| Ok(1))));
        f.expect(MockFileAction::SyncAll(Box::new(|| Ok(()))));
        w.write(
            &mut h.shutdown_rx,
            b"1",
            recording::Time(1),
            0,
            true,
            video_sample_entry_id,
        )
        .unwrap();

        let e = w
            .write(
                &mut h.shutdown_rx,
                b"2",
                recording::Time(2),
                i32::max_value() as i64 + 1,
                true,
                video_sample_entry_id,
            )
            .unwrap_err();
        assert!(e.to_string().contains("excessive pts jump"));
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));
        drop(w);
        assert!(h.syncer.iter(&h.syncer_rx)); // AsyncSave
        assert_eq!(h.syncer.planned_flushes.len(), 1);
        assert!(h.syncer.iter(&h.syncer_rx)); // planned flush
        assert_eq!(h.syncer.planned_flushes.len(), 0);
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
        f.ensure_done();
        h.dir.ensure_done();
    }

    /// Tests a database flush arriving while the syncer is still processing a
    /// previous flush event.
    #[test]
    fn double_flush() {
        testutil::init();
        let mut h = new_harness(0);
        h.db.lock()
            .update_retention(&[db::RetentionChange {
                stream_id: testutil::TEST_STREAM_ID,
                new_record: true,
                new_limit: 0,
            }])
            .unwrap();
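        // With a zero-byte retention limit, each flush deletes any existing
        // recordings to make room for new ones.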

        // Setup: add a 3-byte recording.
        let video_sample_entry_id =
            h.db.lock()
                .insert_video_sample_entry(VideoSampleEntryToInsert {
                    width: 1920,
                    height: 1080,
                    pasp_h_spacing: 1,
                    pasp_v_spacing: 1,
                    data: [0u8; 100].to_vec(),
                    rfc6381_codec: "avc1.000000".to_owned(),
                })
                .unwrap();
        let mut w = Writer::new(&h.dir, &h.db, &h.channel, testutil::TEST_STREAM_ID);
        let f = MockFile::new();
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 0),
            Box::new({
                let f = f.clone();
                move |_id| Ok(f.clone())
            }),
        ));
        f.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"123");
            Ok(3)
        })));
        f.expect(MockFileAction::SyncAll(Box::new(|| Ok(()))));
        w.write(
            &mut h.shutdown_rx,
            b"123",
            recording::Time(2),
            0,
            true,
            video_sample_entry_id,
        )
        .unwrap();
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));
        w.close(Some(1), None).unwrap();
        assert!(h.syncer.iter(&h.syncer_rx)); // AsyncSave
        assert_eq!(h.syncer.planned_flushes.len(), 1);
        assert!(h.syncer.iter(&h.syncer_rx)); // planned flush
        assert_eq!(h.syncer.planned_flushes.len(), 0);
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
        f.ensure_done();
        h.dir.ensure_done();

        // Then a 1-byte recording.
        let f = MockFile::new();
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 1),
            Box::new({
                let f = f.clone();
                move |_id| Ok(f.clone())
            }),
        ));
        f.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"4");
            Ok(1)
        })));
        f.expect(MockFileAction::SyncAll(Box::new(|| Ok(()))));
        w.write(
            &mut h.shutdown_rx,
            b"4",
            recording::Time(3),
            1,
            true,
            video_sample_entry_id,
        )
        .unwrap();
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));
        h.dir.expect(MockDirAction::Unlink(
            CompositeId::new(1, 0),
            Box::new({
                let db = h.db.clone();
                move |_| {
                    // The drop(w) below should cause the old recording to be deleted (moved to
                    // garbage). When the database is flushed, the syncer forces garbage collection
                    // including this unlink.

                    // Do another database flush here, as if from another syncer.
                    db.lock().flush("another syncer running").unwrap();
                    Ok(())
                }
            }),
        ));
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));
        drop(w);

        trace!("expecting AsyncSave");
        assert!(h.syncer.iter(&h.syncer_rx)); // AsyncSave
        assert_eq!(h.syncer.planned_flushes.len(), 1);
        trace!("expecting planned flush");
        assert!(h.syncer.iter(&h.syncer_rx)); // planned flush
        assert_eq!(h.syncer.planned_flushes.len(), 0);
        trace!("expecting DatabaseFlushed");
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
        trace!("expecting DatabaseFlushed again");
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed again
        f.ensure_done();
        h.dir.ensure_done();

        // Garbage should be marked collected on the next database flush.
        {
            let mut l = h.db.lock();
            let dir = l.sample_file_dirs_by_id().get(&h.dir_id).unwrap();
            assert!(dir.garbage_needs_unlink.is_empty());
            assert!(!dir.garbage_unlinked.is_empty());
            l.flush("forced gc").unwrap();
            let dir = l.sample_file_dirs_by_id().get(&h.dir_id).unwrap();
            assert!(dir.garbage_needs_unlink.is_empty());
            assert!(dir.garbage_unlinked.is_empty());
        }

        assert_eq!(h.syncer.planned_flushes.len(), 0);
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed

        // The syncer should shut down cleanly.
        drop(h.channel);
        h.db.lock().clear_on_flush();
        assert_eq!(
            h.syncer_rx.try_recv().err(),
            Some(std::sync::mpsc::TryRecvError::Disconnected)
        );
        assert!(h.syncer.planned_flushes.is_empty());
    }
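
    /// Tests that transient errors on the write path are retried: a failed
    /// `create_file`, failed and short `write`s, and failed `sync_all`/dir
    /// `sync` calls are all reattempted until they succeed.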
    #[test]
    fn write_path_retries() {
        testutil::init();
        let mut h = new_harness(0);
        let video_sample_entry_id =
            h.db.lock()
                .insert_video_sample_entry(VideoSampleEntryToInsert {
                    width: 1920,
                    height: 1080,
                    pasp_h_spacing: 1,
                    pasp_v_spacing: 1,
                    data: [0u8; 100].to_vec(),
                    rfc6381_codec: "avc1.000000".to_owned(),
                })
                .unwrap();
        let mut w = Writer::new(&h.dir, &h.db, &h.channel, testutil::TEST_STREAM_ID);
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 0),
            Box::new(|_id| Err(nix::Error::EIO)),
        ));
        let f = MockFile::new();
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 0),
            Box::new({
                let f = f.clone();
                move |_id| Ok(f.clone())
            }),
        ));
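        // A short write (Ok(1) of the 4 bytes) should cause the writer to
        // retry with the remaining 3 bytes.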
        f.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"1234");
            Err(eio())
        })));
        f.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"1234");
            Ok(1)
        })));
        f.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"234");
            Err(eio())
        })));
        f.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"234");
            Ok(3)
        })));
        f.expect(MockFileAction::SyncAll(Box::new(|| Err(eio()))));
        f.expect(MockFileAction::SyncAll(Box::new(|| Ok(()))));
        w.write(
            &mut h.shutdown_rx,
            b"1234",
            recording::Time(1),
            0,
            true,
            video_sample_entry_id,
        )
        .unwrap();
        h.dir
            .expect(MockDirAction::Sync(Box::new(|| Err(nix::Error::EIO))));
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));
        drop(w);
        assert!(h.syncer.iter(&h.syncer_rx)); // AsyncSave
        assert_eq!(h.syncer.planned_flushes.len(), 1);
        assert!(h.syncer.iter(&h.syncer_rx)); // planned flush
        assert_eq!(h.syncer.planned_flushes.len(), 0);
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
        f.ensure_done();
        h.dir.ensure_done();

        {
            let l = h.db.lock();
            let s = l.streams_by_id().get(&testutil::TEST_STREAM_ID).unwrap();
            assert_eq!(s.bytes_to_add, 0);
            assert_eq!(s.sample_file_bytes, 4);
        }

        // The syncer should shut down cleanly.
        drop(h.channel);
        h.db.lock().clear_on_flush();
        assert_eq!(
            h.syncer_rx.try_recv().err(),
            Some(std::sync::mpsc::TryRecvError::Disconnected)
        );
        assert!(h.syncer.planned_flushes.is_empty());
    }
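
    /// Tests that garbage collection proceeds despite transient errors: a
    /// failed unlink and a failed dir sync are each retried until they succeed.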
    #[test]
    fn gc_path_retries() {
        testutil::init();
        let mut h = new_harness(0);
        h.db.lock()
            .update_retention(&[db::RetentionChange {
                stream_id: testutil::TEST_STREAM_ID,
                new_record: true,
                new_limit: 0,
            }])
            .unwrap();

        // Setup: add a 3-byte recording.
        let video_sample_entry_id =
            h.db.lock()
                .insert_video_sample_entry(VideoSampleEntryToInsert {
                    width: 1920,
                    height: 1080,
                    pasp_h_spacing: 1,
                    pasp_v_spacing: 1,
                    data: [0u8; 100].to_vec(),
                    rfc6381_codec: "avc1.000000".to_owned(),
                })
                .unwrap();
        let mut w = Writer::new(&h.dir, &h.db, &h.channel, testutil::TEST_STREAM_ID);
        let f = MockFile::new();
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 0),
            Box::new({
                let f = f.clone();
                move |_id| Ok(f.clone())
            }),
        ));
        f.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"123");
            Ok(3)
        })));
        f.expect(MockFileAction::SyncAll(Box::new(|| Ok(()))));
        w.write(
            &mut h.shutdown_rx,
            b"123",
            recording::Time(2),
            0,
            true,
            video_sample_entry_id,
        )
        .unwrap();
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));
        w.close(Some(1), None).unwrap();

        assert!(h.syncer.iter(&h.syncer_rx)); // AsyncSave
        assert_eq!(h.syncer.planned_flushes.len(), 1);
        assert!(h.syncer.iter(&h.syncer_rx)); // planned flush
        assert_eq!(h.syncer.planned_flushes.len(), 0);
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
        f.ensure_done();
        h.dir.ensure_done();

        // Then a 1-byte recording.
        let f = MockFile::new();
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 1),
            Box::new({
                let f = f.clone();
                move |_id| Ok(f.clone())
            }),
        ));
        f.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"4");
            Ok(1)
        })));
        f.expect(MockFileAction::SyncAll(Box::new(|| Ok(()))));
        w.write(
            &mut h.shutdown_rx,
            b"4",
            recording::Time(3),
            1,
            true,
            video_sample_entry_id,
        )
        .unwrap();
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));
        h.dir.expect(MockDirAction::Unlink(
            CompositeId::new(1, 0),
            Box::new({
                let db = h.db.clone();
                move |_| {
                    // The drop(w) below should cause the old recording to be deleted (moved to
                    // garbage). When the database is flushed, the syncer forces garbage collection
                    // including this unlink.

                    // This should have already applied the changes to sample file bytes, even
                    // though the garbage has yet to be collected.
                    let l = db.lock();
                    let s = l.streams_by_id().get(&testutil::TEST_STREAM_ID).unwrap();
                    assert_eq!(s.bytes_to_delete, 0);
                    assert_eq!(s.bytes_to_add, 0);
                    assert_eq!(s.sample_file_bytes, 1);
                    Err(nix::Error::EIO) // force a retry.
                }
            }),
        ));
        h.dir.expect(MockDirAction::Unlink(
            CompositeId::new(1, 0),
            Box::new(|_| Ok(())),
        ));
        h.dir
            .expect(MockDirAction::Sync(Box::new(|| Err(nix::Error::EIO))));
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));

        drop(w);

        assert!(h.syncer.iter(&h.syncer_rx)); // AsyncSave
        assert_eq!(h.syncer.planned_flushes.len(), 1);
        assert!(h.syncer.iter(&h.syncer_rx)); // planned flush
        assert_eq!(h.syncer.planned_flushes.len(), 0);
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
        f.ensure_done();
        h.dir.ensure_done();

        // Garbage should be marked collected on the next flush.
        {
            let mut l = h.db.lock();
            let dir = l.sample_file_dirs_by_id().get(&h.dir_id).unwrap();
            assert!(dir.garbage_needs_unlink.is_empty());
            assert!(!dir.garbage_unlinked.is_empty());
            l.flush("forced gc").unwrap();
            let dir = l.sample_file_dirs_by_id().get(&h.dir_id).unwrap();
            assert!(dir.garbage_needs_unlink.is_empty());
            assert!(dir.garbage_unlinked.is_empty());
        }

        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed

        // The syncer should shut down cleanly.
        drop(h.channel);
        h.db.lock().clear_on_flush();
        assert_eq!(
            h.syncer_rx.try_recv().err(),
            Some(std::sync::mpsc::TryRecvError::Disconnected)
        );
        assert!(h.syncer.planned_flushes.is_empty());
    }
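
    /// Tests that `flush_if_sec` schedules a flush relative to each recording
    /// and that a planned flush whose work was already done (here by an earlier
    /// forced flush) becomes a no-op rather than a redundant database flush.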
    #[test]
    fn planned_flush() {
        testutil::init();
        let mut h = new_harness(60); // flush_if_sec=60

        // There's a database constraint forbidding a recording starting at t=0, so advance.
        h.db.clocks().sleep(time::Duration::seconds(1));

        // Setup: add a 3-byte recording.
        let video_sample_entry_id =
            h.db.lock()
                .insert_video_sample_entry(VideoSampleEntryToInsert {
                    width: 1920,
                    height: 1080,
                    pasp_h_spacing: 1,
                    pasp_v_spacing: 1,
                    data: [0u8; 100].to_vec(),
                    rfc6381_codec: "avc1.000000".to_owned(),
                })
                .unwrap();
        let mut w = Writer::new(&h.dir, &h.db, &h.channel, testutil::TEST_STREAM_ID);
        let f1 = MockFile::new();
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 0),
            Box::new({
                let f = f1.clone();
                move |_id| Ok(f.clone())
            }),
        ));
        f1.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"123");
            Ok(3)
        })));
        f1.expect(MockFileAction::SyncAll(Box::new(|| Ok(()))));
        w.write(
            &mut h.shutdown_rx,
            b"123",
            recording::Time(recording::TIME_UNITS_PER_SEC),
            0,
            true,
            video_sample_entry_id,
        )
        .unwrap();
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));
        drop(w);

        assert!(h.syncer.iter(&h.syncer_rx)); // AsyncSave
        assert_eq!(h.syncer.planned_flushes.len(), 1);

        // Flush and let 30 seconds go by.
        h.db.lock().flush("forced").unwrap();
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
        assert_eq!(h.syncer.planned_flushes.len(), 1);
        h.db.clocks().sleep(time::Duration::seconds(30));

        // Then, a 1-byte recording.
        let mut w = Writer::new(&h.dir, &h.db, &h.channel, testutil::TEST_STREAM_ID);
        let f2 = MockFile::new();
        h.dir.expect(MockDirAction::Create(
            CompositeId::new(1, 1),
            Box::new({
                let f = f2.clone();
                move |_id| Ok(f.clone())
            }),
        ));
        f2.expect(MockFileAction::Write(Box::new(|buf| {
            assert_eq!(buf, b"4");
            Ok(1)
        })));
        f2.expect(MockFileAction::SyncAll(Box::new(|| Ok(()))));
        w.write(
            &mut h.shutdown_rx,
            b"4",
            recording::Time(31 * recording::TIME_UNITS_PER_SEC),
            1,
            true,
            video_sample_entry_id,
        )
        .unwrap();
        h.dir.expect(MockDirAction::Sync(Box::new(|| Ok(()))));

        drop(w);

        assert!(h.syncer.iter(&h.syncer_rx)); // AsyncSave
        assert_eq!(h.syncer.planned_flushes.len(), 2);
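
        // Each `iter` below waits out the next planned flush, advancing the
        // simulated clock to its due time. The first (due t=61, covering the
        // recording already flushed above) finds nothing dirty and is a no-op;
        // the second (due t=91) performs the real flush.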
        let db_flush_count_before = h.db.lock().flushes();
        assert_eq!(h.db.clocks().monotonic(), time::Timespec::new(31, 0));
        assert!(h.syncer.iter(&h.syncer_rx)); // planned flush (no-op)
        assert_eq!(h.db.clocks().monotonic(), time::Timespec::new(61, 0));
        assert_eq!(h.db.lock().flushes(), db_flush_count_before);
        assert_eq!(h.syncer.planned_flushes.len(), 1);
        assert!(h.syncer.iter(&h.syncer_rx)); // planned flush
        assert_eq!(h.db.clocks().monotonic(), time::Timespec::new(91, 0));
        assert_eq!(h.db.lock().flushes(), db_flush_count_before + 1);
        assert_eq!(h.syncer.planned_flushes.len(), 0);
        assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed

        f1.ensure_done();
        f2.ensure_done();
        h.dir.ensure_done();

        // The syncer should shut down cleanly.
        drop(h.channel);
        h.db.lock().clear_on_flush();
        assert_eq!(
            h.syncer_rx.try_recv().err(),
            Some(std::sync::mpsc::TryRecvError::Disconnected)
        );
        assert!(h.syncer.planned_flushes.is_empty());
    }
}