knob to reduce db commits (SSD write cycles)
This improves the practicality of having many streams (including the doubling of streams by having main + sub streams for each camera). With these tuned properly, extra streams don't cause any extra write cycles in normal or error cases. Consider the worst case in which each RTSP session immediately sends a single frame and then fails. Moonfire retries every second, so this formerly caused one commit per second per stream. (flush_if_sec=0 preserves this behavior.) Now the commits can be arbitrarily infrequent by setting higher values of flush_if_sec.

WARNING: this isn't production-ready! I hacked up dir.rs to make tests pass and "moonfire-nvr run" work in the best-case scenario, but it doesn't handle errors gracefully.

I've been debating what to do when writing a recording fails. I considered "abandoning" the recording, then either reusing or skipping its id (in the latter case, marking the file as garbage if it can't be unlinked immediately). I now think there's no point in abandoning a recording: if I can't write to that file, there's no reason to believe another will work better. It's better to retry that recording forever, and perhaps put the whole directory into an error state that stops recording until those writes go through. I'm planning to redesign dir.rs to make this happen.
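For intuition, a minimal standalone sketch of the flush decision this knob controls (the real logic is in `Syncer::save` in the diff below, using the stream's `unflushed()` accessor and `flush_if` field; the plain-integer `Stream` and the `should_flush` helper here are illustrations, not the real types):

/// Simplified model of the per-stream flush decision in `Syncer::save`.
struct Stream {
    /// Seconds of completed recordings not yet committed to SQLite.
    unflushed_sec: i64,
    /// The new per-stream knob; 0 means flush on every completed recording.
    flush_if_sec: i64,
}

/// Returns true when the accumulated backlog justifies a database commit.
fn should_flush(s: &Stream) -> bool {
    s.unflushed_sec >= s.flush_if_sec
}

fn main() {
    // flush_if_sec = 0 preserves the old behavior: every completed
    // recording triggers a commit.
    assert!(should_flush(&Stream { unflushed_sec: 0, flush_if_sec: 0 }));
    // With a higher knob, tiny recordings accumulate until the backlog is
    // worth a single commit, sparing the SSD in the failing-camera case.
    assert!(!should_flush(&Stream { unflushed_sec: 45, flush_if_sec: 60 }));
    assert!(should_flush(&Stream { unflushed_sec: 60, flush_if_sec: 60 }));
}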
parent 31adbc1e9f
commit b037c9bdd7

db/dir.rs | 301
db/dir.rs

@@ -36,6 +36,7 @@ use db::{self, CompositeId};
 use failure::{Error, Fail};
 use fnv::FnvHashMap;
 use libc::{self, c_char};
+use parking_lot::Mutex;
 use protobuf::{self, Message};
 use recording;
 use openssl::hash;

@@ -47,7 +48,7 @@ use std::io::{self, Read, Write};
 use std::mem;
 use std::os::unix::ffi::OsStrExt;
 use std::os::unix::io::FromRawFd;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::sync::mpsc;
 use std::thread;
 

@@ -62,9 +63,6 @@ pub struct SampleFileDir {
     /// The open file descriptor for the directory. The worker uses it to create files and sync the
     /// directory. Other threads use it to open sample files for reading during video serving.
     fd: Fd,
-
-    // Lock order: don't acquire mutable.lock() while holding db.lock().
-    mutable: Mutex<SharedMutableState>,
 }
 
 /// A file descriptor associated with a directory (not necessarily the sample file dir).

@@ -199,9 +197,6 @@ impl SampleFileDir {
             .map_err(|e| format_err!("unable to open sample file dir {}: {}", path, e))?;
         Ok(Arc::new(SampleFileDir {
             fd,
-            mutable: Mutex::new(SharedMutableState{
-                next_id_by_stream: FnvHashMap::default(),
-            }),
         }))
     }
 
@@ -258,40 +253,11 @@ impl SampleFileDir {
                       prev: Option<PreviousWriter>, stream_id: i32,
                       video_sample_entry_id: i32)
                       -> Result<Writer<'a>, Error> {
-        // Grab the next id. The dir itself will typically have an id (possibly one ahead of what's
-        // stored in the database), but not on the first attempt for a stream.
-        use std::collections::hash_map::Entry;
-        let recording_id;
-        match self.mutable.lock().unwrap().next_id_by_stream.entry(stream_id) {
-            Entry::Occupied(mut e) => {
-                let v = e.get_mut();
-                recording_id = *v;
-                *v += 1;
-            },
-            Entry::Vacant(e) => {
-                let mut l = db.lock();
-                recording_id = l.streams_by_id().get(&stream_id).unwrap().next_recording_id;
-                e.insert(recording_id + 1);
-            },
-        };
-
-        let id = CompositeId::new(stream_id, recording_id);
+        let (id, r) = db.lock().add_recording(stream_id)?;
         let p = SampleFileDir::get_rel_pathname(id);
-        let f = match unsafe { self.fd.openat(p.as_ptr(),
-                                              libc::O_WRONLY | libc::O_EXCL | libc::O_CREAT,
-                                              0o600) } {
-            Ok(f) => f,
-            Err(e) => {
-                // Put the id back to try again later.
-                let mut l = self.mutable.lock().unwrap();
-                let v = l.next_id_by_stream.get_mut(&stream_id).unwrap();
-                assert_eq!(*v, recording_id + 1);
-                *v -= 1;
-                return Err(e.into());
-            },
-        };
-        Writer::open(f, id, prev, video_sample_entry_id, channel)
+        let f = unsafe { self.fd.openat(p.as_ptr(), libc::O_WRONLY | libc::O_EXCL | libc::O_CREAT,
+                                        0o600) }.unwrap();  // TODO: don't unwrap!
+        Writer::open(f, id, r, prev, video_sample_entry_id, channel)
     }
 
     pub fn statfs(&self) -> Result<libc::statvfs, io::Error> { self.fd.statfs() }
@@ -325,16 +291,11 @@ impl SampleFileDir {
     }
 }
 
-/// State shared between users of the `SampleFileDirectory` struct and the syncer.
-#[derive(Debug)]
-struct SharedMutableState {
-    next_id_by_stream: FnvHashMap<i32, i32>,
-}
-
 /// A command sent to the syncer. These correspond to methods in the `SyncerChannel` struct.
 enum SyncerCommand {
-    AsyncSaveRecording(db::RecordingToInsert, fs::File),
-    AsyncAbandonRecording(CompositeId),
+    AsyncSaveRecording(CompositeId, Arc<Mutex<db::UncommittedRecording>>, fs::File),
+    //AsyncAbandonRecording(CompositeId),
+    DatabaseFlushed,
     Flush(mpsc::SyncSender<()>),
 }
 

@@ -345,20 +306,9 @@ pub struct SyncerChannel(mpsc::Sender<SyncerCommand>);
 
 /// State of the worker thread.
 struct Syncer {
+    dir_id: i32,
     dir: Arc<SampleFileDir>,
     db: Arc<db::Database>,
-
-    /// Files to be unlinked then immediately forgotten about. They are `>= next_recording_id` for
-    /// their stream, `next_recording_id` won't be advanced without a sync of the directory, and
-    /// extraneous files `>= next_recording_id` are unlinked on startup, so this should be
-    /// sufficient.
-    to_abandon: Vec<CompositeId>,
-
-    /// Files to be unlinked then removed from the garbage table.
-    to_unlink: Vec<CompositeId>,
-
-    /// Files to be removed from the garbage table.
-    to_mark_deleted: Vec<CompositeId>,
 }
 
 /// Starts a syncer for the given sample file directory.

@@ -371,13 +321,23 @@ struct Syncer {
 ///
 /// Returns a `SyncerChannel` which can be used to send commands (and can be cloned freely) and
 /// a `JoinHandle` for the syncer thread. At program shutdown, all `SyncerChannel` clones should be
-/// removed and then the handle joined to allow all recordings to be persisted.
+/// dropped and then the handle joined to allow all recordings to be persisted.
+///
+/// Note that dropping all `SyncerChannel` clones currently includes calling
+/// `LockedDatabase::clear_on_flush`, as this function installs a hook to watch database flushes.
+/// TODO: add a join wrapper which arranges for the on flush hook to be removed automatically.
 pub fn start_syncer(db: Arc<db::Database>, dir_id: i32)
                     -> Result<(SyncerChannel, thread::JoinHandle<()>), Error> {
     let db2 = db.clone();
     let (mut syncer, path) = Syncer::new(&db.lock(), db2, dir_id)?;
     syncer.initial_rotation()?;
     let (snd, rcv) = mpsc::channel();
+    db.lock().on_flush(Box::new({
+        let snd = snd.clone();
+        move || if let Err(e) = snd.send(SyncerCommand::DatabaseFlushed) {
+            warn!("Unable to notify syncer for dir {} of flush: {}", dir_id, e);
+        }
+    }));
     Ok((SyncerChannel(snd),
         thread::Builder::new()
             .name(format!("sync-{}", path))

@@ -440,13 +400,14 @@ fn get_rows_to_delete(db: &db::LockedDatabase, stream_id: i32,
 impl SyncerChannel {
     /// Asynchronously syncs the given writer, closes it, records it into the database, and
     /// starts rotation.
-    fn async_save_recording(&self, recording: db::RecordingToInsert, f: fs::File) {
-        self.0.send(SyncerCommand::AsyncSaveRecording(recording, f)).unwrap();
+    fn async_save_recording(&self, id: CompositeId, recording: Arc<Mutex<db::UncommittedRecording>>,
+                            f: fs::File) {
+        self.0.send(SyncerCommand::AsyncSaveRecording(id, recording, f)).unwrap();
     }
 
-    fn async_abandon_recording(&self, id: CompositeId) {
-        self.0.send(SyncerCommand::AsyncAbandonRecording(id)).unwrap();
-    }
+    //fn async_abandon_recording(&self, id: CompositeId) {
+    //    self.0.send(SyncerCommand::AsyncAbandonRecording(id)).unwrap();
+    //}
 
     /// For testing: flushes the syncer, waiting for all currently-queued commands to complete.
     pub fn flush(&self) {

@@ -463,9 +424,8 @@ impl Syncer {
             .get(&dir_id)
             .ok_or_else(|| format_err!("no dir {}", dir_id))?;
         let dir = d.get()?;
-        let to_unlink = l.list_garbage(dir_id)?;
 
-        // Get files to abandon.
+        // Abandon files.
         // First, get a list of the streams in question.
         let streams_to_next: FnvHashMap<_, _> =
             l.streams_by_id()

@@ -479,13 +439,25 @@ impl Syncer {
                 })
                 .collect();
         let to_abandon = Syncer::list_files_to_abandon(&d.path, streams_to_next)?;
+        let mut undeletable = 0;
+        for &id in &to_abandon {
+            if let Err(e) = SampleFileDir::unlink(&dir.fd, id) {
+                if e.kind() == io::ErrorKind::NotFound {
+                    warn!("dir: abandoned recording {} already deleted!", id);
+                } else {
+                    warn!("dir: Unable to unlink abandoned recording {}: {}", id, e);
+                    undeletable += 1;
+                }
+            }
+        }
+        if undeletable > 0 {
+            bail!("Unable to delete {} abandoned recordings.", undeletable);
+        }
 
         Ok((Syncer {
+            dir_id,
             dir,
             db,
-            to_abandon,
-            to_unlink,
-            to_mark_deleted: Vec::new(),
         }, d.path.clone()))
     }
 

@@ -515,8 +487,9 @@ impl Syncer {
         loop {
             match cmds.recv() {
                 Err(_) => return,  // all senders have closed the channel; shutdown
-                Ok(SyncerCommand::AsyncSaveRecording(recording, f)) => self.save(recording, f),
-                Ok(SyncerCommand::AsyncAbandonRecording(uuid)) => self.abandon(uuid),
+                Ok(SyncerCommand::AsyncSaveRecording(id, r, f)) => self.save(id, r, f),
+                //Ok(SyncerCommand::AsyncAbandonRecording(uuid)) => self.abandon(uuid),
+                Ok(SyncerCommand::DatabaseFlushed) => { let _ = self.collect_garbage(true); },
                 Ok(SyncerCommand::Flush(_)) => {},  // just drop the supplied sender, closing it.
             };
         }
@@ -535,106 +508,102 @@ impl Syncer {
 
     fn do_rotation<F>(&mut self, get_rows_to_delete: F) -> Result<(), Error>
     where F: FnOnce(&db::LockedDatabase) -> Result<Vec<db::ListOldestSampleFilesRow>, Error> {
-        let to_delete = {
-            let mut db = self.db.lock();
-            let to_delete = get_rows_to_delete(&*db)?;
-            let mut tx = db.tx()?;
-            tx.delete_recordings(&to_delete)?;
-            tx.commit()?;
-            to_delete
-        };
-        for row in to_delete {
-            self.to_unlink.push(row.id);
-        }
-        self.try_unlink();
-        if !self.to_unlink.is_empty() {
-            bail!("failed to unlink {} sample files", self.to_unlink.len());
-        }
-        self.dir.sync()?;
         {
             let mut db = self.db.lock();
-            let mut tx = db.tx()?;
-            tx.mark_sample_files_deleted(&self.to_mark_deleted)?;
-            tx.commit()?;
+            let mut to_delete = get_rows_to_delete(&*db)?;
+            db.delete_recordings(&mut to_delete);
+            db.flush("synchronous deletion")?;
         }
-        self.to_mark_deleted.clear();
-        Ok(())
+        self.collect_garbage(false)?;
+        self.db.lock().flush("synchronous garbage collection")
+    }
+
+    fn collect_garbage(&mut self, warn_on_missing: bool) -> Result<(), Error> {
+        let mut garbage: Vec<_> = {
+            let l = self.db.lock();
+            let d = match l.sample_file_dirs_by_id().get(&self.dir_id) {
+                None => {
+                    error!("can't find dir {} in db!", self.dir_id);
+                    bail!("can't find dir {} in db!", self.dir_id);
+                },
+                Some(d) => d,
+            };
+            d.garbage.iter().map(|id| *id).collect()
+        };
+        let len_before = garbage.len();
+        garbage.retain(|&id| {
+            if let Err(e) = SampleFileDir::unlink(&self.dir.fd, id) {
+                if e.kind() == io::ErrorKind::NotFound {
+                    if warn_on_missing {
+                        warn!("dir: recording {} already deleted!", id);
+                    }
+                } else {
+                    warn!("dir: Unable to unlink {}: {}", id, e);
+                    return false;
+                }
+            }
+            true
+        });
+        let res = if len_before > garbage.len() {
+            Err(format_err!("Unable to unlink {} files", len_before - garbage.len()))
+        } else {
+            Ok(())
+        };
+        if garbage.is_empty() {
+            // No progress.
+            return res;
+        }
+        if let Err(e) = self.dir.sync() {
+            error!("unable to sync dir: {}", e);
+            return res.and(Err(e.into()));
+        }
+        if let Err(e) = self.db.lock().delete_garbage(self.dir_id, &mut garbage) {
+            error!("unable to delete garbage ({} files) for dir {}: {}",
+                   self.dir_id, garbage.len(), e);
+            return res.and(Err(e.into()));
+        }
+        res
     }
 
     /// Saves the given recording and causes rotation to happen.
     /// Note that part of rotation is deferred for the next cycle (saved writing or program startup)
     /// so that there can be only one dir sync and database transaction per save.
-    fn save(&mut self, recording: db::RecordingToInsert, f: fs::File) {
-        if let Err(e) = self.save_helper(&recording, f) {
-            error!("will discard recording {} due to error while saving: {}", recording.id, e);
-            self.abandon(recording.id);
-            return;
-        }
-    }
-
-    fn abandon(&mut self, id: CompositeId) {
-        self.to_abandon.push(id);
-        self.try_unlink();
-    }
-
     /// Internal helper for `save`. This is separated out so that the question-mark operator
     /// can be used in the many error paths.
-    fn save_helper(&mut self, recording: &db::RecordingToInsert, f: fs::File)
-                   -> Result<(), Error> {
-        self.try_unlink();
-        if !self.to_unlink.is_empty() {
-            bail!("failed to unlink {} files.", self.to_unlink.len());
-        }
+    /// TODO: less unwrapping! keep a queue?
+    fn save(&mut self, id: CompositeId, recording: Arc<Mutex<db::UncommittedRecording>>,
+            f: fs::File) {
+        let stream_id = id.stream();
 
-        // XXX: if these calls fail, any other writes are likely to fail as well.
-        f.sync_all()?;
-        self.dir.sync()?;
-
-        let mut to_delete = Vec::new();
-        let mut db = self.db.lock();
+        // Free up a like number of bytes.
         {
-            let stream_id = recording.id.stream();
-            let stream =
-                db.streams_by_id().get(&stream_id)
-                  .ok_or_else(|| format_err!("no such stream {}", stream_id))?;
-            get_rows_to_delete(&db, stream_id, stream,
-                               recording.sample_file_bytes as i64, &mut to_delete)?;
+            let mut to_delete = Vec::new();
+            let len = recording.lock().recording.as_ref().unwrap().sample_file_bytes as i64;
+            let mut db = self.db.lock();
+            {
+                let stream = db.streams_by_id().get(&stream_id).unwrap();
+                get_rows_to_delete(&db, stream_id, stream, len, &mut to_delete).unwrap();
+            }
+            db.delete_recordings(&mut to_delete);
         }
-        let mut tx = db.tx()?;
-        tx.mark_sample_files_deleted(&self.to_mark_deleted)?;
-        tx.delete_recordings(&to_delete)?;
-        tx.insert_recording(recording)?;
-        tx.commit()?;
 
-        self.to_mark_deleted.clear();
-        self.to_unlink.extend(to_delete.iter().map(|row| row.id));
-        self.to_unlink.extend_from_slice(&self.to_abandon);
-        self.to_abandon.clear();
-        Ok(())
-    }
-
-    /// Tries to unlink all the files in `self.to_unlink` and `self.to_abandon`.
-    /// Any which can't be unlinked will be retained in the vec.
-    fn try_unlink(&mut self) {
-        let to_mark_deleted = &mut self.to_mark_deleted;
-        let fd = &self.dir.fd;
-        for &mut (ref mut v, mark_deleted) in &mut [(&mut self.to_unlink, true),
-                                                    (&mut self.to_abandon, false)] {
-            v.retain(|&id| {
-                if let Err(e) = SampleFileDir::unlink(fd, id) {
-                    if e.kind() == io::ErrorKind::NotFound {
-                        warn!("dir: recording {} already deleted!", id);
-                    } else {
-                        warn!("dir: Unable to unlink {}: {}", id, e);
-                        return true;
-                    }
-                }
-                if mark_deleted {
-                    to_mark_deleted.push(id);
-                }
-                false
-            });
-        }
+        f.sync_all().unwrap();
+        self.dir.sync().unwrap();
+        recording.lock().synced = true;
+        let mut db = self.db.lock();
+        let reason = {
+            let s = db.streams_by_id().get(&stream_id).unwrap();
+            let c = db.cameras_by_id().get(&s.camera_id).unwrap();
+            let unflushed = s.unflushed();
+            if unflushed < s.flush_if {
+                debug!("{}-{}: unflushed={} < if={}, not flushing",
+                       c.short_name, s.type_.as_str(), unflushed, s.flush_if);
+                return;
+            }
+            format!("{}-{}: unflushed={} >= if={}",
+                    c.short_name, s.type_.as_str(), unflushed, s.flush_if)
+        };
+        let _ = db.flush(&reason);
     }
 }
 
@@ -651,6 +620,7 @@ pub struct Writer<'a>(Option<InnerWriter<'a>>);
 struct InnerWriter<'a> {
     syncer_channel: &'a SyncerChannel,
     f: fs::File,
+    r: Arc<Mutex<db::UncommittedRecording>>,
     index: recording::SampleIndexEncoder,
     id: CompositeId,
     corrupt: bool,

@@ -744,11 +714,13 @@ pub struct PreviousWriter {
 
 impl<'a> Writer<'a> {
     /// Opens the writer; for use by `SampleFileDir` (which should supply `f`).
-    fn open(f: fs::File, id: CompositeId, prev: Option<PreviousWriter>,
+    fn open(f: fs::File, id: CompositeId, r: Arc<Mutex<db::UncommittedRecording>>,
+            prev: Option<PreviousWriter>,
             video_sample_entry_id: i32, syncer_channel: &'a SyncerChannel) -> Result<Self, Error> {
         Ok(Writer(Some(InnerWriter {
             syncer_channel,
             f,
+            r,
             index: recording::SampleIndexEncoder::new(),
             id,
             corrupt: false,

@@ -784,6 +756,7 @@ impl<'a> Writer<'a> {
             Err(e) => {
                 if remaining.len() < pkt.len() {
                     // Partially written packet. Truncate if possible.
+                    // TODO: have the syncer do this / retry it if necessary?
                     if let Err(e2) = w.f.set_len(w.index.sample_file_bytes as u64) {
                         error!("After write to {} failed with {}, truncate failed with {}; \
                                 sample file is corrupt.", w.id, e, e2);

@@ -820,7 +793,7 @@ impl<'a> InnerWriter<'a> {
 
     fn close(mut self, next_pts: Option<i64>) -> Result<PreviousWriter, Error> {
         if self.corrupt {
-            self.syncer_channel.async_abandon_recording(self.id);
+            //self.syncer_channel.async_abandon_recording(self.id);
             bail!("recording {} is corrupt", self.id);
         }
         let unflushed =

@@ -839,8 +812,7 @@ impl<'a> InnerWriter<'a> {
         let flags = if self.index.has_trailing_zero() { db::RecordingFlags::TrailingZero as i32 }
                     else { 0 };
         let local_start_delta = self.local_start - start;
-        let recording = db::RecordingToInsert{
-            id: self.id,
+        let recording = db::RecordingToInsert {
             sample_file_bytes: self.index.sample_file_bytes,
             time: start .. end,
             local_time_delta: local_start_delta,

@@ -852,7 +824,8 @@ impl<'a> InnerWriter<'a> {
             run_offset: self.run_offset,
             flags: flags,
         };
-        self.syncer_channel.async_save_recording(recording, self.f);
+        self.r.lock().recording = Some(recording);
+        self.syncer_channel.async_save_recording(self.id, self.r, self.f);
         Ok(PreviousWriter{
             end_time: end,
             local_time_delta: local_start_delta,
db/lib.rs

@@ -48,6 +48,7 @@ extern crate uuid;
 mod coding;
 pub mod db;
 pub mod dir;
+mod raw;
 pub mod recording;
 pub mod schema;
 
db/raw.rs

@@ -0,0 +1,203 @@
+// This file is part of Moonfire NVR, a security camera digital video recorder.
+// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// In addition, as a special exception, the copyright holders give
+// permission to link the code of portions of this program with the
+// OpenSSL library under certain conditions as described in each
+// individual source file, and distribute linked combinations including
+// the two.
+//
+// You must obey the GNU General Public License in all respects for all
+// of the code used other than OpenSSL. If you modify file(s) with this
+// exception, you may extend this exception to your version of the
+// file(s), but you are not obligated to do so. If you do not wish to do
+// so, delete this exception statement from your version. If you delete
+// this exception statement from all source files in the program, then
+// also delete it here.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+//! Raw database access: SQLite statements which do not touch any cached state.
+
+use db::{self, CompositeId};
+use failure::Error;
+use fnv::FnvHashSet;
+use recording;
+use rusqlite;
+use std::ops::Range;
+
+const INSERT_RECORDING_SQL: &'static str = r#"
+    insert into recording (composite_id, stream_id, run_offset, flags, sample_file_bytes,
+                           start_time_90k, duration_90k, local_time_delta_90k, video_samples,
+                           video_sync_samples, video_sample_entry_id)
+                   values (:composite_id, :stream_id, :run_offset, :flags, :sample_file_bytes,
+                           :start_time_90k, :duration_90k, :local_time_delta_90k,
+                           :video_samples, :video_sync_samples, :video_sample_entry_id)
+"#;
+
+const INSERT_RECORDING_PLAYBACK_SQL: &'static str = r#"
+    insert into recording_playback (composite_id, sample_file_sha1, video_index)
+                            values (:composite_id, :sample_file_sha1, :video_index)
+"#;
+
+const STREAM_MIN_START_SQL: &'static str = r#"
+    select
+      start_time_90k
+    from
+      recording
+    where
+      stream_id = :stream_id
+    order by start_time_90k limit 1
+"#;
+
+const STREAM_MAX_START_SQL: &'static str = r#"
+    select
+      start_time_90k,
+      duration_90k
+    from
+      recording
+    where
+      stream_id = :stream_id
+    order by start_time_90k desc;
+"#;
+
+/// Inserts the specified recording (for from `try_flush` only).
+pub(crate) fn insert_recording(tx: &rusqlite::Transaction, id: CompositeId,
+                               r: &db::RecordingToInsert) -> Result<(), Error> {
+    if r.time.end < r.time.start {
+        bail!("end time {} must be >= start time {}", r.time.end, r.time.start);
+    }
+
+    let mut stmt = tx.prepare_cached(INSERT_RECORDING_SQL)?;
+    stmt.execute_named(&[
+        (":composite_id", &id.0),
+        (":stream_id", &(id.stream() as i64)),
+        (":run_offset", &r.run_offset),
+        (":flags", &r.flags),
+        (":sample_file_bytes", &r.sample_file_bytes),
+        (":start_time_90k", &r.time.start.0),
+        (":duration_90k", &(r.time.end.0 - r.time.start.0)),
+        (":local_time_delta_90k", &r.local_time_delta.0),
+        (":video_samples", &r.video_samples),
+        (":video_sync_samples", &r.video_sync_samples),
+        (":video_sample_entry_id", &r.video_sample_entry_id),
+    ])?;
+
+    let mut stmt = tx.prepare_cached(INSERT_RECORDING_PLAYBACK_SQL)?;
+    let sha1 = &r.sample_file_sha1[..];
+    stmt.execute_named(&[
+        (":composite_id", &id.0),
+        (":sample_file_sha1", &sha1),
+        (":video_index", &r.video_index),
+    ])?;
+    Ok(())
+}
+
+/// Deletes the given recordings from the `recording` and `recording_playback` tables.
+/// Note they are not fully removed from the database; the ids are transferred to the
+/// `garbage` table.
+pub(crate) fn delete_recordings(tx: &rusqlite::Transaction, rows: &[db::ListOldestSampleFilesRow])
+                                -> Result<(), Error> {
+    let mut del1 = tx.prepare_cached(
+        "delete from recording_playback where composite_id = :composite_id")?;
+    let mut del2 = tx.prepare_cached(
+        "delete from recording where composite_id = :composite_id")?;
+    let mut insert = tx.prepare_cached(r#"
+        insert into garbage (sample_file_dir_id, composite_id)
+                     values (:sample_file_dir_id, :composite_id)
+    "#)?;
+    for row in rows {
+        let changes = del1.execute_named(&[(":composite_id", &row.id.0)])?;
+        if changes != 1 {
+            bail!("no such recording_playback {}", row.id);
+        }
+        let changes = del2.execute_named(&[(":composite_id", &row.id.0)])?;
+        if changes != 1 {
+            bail!("no such recording {}", row.id);
+        }
+        insert.execute_named(&[
+            (":sample_file_dir_id", &row.sample_file_dir_id),
+            (":composite_id", &row.id.0)],
+        )?;
+    }
+    Ok(())
+}
+
+/// Marks the given sample files as deleted. This shouldn't be called until the files have
+/// been `unlink()`ed and the parent directory `fsync()`ed.
+pub(crate) fn mark_sample_files_deleted(tx: &rusqlite::Transaction, ids: &[CompositeId])
+                                        -> Result<(), Error> {
+    if ids.is_empty() { return Ok(()); }
+    let mut stmt = tx.prepare_cached("delete from garbage where composite_id = ?")?;
+    for &id in ids {
+        let changes = stmt.execute(&[&id.0])?;
+        if changes != 1 {
+            bail!("no garbage row for {}", id);
+        }
+    }
+    Ok(())
+}
+
+/// Gets the time range of recordings for the given stream.
+pub(crate) fn get_range(conn: &rusqlite::Connection, stream_id: i32)
+                        -> Result<Option<Range<recording::Time>>, Error> {
+    // The minimum is straightforward, taking advantage of the start_time_90k index.
+    let mut stmt = conn.prepare_cached(STREAM_MIN_START_SQL)?;
+    let mut rows = stmt.query_named(&[(":stream_id", &stream_id)])?;
+    let min_start = match rows.next() {
+        Some(row) => recording::Time(row?.get_checked(0)?),
+        None => return Ok(None),
+    };
+
+    // There was a minimum, so there should be a maximum too. Calculating it is less
+    // straightforward because recordings could overlap. All recordings starting in the
+    // last MAX_RECORDING_DURATION must be examined in order to take advantage of the
+    // start_time_90k index.
+    let mut stmt = conn.prepare_cached(STREAM_MAX_START_SQL)?;
+    let mut rows = stmt.query_named(&[(":stream_id", &stream_id)])?;
+    let mut maxes_opt = None;
+    while let Some(row) = rows.next() {
+        let row = row?;
+        let row_start = recording::Time(row.get_checked(0)?);
+        let row_duration: i64 = row.get_checked(1)?;
+        let row_end = recording::Time(row_start.0 + row_duration);
+        let maxes = match maxes_opt {
+            None => row_start .. row_end,
+            Some(Range{start: s, end: e}) => s .. ::std::cmp::max(e, row_end),
+        };
+        if row_start.0 <= maxes.start.0 - recording::MAX_RECORDING_DURATION {
+            break;
+        }
+        maxes_opt = Some(maxes);
+    }
+    let max_end = match maxes_opt {
+        Some(Range{start: _, end: e}) => e,
+        None => bail!("missing max for stream {} which had min {}", stream_id, min_start),
+    };
+    Ok(Some(min_start .. max_end))
+}
+
+/// Lists all garbage ids for the given sample file directory.
+pub(crate) fn list_garbage(conn: &rusqlite::Connection, dir_id: i32)
+                           -> Result<FnvHashSet<CompositeId>, Error> {
+    let mut garbage = FnvHashSet::default();
+    let mut stmt = conn.prepare_cached(
+        "select composite_id from garbage where sample_file_dir_id = ?")?;
+    let mut rows = stmt.query(&[&dir_id])?;
+    while let Some(row) = rows.next() {
+        let row = row?;
+        garbage.insert(CompositeId(row.get_checked(0)?));
+    }
+    Ok(garbage)
+}
db/schema.sql

@@ -108,6 +108,11 @@ create table stream (
   -- file. Older files will be deleted as necessary to stay within this limit.
   retain_bytes integer not null check (retain_bytes >= 0),
 
+  -- Flush the database when completing a recording if this stream has at
+  -- least this many seconds of unflushed recordings. A value of 0 means that
+  -- every completed recording will cause a flush.
+  flush_if_sec integer not null,
+
   -- The low 32 bits of the next recording id to assign for this stream.
   -- Typically this is the maximum current recording + 1, but it does
   -- not decrease if that recording is deleted.
db/testutil.rs

@@ -86,28 +86,29 @@ impl TestDb {
         let dir;
         {
             let mut l = db.lock();
-            {
-                sample_file_dir_id = l.add_sample_file_dir(path.to_owned()).unwrap();
-                assert_eq!(TEST_CAMERA_ID, l.add_camera(db::CameraChange {
-                    short_name: "test camera".to_owned(),
-                    description: "".to_owned(),
-                    host: "test-camera".to_owned(),
-                    username: "foo".to_owned(),
-                    password: "bar".to_owned(),
-                    streams: [
-                        db::StreamChange {
-                            sample_file_dir_id: Some(sample_file_dir_id),
-                            rtsp_path: "/main".to_owned(),
-                            record: true,
-                        },
-                        Default::default(),
-                    ],
-                }).unwrap());
-                test_camera_uuid = l.cameras_by_id().get(&TEST_CAMERA_ID).unwrap().uuid;
-                let mut tx = l.tx().unwrap();
-                tx.update_retention(TEST_STREAM_ID, true, 1048576).unwrap();
-                tx.commit().unwrap();
-            }
+            sample_file_dir_id = l.add_sample_file_dir(path.to_owned()).unwrap();
+            assert_eq!(TEST_CAMERA_ID, l.add_camera(db::CameraChange {
+                short_name: "test camera".to_owned(),
+                description: "".to_owned(),
+                host: "test-camera".to_owned(),
+                username: "foo".to_owned(),
+                password: "bar".to_owned(),
+                streams: [
+                    db::StreamChange {
+                        sample_file_dir_id: Some(sample_file_dir_id),
+                        rtsp_path: "/main".to_owned(),
+                        record: true,
+                        flush_if_sec: 0,
+                    },
+                    Default::default(),
+                ],
+            }).unwrap());
+            test_camera_uuid = l.cameras_by_id().get(&TEST_CAMERA_ID).unwrap().uuid;
+            l.update_retention(&[db::RetentionChange {
+                stream_id: TEST_STREAM_ID,
+                new_record: true,
+                new_limit: 1048576,
+            }]).unwrap();
             dir = l.sample_file_dirs_by_id().get(&sample_file_dir_id).unwrap().get().unwrap();
         }
         let mut dirs_by_stream_id = FnvHashMap::default();

@@ -129,28 +130,25 @@ impl TestDb {
         let mut db = self.db.lock();
         let video_sample_entry_id = db.insert_video_sample_entry(
             1920, 1080, [0u8; 100].to_vec(), "avc1.000000".to_owned()).unwrap();
-        let next = db.streams_by_id().get(&TEST_STREAM_ID).unwrap().next_recording_id;
-        {
-            let mut tx = db.tx().unwrap();
-            const START_TIME: recording::Time = recording::Time(1430006400i64 * TIME_UNITS_PER_SEC);
-            tx.insert_recording(&db::RecordingToInsert {
-                id: db::CompositeId::new(TEST_STREAM_ID, next),
-                sample_file_bytes: encoder.sample_file_bytes,
-                time: START_TIME ..
-                      START_TIME + recording::Duration(encoder.total_duration_90k as i64),
-                local_time_delta: recording::Duration(0),
-                video_samples: encoder.video_samples,
-                video_sync_samples: encoder.video_sync_samples,
-                video_sample_entry_id: video_sample_entry_id,
-                video_index: encoder.video_index,
-                sample_file_sha1: [0u8; 20],
-                run_offset: 0,
-                flags: db::RecordingFlags::TrailingZero as i32,
-            }).unwrap();
-            tx.commit().unwrap();
-        }
+        const START_TIME: recording::Time = recording::Time(1430006400i64 * TIME_UNITS_PER_SEC);
+        let (id, u) = db.add_recording(TEST_STREAM_ID).unwrap();
+        u.lock().recording = Some(db::RecordingToInsert {
+            sample_file_bytes: encoder.sample_file_bytes,
+            time: START_TIME ..
+                  START_TIME + recording::Duration(encoder.total_duration_90k as i64),
+            local_time_delta: recording::Duration(0),
+            video_samples: encoder.video_samples,
+            video_sync_samples: encoder.video_sync_samples,
+            video_sample_entry_id: video_sample_entry_id,
+            video_index: encoder.video_index,
+            sample_file_sha1: [0u8; 20],
+            run_offset: 0,
+            flags: db::RecordingFlags::TrailingZero as i32,
+        });
+        u.lock().synced = true;
+        db.flush("create_recording_from_encoder").unwrap();
         let mut row = None;
-        db.list_recordings_by_id(TEST_STREAM_ID, next .. next+1,
+        db.list_recordings_by_id(TEST_STREAM_ID, id.recording() .. id.recording()+1,
                                  |r| { row = Some(r); Ok(()) }).unwrap();
         row.unwrap()
     }
@@ -207,6 +207,8 @@ is never used.
 Version 3 adds over version 1:
 
 * recording of sub streams (splits a new `stream` table out of `camera`)
+* a per-stream knob `flush_if_sec` meant to reduce database commits (and
+  thus SSD write cycles). This improves practicality of many streams.
 * support for multiple sample file directories, to take advantage of
   multiple hard drives (or multiple RAID volumes).
 * an interlock between database and sample file directories to avoid various
@@ -31,7 +31,7 @@
 //! Clock interface and implementations for testability.
 
 use libc;
-#[cfg(test)] use std::sync::Mutex;
+#[cfg(test)] use parking_lot::Mutex;
 use std::mem;
 use std::thread;
 use time::{Duration, Timespec};

@@ -123,12 +123,12 @@ impl SimulatedClocks {
 
 #[cfg(test)]
 impl Clocks for SimulatedClocks {
-    fn realtime(&self) -> Timespec { self.boot + *self.uptime.lock().unwrap() }
-    fn monotonic(&self) -> Timespec { Timespec::new(0, 0) + *self.uptime.lock().unwrap() }
+    fn realtime(&self) -> Timespec { self.boot + *self.uptime.lock() }
+    fn monotonic(&self) -> Timespec { Timespec::new(0, 0) + *self.uptime.lock() }
 
     /// Advances the clock by the specified amount without actually sleeping.
     fn sleep(&self, how_long: Duration) {
-        let mut l = self.uptime.lock().unwrap();
+        let mut l = self.uptime.lock();
         *l = *l + how_long;
     }
 }
@@ -36,6 +36,7 @@ use self::cursive::views;
 use db::{self, dir};
 use failure::Error;
 use std::collections::BTreeMap;
+use std::str::FromStr;
 use std::sync::Arc;
 use stream::{self, Opener, Stream};
 use super::{decode_size, encode_size};

@@ -62,6 +63,9 @@ fn get_change(siv: &mut Cursive) -> db::CameraChange {
                    .unwrap().get_content().as_str().into();
         let r = siv.find_id::<views::Checkbox>(&format!("{}_record", t.as_str()))
                    .unwrap().is_checked();
+        let f = i64::from_str(siv.find_id::<views::EditView>(
+                        &format!("{}_flush_if_sec", t.as_str())).unwrap().get_content().as_str())
+                .unwrap_or(0);
         let d = *siv.find_id::<views::SelectView<Option<i32>>>(
                         &format!("{}_sample_file_dir", t.as_str()))
                     .unwrap().selection();

@@ -69,6 +73,7 @@ fn get_change(siv: &mut Cursive) -> db::CameraChange {
             rtsp_path: p,
             sample_file_dir_id: d,
             record: r,
+            flush_if_sec: f,
         };
     }
     c

@@ -270,9 +275,11 @@ fn edit_camera_dialog(db: &Arc<db::Database>, siv: &mut Cursive, item: &Option<i
                      .popup()
                      .with_id(format!("{}_sample_file_dir", type_.as_str())))
         .child("record", views::Checkbox::new().with_id(format!("{}_record", type_.as_str())))
+        .child("flush_if_sec", views::EditView::new()
+               .with_id(format!("{}_flush_if_sec", type_.as_str())))
         .child("usage/capacity",
                views::TextView::new("").with_id(format!("{}_usage_cap", type_.as_str())))
-        .min_height(4);
+        .min_height(5);
     layout.add_child(views::DummyView);
     layout.add_child(views::TextView::new(format!("{} stream", type_.as_str())));
     layout.add_child(list);

@@ -313,6 +320,8 @@ fn edit_camera_dialog(db: &Arc<db::Database>, siv: &mut Cursive, item: &Option<i
                        |v: &mut views::TextView| v.set_content(u));
         dialog.find_id(&format!("{}_record", t.as_str()),
                        |v: &mut views::Checkbox| v.set_checked(s.record));
+        dialog.find_id(&format!("{}_flush_if_sec", t.as_str()),
+                       |v: &mut views::EditView| v.set_content(s.flush_if_sec.to_string()));
     }
     dialog.find_id(&format!("{}_sample_file_dir", t.as_str()),
                    |v: &mut views::SelectView<Option<i32>>| v.set_selection(selected_dir));
@@ -60,12 +60,15 @@ struct Model {
 
 /// Updates the limits in the database. Doesn't delete excess data (if any).
 fn update_limits_inner(model: &Model) -> Result<(), Error> {
-    let mut db = model.db.lock();
-    let mut tx = db.tx()?;
-    for (&id, stream) in &model.streams {
-        tx.update_retention(id, stream.record, stream.retain.unwrap())?;
+    let mut changes = Vec::with_capacity(model.streams.len());
+    for (&stream_id, stream) in &model.streams {
+        changes.push(db::RetentionChange {
+            stream_id,
+            new_record: stream.record,
+            new_limit: stream.retain.unwrap(),
+        });
     }
-    tx.commit()
+    model.db.lock().update_retention(&changes)
 }
 
 fn update_limits(model: &Model, siv: &mut Cursive) {
@@ -199,6 +199,9 @@ pub fn run() -> Result<(), Error> {
     }
 
     if let Some(mut ss) = syncers {
+        // The syncers shut down when all channels to them have been dropped.
+        // The database maintains one; and `ss` holds one. Drop both.
+        db.lock().clear_on_flush();
         for (_, s) in ss.drain() {
             drop(s.channel);
             s.join.join().unwrap();
@@ -159,6 +159,7 @@ impl<'a> super::Upgrader for U<'a> {
   record integer not null check (record in (1, 0)),
   rtsp_path text not null,
   retain_bytes integer not null check (retain_bytes >= 0),
+  flush_if_sec integer not null,
   next_recording_id integer not null check (next_recording_id >= 0),
   unique (camera_id, type)
 );

@@ -227,6 +228,7 @@ impl<'a> super::Upgrader for U<'a> {
   1,
   old_camera.main_rtsp_path,
   old_camera.retain_bytes,
+  0,
   old_camera.next_recording_id
 from
   old_camera cross join sample_file_dir;

@@ -241,7 +243,8 @@ impl<'a> super::Upgrader for U<'a> {
   0,
   old_camera.sub_rtsp_path,
   0,
-  0
+  60,
+  1
 from
   old_camera cross join sample_file_dir
 where
@@ -34,7 +34,7 @@ use serde::ser::{SerializeMap, SerializeSeq, Serializer};
 use std::collections::BTreeMap;
 use uuid::Uuid;
 
-#[derive(Debug, Serialize)]
+#[derive(Serialize)]
 #[serde(rename_all="camelCase")]
 pub struct TopLevel<'a> {
     pub time_zone_name: &'a str,
@@ -2105,6 +2105,7 @@ mod tests {
         const EXPECTED_ETAG: &'static str = "c56ef7eb3b4a713ceafebc3dc7958bd9e62a2fae";
         assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
         drop(db.syncer_channel);
+        db.db.lock().clear_on_flush();
        db.syncer_join.join().unwrap();
     }
 

@@ -2125,6 +2126,7 @@ mod tests {
         const EXPECTED_ETAG: &'static str = "3bdc2c8ce521df50155d0ca4d7497ada448fa7c3";
         assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
         drop(db.syncer_channel);
+        db.db.lock().clear_on_flush();
         db.syncer_join.join().unwrap();
     }
 

@@ -2145,6 +2147,7 @@ mod tests {
         const EXPECTED_ETAG: &'static str = "3986d3bd9b866c3455fb7359fb134aa2d9107af7";
         assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
         drop(db.syncer_channel);
+        db.db.lock().clear_on_flush();
         db.syncer_join.join().unwrap();
     }
 

@@ -2165,6 +2168,7 @@ mod tests {
         const EXPECTED_ETAG: &'static str = "9e789398c9a71ca834fec8fbc55b389f99d12dda";
         assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
         drop(db.syncer_channel);
+        db.db.lock().clear_on_flush();
         db.syncer_join.join().unwrap();
     }
 }
@@ -205,8 +205,9 @@ mod tests {
     use failure::Error;
     use h264;
     use moonfire_ffmpeg;
+    use parking_lot::Mutex;
     use std::cmp;
-    use std::sync::{Arc, Mutex};
+    use std::sync::Arc;
     use std::sync::atomic::{AtomicBool, Ordering};
     use stream::{self, Opener, Stream};
     use time;

@@ -290,7 +291,7 @@ mod tests {
             stream::Source::Rtsp(url) => assert_eq!(url, &self.expected_url),
             stream::Source::File(_) => panic!("expected rtsp url"),
         };
-        let mut l = self.streams.lock().unwrap();
+        let mut l = self.streams.lock();
         match l.pop() {
             Some(stream) => {
                 trace!("MockOpener returning next stream");

@@ -361,7 +362,7 @@ mod tests {
                                   testutil::TEST_STREAM_ID, camera, s, 0, 3);
         }
         stream.run();
-        assert!(opener.streams.lock().unwrap().is_empty());
+        assert!(opener.streams.lock().is_empty());
         db.syncer_channel.flush();
         let db = db.db.lock();
 