Merge branch 'new-schema'

The Rust portions of the merge are straightforward, but the JavaScript
is not. The new-schema branch is based on my hacky prototype UI; the
master branch is based on Dolf's rewrite. I attempted to match the
new-schema changes in Dolf's new structure.
Scott Lamb 2018-04-27 06:42:39 -07:00
commit 23fff5917c
67 changed files with 8771 additions and 4588 deletions

Cargo.lock (generated, 808 lines changed): file diff suppressed because it is too large.

Cargo.toml

@@ -7,26 +7,33 @@ authors = ["Scott Lamb <slamb@slamb.org>"]
# The nightly feature is used within moonfire-nvr itself to gate the
# benchmarks. Also pass it along to crates that can benefit from it.
nightly = ["parking_lot/nightly"]
nightly = ["moonfire-db/nightly", "parking_lot/nightly"]
# The bundled feature includes bundled (aka statically linked) versions of
# native libraries where possible.
bundled = ["rusqlite/bundled"]
[workspace]
members = ["base", "db", "ffmpeg"]
[dependencies]
bytes = "0.4.6"
byteorder = "1.0"
docopt = "0.8"
failure = "0.1.1"
futures = "0.1"
futures-cpupool = "0.1"
fnv = "1.0"
http = "0.1.5"
http-serve = { git = "https://github.com/scottlamb/http-serve" }
hyper = "0.11.16"
hyper = "0.11.25"
lazy_static = "1.0"
libc = "0.2"
log = { version = "0.4", features = ["release_max_level_info"] }
lru-cache = "0.1"
memmap = "0.6"
mime = "0.3"
moonfire-base = { path = "base" }
moonfire-db = { path = "db" }
moonfire-ffmpeg = { path = "ffmpeg" }
mylog = { git = "https://github.com/scottlamb/mylog" }
openssl = "0.10"
@@ -42,7 +49,7 @@ time = "0.1"
tokio-core = "0.1"
tokio-signal = "0.1"
url = "1.4"
uuid = { version = "0.5", features = ["serde", "v4"] }
uuid = { version = "0.6", features = ["serde", "std", "v4"] }
[dev-dependencies]
reqwest = "0.8"

base/Cargo.toml (new file, 18 lines)

@@ -0,0 +1,18 @@
[package]
name = "moonfire-base"
version = "0.0.1"
authors = ["Scott Lamb <slamb@slamb.org>"]
readme = "../README.md"
[features]
nightly = []
[lib]
path = "lib.rs"
[dependencies]
failure = "0.1.1"
libc = "0.2"
log = "0.4"
parking_lot = { version = "0.5", features = [] }
time = "0.1"

base/clock.rs

@@ -1,5 +1,5 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2016 Scott Lamb <slamb@slamb.org>
// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
@@ -30,14 +30,17 @@
//! Clock interface and implementations for testability.
use failure::Error;
use libc;
#[cfg(test)] use std::sync::Mutex;
use parking_lot::Mutex;
use std::mem;
use std::sync::{Arc, mpsc};
use std::thread;
use std::time::Duration as StdDuration;
use time::{Duration, Timespec};
/// Abstract interface to the system clocks. This is for testability.
pub trait Clocks : Sync {
pub trait Clocks : Send + Sync + 'static {
/// Gets the current time from `CLOCK_REALTIME`.
fn realtime(&self) -> Timespec;
@@ -46,12 +49,26 @@ pub trait Clocks : Sync {
/// Causes the current thread to sleep for the specified time.
fn sleep(&self, how_long: Duration);
/// Calls `rcv.recv_timeout` or substitutes a test implementation.
fn recv_timeout<T>(&self, rcv: &mpsc::Receiver<T>,
timeout: StdDuration) -> Result<T, mpsc::RecvTimeoutError>;
}
/// Singleton "real" clocks.
pub static REAL: RealClocks = RealClocks {};
pub fn retry_forever<C, T, E>(clocks: &C, f: &mut FnMut() -> Result<T, E>) -> T
where C: Clocks, E: Into<Error> {
loop {
let e = match f() {
Ok(t) => return t,
Err(e) => e.into(),
};
let sleep_time = Duration::seconds(1);
warn!("sleeping for {:?} after error: {:?}", sleep_time, e);
clocks.sleep(sleep_time);
}
}
/// Real clocks; see static `REAL` instance.
#[derive(Copy, Clone)]
pub struct RealClocks {}
impl RealClocks {
@@ -74,17 +91,22 @@ impl Clocks for RealClocks {
Err(e) => warn!("Invalid duration {:?}: {}", how_long, e),
};
}
fn recv_timeout<T>(&self, rcv: &mpsc::Receiver<T>,
timeout: StdDuration) -> Result<T, mpsc::RecvTimeoutError> {
rcv.recv_timeout(timeout)
}
}
/// Logs a warning if the TimerGuard lives "too long", using the label created by a supplied
/// function.
pub struct TimerGuard<'a, C: Clocks + 'a, S: AsRef<str>, F: FnOnce() -> S + 'a> {
pub struct TimerGuard<'a, C: Clocks + ?Sized, S: AsRef<str>, F: FnOnce() -> S + 'a> {
clocks: &'a C,
label_f: Option<F>,
start: Timespec,
}
impl<'a, C: Clocks + 'a, S: AsRef<str>, F: FnOnce() -> S + 'a> TimerGuard<'a, C, S, F> {
impl<'a, C: Clocks + ?Sized, S: AsRef<str>, F: FnOnce() -> S + 'a> TimerGuard<'a, C, S, F> {
pub fn new(clocks: &'a C, label_f: F) -> Self {
TimerGuard {
clocks,
@@ -94,7 +116,8 @@ impl<'a, C: Clocks + 'a, S: AsRef<str>, F: FnOnce() -> S + 'a> TimerGuard<'a, C,
}
}
impl<'a, C: Clocks + 'a, S: AsRef<str>, F: FnOnce() -> S + 'a> Drop for TimerGuard<'a, C, S, F> {
impl<'a, C, S, F> Drop for TimerGuard<'a, C, S, F>
where C: Clocks + ?Sized, S: AsRef<str>, F: FnOnce() -> S + 'a {
fn drop(&mut self) {
let elapsed = self.clocks.monotonic() - self.start;
if elapsed.num_seconds() >= 1 {
@@ -105,30 +128,40 @@ impl<'a, C: Clocks + 'a, S: AsRef<str>, F: FnOnce() -> S + 'a> Drop for TimerGua
}
/// Simulated clock for testing.
#[cfg(test)]
pub struct SimulatedClocks {
#[derive(Clone)]
pub struct SimulatedClocks(Arc<SimulatedClocksInner>);
struct SimulatedClocksInner {
boot: Timespec,
uptime: Mutex<Duration>,
}
#[cfg(test)]
impl SimulatedClocks {
pub fn new(boot: Timespec) -> SimulatedClocks {
SimulatedClocks {
pub fn new(boot: Timespec) -> Self {
SimulatedClocks(Arc::new(SimulatedClocksInner {
boot: boot,
uptime: Mutex::new(Duration::seconds(0)),
}
}))
}
}
#[cfg(test)]
impl Clocks for SimulatedClocks {
fn realtime(&self) -> Timespec { self.boot + *self.uptime.lock().unwrap() }
fn monotonic(&self) -> Timespec { Timespec::new(0, 0) + *self.uptime.lock().unwrap() }
fn realtime(&self) -> Timespec { self.0.boot + *self.0.uptime.lock() }
fn monotonic(&self) -> Timespec { Timespec::new(0, 0) + *self.0.uptime.lock() }
/// Advances the clock by the specified amount without actually sleeping.
fn sleep(&self, how_long: Duration) {
let mut l = self.uptime.lock().unwrap();
let mut l = self.0.uptime.lock();
*l = *l + how_long;
}
/// Advances the clock by the specified amount if data is not immediately available.
fn recv_timeout<T>(&self, rcv: &mpsc::Receiver<T>,
timeout: StdDuration) -> Result<T, mpsc::RecvTimeoutError> {
let r = rcv.recv_timeout(StdDuration::new(0, 0));
if let Err(_) = r {
self.sleep(Duration::from_std(timeout).unwrap());
}
r
}
}

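Aside (not part of this commit): a minimal sketch of how the pieces above compose in a test, assuming `SimulatedClocks` is compiled unconditionally (the diff drops its `#[cfg(test)]` gate). It uses only APIs shown in this file: `retry_forever`'s fixed one-second retry sleep, and `sleep` on the simulated clock advancing fake uptime instead of blocking.

extern crate failure;
extern crate moonfire_base as base;
extern crate time;

use base::clock::{Clocks, SimulatedClocks, retry_forever};

fn retry_demo() {
    let clocks = SimulatedClocks::new(time::Timespec::new(1_500_000_000, 0));
    let mut attempts = 0;
    let v: u32 = retry_forever(&clocks, &mut || {
        attempts += 1;
        if attempts < 3 { Err(failure::err_msg("transient")) } else { Ok(42) }
    });
    assert_eq!(v, 42);
    // Two failures cause two simulated one-second sleeps; no real time passes.
    assert_eq!(clocks.monotonic(), time::Timespec::new(2, 0));
}
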
base/lib.rs (new file, 38 lines)

@@ -0,0 +1,38 @@
// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
extern crate failure;
extern crate libc;
#[macro_use] extern crate log;
extern crate parking_lot;
extern crate time;
pub mod clock;
pub mod strutil;

db/Cargo.toml (new file, 29 lines)

@@ -0,0 +1,29 @@
[package]
name = "moonfire-db"
version = "0.0.1"
authors = ["Scott Lamb <slamb@slamb.org>"]
readme = "../README.md"
[features]
nightly = []
[lib]
path = "lib.rs"
[dependencies]
failure = "0.1.1"
fnv = "1.0"
lazy_static = "1.0"
libc = "0.2"
log = "0.4"
lru-cache = "0.1"
moonfire-base = { path = "../base" }
mylog = { git = "https://github.com/scottlamb/mylog" }
openssl = "0.10"
parking_lot = { version = "0.5", features = [] }
protobuf = "1.4"
regex = "0.2"
rusqlite = "0.13"
tempdir = "0.3"
time = "0.1"
uuid = { version = "0.6", features = ["std", "v4"] }

db/check.rs (new file, 320 lines)

@@ -0,0 +1,320 @@
// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//! Subcommand to check the database and sample file dir for errors.
use db::{self, CompositeId, FromSqlUuid};
use dir;
use failure::Error;
use fnv::FnvHashMap;
use raw;
use recording;
use rusqlite;
use schema;
use std::os::unix::ffi::OsStrExt;
use std::fs;
pub struct Options {
pub compare_lens: bool,
}
pub fn run(conn: &rusqlite::Connection, opts: &Options) -> Result<(), Error> {
let db_uuid = raw::get_db_uuid(&conn)?;
// Scan directories.
let mut streams_by_dir: FnvHashMap<i32, Dir> = FnvHashMap::default();
{
let mut dir_stmt = conn.prepare(r#"
select d.id, d.path, d.uuid, d.last_complete_open_id, o.uuid
from sample_file_dir d left join open o on (d.last_complete_open_id = o.id)
"#)?;
let mut garbage_stmt = conn.prepare_cached(
"select composite_id from garbage where sample_file_dir_id = ?")?;
let mut rows = dir_stmt.query(&[])?;
while let Some(row) = rows.next() {
let row = row?;
let mut meta = schema::DirMeta::default();
let dir_id: i32 = row.get_checked(0)?;
let dir_path: String = row.get_checked(1)?;
let dir_uuid: FromSqlUuid = row.get_checked(2)?;
let open_id = row.get_checked(3)?;
let open_uuid: FromSqlUuid = row.get_checked(4)?;
meta.db_uuid.extend_from_slice(&db_uuid.as_bytes()[..]);
meta.dir_uuid.extend_from_slice(&dir_uuid.0.as_bytes()[..]);
{
let o = meta.mut_last_complete_open();
o.id = open_id;
o.uuid.extend_from_slice(&open_uuid.0.as_bytes()[..]);
}
// Open the directory (checking its metadata) and hold it open (for the lock).
let _dir = dir::SampleFileDir::open(&dir_path, &meta)?;
let mut streams = read_dir(&dir_path, opts)?;
let mut rows = garbage_stmt.query(&[&dir_id])?;
while let Some(row) = rows.next() {
let row = row?;
let id = CompositeId(row.get_checked(0)?);
let s = streams.entry(id.stream()).or_insert_with(Stream::default);
s.entry(id.recording()).or_insert_with(Recording::default).garbage_row = true;
}
streams_by_dir.insert(dir_id, streams);
}
}
// Scan known streams.
{
let mut stmt = conn.prepare(r#"
select id, sample_file_dir_id from stream where sample_file_dir_id is not null
"#)?;
let mut rows = stmt.query(&[])?;
while let Some(row) = rows.next() {
let row = row?;
let stream_id = row.get_checked(0)?;
let dir_id = row.get_checked(1)?;
let stream = match streams_by_dir.get_mut(&dir_id) {
None => Stream::default(),
Some(d) => d.remove(&stream_id).unwrap_or_else(Stream::default),
};
compare_stream(conn, stream_id, opts, stream)?;
}
}
// Expect the rest to have only garbage.
for (&dir_id, streams) in &streams_by_dir {
for (&stream_id, stream) in streams {
for (&recording_id, r) in stream {
let id = CompositeId::new(stream_id, recording_id);
if r.recording_row.is_some() || r.playback_row.is_some() ||
r.integrity_row || !r.garbage_row {
error!("dir {} recording {} for unknown stream: {:#?}", dir_id, id, r);
}
}
}
}
Ok(())
}
#[derive(Debug, Eq, PartialEq)]
struct RecordingSummary {
bytes: u64,
video_samples: i32,
video_sync_samples: i32,
duration: i32,
flags: i32,
}
#[derive(Debug, Default)]
struct Recording {
/// Present iff there is a file. When `opts.compare_lens` is true, the length; otherwise 0.
file: Option<u64>,
/// Iff a `recording` row is present, a `RecordingSummary` from those fields.
recording_row: Option<RecordingSummary>,
/// Iff a `recording_playback` row is present, a `RecordingSummary` computed from the index.
/// This should match the recording row.
playback_row: Option<RecordingSummary>,
/// True iff a `recording_integrity` row is present.
integrity_row: bool,
/// True iff a `garbage` row is present.
garbage_row: bool,
}
type Stream = FnvHashMap<i32, Recording>;
type Dir = FnvHashMap<i32, Stream>;
fn summarize_index(video_index: &[u8]) -> Result<RecordingSummary, Error> {
let mut it = recording::SampleIndexIterator::new();
let mut duration = 0;
let mut video_samples = 0;
let mut video_sync_samples = 0;
let mut bytes = 0;
while it.next(video_index)? {
bytes += it.bytes as u64;
duration += it.duration_90k;
video_samples += 1;
video_sync_samples += it.is_key() as i32;
}
Ok(RecordingSummary {
bytes,
video_samples,
video_sync_samples,
duration,
flags: if it.duration_90k == 0 { db::RecordingFlags::TrailingZero as i32 } else { 0 },
})
}
/// Reads through the given sample file directory.
/// Logs unexpected files and creates a hash map of the files found there.
/// If `opts.compare_lens` is set, the values are lengths; otherwise they're insignificant.
fn read_dir(path: &str, opts: &Options) -> Result<Dir, Error> {
let mut dir = Dir::default();
for e in fs::read_dir(path)? {
let e = e?;
let f = e.file_name();
let f = f.as_bytes();
match f {
b"meta" | b"meta-tmp" => continue,
_ => {},
};
let id = match dir::parse_id(f) {
Ok(id) => id,
Err(_) => {
error!("sample file directory contains file {:?} which isn't an id", f);
continue;
}
};
let len = if opts.compare_lens { e.metadata()?.len() } else { 0 };
let stream = dir.entry(id.stream()).or_insert_with(Stream::default);
stream.entry(id.recording()).or_insert_with(Recording::default).file = Some(len);
}
Ok(dir)
}
/// Looks through a known stream for errors.
fn compare_stream(conn: &rusqlite::Connection, stream_id: i32, opts: &Options,
mut stream: Stream) -> Result<(), Error> {
let start = CompositeId::new(stream_id, 0);
let end = CompositeId::new(stream_id, i32::max_value());
// recording row.
{
let mut stmt = conn.prepare_cached(r#"
select
composite_id,
flags,
sample_file_bytes,
duration_90k,
video_samples,
video_sync_samples
from
recording
where
composite_id between ? and ?
"#)?;
let mut rows = stmt.query(&[&start.0, &end.0])?;
while let Some(row) = rows.next() {
let row = row?;
let id = CompositeId(row.get_checked(0)?);
let s = RecordingSummary {
flags: row.get_checked(1)?,
bytes: row.get_checked::<_, i64>(2)? as u64,
duration: row.get_checked(3)?,
video_samples: row.get_checked(4)?,
video_sync_samples: row.get_checked(5)?,
};
stream.entry(id.recording())
.or_insert_with(Recording::default)
.recording_row = Some(s);
}
}
// recording_playback row.
{
let mut stmt = conn.prepare_cached(r#"
select
composite_id,
video_index
from
recording_playback
where
composite_id between ? and ?
"#)?;
let mut rows = stmt.query(&[&start.0, &end.0])?;
while let Some(row) = rows.next() {
let row = row?;
let id = CompositeId(row.get_checked(0)?);
let video_index: Vec<u8> = row.get_checked(1)?;
let s = match summarize_index(&video_index) {
Ok(s) => s,
Err(e) => {
error!("id {} has bad video_index: {}", id, e);
continue;
},
};
stream.entry(id.recording())
.or_insert_with(Recording::default)
.playback_row = Some(s);
}
}
// recording_integrity row.
{
let mut stmt = conn.prepare_cached(r#"
select
composite_id
from
recording_integrity
where
composite_id between ? and ?
"#)?;
let mut rows = stmt.query(&[&start.0, &end.0])?;
while let Some(row) = rows.next() {
let row = row?;
let id = CompositeId(row.get_checked(0)?);
stream.entry(id.recording())
.or_insert_with(Recording::default)
.integrity_row = true;
}
}
for (&id, recording) in &stream {
let id = CompositeId::new(stream_id, id);
let r = match recording.recording_row {
Some(ref r) => r,
None => {
if !recording.garbage_row || recording.playback_row.is_some() ||
recording.integrity_row {
error!("Missing recording row for {}: {:#?}", id, recording);
}
continue;
},
};
match recording.playback_row {
Some(ref p) => {
if r != p {
error!("Recording {} summary doesn't match video_index: {:#?}", id, recording);
}
},
None => error!("Recording {} missing playback row: {:#?}", id, recording),
}
match recording.file {
Some(len) => if opts.compare_lens && r.bytes != len {
error!("Recording {} length mismatch: {:#?}", id, recording);
},
None => error!("Recording {} missing file: {:#?}", id, recording),
}
}
Ok(())
}

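Aside (not part of this commit): check.rs leans on `CompositeId`, which lives in db.rs (diff suppressed above). The sketch below is an assumed layout, inferred from `parse_id` in dir.rs (16 hex digits = 64 bits) and the `between ? and ?` range queries here: stream id in the high 32 bits, recording id in the low 32.

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct CompositeId(i64);

impl CompositeId {
    fn new(stream_id: i32, recording_id: i32) -> Self {
        // Mask the recording id to the low half so it doesn't sign-extend.
        CompositeId(((stream_id as i64) << 32) | (recording_id as u32 as i64))
    }
    fn stream(self) -> i32 { (self.0 >> 32) as i32 }
    fn recording(self) -> i32 { self.0 as i32 }
}

// Under this layout, all recordings of stream `s` fall in the contiguous
// range CompositeId::new(s, 0) ..= CompositeId::new(s, i32::max_value()),
// which is exactly what compare_stream's `between` queries select.
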
db/db.rs (new file, 2196 lines): file diff suppressed because it is too large.

db/dir.rs (new file, 324 lines)

@@ -0,0 +1,324 @@
// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//! Sample file directory management.
//!
//! This includes opening files for serving, rotating away old files, and saving new files.
use db::CompositeId;
use failure::{Error, Fail};
use libc::{self, c_char};
use protobuf::{self, Message};
use schema;
use std::ffi;
use std::fs;
use std::io::{self, Read, Write};
use std::mem;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::FromRawFd;
use std::sync::Arc;
/// A sample file directory. Typically one per physical disk drive.
///
/// If the directory is used for writing, the `start_syncer` function should be called to start
/// a background thread. This thread manages deleting files and writing new files. It synces the
/// directory and commits these operations to the database in the correct order to maintain the
/// invariants described in `design/schema.md`.
#[derive(Debug)]
pub struct SampleFileDir {
/// The open file descriptor for the directory. The worker uses it to create files and sync the
/// directory. Other threads use it to open sample files for reading during video serving.
pub(crate) fd: Fd,
}
/// A file descriptor associated with a directory (not necessarily the sample file dir).
#[derive(Debug)]
pub struct Fd(libc::c_int);
impl Drop for Fd {
fn drop(&mut self) {
if unsafe { libc::close(self.0) } < 0 {
let e = io::Error::last_os_error();
warn!("Unable to close sample file dir: {}", e);
}
}
}
impl Fd {
/// Opens the given path as a directory.
pub fn open(path: &str, mkdir: bool) -> Result<Fd, io::Error> {
let cstring = ffi::CString::new(path)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
if mkdir && unsafe { libc::mkdir(cstring.as_ptr(), 0o700) } != 0 {
let e = io::Error::last_os_error();
if e.kind() != io::ErrorKind::AlreadyExists {
return Err(e.into());
}
}
let fd = unsafe { libc::open(cstring.as_ptr(), libc::O_DIRECTORY | libc::O_RDONLY, 0) };
if fd < 0 {
return Err(io::Error::last_os_error().into());
}
Ok(Fd(fd))
}
pub(crate) fn sync(&self) -> Result<(), io::Error> {
let res = unsafe { libc::fsync(self.0) };
if res < 0 {
return Err(io::Error::last_os_error())
}
Ok(())
}
/// Opens a sample file within this directory with the given flags and (if creating) mode.
unsafe fn openat(&self, p: *const c_char, flags: libc::c_int, mode: libc::c_int)
-> Result<fs::File, io::Error> {
let fd = libc::openat(self.0, p, flags, mode);
if fd < 0 {
return Err(io::Error::last_os_error())
}
Ok(fs::File::from_raw_fd(fd))
}
/// Locks the directory with the specified `flock` operation.
pub fn lock(&self, operation: libc::c_int) -> Result<(), io::Error> {
let ret = unsafe { libc::flock(self.0, operation) };
if ret < 0 {
return Err(io::Error::last_os_error().into());
}
Ok(())
}
pub fn statfs(&self) -> Result<libc::statvfs, io::Error> {
unsafe {
let mut stat: libc::statvfs = mem::zeroed();
if libc::fstatvfs(self.0, &mut stat) < 0 {
return Err(io::Error::last_os_error())
}
Ok(stat)
}
}
}
pub(crate) unsafe fn renameat(from_fd: &Fd, from_path: *const c_char,
to_fd: &Fd, to_path: *const c_char) -> Result<(), io::Error> {
let result = libc::renameat(from_fd.0, from_path, to_fd.0, to_path);
if result < 0 {
return Err(io::Error::last_os_error())
}
Ok(())
}
/// Reads `dir`'s metadata. If none is found, returns an empty proto.
pub(crate) fn read_meta(dir: &Fd) -> Result<schema::DirMeta, Error> {
let mut meta = schema::DirMeta::default();
let p = unsafe { ffi::CStr::from_ptr("meta\0".as_ptr() as *const c_char) };
let mut f = match unsafe { dir.openat(p.as_ptr(), libc::O_RDONLY, 0) } {
Err(e) => {
if e.kind() == ::std::io::ErrorKind::NotFound {
return Ok(meta);
}
return Err(e.into());
},
Ok(f) => f,
};
let mut data = Vec::new();
f.read_to_end(&mut data)?;
let mut s = protobuf::CodedInputStream::from_bytes(&data);
meta.merge_from(&mut s).map_err(|e| e.context("Unable to parse metadata proto"))?;
Ok(meta)
}
/// Write `dir`'s metadata, clobbering existing data.
pub(crate) fn write_meta(dir: &Fd, meta: &schema::DirMeta) -> Result<(), Error> {
let (tmp_path, final_path) = unsafe {
(ffi::CStr::from_ptr("meta.tmp\0".as_ptr() as *const c_char),
ffi::CStr::from_ptr("meta\0".as_ptr() as *const c_char))
};
let mut f = unsafe { dir.openat(tmp_path.as_ptr(),
libc::O_CREAT | libc::O_TRUNC | libc::O_WRONLY, 0o600)? };
meta.write_to_writer(&mut f)?;
f.sync_all()?;
unsafe { renameat(&dir, tmp_path.as_ptr(), &dir, final_path.as_ptr())? };
dir.sync()?;
Ok(())
}
impl SampleFileDir {
/// Opens the directory using the given metadata.
///
/// `db_meta.in_progress_open` should be filled if the directory should be opened in read/write
/// mode; absent in read-only mode.
pub fn open(path: &str, db_meta: &schema::DirMeta)
-> Result<Arc<SampleFileDir>, Error> {
let read_write = db_meta.in_progress_open.is_some();
let s = SampleFileDir::open_self(path, false)?;
s.fd.lock(if read_write { libc::LOCK_EX } else { libc::LOCK_SH } | libc::LOCK_NB)?;
let dir_meta = read_meta(&s.fd)?;
if !SampleFileDir::consistent(db_meta, &dir_meta) {
bail!("metadata mismatch.\ndb: {:#?}\ndir: {:#?}", db_meta, &dir_meta);
}
if db_meta.in_progress_open.is_some() {
s.write_meta(db_meta)?;
}
Ok(s)
}
/// Returns true if the existing directory and database metadata are consistent; the directory
/// is then openable.
fn consistent(db_meta: &schema::DirMeta, dir_meta: &schema::DirMeta) -> bool {
if dir_meta.db_uuid != db_meta.db_uuid { return false; }
if dir_meta.dir_uuid != db_meta.dir_uuid { return false; }
if db_meta.last_complete_open.is_some() &&
(db_meta.last_complete_open != dir_meta.last_complete_open &&
db_meta.last_complete_open != dir_meta.in_progress_open) {
return false;
}
if db_meta.last_complete_open.is_none() && dir_meta.last_complete_open.is_some() {
return false;
}
true
}
pub(crate) fn create(path: &str, db_meta: &schema::DirMeta)
-> Result<Arc<SampleFileDir>, Error> {
let s = SampleFileDir::open_self(path, true)?;
s.fd.lock(libc::LOCK_EX | libc::LOCK_NB)?;
let old_meta = read_meta(&s.fd)?;
// Verify metadata. We only care that it hasn't been completely opened.
// Partial opening by this or another database is fine; we won't overwrite anything.
if old_meta.last_complete_open.is_some() {
bail!("Can't create dir at path {}: is already in use:\n{:?}", path, old_meta);
}
if !SampleFileDir::is_empty(path)? {
bail!("Can't create dir at path {} with existing files", path);
}
s.write_meta(db_meta)?;
Ok(s)
}
/// Determines if the directory is empty, aside from metadata.
pub(crate) fn is_empty(path: &str) -> Result<bool, Error> {
for e in fs::read_dir(path)? {
let e = e?;
match e.file_name().as_bytes() {
b"." | b".." => continue,
b"meta" | b"meta-tmp" => continue, // existing metadata is fine.
_ => return Ok(false),
}
}
Ok(true)
}
fn open_self(path: &str, create: bool) -> Result<Arc<SampleFileDir>, Error> {
let fd = Fd::open(path, create)
.map_err(|e| format_err!("unable to open sample file dir {}: {}", path, e))?;
Ok(Arc::new(SampleFileDir {
fd,
}))
}
/// Opens the given sample file for reading.
pub fn open_file(&self, composite_id: CompositeId) -> Result<fs::File, io::Error> {
let p = SampleFileDir::get_rel_pathname(composite_id);
unsafe { self.fd.openat(p.as_ptr(), libc::O_RDONLY, 0) }
}
pub fn create_file(&self, composite_id: CompositeId) -> Result<fs::File, io::Error> {
let p = SampleFileDir::get_rel_pathname(composite_id);
unsafe { self.fd.openat(p.as_ptr(), libc::O_WRONLY | libc::O_EXCL | libc::O_CREAT, 0o600) }
}
pub(crate) fn write_meta(&self, meta: &schema::DirMeta) -> Result<(), Error> {
write_meta(&self.fd, meta)
}
pub fn statfs(&self) -> Result<libc::statvfs, io::Error> { self.fd.statfs() }
/// Gets a pathname for a sample file suitable for passing to open or unlink.
fn get_rel_pathname(id: CompositeId) -> [libc::c_char; 17] {
let mut buf = [0u8; 17];
write!(&mut buf[..16], "{:016x}", id.0).expect("can't format id to pathname buf");
// libc::c_char seems to be i8 on some platforms (Linux/arm) and u8 on others (Linux/amd64).
unsafe { mem::transmute::<[u8; 17], [libc::c_char; 17]>(buf) }
}
/// Unlinks the given sample file within this directory.
pub(crate) fn unlink_file(&self, id: CompositeId) -> Result<(), io::Error> {
let p = SampleFileDir::get_rel_pathname(id);
let res = unsafe { libc::unlinkat(self.fd.0, p.as_ptr(), 0) };
if res < 0 {
return Err(io::Error::last_os_error())
}
Ok(())
}
/// Syncs the directory itself.
pub(crate) fn sync(&self) -> Result<(), io::Error> {
self.fd.sync()
}
}
/// Parse a composite id filename.
///
/// These are exactly 16 bytes, lowercase hex.
pub(crate) fn parse_id(id: &[u8]) -> Result<CompositeId, ()> {
if id.len() != 16 {
return Err(());
}
let mut v: u64 = 0;
for i in 0..16 {
v = (v << 4) | match id[i] {
b @ b'0'...b'9' => b - b'0',
b @ b'a'...b'f' => b - b'a' + 10,
_ => return Err(()),
} as u64;
}
Ok(CompositeId(v as i64))
}
#[cfg(test)]
mod tests {
#[test]
fn parse_id() {
use super::parse_id;
assert_eq!(parse_id(b"0000000000000000").unwrap().0, 0);
assert_eq!(parse_id(b"0000000100000002").unwrap().0, 0x0000000100000002);
parse_id(b"").unwrap_err();
parse_id(b"meta").unwrap_err();
parse_id(b"0").unwrap_err();
parse_id(b"000000010000000x").unwrap_err();
}
}

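Aside (not part of this commit): `write_meta` above is the classic crash-safe replace sequence. A std-only sketch of the same ordering, with plain paths standing in for the `openat`-relative descriptors (syncing a directory via `File::open` works on Unix, which is all this code targets):

use std::fs;
use std::io::Write;
use std::path::Path;

fn replace_meta(dir: &Path, bytes: &[u8]) -> std::io::Result<()> {
    let tmp = dir.join("meta.tmp");
    // 1. Write the new contents under a temporary name.
    let mut f = fs::File::create(&tmp)?;
    f.write_all(bytes)?;
    // 2. fsync the file so its contents are durable before they're visible.
    f.sync_all()?;
    // 3. rename(2) is atomic on POSIX: readers see old or new, never a mix.
    fs::rename(&tmp, dir.join("meta"))?;
    // 4. fsync the directory so the rename itself survives a crash.
    fs::File::open(dir)?.sync_all()?;
    Ok(())
}
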
db/lib.rs (new file, 64 lines)

@@ -0,0 +1,64 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#![cfg_attr(all(feature="nightly", test), feature(test))]
#[macro_use] extern crate failure;
extern crate fnv;
#[macro_use] extern crate lazy_static;
extern crate libc;
#[macro_use] extern crate log;
extern crate lru_cache;
extern crate moonfire_base as base;
extern crate mylog;
extern crate openssl;
extern crate parking_lot;
extern crate protobuf;
extern crate regex;
extern crate rusqlite;
extern crate tempdir;
extern crate time;
extern crate uuid;
pub mod check;
mod coding;
pub mod db;
pub mod dir;
mod raw;
pub mod recording;
mod schema;
pub mod upgrade;
pub mod writer;
// This is only for #[cfg(test)], but it's also used by the dependent crate, and it appears that
// #[cfg(test)] is not passed on to dependencies.
pub mod testutil;
pub use db::*;

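Aside (not part of this commit): because `testutil` stays `pub`, the dependent moonfire-nvr crate's tests can use it directly. A sketch; the `TestDb::new(RealClocks {})`, `testutil::init()`, and `.db.lock()` shapes are taken from the recording.rs tests later in this diff, while the test itself is hypothetical.

extern crate moonfire_base as base;
extern crate moonfire_db as db;

#[cfg(test)]
mod tests {
    use base::clock::RealClocks;
    use db::testutil::{self, TestDb};

    #[test]
    fn opens_scratch_db() {
        testutil::init(); // set up logging once, as the tests below do
        let tdb = TestDb::new(RealClocks {});
        let _locked = tdb.db.lock(); // take the database lock like real callers
    }
}
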
db/raw.rs (new file, 379 lines)

@@ -0,0 +1,379 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//! Raw database access: SQLite statements which do not touch any cached state.
use db::{self, CompositeId, FromSqlUuid};
use failure::{Error, ResultExt};
use fnv::FnvHashSet;
use recording;
use rusqlite;
use std::ops::Range;
use uuid::Uuid;
// Note: the magic number "27000000" below is recording::MAX_RECORDING_DURATION.
const LIST_RECORDINGS_BY_TIME_SQL: &'static str = r#"
select
recording.composite_id,
recording.run_offset,
recording.flags,
recording.start_time_90k,
recording.duration_90k,
recording.sample_file_bytes,
recording.video_samples,
recording.video_sync_samples,
recording.video_sample_entry_id,
recording.open_id
from
recording
where
stream_id = :stream_id and
recording.start_time_90k > :start_time_90k - 27000000 and
recording.start_time_90k < :end_time_90k and
recording.start_time_90k + recording.duration_90k > :start_time_90k
order by
recording.start_time_90k
"#;
const LIST_RECORDINGS_BY_ID_SQL: &'static str = r#"
select
recording.composite_id,
recording.run_offset,
recording.flags,
recording.start_time_90k,
recording.duration_90k,
recording.sample_file_bytes,
recording.video_samples,
recording.video_sync_samples,
recording.video_sample_entry_id,
recording.open_id
from
recording
where
:start <= composite_id and
composite_id < :end
order by
recording.composite_id
"#;
const STREAM_MIN_START_SQL: &'static str = r#"
select
start_time_90k
from
recording
where
stream_id = :stream_id
order by start_time_90k limit 1
"#;
const STREAM_MAX_START_SQL: &'static str = r#"
select
start_time_90k,
duration_90k
from
recording
where
stream_id = :stream_id
order by start_time_90k desc;
"#;
const LIST_OLDEST_RECORDINGS_SQL: &'static str = r#"
select
composite_id,
start_time_90k,
duration_90k,
sample_file_bytes
from
recording
where
:start <= composite_id and
composite_id < :end
order by
composite_id
"#;
/// Lists the specified recordings in ascending order by start time, passing them to a supplied
/// function. Given that the function is called with the database lock held, it should be quick.
pub(crate) fn list_recordings_by_time(
conn: &rusqlite::Connection, stream_id: i32, desired_time: Range<recording::Time>,
f: &mut FnMut(db::ListRecordingsRow) -> Result<(), Error>) -> Result<(), Error> {
let mut stmt = conn.prepare_cached(LIST_RECORDINGS_BY_TIME_SQL)?;
let rows = stmt.query_named(&[
(":stream_id", &stream_id),
(":start_time_90k", &desired_time.start.0),
(":end_time_90k", &desired_time.end.0)])?;
list_recordings_inner(rows, f)
}
/// Lists the specified recordings in ascending order by id.
pub(crate) fn list_recordings_by_id(
conn: &rusqlite::Connection, stream_id: i32, desired_ids: Range<i32>,
f: &mut FnMut(db::ListRecordingsRow) -> Result<(), Error>) -> Result<(), Error> {
let mut stmt = conn.prepare_cached(LIST_RECORDINGS_BY_ID_SQL)?;
let rows = stmt.query_named(&[
(":start", &CompositeId::new(stream_id, desired_ids.start).0),
(":end", &CompositeId::new(stream_id, desired_ids.end).0),
])?;
list_recordings_inner(rows, f)
}
fn list_recordings_inner(mut rows: rusqlite::Rows,
f: &mut FnMut(db::ListRecordingsRow) -> Result<(), Error>)
-> Result<(), Error> {
while let Some(row) = rows.next() {
let row = row?;
f(db::ListRecordingsRow {
id: CompositeId(row.get_checked(0)?),
run_offset: row.get_checked(1)?,
flags: row.get_checked(2)?,
start: recording::Time(row.get_checked(3)?),
duration_90k: row.get_checked(4)?,
sample_file_bytes: row.get_checked(5)?,
video_samples: row.get_checked(6)?,
video_sync_samples: row.get_checked(7)?,
video_sample_entry_id: row.get_checked(8)?,
open_id: row.get_checked(9)?,
})?;
}
Ok(())
}
pub(crate) fn get_db_uuid(conn: &rusqlite::Connection) -> Result<Uuid, Error> {
conn.query_row("select uuid from meta", &[], |row| -> Result<Uuid, Error> {
let uuid: FromSqlUuid = row.get_checked(0)?;
Ok(uuid.0)
})?
}
/// Inserts the specified recording (for use from `try_flush` only).
pub(crate) fn insert_recording(tx: &rusqlite::Transaction, o: &db::Open, id: CompositeId,
r: &db::RecordingToInsert) -> Result<(), Error> {
let mut stmt = tx.prepare_cached(r#"
insert into recording (composite_id, stream_id, open_id, run_offset, flags,
sample_file_bytes, start_time_90k, duration_90k,
video_samples, video_sync_samples, video_sample_entry_id)
values (:composite_id, :stream_id, :open_id, :run_offset, :flags,
:sample_file_bytes, :start_time_90k, :duration_90k,
:video_samples, :video_sync_samples,
:video_sample_entry_id)
"#).with_context(|e| format!("can't prepare recording insert: {}", e))?;
stmt.execute_named(&[
(":composite_id", &id.0),
(":stream_id", &(id.stream() as i64)),
(":open_id", &o.id),
(":run_offset", &r.run_offset),
(":flags", &r.flags),
(":sample_file_bytes", &r.sample_file_bytes),
(":start_time_90k", &r.start.0),
(":duration_90k", &r.duration_90k),
(":video_samples", &r.video_samples),
(":video_sync_samples", &r.video_sync_samples),
(":video_sample_entry_id", &r.video_sample_entry_id),
]).with_context(|e| format!("unable to insert recording for {:#?}: {}", r, e))?;
let mut stmt = tx.prepare_cached(r#"
insert into recording_integrity (composite_id, local_time_delta_90k, sample_file_sha1)
values (:composite_id, :local_time_delta_90k, :sample_file_sha1)
"#).with_context(|e| format!("can't prepare recording_integrity insert: {}", e))?;
let sha1 = &r.sample_file_sha1[..];
let delta = match r.run_offset {
0 => None,
_ => Some(r.local_time_delta.0),
};
stmt.execute_named(&[
(":composite_id", &id.0),
(":local_time_delta_90k", &delta),
(":sample_file_sha1", &sha1),
]).with_context(|e| format!("unable to insert recording_integrity for {:#?}: {}", r, e))?;
let mut stmt = tx.prepare_cached(r#"
insert into recording_playback (composite_id, video_index)
values (:composite_id, :video_index)
"#).with_context(|e| format!("can't prepare recording_playback insert: {}", e))?;
stmt.execute_named(&[
(":composite_id", &id.0),
(":video_index", &r.video_index),
]).with_context(|e| format!("unable to insert recording_playback for {:#?}: {}", r, e))?;
Ok(())
}
/// Transfers the given recording range from the `recording` and `recording_playback` tables to the
/// `garbage` table. `sample_file_dir_id` is assumed to be correct.
///
/// Returns the number of recordings which were deleted.
pub(crate) fn delete_recordings(tx: &rusqlite::Transaction, sample_file_dir_id: i32,
ids: Range<CompositeId>)
-> Result<i32, Error> {
let mut insert = tx.prepare_cached(r#"
insert into garbage (sample_file_dir_id, composite_id)
select
:sample_file_dir_id,
composite_id
from
recording
where
:start <= composite_id and
composite_id < :end
"#)?;
let mut del1 = tx.prepare_cached(r#"
delete from recording_playback
where
:start <= composite_id and
composite_id < :end
"#)?;
let mut del2 = tx.prepare_cached(r#"
delete from recording_integrity
where
:start <= composite_id and
composite_id < :end
"#)?;
let mut del3 = tx.prepare_cached(r#"
delete from recording
where
:start <= composite_id and
composite_id < :end
"#)?;
let n = insert.execute_named(&[
(":sample_file_dir_id", &sample_file_dir_id),
(":start", &ids.start.0),
(":end", &ids.end.0),
])?;
let p: &[(&str, &rusqlite::types::ToSql)] = &[
(":start", &ids.start.0),
(":end", &ids.end.0),
];
let n1 = del1.execute_named(p)?;
if n1 != n {
bail!("inserted {} garbage rows but deleted {} recording_playback rows!", n, n1);
}
let n2 = del2.execute_named(p)?;
if n2 > n { // fewer is okay; recording_integrity is optional.
bail!("inserted {} garbage rows but deleted {} recording_integrity rows!", n, n2);
}
let n3 = del3.execute_named(p)?;
if n3 != n {
bail!("deleted {} recording rows but {} recording_playback rows!", n3, n);
}
Ok(n)
}
/// Marks the given sample files as deleted. This shouldn't be called until the files have
/// been `unlink()`ed and the parent directory `fsync()`ed.
pub(crate) fn mark_sample_files_deleted(tx: &rusqlite::Transaction, ids: &[CompositeId])
-> Result<(), Error> {
if ids.is_empty() { return Ok(()); }
let mut stmt = tx.prepare_cached("delete from garbage where composite_id = ?")?;
for &id in ids {
let changes = stmt.execute(&[&id.0])?;
if changes != 1 {
bail!("no garbage row for {}", id);
}
}
Ok(())
}
/// Gets the time range of recordings for the given stream.
pub(crate) fn get_range(conn: &rusqlite::Connection, stream_id: i32)
-> Result<Option<Range<recording::Time>>, Error> {
// The minimum is straightforward, taking advantage of the start_time_90k index.
let mut stmt = conn.prepare_cached(STREAM_MIN_START_SQL)?;
let mut rows = stmt.query_named(&[(":stream_id", &stream_id)])?;
let min_start = match rows.next() {
Some(row) => recording::Time(row?.get_checked(0)?),
None => return Ok(None),
};
// There was a minimum, so there should be a maximum too. Calculating it is less
// straightforward because recordings could overlap. All recordings starting in the
// last MAX_RECORDING_DURATION must be examined in order to take advantage of the
// start_time_90k index.
let mut stmt = conn.prepare_cached(STREAM_MAX_START_SQL)?;
let mut rows = stmt.query_named(&[(":stream_id", &stream_id)])?;
let mut maxes_opt = None;
while let Some(row) = rows.next() {
let row = row?;
let row_start = recording::Time(row.get_checked(0)?);
let row_duration: i64 = row.get_checked(1)?;
let row_end = recording::Time(row_start.0 + row_duration);
let maxes = match maxes_opt {
None => row_start .. row_end,
Some(Range{start: s, end: e}) => s .. ::std::cmp::max(e, row_end),
};
if row_start.0 <= maxes.start.0 - recording::MAX_RECORDING_DURATION {
break;
}
maxes_opt = Some(maxes);
}
let max_end = match maxes_opt {
Some(Range{start: _, end: e}) => e,
None => bail!("missing max for stream {} which had min {}", stream_id, min_start),
};
Ok(Some(min_start .. max_end))
}
/// Lists all garbage ids for the given sample file directory.
pub(crate) fn list_garbage(conn: &rusqlite::Connection, dir_id: i32)
-> Result<FnvHashSet<CompositeId>, Error> {
let mut garbage = FnvHashSet::default();
let mut stmt = conn.prepare_cached(
"select composite_id from garbage where sample_file_dir_id = ?")?;
let mut rows = stmt.query(&[&dir_id])?;
while let Some(row) = rows.next() {
let row = row?;
garbage.insert(CompositeId(row.get_checked(0)?));
}
Ok(garbage)
}
/// Lists the oldest recordings for a stream, starting with the given id.
/// `f` should return true as long as further rows are desired.
pub(crate) fn list_oldest_recordings(conn: &rusqlite::Connection, start: CompositeId,
f: &mut FnMut(db::ListOldestRecordingsRow) -> bool)
-> Result<(), Error> {
let mut stmt = conn.prepare_cached(LIST_OLDEST_RECORDINGS_SQL)?;
let mut rows = stmt.query_named(&[
(":start", &start.0),
(":end", &CompositeId::new(start.stream() + 1, 0).0),
])?;
while let Some(row) = rows.next() {
let row = row?;
let should_continue = f(db::ListOldestRecordingsRow {
id: CompositeId(row.get_checked(0)?),
start: recording::Time(row.get_checked(1)?),
duration: row.get_checked(2)?,
sample_file_bytes: row.get_checked(3)?,
});
if !should_continue {
break;
}
}
Ok(())
}

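Aside (not part of this commit): the "27000000" checks out against the constants in recording.rs below: MAX_RECORDING_DURATION = 5 * 60 * TIME_UNITS_PER_SEC = 5 * 60 * 90,000 = 27,000,000. The sketch restates the overlap predicate that `LIST_RECORDINGS_BY_TIME_SQL` encodes, and why the extra bound lets SQLite serve it from the start_time_90k index.

const TIME_UNITS_PER_SEC: i64 = 90_000;
const MAX_RECORDING_DURATION: i64 = 5 * 60 * TIME_UNITS_PER_SEC; // = 27_000_000

// A recording [rec_start, rec_start + rec_duration) overlaps the window
// [start, end) iff rec_start < end && rec_start + rec_duration > start.
// Since rec_duration <= MAX_RECORDING_DURATION, any overlapping recording
// also satisfies rec_start > start - MAX_RECORDING_DURATION, a condition on
// start_time_90k alone that the index can evaluate without a full scan.
fn may_overlap(rec_start: i64, rec_duration: i64, start: i64, end: i64) -> bool {
    rec_start > start - MAX_RECORDING_DURATION
        && rec_start < end
        && rec_start + rec_duration > start
}
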
db/recording.rs

@@ -29,14 +29,13 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use coding::{append_varint32, decode_varint32, unzigzag32, zigzag32};
use core::str::FromStr;
use db;
use error::Error;
use failure::Error;
use regex::Regex;
use std::ops;
use std::fmt;
use std::ops::Range;
use std::string::String;
use std::str::FromStr;
use time;
pub const TIME_UNITS_PER_SEC: i64 = 90000;
@@ -44,7 +43,7 @@ pub const DESIRED_RECORDING_DURATION: i64 = 60 * TIME_UNITS_PER_SEC;
pub const MAX_RECORDING_DURATION: i64 = 5 * 60 * TIME_UNITS_PER_SEC;
/// A time specified as 90,000ths of a second since 1970-01-01 00:00:00 UTC.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
#[derive(Clone, Copy, Debug, Default, Eq, Ord, PartialEq, PartialOrd)]
pub struct Time(pub i64);
impl Time {
@@ -77,7 +76,7 @@ impl Time {
}
// If that failed, parse as a time string or bust.
let c = RE.captures(s).ok_or_else(|| Error::new(format!("unparseable time {:?}", s)))?;
let c = RE.captures(s).ok_or_else(|| format_err!("unparseable time {:?}", s))?;
let mut tm = time::Tm{
tm_sec: i32::from_str(c.get(6).unwrap().as_str()).unwrap(),
tm_min: i32::from_str(c.get(5).unwrap().as_str()).unwrap(),
@@ -92,11 +91,11 @@ impl Time {
tm_nsec: 0,
};
if tm.tm_mon == 0 {
return Err(Error::new(format!("time {:?} has month 0", s)));
bail!("time {:?} has month 0", s);
}
tm.tm_mon -= 1;
if tm.tm_year < 1900 {
return Err(Error::new(format!("time {:?} has year before 1900", s)));
bail!("time {:?} has year before 1900", s);
}
tm.tm_year -= 1900;
@@ -154,9 +153,15 @@ impl fmt::Display for Time {
/// A duration specified in 1/90,000ths of a second.
/// Durations are typically non-negative, but a `db::CameraDayValue::duration` may be negative.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
#[derive(Clone, Copy, Debug, Default, Eq, Ord, PartialEq, PartialOrd)]
pub struct Duration(pub i64);
impl Duration {
pub fn to_tm_duration(&self) -> time::Duration {
time::Duration::nanoseconds(self.0 * 100000 / 9)
}
}
impl fmt::Display for Duration {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut seconds = self.0 / TIME_UNITS_PER_SEC;
@@ -250,25 +255,20 @@ impl SampleIndexIterator {
}
let (raw1, i1) = match decode_varint32(data, i) {
Ok(tuple) => tuple,
Err(()) => return Err(Error::new(format!("bad varint 1 at offset {}", i))),
Err(()) => bail!("bad varint 1 at offset {}", i),
};
let (raw2, i2) = match decode_varint32(data, i1) {
Ok(tuple) => tuple,
Err(()) => return Err(Error::new(format!("bad varint 2 at offset {}", i1))),
Err(()) => bail!("bad varint 2 at offset {}", i1),
};
let duration_90k_delta = unzigzag32(raw1 >> 1);
self.duration_90k += duration_90k_delta;
if self.duration_90k < 0 {
return Err(Error{
description: format!("negative duration {} after applying delta {}",
self.duration_90k, duration_90k_delta),
cause: None});
bail!("negative duration {} after applying delta {}",
self.duration_90k, duration_90k_delta);
}
if self.duration_90k == 0 && data.len() > i2 {
return Err(Error{
description: format!("zero duration only allowed at end; have {} bytes left",
data.len() - i2),
cause: None});
bail!("zero duration only allowed at end; have {} bytes left", data.len() - i2);
}
let (prev_bytes_key, prev_bytes_nonkey) = match self.is_key() {
true => (self.bytes, self.bytes_other),
@@ -284,11 +284,8 @@ impl SampleIndexIterator {
self.bytes_other = prev_bytes_key;
}
if self.bytes <= 0 {
return Err(Error{
description: format!("non-positive bytes {} after applying delta {} to key={} \
frame at ts {}", self.bytes, bytes_delta, self.is_key(),
self.start_90k),
cause: None});
bail!("non-positive bytes {} after applying delta {} to key={} frame at ts {}",
self.bytes, bytes_delta, self.is_key(), self.start_90k);
}
Ok(true)
}
@@ -299,43 +296,30 @@ impl SampleIndexIterator {
#[derive(Debug)]
pub struct SampleIndexEncoder {
// Internal state.
prev_duration_90k: i32,
prev_bytes_key: i32,
prev_bytes_nonkey: i32,
// Eventual output.
// TODO: move to another struct?
pub sample_file_bytes: i32,
pub total_duration_90k: i32,
pub video_samples: i32,
pub video_sync_samples: i32,
pub video_index: Vec<u8>,
}
impl SampleIndexEncoder {
pub fn new() -> Self {
SampleIndexEncoder{
SampleIndexEncoder {
prev_duration_90k: 0,
prev_bytes_key: 0,
prev_bytes_nonkey: 0,
total_duration_90k: 0,
sample_file_bytes: 0,
video_samples: 0,
video_sync_samples: 0,
video_index: Vec::new(),
}
}
pub fn add_sample(&mut self, duration_90k: i32, bytes: i32, is_key: bool) {
pub fn add_sample(&mut self, duration_90k: i32, bytes: i32, is_key: bool,
r: &mut db::RecordingToInsert) {
let duration_delta = duration_90k - self.prev_duration_90k;
self.prev_duration_90k = duration_90k;
self.total_duration_90k += duration_90k;
self.sample_file_bytes += bytes;
self.video_samples += 1;
r.duration_90k += duration_90k;
r.sample_file_bytes += bytes;
r.video_samples += 1;
let bytes_delta = bytes - if is_key {
let prev = self.prev_bytes_key;
self.video_sync_samples += 1;
r.video_sync_samples += 1;
self.prev_bytes_key = bytes;
prev
} else {
@@ -343,19 +327,17 @@ impl SampleIndexEncoder {
self.prev_bytes_nonkey = bytes;
prev
};
append_varint32((zigzag32(duration_delta) << 1) | (is_key as u32), &mut self.video_index);
append_varint32(zigzag32(bytes_delta), &mut self.video_index);
append_varint32((zigzag32(duration_delta) << 1) | (is_key as u32), &mut r.video_index);
append_varint32(zigzag32(bytes_delta), &mut r.video_index);
}
pub fn has_trailing_zero(&self) -> bool { self.prev_duration_90k == 0 }
}
/// A segment represents a view of some or all of a single recording, starting from a key frame.
/// Used by the `Mp4FileBuilder` class to splice together recordings into a single virtual .mp4.
#[derive(Debug)]
pub struct Segment {
pub camera_id: i32,
pub recording_id: i32,
pub id: db::CompositeId,
pub open_id: u32,
pub start: Time,
/// An iterator positioned at the beginning of the segment, or `None`. Most segments are
@@ -381,9 +363,9 @@ impl Segment {
pub fn new(db: &db::LockedDatabase,
recording: &db::ListRecordingsRow,
desired_range_90k: Range<i32>) -> Result<Segment, Error> {
let mut self_ = Segment{
camera_id: recording.camera_id,
recording_id: recording.id,
let mut self_ = Segment {
id: recording.id,
open_id: recording.open_id,
start: recording.start,
begin: None,
file_end: recording.sample_file_bytes,
@@ -391,16 +373,15 @@ impl Segment {
frames: recording.video_samples as u16,
key_frames: recording.video_sync_samples as u16,
video_sample_entry_id_and_trailing_zero:
recording.video_sample_entry.id |
recording.video_sample_entry_id |
((((recording.flags & db::RecordingFlags::TrailingZero as i32) != 0) as i32) << 31),
};
if self_.desired_range_90k.start > self_.desired_range_90k.end ||
self_.desired_range_90k.end > recording.duration_90k {
return Err(Error::new(format!(
"desired range [{}, {}) invalid for recording of length {}",
self_.desired_range_90k.start, self_.desired_range_90k.end,
recording.duration_90k)));
bail!("desired range [{}, {}) invalid for recording of length {}",
self_.desired_range_90k.start, self_.desired_range_90k.end,
recording.duration_90k);
}
if self_.desired_range_90k.start == 0 &&
@@ -413,17 +394,15 @@ impl Segment {
// Slow path. Need to iterate through the index.
trace!("recording::Segment::new slow path, desired_range_90k={:?}, recording={:#?}",
self_.desired_range_90k, recording);
db.with_recording_playback(self_.camera_id, self_.recording_id, |playback| {
db.with_recording_playback(self_.id, |playback| {
let mut begin = Box::new(SampleIndexIterator::new());
let data = &(&playback).video_index;
let mut it = SampleIndexIterator::new();
if !it.next(data)? {
return Err(Error{description: String::from("no index"),
cause: None});
bail!("no index");
}
if !it.is_key() {
return Err(Error{description: String::from("not key frame"),
cause: None});
bail!("not key frame");
}
// Stop when hitting a frame with this start time.
@@ -456,7 +435,7 @@ impl Segment {
self_.begin = Some(begin);
self_.file_end = it.pos;
self_.video_sample_entry_id_and_trailing_zero =
recording.video_sample_entry.id |
recording.video_sample_entry_id |
(((it.duration_90k == 0) as i32) << 31);
Ok(self_)
})
@@ -480,8 +459,8 @@ impl Segment {
/// Must be called without the database lock held; retrieves video index from the cache.
pub fn foreach<F>(&self, playback: &db::RecordingPlayback, mut f: F) -> Result<(), Error>
where F: FnMut(&SampleIndexIterator) -> Result<(), Error> {
trace!("foreach on recording {}/{}: {} frames, actual_start_90k: {}",
self.camera_id, self.recording_id, self.frames, self.actual_start_90k());
trace!("foreach on recording {}: {} frames, actual_start_90k: {}",
self.id, self.frames, self.actual_start_90k());
let data = &(&playback).video_index;
let mut it = match self.begin {
Some(ref b) => **b,
@@ -489,28 +468,23 @@ impl Segment {
};
if it.uninitialized() {
if !it.next(data)? {
return Err(Error::new(format!("recording {}/{}: no frames",
self.camera_id, self.recording_id)));
bail!("recording {}: no frames", self.id);
}
if !it.is_key() {
return Err(Error::new(format!("recording {}/{}: doesn't start with key frame",
self.camera_id, self.recording_id)));
bail!("recording {}: doesn't start with key frame", self.id);
}
}
let mut have_frame = true;
let mut key_frame = 0;
for i in 0 .. self.frames {
if !have_frame {
return Err(Error::new(format!("recording {}/{}: expected {} frames, found only {}",
self.camera_id, self.recording_id, self.frames,
i+1)));
bail!("recording {}: expected {} frames, found only {}", self.id, self.frames, i+1);
}
if it.is_key() {
key_frame += 1;
if key_frame > self.key_frames {
return Err(Error::new(format!(
"recording {}/{}: more than expected {} key frames",
self.camera_id, self.recording_id, self.key_frames)));
bail!("recording {}: more than expected {} key frames",
self.id, self.key_frames);
}
}
@@ -521,9 +495,8 @@ impl Segment {
have_frame = try!(it.next(data));
}
if key_frame < self.key_frames {
return Err(Error::new(format!("recording {}/{}: expected {} key frames, found only {}",
self.camera_id, self.recording_id, self.key_frames,
key_frame)));
bail!("recording {}: expected {} key frames, found only {}",
self.id, self.key_frames, key_frame);
}
Ok(())
}
@@ -531,6 +504,7 @@
#[cfg(test)]
mod tests {
use base::clock::RealClocks;
use super::*;
use testutil::{self, TestDb};
@@ -584,16 +558,17 @@ mod tests {
#[test]
fn test_encode_example() {
testutil::init();
let mut r = db::RecordingToInsert::default();
let mut e = SampleIndexEncoder::new();
e.add_sample(10, 1000, true);
e.add_sample(9, 10, false);
e.add_sample(11, 15, false);
e.add_sample(10, 12, false);
e.add_sample(10, 1050, true);
assert_eq!(e.video_index, b"\x29\xd0\x0f\x02\x14\x08\x0a\x02\x05\x01\x64");
assert_eq!(10 + 9 + 11 + 10 + 10, e.total_duration_90k);
assert_eq!(5, e.video_samples);
assert_eq!(2, e.video_sync_samples);
e.add_sample(10, 1000, true, &mut r);
e.add_sample(9, 10, false, &mut r);
e.add_sample(11, 15, false, &mut r);
e.add_sample(10, 12, false, &mut r);
e.add_sample(10, 1050, true, &mut r);
assert_eq!(r.video_index, b"\x29\xd0\x0f\x02\x14\x08\x0a\x02\x05\x01\x64");
assert_eq!(10 + 9 + 11 + 10 + 10, r.duration_90k);
assert_eq!(5, r.video_samples);
assert_eq!(2, r.video_sync_samples);
}
/// Tests a round trip from `SampleIndexEncoder` to `SampleIndexIterator`.
@@ -613,19 +588,20 @@ mod tests {
Sample{duration_90k: 18, bytes: 31000, is_key: true},
Sample{duration_90k: 0, bytes: 1000, is_key: false},
];
let mut r = db::RecordingToInsert::default();
let mut e = SampleIndexEncoder::new();
for sample in &samples {
e.add_sample(sample.duration_90k, sample.bytes, sample.is_key);
e.add_sample(sample.duration_90k, sample.bytes, sample.is_key, &mut r);
}
let mut it = SampleIndexIterator::new();
for sample in &samples {
assert!(it.next(&e.video_index).unwrap());
assert!(it.next(&r.video_index).unwrap());
assert_eq!(sample,
&Sample{duration_90k: it.duration_90k,
bytes: it.bytes,
is_key: it.is_key()});
}
assert!(!it.next(&e.video_index).unwrap());
assert!(!it.next(&r.video_index).unwrap());
}
/// Tests that `SampleIndexIterator` spots several classes of errors.
@@ -649,14 +625,14 @@ mod tests {
];
for test in &tests {
let mut it = SampleIndexIterator::new();
assert_eq!(it.next(test.encoded).unwrap_err().description, test.err);
assert_eq!(it.next(test.encoded).unwrap_err().to_string(), test.err);
}
}
fn get_frames<F, T>(db: &db::Database, segment: &Segment, f: F) -> Vec<T>
where F: Fn(&SampleIndexIterator) -> T {
let mut v = Vec::new();
db.lock().with_recording_playback(segment.camera_id, segment.recording_id, |playback| {
db.lock().with_recording_playback(segment.id, |playback| {
segment.foreach(playback, |it| { v.push(f(it)); Ok(()) })
}).unwrap();
v
@@ -667,14 +643,15 @@ mod tests {
#[test]
fn test_segment_clipping_with_all_sync() {
testutil::init();
let mut r = db::RecordingToInsert::default();
let mut encoder = SampleIndexEncoder::new();
for i in 1..6 {
let duration_90k = 2 * i;
let bytes = 3 * i;
encoder.add_sample(duration_90k, bytes, true);
encoder.add_sample(duration_90k, bytes, true, &mut r);
}
let db = TestDb::new();
let row = db.create_recording_from_encoder(encoder);
let db = TestDb::new(RealClocks {});
let row = db.insert_recording_from_encoder(r);
// Time range [2, 2 + 4 + 6 + 8) means the 2nd, 3rd, 4th samples should be
// included.
let segment = Segment::new(&db.db.lock(), &row, 2 .. 2+4+6+8).unwrap();
@ -685,14 +662,15 @@ mod tests {
#[test]
fn test_segment_clipping_with_half_sync() {
testutil::init();
let mut r = db::RecordingToInsert::default();
let mut encoder = SampleIndexEncoder::new();
for i in 1..6 {
let duration_90k = 2 * i;
let bytes = 3 * i;
encoder.add_sample(duration_90k, bytes, (i % 2) == 1);
encoder.add_sample(duration_90k, bytes, (i % 2) == 1, &mut r);
}
let db = TestDb::new();
let row = db.create_recording_from_encoder(encoder);
let db = TestDb::new(RealClocks {});
let row = db.insert_recording_from_encoder(r);
// Time range [2 + 4 + 6, 2 + 4 + 6 + 8) means the 4th sample should be included.
// The 3rd also gets pulled in because it is a sync frame and the 4th is not.
let segment = Segment::new(&db.db.lock(), &row, 2+4+6 .. 2+4+6+8).unwrap();
@ -702,12 +680,13 @@ mod tests {
#[test]
fn test_segment_clipping_with_trailing_zero() {
testutil::init();
let mut r = db::RecordingToInsert::default();
let mut encoder = SampleIndexEncoder::new();
encoder.add_sample(1, 1, true);
encoder.add_sample(1, 2, true);
encoder.add_sample(0, 3, true);
let db = TestDb::new();
let row = db.create_recording_from_encoder(encoder);
encoder.add_sample(1, 1, true, &mut r);
encoder.add_sample(1, 2, true, &mut r);
encoder.add_sample(0, 3, true, &mut r);
let db = TestDb::new(RealClocks {});
let row = db.insert_recording_from_encoder(r);
let segment = Segment::new(&db.db.lock(), &row, 1 .. 2).unwrap();
assert_eq!(&get_frames(&db.db, &segment, |it| it.bytes), &[2, 3]);
}
@ -716,10 +695,11 @@ mod tests {
#[test]
fn test_segment_zero_desired_duration() {
testutil::init();
let mut r = db::RecordingToInsert::default();
let mut encoder = SampleIndexEncoder::new();
encoder.add_sample(1, 1, true);
let db = TestDb::new();
let row = db.create_recording_from_encoder(encoder);
encoder.add_sample(1, 1, true, &mut r);
let db = TestDb::new(RealClocks {});
let row = db.insert_recording_from_encoder(r);
let segment = Segment::new(&db.db.lock(), &row, 0 .. 0).unwrap();
assert_eq!(&get_frames(&db.db, &segment, |it| it.bytes), &[1]);
}
@ -729,14 +709,15 @@ mod tests {
#[test]
fn test_segment_fast_path() {
testutil::init();
let mut r = db::RecordingToInsert::default();
let mut encoder = SampleIndexEncoder::new();
for i in 1..6 {
let duration_90k = 2 * i;
let bytes = 3 * i;
encoder.add_sample(duration_90k, bytes, (i % 2) == 1);
encoder.add_sample(duration_90k, bytes, (i % 2) == 1, &mut r);
}
let db = TestDb::new();
let row = db.create_recording_from_encoder(encoder);
let db = TestDb::new(RealClocks {});
let row = db.insert_recording_from_encoder(r);
let segment = Segment::new(&db.db.lock(), &row, 0 .. 2+4+6+8+10).unwrap();
assert_eq!(&get_frames(&db.db, &segment, |it| it.duration_90k), &[2, 4, 6, 8, 10]);
}
@ -744,12 +725,13 @@ mod tests {
#[test]
fn test_segment_fast_path_with_trailing_zero() {
testutil::init();
let mut r = db::RecordingToInsert::default();
let mut encoder = SampleIndexEncoder::new();
encoder.add_sample(1, 1, true);
encoder.add_sample(1, 2, true);
encoder.add_sample(0, 3, true);
let db = TestDb::new();
let row = db.create_recording_from_encoder(encoder);
encoder.add_sample(1, 1, true, &mut r);
encoder.add_sample(1, 2, true, &mut r);
encoder.add_sample(0, 3, true, &mut r);
let db = TestDb::new(RealClocks {});
let row = db.insert_recording_from_encoder(r);
let segment = Segment::new(&db.db.lock(), &row, 0 .. 2).unwrap();
assert_eq!(&get_frames(&db.db, &segment, |it| it.bytes), &[1, 2, 3]);
}

62
db/schema.proto Normal file

@ -0,0 +1,62 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
syntax = "proto3";
// Metadata stored in sample file dirs as "<dir>/meta". This is checked
// against the metadata stored within the database to detect inconsistencies
// between the directory and database, such as those described in
// design/schema.md.
message DirMeta {
// A uuid associated with the database, in binary form. dir_uuid is strictly
// more powerful, but it improves diagnostics to know if the directory
// belongs to the expected database at all or not.
bytes db_uuid = 1;
// A uuid associated with the directory itself.
bytes dir_uuid = 2;
// Corresponds to an entry in the `open` database table.
message Open {
uint32 id = 1;
bytes uuid = 2;
}
// The last open that was known to be recorded in the database as completed.
// Absent if this has never happened. Note this can backtrack in exactly one
// scenario: when deleting the directory, after all associated files have
// been deleted, last_complete_open can be moved to in_progress_open.
Open last_complete_open = 3;
// The last run which is in progress, if different from last_complete_open.
// This may or may not have been recorded in the database, but it's
// guaranteed that no data has yet been written by this open.
Open in_progress_open = 4;
}
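A sketch of the consistency check this metadata enables, using only the generated accessors from db/schema.rs below; the function name and signature are illustrative, and the real logic lives elsewhere in this commit (dir.rs):

    // Illustrative only: compares a directory's on-disk meta against the
    // uuids the database expects for it.
    fn check_dir_meta(meta: &DirMeta, db_uuid: &[u8], dir_uuid: &[u8]) -> Result<(), Error> {
        if meta.get_db_uuid() != db_uuid {
            bail!("dir meta has db uuid {:?}; expected {:?}", meta.get_db_uuid(), db_uuid);
        }
        if meta.get_dir_uuid() != dir_uuid {
            bail!("dir meta has dir uuid {:?}; expected {:?}", meta.get_dir_uuid(), dir_uuid);
        }
        Ok(())
    }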

712
db/schema.rs Normal file

@ -0,0 +1,712 @@
// This file is generated. Do not edit
// @generated
// https://github.com/Manishearth/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
#[derive(PartialEq,Clone,Default)]
pub struct DirMeta {
// message fields
pub db_uuid: ::std::vec::Vec<u8>,
pub dir_uuid: ::std::vec::Vec<u8>,
pub last_complete_open: ::protobuf::SingularPtrField<DirMeta_Open>,
pub in_progress_open: ::protobuf::SingularPtrField<DirMeta_Open>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::protobuf::CachedSize,
}
// see codegen.rs for the explanation why impl Sync explicitly
unsafe impl ::std::marker::Sync for DirMeta {}
impl DirMeta {
pub fn new() -> DirMeta {
::std::default::Default::default()
}
pub fn default_instance() -> &'static DirMeta {
static mut instance: ::protobuf::lazy::Lazy<DirMeta> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const DirMeta,
};
unsafe {
instance.get(DirMeta::new)
}
}
// bytes db_uuid = 1;
pub fn clear_db_uuid(&mut self) {
self.db_uuid.clear();
}
// Param is passed by value, moved
pub fn set_db_uuid(&mut self, v: ::std::vec::Vec<u8>) {
self.db_uuid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_db_uuid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.db_uuid
}
// Take field
pub fn take_db_uuid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.db_uuid, ::std::vec::Vec::new())
}
pub fn get_db_uuid(&self) -> &[u8] {
&self.db_uuid
}
fn get_db_uuid_for_reflect(&self) -> &::std::vec::Vec<u8> {
&self.db_uuid
}
fn mut_db_uuid_for_reflect(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.db_uuid
}
// bytes dir_uuid = 2;
pub fn clear_dir_uuid(&mut self) {
self.dir_uuid.clear();
}
// Param is passed by value, moved
pub fn set_dir_uuid(&mut self, v: ::std::vec::Vec<u8>) {
self.dir_uuid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_dir_uuid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.dir_uuid
}
// Take field
pub fn take_dir_uuid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.dir_uuid, ::std::vec::Vec::new())
}
pub fn get_dir_uuid(&self) -> &[u8] {
&self.dir_uuid
}
fn get_dir_uuid_for_reflect(&self) -> &::std::vec::Vec<u8> {
&self.dir_uuid
}
fn mut_dir_uuid_for_reflect(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.dir_uuid
}
// .DirMeta.Open last_complete_open = 3;
pub fn clear_last_complete_open(&mut self) {
self.last_complete_open.clear();
}
pub fn has_last_complete_open(&self) -> bool {
self.last_complete_open.is_some()
}
// Param is passed by value, moved
pub fn set_last_complete_open(&mut self, v: DirMeta_Open) {
self.last_complete_open = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_last_complete_open(&mut self) -> &mut DirMeta_Open {
if self.last_complete_open.is_none() {
self.last_complete_open.set_default();
}
self.last_complete_open.as_mut().unwrap()
}
// Take field
pub fn take_last_complete_open(&mut self) -> DirMeta_Open {
self.last_complete_open.take().unwrap_or_else(|| DirMeta_Open::new())
}
pub fn get_last_complete_open(&self) -> &DirMeta_Open {
self.last_complete_open.as_ref().unwrap_or_else(|| DirMeta_Open::default_instance())
}
fn get_last_complete_open_for_reflect(&self) -> &::protobuf::SingularPtrField<DirMeta_Open> {
&self.last_complete_open
}
fn mut_last_complete_open_for_reflect(&mut self) -> &mut ::protobuf::SingularPtrField<DirMeta_Open> {
&mut self.last_complete_open
}
// .DirMeta.Open in_progress_open = 4;
pub fn clear_in_progress_open(&mut self) {
self.in_progress_open.clear();
}
pub fn has_in_progress_open(&self) -> bool {
self.in_progress_open.is_some()
}
// Param is passed by value, moved
pub fn set_in_progress_open(&mut self, v: DirMeta_Open) {
self.in_progress_open = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_in_progress_open(&mut self) -> &mut DirMeta_Open {
if self.in_progress_open.is_none() {
self.in_progress_open.set_default();
}
self.in_progress_open.as_mut().unwrap()
}
// Take field
pub fn take_in_progress_open(&mut self) -> DirMeta_Open {
self.in_progress_open.take().unwrap_or_else(|| DirMeta_Open::new())
}
pub fn get_in_progress_open(&self) -> &DirMeta_Open {
self.in_progress_open.as_ref().unwrap_or_else(|| DirMeta_Open::default_instance())
}
fn get_in_progress_open_for_reflect(&self) -> &::protobuf::SingularPtrField<DirMeta_Open> {
&self.in_progress_open
}
fn mut_in_progress_open_for_reflect(&mut self) -> &mut ::protobuf::SingularPtrField<DirMeta_Open> {
&mut self.in_progress_open
}
}
impl ::protobuf::Message for DirMeta {
fn is_initialized(&self) -> bool {
for v in &self.last_complete_open {
if !v.is_initialized() {
return false;
}
};
for v in &self.in_progress_open {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.db_uuid)?;
},
2 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.dir_uuid)?;
},
3 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.last_complete_open)?;
},
4 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.in_progress_open)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.db_uuid.is_empty() {
my_size += ::protobuf::rt::bytes_size(1, &self.db_uuid);
}
if !self.dir_uuid.is_empty() {
my_size += ::protobuf::rt::bytes_size(2, &self.dir_uuid);
}
if let Some(ref v) = self.last_complete_open.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let Some(ref v) = self.in_progress_open.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
if !self.db_uuid.is_empty() {
os.write_bytes(1, &self.db_uuid)?;
}
if !self.dir_uuid.is_empty() {
os.write_bytes(2, &self.dir_uuid)?;
}
if let Some(ref v) = self.last_complete_open.as_ref() {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let Some(ref v) = self.in_progress_open.as_ref() {
os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn as_any_mut(&mut self) -> &mut ::std::any::Any {
self as &mut ::std::any::Any
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for DirMeta {
fn new() -> DirMeta {
DirMeta::new()
}
fn descriptor_static(_: ::std::option::Option<DirMeta>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"db_uuid",
DirMeta::get_db_uuid_for_reflect,
DirMeta::mut_db_uuid_for_reflect,
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"dir_uuid",
DirMeta::get_dir_uuid_for_reflect,
DirMeta::mut_dir_uuid_for_reflect,
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<DirMeta_Open>>(
"last_complete_open",
DirMeta::get_last_complete_open_for_reflect,
DirMeta::mut_last_complete_open_for_reflect,
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<DirMeta_Open>>(
"in_progress_open",
DirMeta::get_in_progress_open_for_reflect,
DirMeta::mut_in_progress_open_for_reflect,
));
::protobuf::reflect::MessageDescriptor::new::<DirMeta>(
"DirMeta",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for DirMeta {
fn clear(&mut self) {
self.clear_db_uuid();
self.clear_dir_uuid();
self.clear_last_complete_open();
self.clear_in_progress_open();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for DirMeta {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for DirMeta {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct DirMeta_Open {
// message fields
pub id: u32,
pub uuid: ::std::vec::Vec<u8>,
// special fields
unknown_fields: ::protobuf::UnknownFields,
cached_size: ::protobuf::CachedSize,
}
// see codegen.rs for the explanation why impl Sync explicitly
unsafe impl ::std::marker::Sync for DirMeta_Open {}
impl DirMeta_Open {
pub fn new() -> DirMeta_Open {
::std::default::Default::default()
}
pub fn default_instance() -> &'static DirMeta_Open {
static mut instance: ::protobuf::lazy::Lazy<DirMeta_Open> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const DirMeta_Open,
};
unsafe {
instance.get(DirMeta_Open::new)
}
}
// uint32 id = 1;
pub fn clear_id(&mut self) {
self.id = 0;
}
// Param is passed by value, moved
pub fn set_id(&mut self, v: u32) {
self.id = v;
}
pub fn get_id(&self) -> u32 {
self.id
}
fn get_id_for_reflect(&self) -> &u32 {
&self.id
}
fn mut_id_for_reflect(&mut self) -> &mut u32 {
&mut self.id
}
// bytes uuid = 2;
pub fn clear_uuid(&mut self) {
self.uuid.clear();
}
// Param is passed by value, moved
pub fn set_uuid(&mut self, v: ::std::vec::Vec<u8>) {
self.uuid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_uuid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.uuid
}
// Take field
pub fn take_uuid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.uuid, ::std::vec::Vec::new())
}
pub fn get_uuid(&self) -> &[u8] {
&self.uuid
}
fn get_uuid_for_reflect(&self) -> &::std::vec::Vec<u8> {
&self.uuid
}
fn mut_uuid_for_reflect(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.uuid
}
}
impl ::protobuf::Message for DirMeta_Open {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.id = tmp;
},
2 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.uuid)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.id != 0 {
my_size += ::protobuf::rt::value_size(1, self.id, ::protobuf::wire_format::WireTypeVarint);
}
if !self.uuid.is_empty() {
my_size += ::protobuf::rt::bytes_size(2, &self.uuid);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
if self.id != 0 {
os.write_uint32(1, self.id)?;
}
if !self.uuid.is_empty() {
os.write_bytes(2, &self.uuid)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn as_any_mut(&mut self) -> &mut ::std::any::Any {
self as &mut ::std::any::Any
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
::protobuf::MessageStatic::descriptor_static(None::<Self>)
}
}
impl ::protobuf::MessageStatic for DirMeta_Open {
fn new() -> DirMeta_Open {
DirMeta_Open::new()
}
fn descriptor_static(_: ::std::option::Option<DirMeta_Open>) -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint32>(
"id",
DirMeta_Open::get_id_for_reflect,
DirMeta_Open::mut_id_for_reflect,
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"uuid",
DirMeta_Open::get_uuid_for_reflect,
DirMeta_Open::mut_uuid_for_reflect,
));
::protobuf::reflect::MessageDescriptor::new::<DirMeta_Open>(
"DirMeta_Open",
fields,
file_descriptor_proto()
)
})
}
}
}
impl ::protobuf::Clear for DirMeta_Open {
fn clear(&mut self) {
self.clear_id();
self.clear_uuid();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for DirMeta_Open {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for DirMeta_Open {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n\x0cschema.proto\"\xdf\x01\n\x07DirMeta\x12\x17\n\x07db_uuid\x18\x01\
\x20\x01(\x0cR\x06dbUuid\x12\x19\n\x08dir_uuid\x18\x02\x20\x01(\x0cR\x07\
dirUuid\x12;\n\x12last_complete_open\x18\x03\x20\x01(\x0b2\r.DirMeta.Ope\
nR\x10lastCompleteOpen\x127\n\x10in_progress_open\x18\x04\x20\x01(\x0b2\
\r.DirMeta.OpenR\x0einProgressOpen\x1a*\n\x04Open\x12\x0e\n\x02id\x18\
\x01\x20\x01(\rR\x02id\x12\x12\n\x04uuid\x18\x02\x20\x01(\x0cR\x04uuidJ\
\xdf\x1b\n\x06\x12\x04\x1e\0H\x01\n\xc2\x0b\n\x01\x0c\x12\x03\x1e\0\x122\
\xb7\x0b\x20This\x20file\x20is\x20part\x20of\x20Moonfire\x20NVR,\x20a\
\x20security\x20camera\x20digital\x20video\x20recorder.\n\x20Copyright\
\x20(C)\x202018\x20Scott\x20Lamb\x20<slamb@slamb.org>\n\n\x20This\x20pro\
gram\x20is\x20free\x20software:\x20you\x20can\x20redistribute\x20it\x20a\
nd/or\x20modify\n\x20it\x20under\x20the\x20terms\x20of\x20the\x20GNU\x20\
General\x20Public\x20License\x20as\x20published\x20by\n\x20the\x20Free\
\x20Software\x20Foundation,\x20either\x20version\x203\x20of\x20the\x20Li\
cense,\x20or\n\x20(at\x20your\x20option)\x20any\x20later\x20version.\n\n\
\x20In\x20addition,\x20as\x20a\x20special\x20exception,\x20the\x20copyri\
ght\x20holders\x20give\n\x20permission\x20to\x20link\x20the\x20code\x20o\
f\x20portions\x20of\x20this\x20program\x20with\x20the\n\x20OpenSSL\x20li\
brary\x20under\x20certain\x20conditions\x20as\x20described\x20in\x20each\
\n\x20individual\x20source\x20file,\x20and\x20distribute\x20linked\x20co\
mbinations\x20including\n\x20the\x20two.\n\n\x20You\x20must\x20obey\x20t\
he\x20GNU\x20General\x20Public\x20License\x20in\x20all\x20respects\x20fo\
r\x20all\n\x20of\x20the\x20code\x20used\x20other\x20than\x20OpenSSL.\x20\
If\x20you\x20modify\x20file(s)\x20with\x20this\n\x20exception,\x20you\
\x20may\x20extend\x20this\x20exception\x20to\x20your\x20version\x20of\
\x20the\n\x20file(s),\x20but\x20you\x20are\x20not\x20obligated\x20to\x20\
do\x20so.\x20If\x20you\x20do\x20not\x20wish\x20to\x20do\n\x20so,\x20dele\
te\x20this\x20exception\x20statement\x20from\x20your\x20version.\x20If\
\x20you\x20delete\n\x20this\x20exception\x20statement\x20from\x20all\x20\
source\x20files\x20in\x20the\x20program,\x20then\n\x20also\x20delete\x20\
it\x20here.\n\n\x20This\x20program\x20is\x20distributed\x20in\x20the\x20\
hope\x20that\x20it\x20will\x20be\x20useful,\n\x20but\x20WITHOUT\x20ANY\
\x20WARRANTY;\x20without\x20even\x20the\x20implied\x20warranty\x20of\n\
\x20MERCHANTABILITY\x20or\x20FITNESS\x20FOR\x20A\x20PARTICULAR\x20PURPOS\
E.\x20\x20See\x20the\n\x20GNU\x20General\x20Public\x20License\x20for\x20\
more\x20details.\n\n\x20You\x20should\x20have\x20received\x20a\x20copy\
\x20of\x20the\x20GNU\x20General\x20Public\x20License\n\x20along\x20with\
\x20this\x20program.\x20\x20If\x20not,\x20see\x20<http://www.gnu.org/lic\
enses/>.\n\n\xc4\x07\n\x02\x04\0\x12\x041\0H\x01\x1a\xb7\x07\x20Metadata\
\x20stored\x20in\x20sample\x20file\x20dirs\x20as\x20\"<dir>/meta\".\x20T\
his\x20is\x20checked\n\x20against\x20the\x20metadata\x20stored\x20within\
\x20the\x20database\x20to\x20detect\x20inconsistencies\n\x20between\x20t\
he\x20directory\x20and\x20database,\x20including\x20the\x20following:\n\
\n\x20*\x20sample\x20file\x20directory's\x20disk\x20not\x20being\x20moun\
ted.\n\x20*\x20mixing\x20up\x20mount\x20points\x20of\x20two\x20sample\
\x20file\x20directories\x20belonging\x20to\x20the\n\x20\x20\x20same\x20d\
atabase.\n\x20*\x20directory\x20renames\x20not\x20properly\x20recorded\
\x20in\x20the\x20database.\n\x20*\x20restoration\x20of\x20the\x20databas\
e\x20from\x20backup\x20but\x20not\x20the\x20sample\x20file\n\x20\x20\x20\
directory.\n\x20*\x20restoration\x20of\x20the\x20sample\x20file\x20direc\
tory\x20but\x20not\x20the\x20database.\n\x20*\x20two\x20sample\x20file\
\x20directory\x20paths\x20pointed\x20at\x20the\x20same\x20inode\x20via\
\x20symlinks\n\x20\x20\x20or\x20non-canonical\x20paths.\x20(Note\x20that\
\x20flock(2)\x20has\x20a\x20design\x20flaw\x20in\x20which\n\x20\x20\x20m\
ultiple\x20file\x20descriptors\x20can\x20share\x20a\x20lock,\x20so\x20th\
e\x20current\x20locking\x20scheme\n\x20\x20\x20is\x20not\x20sufficient\
\x20to\x20detect\x20this\x20otherwise.)\n\x20*\x20database\x20and\x20sam\
ple\x20file\x20directories\x20forked\x20from\x20the\x20same\x20version,\
\x20opened\n\x20\x20\x20the\x20same\x20number\x20of\x20times,\x20then\
\x20crossed.\n\n\n\n\x03\x04\0\x01\x12\x031\x08\x0f\n\xcf\x01\n\x04\x04\
\0\x02\0\x12\x035\x02\x14\x1a\xc1\x01\x20A\x20uuid\x20associated\x20with\
\x20the\x20database,\x20in\x20binary\x20form.\x20dir_uuid\x20is\x20stric\
tly\n\x20more\x20powerful,\x20but\x20it\x20improves\x20diagnostics\x20to\
\x20know\x20if\x20the\x20directory\n\x20belongs\x20to\x20the\x20expected\
\x20database\x20at\x20all\x20or\x20not.\n\n\r\n\x05\x04\0\x02\0\x04\x12\
\x045\x021\x11\n\x0c\n\x05\x04\0\x02\0\x05\x12\x035\x02\x07\n\x0c\n\x05\
\x04\0\x02\0\x01\x12\x035\x08\x0f\n\x0c\n\x05\x04\0\x02\0\x03\x12\x035\
\x12\x13\n;\n\x04\x04\0\x02\x01\x12\x038\x02\x15\x1a.\x20A\x20uuid\x20as\
sociated\x20with\x20the\x20directory\x20itself.\n\n\r\n\x05\x04\0\x02\
\x01\x04\x12\x048\x025\x14\n\x0c\n\x05\x04\0\x02\x01\x05\x12\x038\x02\
\x07\n\x0c\n\x05\x04\0\x02\x01\x01\x12\x038\x08\x10\n\x0c\n\x05\x04\0\
\x02\x01\x03\x12\x038\x13\x14\nE\n\x04\x04\0\x03\0\x12\x04;\x02>\x03\x1a\
7\x20Corresponds\x20to\x20an\x20entry\x20in\x20the\x20`open`\x20database\
\x20table.\n\n\x0c\n\x05\x04\0\x03\0\x01\x12\x03;\n\x0e\n\r\n\x06\x04\0\
\x03\0\x02\0\x12\x03<\x04\x12\n\x0f\n\x07\x04\0\x03\0\x02\0\x04\x12\x04<\
\x04;\x10\n\x0e\n\x07\x04\0\x03\0\x02\0\x05\x12\x03<\x04\n\n\x0e\n\x07\
\x04\0\x03\0\x02\0\x01\x12\x03<\x0b\r\n\x0e\n\x07\x04\0\x03\0\x02\0\x03\
\x12\x03<\x10\x11\n\r\n\x06\x04\0\x03\0\x02\x01\x12\x03=\x04\x13\n\x0f\n\
\x07\x04\0\x03\0\x02\x01\x04\x12\x04=\x04<\x12\n\x0e\n\x07\x04\0\x03\0\
\x02\x01\x05\x12\x03=\x04\t\n\x0e\n\x07\x04\0\x03\0\x02\x01\x01\x12\x03=\
\n\x0e\n\x0e\n\x07\x04\0\x03\0\x02\x01\x03\x12\x03=\x11\x12\n|\n\x04\x04\
\0\x02\x02\x12\x03B\x02\x1e\x1ao\x20The\x20last\x20open\x20that\x20was\
\x20known\x20to\x20be\x20recorded\x20in\x20the\x20database\x20as\x20comp\
leted.\n\x20Absent\x20if\x20this\x20has\x20never\x20happened.\n\n\r\n\
\x05\x04\0\x02\x02\x04\x12\x04B\x02>\x03\n\x0c\n\x05\x04\0\x02\x02\x06\
\x12\x03B\x02\x06\n\x0c\n\x05\x04\0\x02\x02\x01\x12\x03B\x07\x19\n\x0c\n\
\x05\x04\0\x02\x02\x03\x12\x03B\x1c\x1d\n\xd6\x01\n\x04\x04\0\x02\x03\
\x12\x03G\x02\x1c\x1a\xc8\x01\x20The\x20last\x20run\x20which\x20is\x20in\
\x20progress,\x20if\x20different\x20from\x20last_complete_open.\n\x20Thi\
s\x20may\x20or\x20may\x20not\x20have\x20been\x20recorded\x20in\x20the\
\x20database,\x20but\x20it's\n\x20guaranteed\x20that\x20no\x20data\x20ha\
s\x20yet\x20been\x20written\x20by\x20this\x20open.\n\n\r\n\x05\x04\0\x02\
\x03\x04\x12\x04G\x02B\x1e\n\x0c\n\x05\x04\0\x02\x03\x06\x12\x03G\x02\
\x06\n\x0c\n\x05\x04\0\x02\x03\x01\x12\x03G\x07\x17\n\x0c\n\x05\x04\0\
\x02\x03\x03\x12\x03G\x1a\x1bb\x06proto3\
";
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto,
};
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
unsafe {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
}
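Although this file is generated, it defines the whole serialization surface for the dir meta. A round-trip sketch using only methods shown above plus the protobuf crate (the uuid values are illustrative 16-byte placeholders):

    use protobuf::Message;  // for write_to_bytes()

    let mut meta = DirMeta::new();
    meta.set_db_uuid(b"0123456789abcdef".to_vec());
    meta.set_dir_uuid(b"fedcba9876543210".to_vec());
    let bytes = meta.write_to_bytes()?;
    let parsed: DirMeta = protobuf::parse_from_bytes(&bytes)?;
    assert_eq!(meta, parsed);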

407
db/schema.sql Normal file

@ -0,0 +1,407 @@
-- This file is part of Moonfire NVR, a security camera digital video recorder.
-- Copyright (C) 2016 Scott Lamb <slamb@slamb.org>
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- In addition, as a special exception, the copyright holders give
-- permission to link the code of portions of this program with the
-- OpenSSL library under certain conditions as described in each
-- individual source file, and distribute linked combinations including
-- the two.
--
-- You must obey the GNU General Public License in all respects for all
-- of the code used other than OpenSSL. If you modify file(s) with this
-- exception, you may extend this exception to your version of the
-- file(s), but you are not obligated to do so. If you do not wish to do
-- so, delete this exception statement from your version. If you delete
-- this exception statement from all source files in the program, then
-- also delete it here.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
--
-- schema.sql: SQLite3 database schema for Moonfire NVR.
-- See also design/schema.md.
-- Database metadata. There should be exactly one row in this table.
create table meta (
uuid blob not null check (length(uuid) = 16)
);
-- This table tracks the schema version.
-- There is one row for the initial database creation (inserted below, after the
-- create statements) and one for each upgrade procedure (if any).
create table version (
id integer primary key,
-- The unix time as of the creation/upgrade, as determined by
-- cast(strftime('%s', 'now') as int).
unix_time integer not null,
-- Optional notes on the creation/upgrade; could include the binary version.
notes text
);
-- Tracks every time the database has been opened in read/write mode.
-- This is used to ensure directories are in sync with the database (see
-- schema.proto:DirMeta), to disambiguate uncommitted recordings, and
-- potentially to understand time problems.
create table open (
id integer primary key,
uuid blob unique not null check (length(uuid) = 16),
-- Information about when / how long the database was open. These may be all
-- null, for example in the open that represents all information written
-- prior to database version 3.
-- System time when the database was opened, in 90 kHz units since
-- 1970-01-01 00:00:00Z excluding leap seconds.
start_time_90k integer,
-- System time when the database was closed or (on crash) last flushed.
end_time_90k integer,
-- How long the database was open. This is end_time_90k - start_time_90k if
-- there were no time steps or leap seconds during this time.
duration_90k integer
);
create table sample_file_dir (
id integer primary key,
path text unique not null,
uuid blob unique not null check (length(uuid) = 16),
-- The last (read/write) open of this directory which fully completed.
-- See schema.proto:DirMeta for a more complete description.
last_complete_open_id integer references open (id)
);
create table camera (
id integer primary key,
uuid blob unique not null check (length(uuid) = 16),
-- A short name of the camera, used in log messages.
short_name text not null,
-- A short description of the camera.
description text,
-- The host (or IP address) to use in rtsp:// URLs when accessing the camera.
host text,
-- The username to use when accessing the camera.
-- If empty, no username or password will be supplied.
username text,
-- The password to use when accessing the camera.
password text
);
create table stream (
id integer primary key,
camera_id integer not null references camera (id),
sample_file_dir_id integer references sample_file_dir (id),
type text not null check (type in ('main', 'sub')),
-- If record is true, the stream should start recording when moonfire
-- starts. If false, no new recordings will be made, but old recordings
-- will not be deleted.
record integer not null check (record in (1, 0)),
-- The path (starting with "/") to use in rtsp:// URLs for this stream.
rtsp_path text not null,
-- The number of bytes of video to retain, excluding the currently-recording
-- file. Older files will be deleted as necessary to stay within this limit.
retain_bytes integer not null check (retain_bytes >= 0),
-- Flush the database when the first instant of completed recording is this
-- many seconds old. A value of 0 means that every completed recording will
-- cause an immediate flush. Higher values may allow flushes to be combined,
-- reducing SSD write cycles. For example, if all streams have flush_if_sec
-- >= x, there will be:
--
-- * at most one flush per x sec in total.
-- * at most x sec of completed but unflushed recordings per stream.
-- * at most x completed but unflushed recordings per stream, in the worst
-- case where a recording instantly fails, waits the 1-second retry delay,
-- then fails again, forever.
flush_if_sec integer not null,
-- The low 32 bits of the next recording id to assign for this stream.
-- Typically this is the maximum current recording + 1, but it does
-- not decrease if that recording is deleted.
next_recording_id integer not null check (next_recording_id >= 0),
unique (camera_id, type)
);
-- Each row represents a single completed recorded segment of video.
-- Recordings are typically ~60 seconds; never more than 5 minutes.
create table recording (
-- The high 32 bits of composite_id are taken from the stream's id, which
-- improves locality. The low 32 bits are taken from the stream's
-- next_recording_id (which should be post-incremented in the same
-- transaction). It'd be simpler to use a "without rowid" table and separate
-- fields to make up the primary key, but
-- <https://www.sqlite.org/withoutrowid.html> points out that "without rowid"
-- is not appropriate when the average row size is in excess of 50 bytes.
-- recording_cover rows (which match this id format) are typically 1--5 KiB.
composite_id integer primary key,
-- The open in which this was committed to the database. For a given
-- composite_id, only one recording will ever be committed to the database,
-- but in-memory state may reflect a recording which never gets committed.
-- This field allows disambiguation in etags and such.
open_id integer not null references open (id),
-- This field is redundant with id above, but used to enforce the reference
-- constraint and to structure the recording_start_time index.
stream_id integer not null references stream (id),
-- The offset of this recording within a run. 0 means this was the first
-- recording made from a RTSP session. The start of the run has id
-- (id-run_offset).
run_offset integer not null,
-- flags is a bitmask:
--
-- * 1, or "trailing zero", indicates that this recording is the last in a
-- stream. As the duration of a sample is not known until the next sample
-- is received, the final sample in this recording will have duration 0.
flags integer not null,
sample_file_bytes integer not null check (sample_file_bytes > 0),
-- The starting time of the recording, in 90 kHz units since
-- 1970-01-01 00:00:00 UTC excluding leap seconds. Currently on initial
-- connection, this is taken from the local system time; on subsequent
-- recordings, it exactly matches the previous recording's end time.
start_time_90k integer not null check (start_time_90k > 0),
-- The duration of the recording, in 90 kHz units.
duration_90k integer not null
check (duration_90k >= 0 and duration_90k < 5*60*90000),
video_samples integer not null check (video_samples > 0),
video_sync_samples integer not null check (video_sync_samples > 0),
video_sample_entry_id integer references video_sample_entry (id),
check (composite_id >> 32 = stream_id)
);
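A minimal sketch of the composite_id packing described above; the helper names are illustrative (the db crate wraps this in a composite id type, whose `.recording()` accessor appears in testutil.rs later in this commit):

    // Mirrors the `check (composite_id >> 32 = stream_id)` constraint above.
    fn compose(stream_id: i32, recording_id: i32) -> i64 {
        ((stream_id as i64) << 32) | (recording_id as u32 as i64)
    }
    fn stream_id_of(composite_id: i64) -> i32 { (composite_id >> 32) as i32 }
    fn recording_id_of(composite_id: i64) -> i32 { composite_id as i32 }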
create index recording_cover on recording (
-- Typical queries use "where stream_id = ? order by start_time_90k".
stream_id,
start_time_90k,
-- These fields are not used for ordering; they cover most queries so
-- that only database verification and actual viewing of recordings need
-- to consult the underlying row.
open_id,
duration_90k,
video_samples,
video_sync_samples,
video_sample_entry_id,
sample_file_bytes,
run_offset,
flags
);
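The index's first comment names the query shape it serves; in rusqlite terms that is roughly (a sketch, with the column list abbreviated and `conn` assumed in scope):

    let mut stmt = conn.prepare(
        "select composite_id, open_id, duration_90k, video_sample_entry_id \
         from recording where stream_id = ? order by start_time_90k")?;

Because every listed column is part of the index, such queries never need to touch the underlying rows.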
-- Fields which are only needed to check/correct database integrity problems
-- (such as incorrect timestamps).
create table recording_integrity (
-- See description on recording table.
composite_id integer primary key references recording (composite_id),
-- The number of 90 kHz units the local system's monotonic clock has
-- advanced more than the stated duration of recordings in a run since the
-- first recording ended. Negative numbers indicate the local system time is
-- behind the recording.
--
-- The first recording of a run (that is, one with run_offset=0) has null
-- local_time_delta_90k because errors are assumed to
-- be the result of initial buffering rather than frequency mismatch.
--
-- This value should be near 0 even on long runs in which the camera's clock
-- and local system's clock frequency differ because each recording's delta
-- is used to correct the durations of the next (up to 500 ppm error).
local_time_delta_90k integer,
-- The number of 90 kHz units the local system's monotonic clock had
-- advanced since the database was opened, as of the start of recording.
-- TODO: fill this in!
local_time_since_open_90k integer,
-- The difference between start_time_90k+duration_90k and a wall clock
-- timestamp captured at end of this recording. This is meaningful for all
-- recordings in a run, even the initial one (run_offset=0), because
-- start_time_90k is derived from the wall time as of when recording
-- starts, not when it ends.
-- TODO: fill this in!
wall_time_delta_90k integer,
-- The sha1 hash of the contents of the sample file.
sample_file_sha1 blob check (length(sample_file_sha1) <= 20)
);
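As a sense check on the 500 ppm figure above (this arithmetic is not from the source):

    // A typical ~60 s recording spans 60 * 90_000 = 5_400_000 90 kHz units,
    // so a 500 ppm frequency mismatch accumulates at most:
    let max_drift_90k = 5_400_000_i64 * 500 / 1_000_000;  // 2_700 units, i.e. 30 ms

so local_time_delta_90k should indeed stay near zero on healthy runs.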
-- Large fields for a recording which are needed only for playback.
-- In particular, when serving a byte range within a .mp4 file, the
-- recording_playback row is needed only for the recording(s) corresponding to
-- that particular byte range; the recording rows suffice for all other
-- recordings in the .mp4.
create table recording_playback (
-- See description on recording table.
composite_id integer primary key references recording (composite_id),
-- See design/schema.md#video_index for a description of this field.
video_index blob not null check (length(video_index) > 0)
-- audio_index could be added here in the future.
);
-- Files which are to be deleted (may or may not still exist).
-- Note that besides these files, for each stream, any recordings >= its
-- next_recording_id should be discarded on startup.
create table garbage (
-- This is _mostly_ redundant with composite_id, which contains the stream
-- id and thus a linkage to the sample file directory. Listing it here
-- explicitly means that streams can be deleted without losing the
-- association of garbage to directory.
sample_file_dir_id integer not null references sample_file_dir (id),
-- See description on recording table.
composite_id integer not null,
-- Organize the table first by directory, as that's how it will be queried.
primary key (sample_file_dir_id, composite_id)
) without rowid;
-- A concrete box derived from an ISO/IEC 14496-12 section 8.5.2
-- VisualSampleEntry box. Describes the codec, width, height, etc.
create table video_sample_entry (
id integer primary key,
-- A SHA-1 hash of |data|.
sha1 blob unique not null check (length(sha1) = 20),
-- The width and height in pixels; must match values within
-- |data|.
width integer not null check (width > 0),
height integer not null check (height > 0),
-- The codec in RFC-6381 format, such as "avc1.4d001f".
rfc6381_codec text not null,
-- The serialized box, including the leading length and box type (avcC in
-- the case of H.264).
data blob not null check (length(data) > 86)
);
create table user (
id integer primary key,
username unique not null,
-- Bitwise mask of flags:
-- 1: disabled. If set, no method of authentication for this user will succeed.
flags integer not null,
-- If set, a hash for password authentication, as generated by `libpasta::hash_password`.
password_hash text,
-- A counter which increments with every password reset or clear.
password_id integer not null default 0,
-- Updated lazily on database flush; reset when password_id is incremented.
-- This could be used to automatically disable the password on hitting a threshold.
password_failure_count integer not null default 0,
-- If set, a Unix UID that is accepted for authentication when using HTTP over
-- a Unix domain socket. (Additionally, the UID running Moonfire NVR can authenticate
-- as anyone; there's no point in trying to do otherwise.) This might be an easy
-- bootstrap method once configuration happens through a web UI rather than text UI.
unix_uid integer
);
-- A single session, whether for browser or robot use.
-- These map at the HTTP layer to two cookies (exact format described
-- elsewhere):
--
-- * "s" holds the session id and an encrypted sequence number for replay
-- protection. To decrease chance of leaks, it's normally marked as
-- HttpOnly, preventing client-side Javascript from accessing it.
--
-- * "sc" holds state needed by client Javascript, such as a CSRF token (which
-- should be copied into POST request bodies) and username (which should be
-- presented in the UI). It should never be marked HttpOnly.
create table user_session (
-- The session id is a 20-byte blob. This column holds the unsalted Blake2b-160
-- hash (also 20 bytes) of the unencoded session id. Much like `password_hash`, a
-- hash is used here so that a leaked database backup can't be trivially used
-- to steal credentials.
session_id_hash blob primary key not null,
user_id integer references user (id) not null,
-- A TBD-byte random number. Used to derive keys for the replay protection
-- and CSRF tokens.
seed blob not null,
-- A bitwise mask of flags, currently all properties of the HTTP cookies
-- used to hold the session:
-- 1: HttpOnly ("s" cookie only)
-- 2: Secure (both cookies)
-- 4: SameSite=Lax (both cookies)
-- 8: SameSite=Strict ("s" cookie only) - 4 must also be set.
flags integer not null,
-- The domain of the HTTP cookie used to store this session. The outbound
-- `Set-Cookie` header never specifies a scope, so this matches the `Host:` of
-- the inbound HTTP request (minus the :port, if any was specified).
domain text,
-- An editable description which might describe the device/program which uses
-- this session, such as "Chromebook", "iPhone", or "motion detection worker".
description text,
creation_password_id integer, -- the id it was created from, if created via password
creation_time_sec integer not null, -- sec since epoch
creation_user_agent text, -- User-Agent header from inbound HTTP request.
creation_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket.
revocation_time_sec integer, -- sec since epoch
revocation_user_agent text, -- User-Agent header from inbound HTTP request.
revocation_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket/no peer.
-- A value indicating the reason for revocation, with optional additional
-- text detail. Enumeration values:
-- 0: logout link clicked (i.e. from within the session itself)
--
-- This might be extended for a variety of other reasons:
-- x: user revoked (while authenticated in another way)
-- x: password change invalidated all sessions created with that password
-- x: expired (due to fixed total time or time inactive)
-- x: evicted (due to too many sessions)
-- x: suspicious activity
revocation_reason integer,
revocation_reason_detail text,
-- Information about requests which used this session, updated lazily on database flush.
last_use_time_sec integer, -- sec since epoch
last_use_user_agent text, -- User-Agent header from inbound HTTP request.
last_use_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket.
use_count not null default 0
) without rowid;
create index user_session_uid on user_session (user_id);
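Restating the session flag bits documented in the user_session table above as Rust constants; these names are assumptions for illustration, not the actual auth code:

    const HTTP_ONLY: i32 = 1;         // "s" cookie only
    const SECURE: i32 = 2;            // both cookies
    const SAME_SITE_LAX: i32 = 4;     // both cookies
    const SAME_SITE_STRICT: i32 = 8;  // "s" cookie only; 4 must also be set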
insert into version (id, unix_time, notes)
values (3, cast(strftime('%s', 'now') as int), 'db creation');
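The upgrade driver in db/upgrade/mod.rs below reads this version table back to decide which upgraders to run; its core check is a single query (mirroring the code later in this commit):

    let old_ver: i32 =
        conn.query_row("select max(id) from version", &[], |row| row.get_checked(0))??;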

db/testutil.rs

@ -28,23 +28,25 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
extern crate tempdir;
use base::clock::Clocks;
use db;
use dir;
use fnv::FnvHashMap;
use mylog;
use recording::{self, TIME_UNITS_PER_SEC};
use rusqlite;
use std::env;
use std::sync;
use std::sync::{self, Arc};
use std::thread;
use tempdir::TempDir;
use time;
use uuid::Uuid;
use writer;
static INIT: sync::Once = sync::ONCE_INIT;
/// id of the camera created by `TestDb::new` below.
pub const TEST_CAMERA_ID: i32 = 1;
pub const TEST_STREAM_ID: i32 = 1;
/// Performs global initialization for tests.
/// * set up logging. (Note the output can be confusing unless `RUST_TEST_THREADS=1` is set in
@ -62,47 +64,60 @@ pub fn init() {
});
}
pub struct TestDb {
pub db: sync::Arc<db::Database>,
pub dir: sync::Arc<dir::SampleFileDir>,
pub syncer_channel: dir::SyncerChannel,
pub struct TestDb<C: Clocks + Clone> {
pub db: Arc<db::Database<C>>,
pub dirs_by_stream_id: Arc<FnvHashMap<i32, Arc<dir::SampleFileDir>>>,
pub syncer_channel: writer::SyncerChannel<::std::fs::File>,
pub syncer_join: thread::JoinHandle<()>,
pub tmpdir: tempdir::TempDir,
pub tmpdir: TempDir,
pub test_camera_uuid: Uuid,
}
impl TestDb {
impl<C: Clocks + Clone> TestDb<C> {
/// Creates a test database with one camera.
pub fn new() -> TestDb {
let tmpdir = tempdir::TempDir::new("moonfire-nvr-test").unwrap();
pub fn new(clocks: C) -> Self {
let tmpdir = TempDir::new("moonfire-nvr-test").unwrap();
let conn = rusqlite::Connection::open_in_memory().unwrap();
let schema = include_str!("schema.sql");
conn.execute_batch(schema).unwrap();
let db = sync::Arc::new(db::Database::new(conn).unwrap());
let test_camera_uuid;
let mut conn = rusqlite::Connection::open_in_memory().unwrap();
db::init(&mut conn).unwrap();
let db = Arc::new(db::Database::new(clocks, conn, true).unwrap());
let (test_camera_uuid, sample_file_dir_id);
let path = tmpdir.path().to_str().unwrap().to_owned();
let dir;
{
let mut l = db.lock();
sample_file_dir_id = l.add_sample_file_dir(path.to_owned()).unwrap();
assert_eq!(TEST_CAMERA_ID, l.add_camera(db::CameraChange {
short_name: "test camera".to_owned(),
description: "".to_owned(),
host: "test-camera".to_owned(),
username: "foo".to_owned(),
password: "bar".to_owned(),
main_rtsp_path: "/main".to_owned(),
sub_rtsp_path: "/sub".to_owned(),
streams: [
db::StreamChange {
sample_file_dir_id: Some(sample_file_dir_id),
rtsp_path: "/main".to_owned(),
record: true,
flush_if_sec: 0,
},
Default::default(),
],
}).unwrap());
test_camera_uuid = l.cameras_by_id().get(&TEST_CAMERA_ID).unwrap().uuid;
let mut tx = l.tx().unwrap();
tx.update_retention(TEST_CAMERA_ID, 1048576).unwrap();
tx.commit().unwrap();
l.update_retention(&[db::RetentionChange {
stream_id: TEST_STREAM_ID,
new_record: true,
new_limit: 1048576,
}]).unwrap();
dir = l.sample_file_dirs_by_id().get(&sample_file_dir_id).unwrap().get().unwrap();
}
let path = tmpdir.path().to_str().unwrap().to_owned();
let dir = dir::SampleFileDir::new(&path, db.clone()).unwrap();
let (syncer_channel, syncer_join) = dir::start_syncer(dir.clone()).unwrap();
let mut dirs_by_stream_id = FnvHashMap::default();
dirs_by_stream_id.insert(TEST_STREAM_ID, dir.clone());
let (syncer_channel, syncer_join) =
writer::start_syncer(db.clone(), sample_file_dir_id).unwrap();
TestDb {
db,
dir,
dirs_by_stream_id: Arc::new(dirs_by_stream_id),
syncer_channel,
syncer_join,
tmpdir,
@ -110,36 +125,25 @@ impl TestDb {
}
}
pub fn create_recording_from_encoder(&self, encoder: recording::SampleIndexEncoder)
-> db::ListRecordingsRow {
/// Creates a recording with a fresh `RecordingToInsert` row which has been touched only by
/// a `SampleIndexEncoder`. Fills in a video sample entry id and such to make it valid.
/// There will be no backing sample file, so it won't be possible to generate a full `.mp4`.
pub fn insert_recording_from_encoder(&self, r: db::RecordingToInsert)
-> db::ListRecordingsRow {
use recording::{self, TIME_UNITS_PER_SEC};
let mut db = self.db.lock();
let video_sample_entry_id = db.insert_video_sample_entry(
1920, 1080, [0u8; 100].to_vec(), "avc1.000000".to_owned()).unwrap();
let row_id;
{
let mut tx = db.tx().unwrap();
tx.bypass_reservation_for_testing = true;
const START_TIME: recording::Time = recording::Time(1430006400i64 * TIME_UNITS_PER_SEC);
row_id = tx.insert_recording(&db::RecordingToInsert{
camera_id: TEST_CAMERA_ID,
sample_file_bytes: encoder.sample_file_bytes,
time: START_TIME ..
START_TIME + recording::Duration(encoder.total_duration_90k as i64),
local_time_delta: recording::Duration(0),
video_samples: encoder.video_samples,
video_sync_samples: encoder.video_sync_samples,
video_sample_entry_id: video_sample_entry_id,
sample_file_uuid: Uuid::nil(),
video_index: encoder.video_index,
sample_file_sha1: [0u8; 20],
run_offset: 0, // TODO
flags: 0, // TODO
}).unwrap();
tx.commit().unwrap();
}
let (id, _) = db.add_recording(TEST_STREAM_ID, db::RecordingToInsert {
start: recording::Time(1430006400i64 * TIME_UNITS_PER_SEC),
video_sample_entry_id,
..r
}).unwrap();
db.mark_synced(id).unwrap();
db.flush("create_recording_from_encoder").unwrap();
let mut row = None;
db.list_recordings_by_id(TEST_CAMERA_ID, row_id .. row_id + 1,
|r| { row = Some(r); Ok(()) }).unwrap();
db.list_recordings_by_id(TEST_STREAM_ID, id.recording() .. id.recording()+1,
&mut |r| { row = Some(r); Ok(()) }).unwrap();
row.unwrap()
}
}
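Putting the new pieces together, a typical test now follows this pattern (a condensed sketch of the tests shown earlier in this diff):

    let db = TestDb::new(RealClocks {});
    let mut r = db::RecordingToInsert::default();
    let mut encoder = SampleIndexEncoder::new();
    encoder.add_sample(10, 1000, true, &mut r);  // duration_90k, bytes, is_key
    let row = db.insert_recording_from_encoder(r);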
@ -147,34 +151,28 @@ impl TestDb {
// For benchmarking
#[cfg(feature="nightly")]
pub fn add_dummy_recordings_to_db(db: &db::Database, num: usize) {
use recording::{self, TIME_UNITS_PER_SEC};
let mut data = Vec::new();
data.extend_from_slice(include_bytes!("testdata/video_sample_index.bin"));
let mut db = db.lock();
let video_sample_entry_id = db.insert_video_sample_entry(
1920, 1080, [0u8; 100].to_vec(), "avc1.000000".to_owned()).unwrap();
const START_TIME: recording::Time = recording::Time(1430006400i64 * TIME_UNITS_PER_SEC);
const DURATION: recording::Duration = recording::Duration(5399985);
let mut recording = db::RecordingToInsert{
camera_id: TEST_CAMERA_ID,
let mut recording = db::RecordingToInsert {
sample_file_bytes: 30104460,
flags: 0,
time: START_TIME .. (START_TIME + DURATION),
local_time_delta: recording::Duration(0),
start: recording::Time(1430006400i64 * TIME_UNITS_PER_SEC),
duration_90k: 5399985,
video_samples: 1800,
video_sync_samples: 60,
video_sample_entry_id: video_sample_entry_id,
sample_file_uuid: Uuid::nil(),
video_index: data,
sample_file_sha1: [0; 20],
run_offset: 0,
..Default::default()
};
let mut tx = db.tx().unwrap();
tx.bypass_reservation_for_testing = true;
for _ in 0..num {
tx.insert_recording(&recording).unwrap();
recording.time.start += DURATION;
recording.time.end += DURATION;
let (id, _) = db.add_recording(TEST_STREAM_ID, recording.clone()).unwrap();
recording.start += recording::Duration(recording.duration_90k as i64);
recording.run_offset += 1;
db.mark_synced(id).unwrap();
}
tx.commit().unwrap();
db.flush("add_dummy_recordings_to_db").unwrap();
}

110
db/upgrade/mod.rs Normal file

@ -0,0 +1,110 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2016 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
/// Upgrades the database schema.
///
/// See `guide/schema.md` for more information.
use db;
use failure::Error;
use rusqlite;
mod v0_to_v1;
mod v1_to_v2;
mod v2_to_v3;
const UPGRADE_NOTES: &'static str =
concat!("upgraded using moonfire-db ", env!("CARGO_PKG_VERSION"));
#[derive(Debug)]
pub struct Args<'a> {
pub flag_sample_file_dir: Option<&'a str>,
pub flag_preset_journal: &'a str,
pub flag_no_vacuum: bool,
}
fn set_journal_mode(conn: &rusqlite::Connection, requested: &str) -> Result<(), Error> {
assert!(!requested.contains(';')); // quick check for accidental sql injection.
let actual = conn.query_row(&format!("pragma journal_mode = {}", requested), &[],
|row| row.get_checked::<_, String>(0))??;
info!("...database now in journal_mode {} (requested {}).", actual, requested);
Ok(())
}
pub fn run(args: &Args, conn: &mut rusqlite::Connection) -> Result<(), Error> {
let upgraders = [
v0_to_v1::run,
v1_to_v2::run,
v2_to_v3::run,
];
{
assert_eq!(upgraders.len(), db::EXPECTED_VERSION as usize);
let old_ver =
conn.query_row("select max(id) from version", &[], |row| row.get_checked(0))??;
if old_ver > db::EXPECTED_VERSION {
bail!("Database is at version {}, later than expected {}",
old_ver, db::EXPECTED_VERSION);
} else if old_ver < 0 {
bail!("Database is at negative version {}!", old_ver);
}
info!("Upgrading database from version {} to version {}...", old_ver, db::EXPECTED_VERSION);
set_journal_mode(&conn, args.flag_preset_journal).unwrap();
for ver in old_ver .. db::EXPECTED_VERSION {
info!("...from version {} to version {}", ver, ver + 1);
let tx = conn.transaction()?;
upgraders[ver as usize](&args, &tx)?;
tx.execute(r#"
insert into version (id, unix_time, notes)
values (?, cast(strftime('%s', 'now') as int32), ?)
"#, &[&(ver + 1), &UPGRADE_NOTES])?;
tx.commit()?;
}
}
// Enforce foreign keys. This is on by default with --features=bundled (as rusqlite
// compiles the SQLite3 amalgamation with -DSQLITE_DEFAULT_FOREIGN_KEYS=1). Ensure it's
// always on. Note that our foreign keys are immediate rather than deferred, so we have to
// be careful about the order of operations during the upgrade.
conn.execute("pragma foreign_keys = on", &[])?;
// WAL is the preferred journal mode for normal operation; it reduces the number of syncs
// without compromising safety.
set_journal_mode(&conn, "wal")?;
if !args.flag_no_vacuum {
info!("...vacuuming database after upgrade.");
conn.execute_batch(r#"
pragma page_size = 16384;
vacuum;
"#).unwrap();
}
info!("...done.");
Ok(())
}

View File

@ -31,13 +31,12 @@
/// Upgrades a version 0 schema to a version 1 schema.
use db;
use error::Error;
use failure::Error;
use recording;
use rusqlite;
use std::collections::HashMap;
use strutil;
pub fn run(tx: &rusqlite::Transaction) -> Result<(), Error> {
pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
// These create statements match the schema.sql when version 1 was the latest.
tx.execute_batch(r#"
alter table camera rename to old_camera;
@ -162,14 +161,14 @@ fn fill_recording(tx: &rusqlite::Transaction) -> Result<HashMap<i32, CameraState
let video_samples: i32 = row.get_checked(5)?;
let video_sync_samples: i32 = row.get_checked(6)?;
let video_sample_entry_id: i32 = row.get_checked(7)?;
let sample_file_uuid: Vec<u8> = row.get_checked(8)?;
let sample_file_uuid: db::FromSqlUuid = row.get_checked(8)?;
let sample_file_sha1: Vec<u8> = row.get_checked(9)?;
let video_index: Vec<u8> = row.get_checked(10)?;
let old_id: i32 = row.get_checked(11)?;
let trailing_zero = has_trailing_zero(&video_index).unwrap_or_else(|e| {
warn!("recording {}/{} (sample file {}, formerly recording {}) has corrupt \
video_index: {}",
camera_id, composite_id & 0xFFFF, strutil::hex(&sample_file_uuid), old_id, e);
camera_id, composite_id & 0xFFFF, sample_file_uuid.0, old_id, e);
false
});
let run_id = match camera_state.current_run {
@ -191,7 +190,7 @@ fn fill_recording(tx: &rusqlite::Transaction) -> Result<HashMap<i32, CameraState
])?;
insert2.execute_named(&[
(":composite_id", &composite_id),
(":sample_file_uuid", &sample_file_uuid),
(":sample_file_uuid", &&sample_file_uuid.0.as_bytes()[..]),
(":sample_file_sha1", &sample_file_sha1),
(":video_index", &video_index),
])?;

397
db/upgrade/v1_to_v2.rs Normal file
View File

@ -0,0 +1,397 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
/// Upgrades a version 1 schema to a version 2 schema.
use dir;
use failure::Error;
use libc;
use rusqlite;
use schema::DirMeta;
use std::fs;
use std::os::unix::ffi::OsStrExt;
use uuid::Uuid;
pub fn run(args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
let sample_file_path =
args.flag_sample_file_dir
.ok_or_else(|| format_err!("--sample-file-dir required when upgrading from \
schema version 1 to 2."))?;
let d = dir::Fd::open(sample_file_path, false)?;
d.lock(libc::LOCK_EX | libc::LOCK_NB)?;
verify_dir_contents(sample_file_path, tx)?;
// These create statements match the schema.sql when version 2 was the latest.
tx.execute_batch(r#"
create table meta (
uuid blob not null check (length(uuid) = 16)
);
create table open (
id integer primary key,
uuid blob unique not null check (length(uuid) = 16),
start_time_90k integer,
end_time_90k integer,
duration_90k integer
);
create table sample_file_dir (
id integer primary key,
path text unique not null,
uuid blob unique not null check (length(uuid) = 16),
last_complete_open_id integer references open (id)
);
create table user (
id integer primary key,
username unique not null,
flags integer not null,
password_hash text,
password_id integer not null default 0,
password_failure_count integer not null default 0,
unix_uid integer
);
create table user_session (
session_id_hash blob primary key not null,
user_id integer references user (id) not null,
seed blob not null,
flags integer not null,
domain text,
description text,
creation_password_id integer,
creation_time_sec integer not null,
creation_user_agent text,
creation_peer_addr blob,
revocation_time_sec integer,
revocation_user_agent text,
revocation_peer_addr blob,
revocation_reason integer,
revocation_reason_detail text,
last_use_time_sec integer,
last_use_user_agent text,
last_use_peer_addr blob,
use_count not null default 0
) without rowid;
"#)?;
let db_uuid = ::uuid::Uuid::new_v4();
let db_uuid_bytes = &db_uuid.as_bytes()[..];
tx.execute("insert into meta (uuid) values (?)", &[&db_uuid_bytes])?;
let open_uuid = ::uuid::Uuid::new_v4();
let open_uuid_bytes = &open_uuid.as_bytes()[..];
tx.execute("insert into open (uuid) values (?)", &[&open_uuid_bytes])?;
let open_id = tx.last_insert_rowid() as u32;
let dir_uuid = ::uuid::Uuid::new_v4();
let dir_uuid_bytes = &dir_uuid.as_bytes()[..];
// Write matching metadata to the directory.
let mut meta = DirMeta::default();
{
meta.db_uuid.extend_from_slice(db_uuid_bytes);
meta.dir_uuid.extend_from_slice(dir_uuid_bytes);
let open = meta.mut_last_complete_open();
open.id = open_id;
open.uuid.extend_from_slice(&open_uuid_bytes);
}
dir::write_meta(&d, &meta)?;
tx.execute(r#"
insert into sample_file_dir (path, uuid, last_complete_open_id)
values (?, ?, ?)
"#, &[&sample_file_path, &dir_uuid_bytes, &open_id])?;
tx.execute_batch(r#"
drop table reserved_sample_files;
alter table camera rename to old_camera;
alter table recording rename to old_recording;
alter table video_sample_entry rename to old_video_sample_entry;
drop index recording_cover;
create table camera (
id integer primary key,
uuid blob unique not null check (length(uuid) = 16),
short_name text not null,
description text,
host text,
username text,
password text
);
create table stream (
id integer primary key,
camera_id integer not null references camera (id),
sample_file_dir_id integer references sample_file_dir (id),
type text not null check (type in ('main', 'sub')),
record integer not null check (record in (1, 0)),
rtsp_path text not null,
retain_bytes integer not null check (retain_bytes >= 0),
flush_if_sec integer not null,
next_recording_id integer not null check (next_recording_id >= 0),
unique (camera_id, type)
);
create table recording (
composite_id integer primary key,
stream_id integer not null references stream (id),
open_id integer not null,
run_offset integer not null,
flags integer not null,
sample_file_bytes integer not null check (sample_file_bytes > 0),
start_time_90k integer not null check (start_time_90k > 0),
duration_90k integer not null
check (duration_90k >= 0 and duration_90k < 5*60*90000),
video_samples integer not null check (video_samples > 0),
video_sync_samples integer not null check (video_sync_samples > 0),
video_sample_entry_id integer references video_sample_entry (id),
check (composite_id >> 32 = stream_id)
);
create index recording_cover on recording (
stream_id,
start_time_90k,
open_id,
duration_90k,
video_samples,
video_sync_samples,
video_sample_entry_id,
sample_file_bytes,
run_offset,
flags
);
create table recording_integrity (
composite_id integer primary key references recording (composite_id),
local_time_delta_90k integer,
local_time_since_open_90k integer,
wall_time_delta_90k integer,
sample_file_sha1 blob check (length(sample_file_sha1) <= 20)
);
create table video_sample_entry (
id integer primary key,
sha1 blob unique not null check (length(sha1) = 20),
width integer not null check (width > 0),
height integer not null check (height > 0),
rfc6381_codec text not null,
data blob not null check (length(data) > 86)
);
create table garbage (
sample_file_dir_id integer references sample_file_dir (id),
composite_id integer,
primary key (sample_file_dir_id, composite_id)
) without rowid;
insert into camera
select
id,
uuid,
short_name,
description,
host,
username,
password
from old_camera;
-- Insert main streams using the same id as the camera, to ease changing recordings.
insert into stream
select
old_camera.id,
old_camera.id,
sample_file_dir.id,
'main',
1,
old_camera.main_rtsp_path,
old_camera.retain_bytes,
0,
old_camera.next_recording_id
from
old_camera cross join sample_file_dir;
-- Insert sub stream (if path is non-empty) using any id.
insert into stream (camera_id, sample_file_dir_id, type, record, rtsp_path,
retain_bytes, flush_if_sec, next_recording_id)
select
old_camera.id,
sample_file_dir.id,
'sub',
0,
old_camera.sub_rtsp_path,
0,
90,
1
from
old_camera cross join sample_file_dir
where
old_camera.sub_rtsp_path != '';
"#)?;
// Add the new video_sample_entry rows, before inserting the recordings referencing them.
fix_video_sample_entry(tx)?;
tx.execute_batch(r#"
insert into recording
select
r.composite_id,
r.camera_id,
o.id,
r.run_offset,
r.flags,
r.sample_file_bytes,
r.start_time_90k,
r.duration_90k,
r.video_samples,
r.video_sync_samples,
r.video_sample_entry_id
from
old_recording r cross join open o;
insert into recording_integrity (composite_id, local_time_delta_90k, sample_file_sha1)
select
r.composite_id,
case when r.run_offset > 0 then local_time_delta_90k else null end,
p.sample_file_sha1
from
old_recording r join recording_playback p on (r.composite_id = p.composite_id);
"#)?;
Ok(())
}
/// Ensures the sample file directory has the expected contents.
/// Among other problems, this catches a fat-fingered `--sample-file-dir`.
/// The expected contents are:
///
/// * required: recording uuids.
/// * optional: reserved sample file uuids.
/// * optional: meta and meta-tmp from half-completed update attempts.
/// * forbidden: anything else.
fn verify_dir_contents(sample_file_path: &str, tx: &rusqlite::Transaction) -> Result<(), Error> {
// Build a hash of the uuids found in the directory.
let n: i64 = tx.query_row(r#"
select
a.c + b.c
from
(select count(*) as c from recording) a,
(select count(*) as c from reserved_sample_files) b;
"#, &[], |r| r.get_checked(0))??;
let mut files = ::fnv::FnvHashSet::with_capacity_and_hasher(n as usize, Default::default());
for e in fs::read_dir(sample_file_path)? {
let e = e?;
let f = e.file_name();
match f.as_bytes() {
b"." | b".." => continue,
b"meta" | b"meta-tmp" => {
// Ignore metadata files. These might be left over from a half-finished update attempt.
// If the directory is actually in use by a v3-or-later schema, other contents won't match.
continue;
},
_ => {},
};
let s = match f.to_str() {
Some(s) => s,
None => bail!("unexpected file {:?} in {:?}", f, sample_file_path),
};
let uuid = match Uuid::parse_str(s) {
Ok(u) => u,
Err(_) => bail!("unexpected file {:?} in {:?}", f, sample_file_path),
};
if s != uuid.hyphenated().to_string() { // non-canonical form.
bail!("unexpected file {:?} in {:?}", f, sample_file_path);
}
files.insert(uuid);
}
// Iterate through the database and check that everything has a matching file.
{
let mut stmt = tx.prepare(r"select sample_file_uuid from recording_playback")?;
let mut rows = stmt.query(&[])?;
while let Some(row) = rows.next() {
let row = row?;
let uuid: ::db::FromSqlUuid = row.get_checked(0)?;
if !files.remove(&uuid.0) {
bail!("{} is missing from dir {}!", uuid.0, sample_file_path);
}
}
}
let mut stmt = tx.prepare(r"select uuid from reserved_sample_files")?;
let mut rows = stmt.query(&[])?;
while let Some(row) = rows.next() {
let row = row?;
let uuid: ::db::FromSqlUuid = row.get_checked(0)?;
files.remove(&uuid.0);
}
if !files.is_empty() {
bail!("{} unexpected sample file uuids in dir {}: {:?}!",
files.len(), sample_file_path, files);
}
Ok(())
}
fn fix_video_sample_entry(tx: &rusqlite::Transaction) -> Result<(), Error> {
let mut select = tx.prepare(r#"
select
id,
sha1,
width,
height,
data
from
old_video_sample_entry
"#)?;
let mut insert = tx.prepare(r#"
insert into video_sample_entry values (:id, :sha1, :width, :height, :rfc6381_codec, :data)
"#)?;
let mut rows = select.query(&[])?;
while let Some(row) = rows.next() {
let row = row?;
let data: Vec<u8> = row.get_checked(4)?;
insert.execute_named(&[
(":id", &row.get_checked::<_, i32>(0)?),
(":sha1", &row.get_checked::<_, Vec<u8>>(1)?),
(":width", &row.get_checked::<_, i32>(2)?),
(":height", &row.get_checked::<_, i32>(3)?),
(":rfc6381_codec", &rfc6381_codec_from_sample_entry(&data)?),
(":data", &data),
])?;
}
Ok(())
}
// This previously lived in h264.rs. As of version 1, H.264 is the only supported codec.
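// For example, an entry for High profile (profile_idc 0x64) with no constraint flags
// at level 4.0 (level_idc 0x28) yields "avc1.640028".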
fn rfc6381_codec_from_sample_entry(sample_entry: &[u8]) -> Result<String, Error> {
// Bytes 103-105 (the SPS's profile/constraint/level bytes) are read below, so
// require at least 106 bytes.
if sample_entry.len() < 106 || &sample_entry[4..8] != b"avc1" ||
&sample_entry[90..94] != b"avcC" {
bail!("not a valid AVCSampleEntry");
}
let profile_idc = sample_entry[103];
let constraint_flags_byte = sample_entry[104];
let level_idc = sample_entry[105];
Ok(format!("avc1.{:02x}{:02x}{:02x}", profile_idc, constraint_flags_byte, level_idc))
}

137
db/upgrade/v2_to_v3.rs Normal file
View File

@ -0,0 +1,137 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
/// Upgrades a version 2 schema to a version 3 schema.
/// Note that a version 2 schema is never actually used in normal operation, so we know the
/// upgrade from version 1 completed; at most, a previous upgrade from version 2 to 3 is
/// half-finished.
use db::{self, FromSqlUuid};
use dir;
use failure::Error;
use libc;
use schema;
use std::io::{self, Write};
use std::mem;
use std::sync::Arc;
use rusqlite;
use uuid::Uuid;
/// Opens the sample file dir.
///
/// Makes a couple of simplifying assumptions that are valid for version 2:
/// * there's only one dir.
/// * it has a last completed open.
fn open_sample_file_dir(tx: &rusqlite::Transaction) -> Result<Arc<dir::SampleFileDir>, Error> {
let (p, s_uuid, o_id, o_uuid, db_uuid): (String, FromSqlUuid, i32, FromSqlUuid, FromSqlUuid) =
tx.query_row(r#"
select
s.path, s.uuid, s.last_complete_open_id, o.uuid, m.uuid
from
sample_file_dir s
join open o on (s.last_complete_open_id = o.id)
cross join meta m
"#, &[], |row| {
(row.get_checked(0).unwrap(),
row.get_checked(1).unwrap(),
row.get_checked(2).unwrap(),
row.get_checked(3).unwrap(),
row.get_checked(4).unwrap())
})?;
let mut meta = schema::DirMeta::default();
meta.db_uuid.extend_from_slice(&db_uuid.0.as_bytes()[..]);
meta.dir_uuid.extend_from_slice(&s_uuid.0.as_bytes()[..]);
{
let open = meta.mut_last_complete_open();
open.id = o_id as u32;
open.uuid.extend_from_slice(&o_uuid.0.as_bytes()[..]);
}
dir::SampleFileDir::open(&p, &meta)
}
pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
let d = open_sample_file_dir(&tx)?;
let mut stmt = tx.prepare(r#"
select
composite_id,
sample_file_uuid
from
recording_playback
"#)?;
let mut rows = stmt.query(&[])?;
while let Some(row) = rows.next() {
let row = row?;
let id = db::CompositeId(row.get_checked(0)?);
let sample_file_uuid: FromSqlUuid = row.get_checked(1)?;
let from_path = get_uuid_pathname(sample_file_uuid.0);
let to_path = get_id_pathname(id);
let r = unsafe { dir::renameat(&d.fd, from_path.as_ptr(), &d.fd, to_path.as_ptr()) };
if let Err(e) = r {
if e.kind() == io::ErrorKind::NotFound {
continue; // assume it was already moved.
}
Err(e)?;
}
}
// These create statements match the schema.sql when version 3 was the latest.
tx.execute_batch(r#"
alter table recording_playback rename to old_recording_playback;
create table recording_playback (
composite_id integer primary key references recording (composite_id),
video_index blob not null check (length(video_index) > 0)
);
insert into recording_playback
select
composite_id,
video_index
from
old_recording_playback;
drop table old_recording_playback;
drop table old_recording;
drop table old_camera;
drop table old_video_sample_entry;
"#)?;
Ok(())
}
/// Gets a pathname for a sample file suitable for passing to open or unlink.
fn get_uuid_pathname(uuid: Uuid) -> [libc::c_char; 37] {
let mut buf = [0u8; 37];
write!(&mut buf[..36], "{}", uuid.hyphenated()).expect("can't format uuid to pathname buf");
// libc::c_char seems to be i8 on some platforms (Linux/arm) and u8 on others (Linux/amd64).
unsafe { mem::transmute::<[u8; 37], [libc::c_char; 37]>(buf) }
}
fn get_id_pathname(id: db::CompositeId) -> [libc::c_char; 17] {
let mut buf = [0u8; 17];
write!(&mut buf[..16], "{:016x}", id.0).expect("can't format id to pathname buf");
unsafe { mem::transmute::<[u8; 17], [libc::c_char; 17]>(buf) }
}

1063
db/writer.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,6 @@
# Moonfire NVR API
Status: **unstable**. This is an early draft; the API may change without
warning.
Status: **current**.
## Objective
@ -44,29 +43,32 @@ The `application/json` response will have a dict as follows:
* `uuid`: in text format
* `shortName`: a short name (typically one or two words)
* `description`: a longer description (typically a phrase or paragraph)
* `retainBytes`: the configured total number of bytes of completed
recordings to retain.
* `minStartTime90k`: the start time of the earliest recording for this
camera, in 90kHz units since 1970-01-01 00:00:00 UTC.
* `maxEndTime90k`: the end time of the latest recording for this camera,
in 90kHz units since 1970-01-01 00:00:00 UTC.
* `totalDuration90k`: the total duration recorded, in 90 kHz units.
This is no greater than `maxEndTime90k - maxStartTime90k`; it will be
lesser if there are gaps in the recorded data.
* `totalSampleFileBytes`: the total number of bytes of sample data (the
`mdat` portion of a `.mp4` file).
* `days`: object representing calendar days (in the server's time zone)
with non-zero total duration of recordings for that day. The keys are
of the form `YYYY-mm-dd`; the values are objects with the following
attributes:
* `totalDuration90k` is the total duration recorded during that day.
If a recording spans a day boundary, some portion of it is accounted to
each day.
* `startTime90k` is the start of that calendar day in the server's time
zone.
* `endTime90k` is the end of that calendar day in the server's time zone.
It is usually 24 hours after the start time. It might be 23 hours or 25
hours during spring forward or fall back, respectively.
* `streams`: a dict of stream type ("main" or "sub") to a dictionary
describing the stream:
* `retainBytes`: the configured total number of bytes of completed
recordings to retain.
* `minStartTime90k`: the start time of the earliest recording for
this camera, in 90kHz units since 1970-01-01 00:00:00 UTC.
* `maxEndTime90k`: the end time of the latest recording for this
camera, in 90kHz units since 1970-01-01 00:00:00 UTC.
* `totalDuration90k`: the total duration recorded, in 90 kHz units.
This is no greater than `maxEndTime90k - maxStartTime90k`; it will
be lesser if there are gaps in the recorded data.
* `totalSampleFileBytes`: the total number of bytes of sample data
(the `mdat` portion of a `.mp4` file).
* `days`: object representing calendar days (in the server's time
zone) with non-zero total duration of recordings for that day. The
keys are of the form `YYYY-mm-dd`; the values are objects with the
following attributes:
* `totalDuration90k` is the total duration recorded during that
day. If a recording spans a day boundary, some portion of it
is accounted to each day.
* `startTime90k` is the start of that calendar day in the
server's time zone.
* `endTime90k` is the end of that calendar day in the server's
time zone. It is usually 24 hours after the start time. It
might be 23 hours or 25 hours during spring forward or fall
back, respectively.
Example response:
@ -78,23 +80,27 @@ Example response:
"uuid": "fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe",
"shortName": "driveway",
"description": "Hikvision DS-2CD2032 overlooking the driveway from east",
"retainBytes": 536870912000,
"minStartTime90k": 130888729442361,
"maxEndTime90k": 130985466591817,
"totalDuration90k": 96736169725,
"totalSampleFileBytes": 446774393937,
"days": {
"2016-05-01": {
"endTime90k": 131595516000000,
"startTime90k": 131587740000000,
"totalDuration90k": 52617609
},
"2016-05-02": {
"endTime90k": 131603292000000,
"startTime90k": 131595516000000,
"totalDuration90k": 20946022
"streams": {
"main": {
"retainBytes": 536870912000,
"minStartTime90k": 130888729442361,
"maxEndTime90k": 130985466591817,
"totalDuration90k": 96736169725,
"totalSampleFileBytes": 446774393937,
"days": {
"2016-05-01": {
"endTime90k": 131595516000000,
"startTime90k": 131587740000000,
"totalDuration90k": 52617609
},
"2016-05-02": {
"endTime90k": 131603292000000,
"startTime90k": 131595516000000,
"totalDuration90k": 20946022
}
}
}
},
}
},
...
],
@ -109,29 +115,33 @@ Example response:
```json
{
"days": {
"2016-05-01": {
"endTime90k": 131595516000000,
"startTime90k": 131587740000000,
"totalDuration90k": 52617609
},
"2016-05-02": {
"endTime90k": 131603292000000,
"startTime90k": 131595516000000,
"totalDuration90k": 20946022
"description": "",
"streams": {
"main": {
"days": {
"2016-05-01": {
"endTime90k": 131595516000000,
"startTime90k": 131587740000000,
"totalDuration90k": 52617609
},
"2016-05-02": {
"endTime90k": 131603292000000,
"startTime90k": 131595516000000,
"totalDuration90k": 20946022
}
},
"maxEndTime90k": 131598273666690,
"minStartTime90k": 131590386129355,
"retainBytes": 104857600,
"totalDuration90k": 73563631,
"totalSampleFileBytes": 98901406
}
},
"description": "",
"maxEndTime90k": 131598273666690,
"minStartTime90k": 131590386129355,
"retainBytes": 104857600,
"shortName": "driveway",
"totalDuration90k": 73563631,
"totalSampleFileBytes": 98901406
"shortName": "driveway"
}
```
### `/api/cameras/<uuid>/recordings`
### `/api/cameras/<uuid>/<stream>/recordings`
A GET returns information about recordings, in descending order.
@ -159,6 +169,20 @@ Each recording object has the following properties:
together are as described. Adjacent recordings from the same RTSP session
may be coalesced in this fashion to reduce the amount of redundant data
transferred.
* `firstUncommitted` (optional). If this range is not fully committed to the
database, the first id that is uncommitted. This is significant because
it's possible that after a crash and restart, this id will refer to a
completely different recording. That recording will have a different
`openId`.
* `growing` (optional). If this boolean is true, the recording `endId` is
still being written to. Accesses to this id (such as `view.mp4`) may
retrieve more data than described here if not bounded by duration.
Additionally, if `startId` == `endId`, the start time of the recording is
"unanchored" and may change in subsequent accesses.
* `openId`. Each time Moonfire NVR starts in read-write mode, it is assigned
an increasing "open id". This field is the open id as of when these
recordings were written. This can be used to disambiguate ids referring to
uncommitted recordings. (See the illustrative fragment after this list.)
* `startTime90k`: the start time of the given recording. Note this may be
less than the requested `startTime90k` if this recording was ongoing
at the requested time.
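For instance, a returned range whose last two recordings are not yet committed
and whose final recording is still being written might include the following
fields among the other documented properties (values here are illustrative):
```json
{
  "startId": 1,
  "endId": 5,
  "firstUncommitted": 4,
  "growing": true,
  "openId": 1,
  ...
}
```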
@ -175,7 +199,7 @@ Each recording object has the following properties:
Example request URI (with added whitespace between parameters):
```
/api/cameras/fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe/recordings
/api/cameras/fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe/main/recordings
?startTime90k=130888729442361
&endTime90k=130985466591817
```
@ -204,7 +228,7 @@ Example response:
}
```
### `/api/cameras/<uuid>/view.mp4`
### `/api/cameras/<uuid>/<stream>/view.mp4`
A GET returns a `.mp4` file, with an etag and support for range requests. The
MIME type will be `video/mp4`, with a `codecs` parameter as specified in [RFC
@ -213,10 +237,12 @@ MIME type will be `video/mp4`, with a `codecs` parameter as specified in [RFC
Expected query parameters:
* `s` (one or more): a string of the form
`START_ID[-END_ID][.[REL_START_TIME]-[REL_END_TIME]]`. This specifies
recording segments to include. The produced `.mp4` file will be a
`START_ID[-END_ID][@OPEN_ID][.[REL_START_TIME]-[REL_END_TIME]]`. This
specifies recording segments to include. The produced `.mp4` file will be a
concatenation of the segments indicated by all `s` parameters. The ids to
retrieve are as returned by the `/recordings` URL. The optional start and
retrieve are as returned by the `/recordings` URL. The open id is optional
and will be enforced if present; it's recommended for disambiguation when
the requested range includes uncommitted recordings. The optional start and
end times are in 90k units and relative to the start of the first specified
id. These can be used to clip the returned segments. Note they can be used
to skip over some ids entirely; this is allowed so that the caller doesn't
@ -230,27 +256,27 @@ Expected query parameters:
Example request URI to retrieve all of recording id 1 from the given camera:
```
/api/cameras/fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe/view.mp4?s=1
/api/cameras/fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe/main/view.mp4?s=1
```
Example request URI to retrieve all of recording ids 1-5 from the given camera,
with timestamp subtitles:
```
/api/cameras/fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe/view.mp4?s=1-5&ts=true
/api/cameras/fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe/main/view.mp4?s=1-5&ts=true
```
Example request URI to retrieve recording id 1, skipping its first 26
90,000ths of a second:
```
/api/cameras/fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe/view.mp4?s=1.26
/api/cameras/fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe/main/view.mp4?s=1.26
```
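Example request URI pinning the same range to a specific open id (the open id
`42` here is illustrative):
```
/api/cameras/fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe/main/view.mp4?s=1-5@42
```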
TODO: error behavior on missing segment. It should be a 404, likely with an
`application/json` body describing what portion, if any, still exists.
### `/api/cameras/<uuid>/view.m4s`
### `/api/cameras/<uuid>/<stream>/view.m4s`
A GET returns a `.mp4` suitable for use as a [HTML5 Media Source Extensions
media segment][media-segment]. The MIME type will be `video/mp4`, with a

View File

@ -1,7 +1,6 @@
# Moonfire NVR Storage Schema
Status: **current**. This is largely implemented; there is optimization and
testing work left to do.
Status: **current**.
This is the initial design for the most fundamental parts of the Moonfire NVR
storage schema. See also [guide/schema.md](../guide/schema.md) for more
@ -128,7 +127,7 @@ together.
Each recording is stored in two places:
* the recording samples directory, intended to be stored on spinning disk.
* a sample file directory, intended to be stored on spinning disk.
Each file in this directory is simply a concatenation of the compressed,
timestamped video samples (also called "packets" or encoded frames), as
received from the camera. In MPEG-4 terminology (see [ISO
@ -225,74 +224,213 @@ The design avoids the need for the following constraints:
* Serving close to live. It's possible to serve a recording as it is being
written.
### Lifecycle of a sample file directory
One major disadvantage to splitting the state in two (the SQLite3 database in
flash and the sample file directories on spinning disk) is the possibility of
inconsistency. There are many ways this could arise:
* a sample file directory's disk is unexpectedly not mounted due to hardware
failure or misconfiguration.
* the administrator mixing up the mount points of two filesystems holding
different sample file directories.
* the administrator renaming a sample file directory without updating the database.
* the administrator restoring the database from backup but not the sample file
directory, or vice versa.
* the administrator providing two sample file directory paths pointed at the
same inode via symlinks or non-canonical paths. (Note that flock(2) has a
design flaw in which multiple file descriptors can share a lock, so the current
locking scheme alone is not sufficient to detect this case.)
* database and sample file directories forked from the same version, opened
the same number of times, then crossed.
To combat this, each sample file directory has some metadata in its database
row and in a file called `meta` stored within the directory. These track uuids
associated with the database and directory to avoid mixups. They also track
sequence numbers and uuids
associated with "opens": each time the database has been opened in read/write
mode.
```sql
create table open (
id integer primary key,
uuid blob unique not null check (length(uuid) = 16)
);
create table sample_file_dir (
id integer primary key,
path text unique not null,
uuid blob unique not null check (length(uuid) = 16),
-- The last (read/write) open of this directory which fully completed.
-- See schema.proto:DirMeta for a more complete description.
last_complete_open_id integer references open (id)
);
```
```proto
message DirMeta {
// A uuid associated with the database, in binary form. dir_uuid is strictly
// more powerful, but it improves diagnostics to know if the directory
// belongs to the expected database at all or not.
bytes db_uuid = 1;
// A uuid associated with the directory itself.
bytes dir_uuid = 2;
// Corresponds to an entry in the `open` database table.
message Open {
uint32 id = 1;
bytes uuid = 2;
}
// The last open that was known to be recorded in the database as completed.
// Absent if this has never happened. Note this can backtrack in exactly one
// scenario: when deleting the directory, after all associated files have
// been deleted, last_complete_open can be moved to in_progress_open.
Open last_complete_open = 3;
// The last run which is in progress, if different from last_complete_open.
// This may or may not have been recorded in the database, but it's
// guaranteed that no data has yet been written by this open.
Open in_progress_open = 4;
}
```
These are updated through procedures below:
*Write the metadata file*
This is a sub-procedure used in several places below; a sketch in Rust follows
the numbered steps.
Precondition: the directory's lock is held with `LOCK_EX` (exclusive).
1. Write a new `meta.tmp` (opened with `O_CREAT|O_TRUNC` to discard an
existing temporary file if any).
2. `fsync` the `meta.tmp` file descriptor.
3. `rename` `meta.tmp` to `meta`.
4. `fsync` the directory.
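A minimal sketch of this sub-procedure using only the Rust standard library
(the name `write_meta` and the byte-slice argument are illustrative; the real
implementation works through an already-opened directory fd):
```rust
use std::fs::{self, File};
use std::io::Write;
use std::path::Path;

/// Durably replaces `dir/meta` with `data`, per the four steps above.
fn write_meta(dir: &Path, data: &[u8]) -> std::io::Result<()> {
    let tmp = dir.join("meta.tmp");
    let mut f = File::create(&tmp)?;      // 1. O_CREAT|O_TRUNC discards any leftover temp file.
    f.write_all(data)?;
    f.sync_all()?;                        // 2. fsync the meta.tmp contents.
    fs::rename(&tmp, dir.join("meta"))?;  // 3. atomically replace meta.
    File::open(dir)?.sync_all()?;         // 4. fsync the directory itself.
    Ok(())
}
```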
*Open the database as read-only*
1. Lock the database directory with `LOCK_SH` (shared).
2. Open the SQLite database with `SQLITE_OPEN_READ_ONLY`.
*Open the database as read-write*
1. Lock the database directory with `LOCK_EX` (exclusive).
2. Open the SQLite database with `SQLITE_OPEN_READ_WRITE`.
3. Insert a new `open` table row with the new sequence number and uuid.
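A hypothetical sketch of these steps with rusqlite (error handling elided; note
the returned `File` must be kept alive, because dropping it closes the
descriptor and releases the lock):
```rust
use rusqlite::Connection;
use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::path::Path;

fn open_read_write(db_dir: &Path) -> (File, Connection) {
    let dir = File::open(db_dir).unwrap();
    // 1. Lock the database directory exclusively.
    unsafe { assert_eq!(0, libc::flock(dir.as_raw_fd(), libc::LOCK_EX)) };
    // 2. Open the SQLite database (rusqlite opens read-write by default).
    let conn = Connection::open(db_dir.join("db")).unwrap();
    // 3. Record this open; the row's autoincremented id becomes the open id.
    let open_uuid = ::uuid::Uuid::new_v4();
    let open_uuid_bytes = &open_uuid.as_bytes()[..];
    conn.execute("insert into open (uuid) values (?)", &[&open_uuid_bytes]).unwrap();
    (dir, conn)
}
```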
*Create a sample file directory*
Precondition: database open read-write.
1. Lock the sample file directory with `LOCK_EX` (exclusive).
2. Verify there is no metadata file or `last_complete_open` is unset.
3. Write new metadata file with a fresh `dir_uuid` and a `in_progress_open`
matching the database's current open.
4. Add a matching row to the database with `last_complete_open_id` matching
the current open.
5. Update the metadata file to move `in_progress_open` to
`last_complete_open`.
*Open a sample file directory read-only*
Precondition: database open (read-only or read-write).
1. Lock the sample file directory with `LOCK_SH` (shared).
2. Verify the metadata file matches the database:
* database uuid matches.
* dir uuid matches.
* if the database's `last_complete_open` is set, it must match the
directory's `last_complete_open` or `in_progress_open`.
* if the database's `last_complete_open` is absent, the directory's
must be as well.
*Open a sample file directory read-write*
Precondition: database open read-write.
1. Lock the sample file directory with `LOCK_EX` (exclusive).
2. Verify the metadata file matches the database (as above).
3. Update the metadata file with `in_progress_open` matching the current
open.
4. Update the database row with `last_complete_open_id` matching the current
open.
5. Update the metadata file with `last_complete_open` rather than
`in_progress_open`.
6. Run the recording startup procedure for this directory.
*Close a sample file directory*
1. Drop the sample file directory lock.
*Delete a sample file directory*
1. Remove all sample files (of all three categories described below:
`recording` table rows, `garbage` table rows, and files with recording
ids >= their stream's `next_recording_id`); see "delete a recording"
procedure below.
2. Rewrite the directory metadata with `in_progress_open` set to the current open,
`last_complete_open` cleared.
3. Delete the directory's row from the database.
### Lifecycle of a recording
Because a major part of the recording state is outside the SQL database, care
must be taken to guarantee consistency and durability. Moonfire NVR maintains
three invariants about sample files:
1. `recording` table rows have sample files on disk
(named by the given UUID) with the indicated size and SHA-1 hash.
2. There are no sample files without a corresponding `recording` or
`reserved_sample_files` table row referencing their UUID.
3. After an orderly shutdown of Moonfire NVR, there are no
`reserved_sample_files` rows, even if there have been previous crashes.
1. `recording` table rows have sample files on disk with the indicated size
and SHA-1 hash.
2. Exactly one of the following statements is true for every sample file:
* It has a `recording` table row.
* It has a `garbage` table row.
* Its recording id is greater than or equal to the `next_recording_id`
for its stream.
3. After an orderly shutdown of Moonfire NVR, there is a `recording` table row
for every sample file, even if there have been previous crashes.
The first invariant provides certainty that a recording is properly stored. It
would be prohibitively expensive to verify hashes on demand (when listing or
serving recordings), or in some cases even to verify the size of the files via
`stat()` calls.
The second invariant avoids an accidental data loss scenario. On startup, as
part of normal crash recovery, Moonfire NVR should delete sample files which are
half-written (and useless without their indices) and ones which were already in
the process of being deleted (for exceeding their retention time). The absence
of a `recording` table row could be taken to indicate one of these conditions.
But consider another possibility: the SQLite database might not match the sample
directory. This could happen if the wrong disk is mounted at a given path or
after a botched restore from backup. Moonfire NVR would delete everything in
this case! It's far safer to require a specific mention of each file to be
deleted, requiring human intervention before touching unexpected files.
The second invariant improves auditability of the database and sample file
directory.
The third invariant prevents accumulation of garbage files which could fill the
drive and stop recording.
Sample files are named by UUID. Imagine if files were named by autoincrement
instead. One file could be mistaken for another on database vs directory
mismatch. With UUIDs, this is impossible: by design they can be assumed to be
universally unique, so two distinct recordings will never share a UUID.
These invariants are updated through the following procedure:
*Create a recording:*
1. Insert a `reserved_sample_files` row, in state `WRITING`.
2. Write the sample file, aborting if `open(..., O_WRONLY|O_CREAT|O_EXCL)`
fails with `EEXIST`. (This would indicate a non-unique UUID, a serious
defect.)
1. Write the sample file, aborting if `open(..., O_WRONLY|O_CREAT|O_EXCL)`
fails with `EEXIST`.
3. `fsync()` the sample file.
4. `fsync()` the sample file directory.
5. Replace the `reserved_sample_files` row with a `recording` row,
marking its size and SHA-1 hash in the process.
5. Insert the `recording` row, marking its size and SHA-1 hash in the process.
*Delete a recording:*
1. Replace the `recording` row with a `reserved_sample_files` row in state
`DELETED`.
1. Replace the `recording` row with a `garbage` row.
2. `unlink()` the sample file, warning on `ENOENT`. (This would indicate
invariant #2 is false.)
3. `fsync()` the sample file directory.
4. Delete the `reserved_sample_files` row.
4. Delete the `garbage` row.
*Startup (crash recovery):*
1. Acquire a lock to guarantee this is the only Moonfire NVR process running
against the given database. This lock is not released until program shutdown.
2. Query `reserved_sample_files` table.
3. `unlink()` all the sample files associated with rows returned by #2,
ignoring `ENOENT`.
2. Query `garbage` table and `next_recording_id` field in the `stream` table.
3. `unlink()` all the sample files associated with garbage rows, ignoring
`ENOENT`.
4. For each stream, `unlink()` all the existing files with recording ids >=
`next_recording_id`.
4. `fsync()` the samples directory.
5. Delete the rows returned by #2 from the `reserved_sample_files` table.
5. Delete all rows from the `garbage` table.
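As a rough illustration, the garbage-collection portion of this procedure could
look as follows under the version 3 layout, where sample files are named by
zero-padded hexadecimal composite id (the function name and signature are
hypothetical, and the step of removing files at or past `next_recording_id` is
omitted):
```rust
use failure::Error;
use rusqlite::Transaction;
use std::fs;
use std::io::ErrorKind;
use std::path::Path;

fn collect_garbage(tx: &Transaction, dir_id: i32, dir: &Path) -> Result<(), Error> {
    {
        let mut stmt =
            tx.prepare("select composite_id from garbage where sample_file_dir_id = ?")?;
        let mut rows = stmt.query(&[&dir_id])?;
        while let Some(row) = rows.next() {
            let id: i64 = row?.get_checked(0)?;
            if let Err(e) = fs::remove_file(dir.join(format!("{:016x}", id))) {
                if e.kind() != ErrorKind::NotFound {  // ENOENT means it's already gone.
                    Err(e)?;
                }
            }
        }
    }
    fs::File::open(dir)?.sync_all()?;  // fsync the directory before forgetting the rows.
    tx.execute("delete from garbage where sample_file_dir_id = ?", &[&dir_id])?;
    Ok(())
}
```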
The procedures can be batched: while for a given recording, the steps must be
strictly ordered, multiple recordings can be proceeding through the steps
@ -300,15 +438,6 @@ simultaneously. In particular, there is no need to hurry syncing deletions to
disk, so deletion steps #3 and #4 can be done opportunistically if it's
desirable to avoid extra disk seeks or flash write cycles.
There could be another procedure for moving a sample file from one filesystem
to another. This might be used when splitting cameras across hard drives.
New states could be introduced indicating that a recording is "is moving from
A to B" (thus, A is complete, and B is in an undefined state) or "has just
moved from A to B" (thus, B is complete, and A may be present or not).
Alternatively, a camera might have a search path specified for its recordings,
such that the first directory in which a recording is found must have a
complete copy (and subsequent directories' copies may be partial/corrupt).
It'd also be possible to conserve some partial recordings. Moonfire NVR could,
as a recording is written, record the latest sample tables,
size, and hash fields without marking the recording as fully written. On
@ -335,8 +464,8 @@ The times are roughly:
| level | operation | time |
| :------- | :---------- | -------: |
| presence | `readdir()` | ~3 sec |
| size | `fstat()` | ~3 sec |
| presence | `readdir()` | ~19 sec |
| size | `fstat()` | ~120 sec |
| hash | `read()` | ~8 hours |
The `readdir()` and `fstat()` times can be tested simply:
@ -345,9 +474,9 @@ The `readdir()` and `fstat()` times can be tested simply:
$ cd testdir
$ seq 1 $[60*24*365*6/12*2] | xargs touch
$ sudo sh -c 'echo 1 > /proc/sys/vm/drop_caches'
$ time ls -1 -F | wc -l
$ time ls -1 -f | wc -l
$ sudo sh -c 'echo 1 > /proc/sys/vm/drop_caches'
$ time ls -1 -F --size | wc -l
$ time ls -1 -f --size | wc -l
(The system calls used by `ls` can be verified through strace.)
@ -358,10 +487,11 @@ the Raspberry Pi 2, flash, network, and disk are all on the same USB 2.0 bus
to be about 25 MB/sec on an idle system (~40% of the theoretical 480
Mbit/sec). Therefore the process will take over a day.
The size check is fast enough that it seems reasonable to simply always
perform it on startup. Hash checks are too expensive to wait for in normal
operation; they will either be a rare offline data recovery mechanism or done
in the background at low priority.
The presence check is fast enough that it seems reasonable to simply always
perform it on startup. Size could be checked by a separate command used for
more extensive verification, such as before and after schema upgrades.
Hash checks could be performed in a rare offline data recovery mechanism or in
the background at low priority.
### Recording table
@ -371,6 +501,7 @@ The snippet below is a illustrative excerpt of the SQLite schema; see
-- A single, typically 60-second, recorded segment of video.
create table recording (
id integer primary key,
open_id integer references open (id),
camera_id integer references camera (id) not null,
sample_file_uuid blob unique not null,

View File

@ -1,6 +1,6 @@
# Moonfire NVR Time Handling
Status: **draft**
Status: **current**
> A man with a watch knows what time it is. A man with two watches is never
> sure.
@ -57,8 +57,6 @@ following statements are true:
* the cameras are geographically close to the NVR, so in most cases network
transmission time is under 50 ms. (Occasional delays are to be expected,
however.)
* the cameras issue at least one RTCP sender report per recording.
* the cameras are occasionally synchronized via NTP.
When one or more of those statements are false, the system should degrade
gracefully: preserve what properties it can, gather video anyway, and when
@ -99,8 +97,8 @@ information:
support synchronizing clocks via NTP, but in practice cameras appear to
use SNTP clients which simply step time periodically and provide no
interface to determine if the clock is currently synchronized. This
document's author owns several cameras with clocks that run roughly 100
ppm fast (9 seconds per day) and are adjusted via steps.
document's author owns several cameras with clocks that run roughly 20
ppm fast (2 seconds per day) and are adjusted via steps.
* the RTP timestamps from each of a camera's streams. As described in [RFC
3550 section 5.1](https://tools.ietf.org/html/rfc3550#section-5.1), these
are monotonically increasing with an unspecified reference point. They
@ -201,6 +199,8 @@ operation but may be handy in understanding and correcting errors.
## Caveats
### Stream mismatches
There's no particular reason to believe this will produce perfectly matched
streams between cameras or even of main and sub streams within a camera.
If this is insufficient, there's an alternate calculation of start time that
@ -238,3 +238,90 @@ detect and compensate for these clock splits.
It's unclear if these additional mechanisms are desirable or worthwhile. The
simplest approach will be adopted initially and adapted as necessary.
### Time discontinuities
If the local system's wall clock time jumps during a recording ([as has
happened](https://github.com/scottlamb/moonfire-nvr/issues/9#issuecomment-322663674)),
Moonfire NVR will continue to use the initial wall clock time for as long as
the recording lasts. This can result in some unfortunate behaviors:
* a recording that lasts for months might have an incorrect time all the
way through because `ntpd` took a few minutes on startup.
* two recordings that were in fact simultaneous might be recorded with very
different times because a time jump happened between their starts.
It might be better to use the new time (assuming that ntpd has made a
correction) retroactively. This is unimplemented, but the
`recording_integrity` database table has a `wall_time_delta_90k` field which
could be used for this purpose, either automatically or interactively.
It would also be possible to split a recording in two if a "significant" time
jump is noted, or to allow manually restarting a recording without restarting
the entire program.
### Leap seconds
UTC time is defined as the seconds since epoch _excluding
leap seconds_. Thus, timestamps during the leap second are ambiguous, and
durations across the leap second should be adjusted.
In POSIX, the system clock (as returned by `clock_gettime(CLOCK_REALTIME,
...`) is defined as representing UTC. Note that some
systems may instead be following a [leap
smear](https://developers.google.com/time/smear) policy in which instead of
one second happening twice, the clock runs slower. For a 24-hour period, the
clock runs slower by a factor of 1/86,400 (an extra ~11.6 μs/s).
In Moonfire NVR, all wall times in the database are based on UTC as reported
by the system, and it's assumed that `start + duration = end`. Thus, a leap
second is similar to a one-second time jump (see "Time discontinuities"
above).
Here are some options for improvement:
#### Use `clock_gettime(CLOCK_TAI, ...)` timestamps
Timestamps in the TAI clock system don't skip leap seconds. There's a system
interface intended to provide timestamps in this clock system, and Moonfire
NVR could use it. Unfortunately this has several problems:
* `CLOCK_TAI` is only available on Linux. It'd be preferable to handle
timestamps in a consistent way on other platforms. (At least on macOS,
Moonfire NVR's current primary development platform.)
* `CLOCK_TAI` is wrong on startup and possibly adjusted later. The offset
between TAI and UTC is initially assumed to be 0. It's corrected when/if
a sufficiently new `ntpd` starts.
* We'd need a leap second table to translate this into calendar time. One
would have to be downloaded from the Internet periodically, and we'd need
to consider the case in which the available table is expired.
* `CLOCK_TAI` likely doesn't work properly with leap smear systems. Where
the leap smear prevents a time jump for `CLOCK_REALTIME`, it likely
introduces one for `CLOCK_TAI`.
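For what it's worth, the call itself is simple; the caveats above are the hard
part. A Linux-only sketch via the libc crate:
```rust
use std::mem;

fn now_tai() -> libc::timespec {
    let mut ts: libc::timespec = unsafe { mem::zeroed() };
    let r = unsafe { libc::clock_gettime(libc::CLOCK_TAI, &mut ts) };
    assert_eq!(r, 0, "clock_gettime(CLOCK_TAI) failed");
    ts  // tv_sec is TAI seconds; the TAI-UTC offset is 0 until ntpd sets it.
}
```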
#### Use a leap second table when calculating differences
Moonfire NVR could retrieve UTC timestamps from the system, then translate them
to TAI via a leap second table, either before writing them to the database or
whenever doing math on timestamps.
As with `CLOCK_TAI`, this would require downloading a leap second table from
the Internet periodically.
This would mostly solve the problem at the cost of complexity. Timestamps
obtained from the system for a two-second period starting with each leap
second would still be ambiguous.
#### Use smeared time
Moonfire NVR could make no code changes and ask the system administrator to
use smeared time. This is the simplest option. On a leap smear system, there
are no time jumps. The ~11.6 ppm frequency error and the maximum introduced
absolute error of 0.5 sec can be considered acceptable.
Alternatively, Moonfire NVR could assume a specific leap smear policy (such as
24-hour linear smear from 12:00 the day before to 12:00 the day after) and
attempt to correct the time into TAI with a leap second table. This behavior
would work well on a system with the expected configuration and produce
surprising results on other systems. It's unfortunate that there's no standard
way to determine if a system is using a leap smear and with what policy.

35
ffmpeg/Cargo.lock generated
View File

@ -1,35 +0,0 @@
[root]
name = "moonfire-ffmpeg"
version = "0.0.1"
dependencies = [
"cc 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "cc"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "libc"
version = "0.2.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "log"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "pkg-config"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum cc 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ef4019bdb99c0c1ddd56c12c2f507c174d729c6915eca6bd9d27c42f3d93b0f4"
"checksum libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)" = "38f5c2b18a287cf78b4097db62e20f43cace381dc76ae5c0a3073067f78b7ddc"
"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"
"checksum pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903"

View File

@ -168,7 +168,6 @@ been done for you. If not, Create
[Service]
ExecStart=/usr/local/bin/moonfire-nvr run \
--sample-file-dir=/var/lib/moonfire-nvr/samples \
--db-dir=/var/lib/moonfire-nvr/db \
--http-addr=0.0.0.0:8080
Environment=TZ=:/etc/localtime

View File

@ -32,9 +32,9 @@ state:
* a SQLite database, typically <1 GiB. It should be stored on flash if
available.
* the "sample file directory", which holds the actual samples/frames of
H.264 video. This should be quite large and typically is stored on a hard
drive.
* the "sample file directories", which hold the actual samples/frames of
H.264 video. These should be quite large and are typically stored on hard
drives.
Both states are intended to be accessed by moonfire-nvr only, but can be
changed after building. See below.
@ -67,30 +67,27 @@ You can configure the system's database through a text-based user interface:
$ sudo -u moonfire-nvr moonfire-nvr config 2>debug-log
If you have used a non-default path for your samples directory, as you most
likely have, you must also supply that location, or the command will fail
with an error message about not being able to open the default location for
that directory:
In the user interface,
$ sudo -u moonfire-nvr moonfire-nvr config --sample-files-dir=/path/to/my/media/samples 2>debug-log
1. add your sample file dirs under "Edit cameras and retention"
2. add cameras under the "Edit cameras and streams" dialog.
There's a "Test" button to verify your settings directly from the dialog.
Be sure to assign each stream you want to capture to a sample file
directory.
3. Assign disk space to your cameras back in "Edit cameras and retention".
Leave a little slack (at least 100 MB per camera) between the total limit
and the filesystem capacity, even if you store nothing else on the disk.
There are several reasons this is needed:
In the user interface, add your cameras under the "Edit cameras" dialog.
There's a "Test" button to verify your settings directly from the dialog.
After the cameras look correct, go to "Edit retention" to assign disk space to
each camera. Leave a little slack (at least 100 MB per camera) between the total
limit and the filesystem capacity, even if you store nothing else on the disk.
There are several reasons this is needed:
* The limit currently controls fully-written files only. There will be up
to two minutes of additional video per camera.
* The rotation happens after the limit is exceeded, not proactively.
* Moonfire NVR currently doesn't account for the unused space in the final
filesystem block at the end of each file.
* Moonfire NVR doesn't account for the space used for directory listings.
* If a file is open when it is deleted (such as if a HTTP client is
downloading it), it stays around until the file is closed. Moonfire NVR
currently doesn't account for this.
When finished, start the daemon:
@ -108,7 +105,6 @@ been done for you. If not, Create
[Service]
ExecStart=/usr/local/bin/moonfire-nvr run \
--sample-file-dir=/var/lib/moonfire-nvr/sample \
--db-dir=/var/lib/moonfire-nvr/db \
--http-addr=0.0.0.0:8080
Environment=TZ=:/etc/localtime

View File

@ -191,3 +191,34 @@ indexes, the number of (mostly 1024-byte) read syscalls on the database
dropped from 605 to 39.
The general upgrade procedure applies to this upgrade.
### Version 1 to version 2 to version 3
This upgrade affects the sample file directory as well as the database. Thus,
the restore procedure described above of simply copying back the database is
insufficient. To do a full restore, you would need to back up and restore the
sample file directory as well. This directory is considerably larger, so
consider an alternate procedure of crossing your fingers, and being prepared
to start over from scratch if there's a problem.
Version 2 represents a half-finished upgrade from version 1 to version 3; it
is never used.
Version 3 adds over version 1:
* user authentication
* recording of sub streams (splits a new `stream` table out of `camera`)
* a per-stream knob `flush_if_sec` meant to reduce database commits (and
thus SSD write cycles). This makes running many streams more practical.
* support for multiple sample file directories, to take advantage of
multiple hard drives (or multiple RAID volumes).
* an interlock between database and sample file directories to avoid various
mixups that could cause data integrity problems.
* recording the RFC-6381 codec associated with a video sample entry, so that
logic for determining this is no longer needed as part of the database
layer.
* a simpler sample file directory layout in which files are represented by
the same sequentially increasing id as in the database, rather than a
separate uuid which has to be reserved in advance.
* additional timestamp fields which may be useful in diagnosing/correcting
time jumps/inconsistencies.

View File

@ -104,7 +104,6 @@ After=network-online.target
[Service]
ExecStart=${SERVICE_BIN} run \\
--sample-file-dir=${SAMPLE_MEDIA_DIR}/${SAMPLE_FILE_DIR} \\
--db-dir=${DB_DIR} \\
--ui-dir=${LIB_DIR}/ui \\
--http-addr=0.0.0.0:${NVR_PORT}

View File

@ -359,25 +359,6 @@ fix_localtime()
fi
}
# Add/update cameras in the database
#
# $1: path to cameras.sql file
# $2: moonfire service name
# $3: moonfire user name
# $4: moonfire database path
#
addCameras()
{
local cpath="${CAMERAS_PATH:-$1}"
if [ -r "${cpath}" ]; then
echo_info -x 'Add cameras...'
# Before adding cameras, must stop service
moonfire stop "${SERVICE_NAME:-$2}" >/dev/null 2>&1
sudo -u ${NVR_USER:-$3} -H sqlite3 "${DB_PATH:-$4}" < "${cpath}"
moonfire start "${SERVICE_NAME:-$2}" >/dev/null 2>&1
fi
}
pre_install_prep()
{
prep_moonfire_user

View File

@ -173,25 +173,6 @@ trap finish EXIT
pre_install_prep
# Initialize camera from sql file if present
# (DEPRECATED: Will be removed in a future version)
#
CAMERAS_PATH="${MOONFIRE_DIR}/cameras.sql"
if [ ! -r "${CAMERAS_PATH}" ]; then
CAMERAS_PATH="${MOONFIRE_DIR}/../cameras.sql"
if [ ! -r "${CAMERAS_PATH}" ]; then
CAMERAS_PATH=
fi
fi
if [ ! -z "${CAMERAS_PATH}" ]; then
echo_warn "Camera configuration through sql file is deprecated and will not be supported in the future." \
"Use \"moonfire-nvr config\" instead."
echo_info -x "Adding camera confguration to db..."
addCameras
else
echo_warn -x "No cameras auto configured. Use \"moonfire-nvr config\" to do it later..."
fi
read_lines <<-'INSTRUCTIONS'
Unless there are errors above, everything you need should have been installed
and you are now ready to build, install, and then use moonfire.

View File

@ -1,5 +1,5 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2016 Scott Lamb <slamb@slamb.org>
// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
@ -30,11 +30,8 @@
//! Subcommand to check the database and sample file dir for errors.
use db;
use error::Error;
use recording;
use std::fs;
use uuid::Uuid;
use db::check;
use failure::Error;
static USAGE: &'static str = r#"
Checks database integrity.
@ -49,159 +46,21 @@ Options:
--db-dir=DIR Set the directory holding the SQLite3 index database.
This is typically on a flash device.
[default: /var/lib/moonfire-nvr/db]
--sample-file-dir=DIR Set the directory holding video data.
This is typically on a hard drive.
[default: /var/lib/moonfire-nvr/sample]
--compare-lens Compare sample file lengths on disk to the database.
"#;
#[derive(Debug, Deserialize)]
struct Args {
flag_db_dir: String,
flag_sample_file_dir: String,
}
#[derive(Debug, Eq, PartialEq)]
struct RecordingSummary {
bytes: u64,
video_samples: i32,
video_sync_samples: i32,
duration: i32,
flags: i32,
}
fn summarize_index(video_index: &[u8]) -> Result<RecordingSummary, Error> {
let mut it = recording::SampleIndexIterator::new();
let mut duration = 0;
let mut video_samples = 0;
let mut video_sync_samples = 0;
let mut bytes = 0;
while it.next(video_index)? {
bytes += it.bytes as u64;
duration += it.duration_90k;
video_samples += 1;
video_sync_samples += it.is_key() as i32;
}
Ok(RecordingSummary{
bytes: bytes,
video_samples: video_samples,
video_sync_samples: video_sync_samples,
duration: duration,
flags: if it.duration_90k == 0 { db::RecordingFlags::TrailingZero as i32 } else { 0 },
})
}
struct File {
uuid: Uuid,
len: u64,
composite_id: Option<i64>,
flag_compare_lens: bool,
}
pub fn run() -> Result<(), Error> {
let args: Args = super::parse_args(USAGE)?;
let (_db_dir, conn) = super::open_conn(&args.flag_db_dir, super::OpenMode::ReadOnly)?;
let mut files = Vec::new();
for e in fs::read_dir(&args.flag_sample_file_dir)? {
let e = e?;
let uuid = match e.file_name().to_str().and_then(|f| Uuid::parse_str(f).ok()) {
Some(f) => f,
None => {
error!("sample file directory contains file {} which isn't a uuid",
e.file_name().to_string_lossy());
continue;
}
};
let len = e.metadata()?.len();
files.push(File{uuid: uuid, len: len, composite_id: None});
}
files.sort_by(|a, b| a.uuid.cmp(&b.uuid));
// This statement should be a full outer join over the recording and recording_playback tables.
// SQLite3 doesn't support that, though, so emulate it with a couple left joins and a union.
const FIELDS: &'static str = r#"
recording.composite_id,
recording.flags,
recording.sample_file_bytes,
recording.duration_90k,
recording.video_samples,
recording.video_sync_samples,
recording_playback.composite_id,
recording_playback.sample_file_uuid,
recording_playback.video_index
"#;
let mut stmt = conn.prepare(&format!(r#"
select {}
from recording left join recording_playback on
(recording.composite_id = recording_playback.composite_id)
union all
select {}
from recording_playback left join recording on
(recording_playback.composite_id = recording.composite_id)
where recording.composite_id is null
"#, FIELDS, FIELDS))?;
let mut rows = stmt.query(&[])?;
while let Some(row) = rows.next() {
let row = row?;
let composite_id: Option<i64> = row.get_checked(0)?;
let playback_composite_id: Option<i64> = row.get_checked(6)?;
let composite_id = match (composite_id, playback_composite_id) {
(Some(id1), Some(_)) => id1,
(Some(id1), None) => {
error!("composite id {} has recording row but no recording_playback row", id1);
continue;
},
(None, Some(id2)) => {
error!("composite id {} has recording_playback row but no recording row", id2);
continue;
},
(None, None) => {
return Err(Error::new("outer join returned fully empty row".to_owned()));
},
};
let row_summary = RecordingSummary{
flags: row.get_checked(1)?,
bytes: row.get_checked::<_, i64>(2)? as u64,
duration: row.get_checked(3)?,
video_samples: row.get_checked(4)?,
video_sync_samples: row.get_checked(5)?,
};
let sample_file_uuid = Uuid::from_bytes(&row.get_checked::<_, Vec<u8>>(7)?)?;
let video_index: Vec<u8> = row.get_checked(8)?;
let index_summary = match summarize_index(&video_index) {
Ok(s) => s,
Err(e) => {
error!("composite id {} has bad video_index: {}", composite_id, e);
continue;
},
};
if row_summary != index_summary {
error!("composite id {} row summary {:#?} inconsistent with index {:#?}",
composite_id, row_summary, index_summary);
}
let f = match files.binary_search_by(|f| f.uuid.cmp(&sample_file_uuid)) {
Ok(i) => &mut files[i],
Err(_) => {
error!("composite id {} refers to missing sample file {}",
composite_id, sample_file_uuid);
continue;
}
};
if let Some(id) = f.composite_id {
error!("composite id {} refers to sample file {} already used by id {}",
composite_id, sample_file_uuid, id);
} else {
f.composite_id = Some(composite_id);
}
if row_summary.bytes != f.len {
error!("composite id {} declares length {}, but its sample file {} has length {}",
composite_id, row_summary.bytes, sample_file_uuid, f.len);
}
}
for f in files {
if f.composite_id.is_none() {
error!("sample file {} not used by any recording", f.uuid);
}
}
info!("Check done.");
Ok(())
// TODO: ReadOnly should be sufficient but seems to fail.
let (_db_dir, conn) = super::open_conn(&args.flag_db_dir, super::OpenMode::ReadWrite)?;
check::run(&conn, &check::Options {
compare_lens: args.flag_compare_lens,
})
}

View File

@ -33,9 +33,10 @@ extern crate cursive;
use self::cursive::Cursive;
use self::cursive::traits::{Boxable, Identifiable, Finder};
use self::cursive::views;
use db;
use dir;
use error::Error;
use db::{self, writer};
use failure::Error;
use std::collections::BTreeMap;
use std::str::FromStr;
use std::sync::Arc;
use stream::{self, Opener, Stream};
use super::{decode_size, encode_size};
@ -49,23 +50,37 @@ fn get_change(siv: &mut Cursive) -> db::CameraChange {
let h = siv.find_id::<views::EditView>("host").unwrap().get_content().as_str().into();
let u = siv.find_id::<views::EditView>("username").unwrap().get_content().as_str().into();
let p = siv.find_id::<views::EditView>("password").unwrap().get_content().as_str().into();
let m = siv.find_id::<views::EditView>("main_rtsp_path").unwrap().get_content().as_str().into();
let s = siv.find_id::<views::EditView>("sub_rtsp_path").unwrap().get_content().as_str().into();
db::CameraChange{
let mut c = db::CameraChange {
short_name: sn,
description: d,
host: h,
username: u,
password: p,
main_rtsp_path: m,
sub_rtsp_path: s,
streams: Default::default(),
};
for &t in &db::ALL_STREAM_TYPES {
let p = siv.find_id::<views::EditView>(&format!("{}_rtsp_path", t.as_str()))
.unwrap().get_content().as_str().into();
let r = siv.find_id::<views::Checkbox>(&format!("{}_record", t.as_str()))
.unwrap().is_checked();
let f = i64::from_str(siv.find_id::<views::EditView>(
&format!("{}_flush_if_sec", t.as_str())).unwrap().get_content().as_str())
.unwrap_or(0);
let d = *siv.find_id::<views::SelectView<Option<i32>>>(
&format!("{}_sample_file_dir", t.as_str()))
.unwrap().selection();
c.streams[t.index()] = db::StreamChange {
rtsp_path: p,
sample_file_dir_id: d,
record: r,
flush_if_sec: f,
};
}
c
}
fn press_edit(siv: &mut Cursive, db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>,
id: Option<i32>) {
fn press_edit(siv: &mut Cursive, db: &Arc<db::Database>, id: Option<i32>) {
let change = get_change(siv);
siv.pop_layer(); // get rid of the add/edit camera dialog.
let result = {
let mut l = db.lock();
@ -80,9 +95,11 @@ fn press_edit(siv: &mut Cursive, db: &Arc<db::Database>, dir: &Arc<dir::SampleFi
.title("Error")
.dismiss_button("Abort"));
} else {
siv.pop_layer(); // get rid of the add/edit camera dialog.
// Recreate the "Edit cameras" dialog from scratch; it's easier than adding the new entry.
siv.pop_layer();
add_dialog(db, dir, siv);
top_dialog(db, siv);
}
}
@ -92,25 +109,44 @@ fn press_test_inner(url: &str) -> Result<String, Error> {
Ok(format!("{}x{} video stream", extra_data.width, extra_data.height))
}
fn press_test(siv: &mut Cursive, c: &db::CameraChange, stream: &str, path: &str) {
let url = format!("rtsp://{}:{}@{}{}", c.username, c.password, c.host, path);
let description = match press_test_inner(&url) {
Err(e) => {
siv.add_layer(
views::Dialog::text(format!("{} stream at {}:\n\n{}", stream, url, e))
.title("Stream test failed")
.dismiss_button("Back"));
return;
},
Ok(d) => d,
};
siv.add_layer(views::Dialog::text(format!("{} stream at {}:\n\n{}", stream, url, description))
.title("Stream test succeeded")
.dismiss_button("Back"));
fn press_test(siv: &mut Cursive, t: db::StreamType) {
let c = get_change(siv);
let url = format!("rtsp://{}:{}@{}{}", c.username, c.password, c.host,
c.streams[t.index()].rtsp_path);
siv.add_layer(views::Dialog::text(format!("Testing {} stream at {}. This may take a while \
on timeout or if you have a long key frame interval",
t.as_str(), url))
.title("Testing"));
// Let siv have this thread for its event loop; do the work in a background thread.
// siv.cb_sink doesn't actually wake up the event loop. Tell siv to poll, as a workaround.
siv.set_fps(5);
let sink = siv.cb_sink().clone();
::std::thread::spawn(move || {
let r = press_test_inner(&url);
sink.send(Box::new(move |siv| {
// Polling is no longer necessary.
siv.set_fps(0);
siv.pop_layer();
let description = match r {
Err(ref e) => {
siv.add_layer(
views::Dialog::text(format!("{} stream at {}:\n\n{}", t.as_str(), url, e))
.title("Stream test failed")
.dismiss_button("Back"));
return;
},
Ok(ref d) => d,
};
siv.add_layer(views::Dialog::text(
format!("{} stream at {}:\n\n{}", t.as_str(), url, description))
.title("Stream test succeeded")
.dismiss_button("Back"));
})).unwrap();
});
}
fn press_delete(siv: &mut Cursive, db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>, id: i32,
name: String, to_delete: i64) {
fn press_delete(siv: &mut Cursive, db: &Arc<db::Database>, id: i32, name: String, to_delete: i64) {
let dialog = if to_delete > 0 {
let prompt = format!("Camera {} has recorded video. Please confirm the amount \
of data to delete by typing it back:\n\n{}", name,
@ -121,38 +157,51 @@ fn press_delete(siv: &mut Cursive, db: &Arc<db::Database>, dir: &Arc<dir::Sample
.child(views::DummyView)
.child(views::EditView::new().on_submit({
let db = db.clone();
let dir = dir.clone();
move |siv, _| confirm_deletion(siv, &db, &dir, id, to_delete)
move |siv, _| confirm_deletion(siv, &db, id, to_delete)
}).with_id("confirm")))
.button("Delete", {
let db = db.clone();
let dir = dir.clone();
move |siv| confirm_deletion(siv, &db, &dir, id, to_delete)
move |siv| confirm_deletion(siv, &db, id, to_delete)
})
} else {
views::Dialog::text(format!("Delete camera {}? This camera has no recorded video.", name))
.button("Delete", {
let db = db.clone();
let dir = dir.clone();
move |s| actually_delete(s, &db, &dir, id)
move |s| actually_delete(s, &db, id)
})
}.title("Delete camera").dismiss_button("Cancel");
siv.add_layer(dialog);
}
fn confirm_deletion(siv: &mut Cursive, db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>,
id: i32, to_delete: i64) {
fn confirm_deletion(siv: &mut Cursive, db: &Arc<db::Database>, id: i32, to_delete: i64) {
let typed = siv.find_id::<views::EditView>("confirm").unwrap().get_content();
if decode_size(typed.as_str()).ok() == Some(to_delete) {
siv.pop_layer(); // deletion confirmation dialog
if let Err(e) = dir::lower_retention(dir.clone(),
&[dir::NewLimit{camera_id: id, limit: 0}]) {
let mut zero_limits = BTreeMap::new();
{
let l = db.lock();
for (&stream_id, stream) in l.streams_by_id() {
if stream.camera_id == id {
let dir_id = match stream.sample_file_dir_id {
Some(d) => d,
None => continue,
};
let l = zero_limits.entry(dir_id).or_insert_with(|| Vec::with_capacity(2));
l.push(writer::NewLimit {
stream_id,
limit: 0,
});
}
}
}
if let Err(e) = lower_retention(db, zero_limits) {
siv.add_layer(views::Dialog::text(format!("Unable to delete recordings: {}", e))
.title("Error")
.dismiss_button("Abort"));
return;
}
actually_delete(siv, db, dir, id);
actually_delete(siv, db, id);
} else {
siv.add_layer(views::Dialog::text("Please confirm amount.")
.title("Try again")
@ -160,9 +209,17 @@ fn confirm_deletion(siv: &mut Cursive, db: &Arc<db::Database>, dir: &Arc<dir::Sa
}
}
fn actually_delete(siv: &mut Cursive, db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>,
id: i32) {
info!("actually_delete call");
fn lower_retention(db: &Arc<db::Database>, zero_limits: BTreeMap<i32, Vec<writer::NewLimit>>)
-> Result<(), Error> {
let dirs_to_open: Vec<_> = zero_limits.keys().map(|id| *id).collect();
db.lock().open_sample_file_dirs(&dirs_to_open[..])?;
for (&dir_id, l) in &zero_limits {
writer::lower_retention(db.clone(), dir_id, &l)?;
}
Ok(())
}
fn actually_delete(siv: &mut Cursive, db: &Arc<db::Database>, id: i32) {
siv.pop_layer(); // get rid of the add/edit camera dialog.
let result = {
let mut l = db.lock();
@ -175,15 +232,14 @@ fn actually_delete(siv: &mut Cursive, db: &Arc<db::Database>, dir: &Arc<dir::Sam
} else {
// Recreate the "Edit cameras" dialog from scratch; it's easier than adding the new entry.
siv.pop_layer();
add_dialog(db, dir, siv);
top_dialog(db, siv);
}
}
/// Adds or updates a camera.
/// (The former if `item` is None; the latter otherwise.)
fn edit_camera_dialog(db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>, siv: &mut Cursive,
item: &Option<i32>) {
let list = views::ListView::new()
fn edit_camera_dialog(db: &Arc<db::Database>, siv: &mut Cursive, item: &Option<i32>) {
let camera_list = views::ListView::new()
.child("id", views::TextView::new(match *item {
None => "<new>".to_string(),
Some(id) => id.to_string(),
@ -193,40 +249,88 @@ fn edit_camera_dialog(db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>, siv
.child("host", views::EditView::new().with_id("host"))
.child("username", views::EditView::new().with_id("username"))
.child("password", views::EditView::new().with_id("password"))
.child("main_rtsp_path", views::LinearLayout::horizontal()
.child(views::EditView::new().with_id("main_rtsp_path").full_width())
.child(views::DummyView)
.child(views::Button::new("Test", |siv| {
let c = get_change(siv);
press_test(siv, &c, "main", &c.main_rtsp_path)
})))
.child("sub_rtsp_path", views::LinearLayout::horizontal()
.child(views::EditView::new().with_id("sub_rtsp_path").full_width())
.child(views::DummyView)
.child(views::Button::new("Test", |siv| {
let c = get_change(siv);
press_test(siv, &c, "sub", &c.sub_rtsp_path)
})))
.min_height(8);
let layout = views::LinearLayout::vertical()
.child(list)
.min_height(6);
let mut layout = views::LinearLayout::vertical()
.child(camera_list)
.child(views::TextView::new("description"))
.child(views::TextArea::new().with_id("description").min_height(3))
.full_width();
.child(views::TextArea::new().with_id("description").min_height(3));
let dirs: Vec<_> = ::std::iter::once(("<none>".to_owned(), None))
.chain(db.lock()
.sample_file_dirs_by_id()
.iter()
.map(|(&id, d)| (d.path.as_str().to_owned(), Some(id))))
.collect();
for &type_ in &db::ALL_STREAM_TYPES {
let list = views::ListView::new()
.child("rtsp path", views::LinearLayout::horizontal()
.child(views::EditView::new()
.with_id(format!("{}_rtsp_path", type_.as_str()))
.full_width())
.child(views::DummyView)
.child(views::Button::new("Test", move |siv| press_test(siv, type_))))
.child("sample file dir",
views::SelectView::<Option<i32>>::new()
.with_all(dirs.iter().map(|d| d.clone()))
.popup()
.with_id(format!("{}_sample_file_dir", type_.as_str())))
.child("record", views::Checkbox::new().with_id(format!("{}_record", type_.as_str())))
.child("flush_if_sec", views::EditView::new()
.with_id(format!("{}_flush_if_sec", type_.as_str())))
.child("usage/capacity",
views::TextView::new("").with_id(format!("{}_usage_cap", type_.as_str())))
.min_height(5);
layout.add_child(views::DummyView);
layout.add_child(views::TextView::new(format!("{} stream", type_.as_str())));
layout.add_child(list);
}
let mut dialog = views::Dialog::around(layout);
let dialog = if let Some(id) = *item {
let dialog = if let Some(camera_id) = *item {
let l = db.lock();
let camera = l.cameras_by_id().get(&id).expect("missing camera");
let camera = l.cameras_by_id().get(&camera_id).expect("missing camera");
dialog.find_id("uuid", |v: &mut views::TextView| v.set_content(camera.uuid.to_string()))
.expect("missing TextView");
let bytes = camera.sample_file_bytes;
let mut bytes = 0;
for (i, sid) in camera.streams.iter().enumerate() {
let t = db::StreamType::from_index(i).unwrap();
// Find the index into dirs of the stored sample file dir.
let mut selected_dir = 0;
if let Some(s) = sid.map(|sid| l.streams_by_id().get(&sid).unwrap()) {
if let Some(id) = s.sample_file_dir_id {
for (i, &(_, d_id)) in dirs.iter().skip(1).enumerate() {
if Some(id) == d_id {
selected_dir = i + 1;
break;
}
}
}
bytes += s.sample_file_bytes;
let u = if s.retain_bytes == 0 {
"0 / 0 (0.0%)".to_owned()
} else {
format!("{} / {} ({:.1}%)", s.sample_file_bytes, s.retain_bytes,
100. * s.sample_file_bytes as f32 / s.retain_bytes as f32)
};
dialog.find_id(&format!("{}_rtsp_path", t.as_str()),
|v: &mut views::EditView| v.set_content(s.rtsp_path.to_owned()));
dialog.find_id(&format!("{}_usage_cap", t.as_str()),
|v: &mut views::TextView| v.set_content(u));
dialog.find_id(&format!("{}_record", t.as_str()),
|v: &mut views::Checkbox| v.set_checked(s.record));
dialog.find_id(&format!("{}_flush_if_sec", t.as_str()),
|v: &mut views::EditView| v.set_content(s.flush_if_sec.to_string()));
}
dialog.find_id(&format!("{}_sample_file_dir", t.as_str()),
|v: &mut views::SelectView<Option<i32>>| v.set_selection(selected_dir));
}
let name = camera.short_name.clone();
for &(view_id, content) in &[("short_name", &camera.short_name),
("host", &camera.host),
("username", &camera.username),
("password", &camera.password),
("main_rtsp_path", &camera.main_rtsp_path),
("sub_rtsp_path", &camera.sub_rtsp_path)] {
for &(view_id, content) in &[("short_name", &*camera.short_name),
("host", &*camera.host),
("username", &*camera.username),
("password", &*camera.password)] {
dialog.find_id(view_id, |v: &mut views::EditView| v.set_content(content.to_string()))
.expect("missing EditView");
}
@ -236,32 +340,32 @@ fn edit_camera_dialog(db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>, siv
dialog.title("Edit camera")
.button("Edit", {
let db = db.clone();
let dir = dir.clone();
move |s| press_edit(s, &db, &dir, Some(id))
move |s| press_edit(s, &db, Some(camera_id))
})
.button("Delete", {
let db = db.clone();
let dir = dir.clone();
move |s| press_delete(s, &db, &dir, id, name.clone(), bytes)
move |s| press_delete(s, &db, camera_id, name.clone(), bytes)
})
} else {
for t in &db::ALL_STREAM_TYPES {
dialog.find_id(&format!("{}_usage_cap", t.as_str()),
|v: &mut views::TextView| v.set_content("<new>"));
}
dialog.title("Add camera")
.button("Add", {
let db = db.clone();
let dir = dir.clone();
move |s| press_edit(s, &db, &dir, None)
move |s| press_edit(s, &db, None)
})
};
siv.add_layer(dialog.dismiss_button("Cancel"));
}
pub fn add_dialog(db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>, siv: &mut Cursive) {
pub fn top_dialog(db: &Arc<db::Database>, siv: &mut Cursive) {
siv.add_layer(views::Dialog::around(
views::SelectView::new()
.on_submit({
let db = db.clone();
let dir = dir.clone();
move |siv, item| edit_camera_dialog(&db, &dir, siv, item)
move |siv, item| edit_camera_dialog(&db, siv, item)
})
.item("<new camera>".to_string(), None)
.with_all(db.lock()

View File

@ -33,39 +33,42 @@ extern crate cursive;
use self::cursive::Cursive;
use self::cursive::traits::{Boxable, Identifiable};
use self::cursive::views;
use db;
use dir;
use error::Error;
use db::{self, writer};
use failure::Error;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::rc::Rc;
use std::sync::Arc;
use super::{decode_size, encode_size};
struct Camera {
struct Stream {
label: String,
used: i64,
record: bool,
retain: Option<i64>, // None if unparseable
}
struct Model {
db: Arc<db::Database>,
dir: Arc<dir::SampleFileDir>,
dir_id: i32,
fs_capacity: i64,
total_used: i64,
total_retain: i64,
errors: isize,
cameras: BTreeMap<i32, Camera>,
streams: BTreeMap<i32, Stream>,
}
/// Updates the limits in the database. Doesn't delete excess data (if any).
fn update_limits_inner(model: &Model) -> Result<(), Error> {
let mut db = model.db.lock();
let mut tx = db.tx()?;
for (&id, camera) in &model.cameras {
tx.update_retention(id, camera.retain.unwrap())?;
let mut changes = Vec::with_capacity(model.streams.len());
for (&stream_id, stream) in &model.streams {
changes.push(db::RetentionChange {
stream_id,
new_record: stream.record,
new_limit: stream.retain.unwrap(),
});
}
tx.commit()
model.db.lock().update_retention(&changes)
}
fn update_limits(model: &Model, siv: &mut Cursive) {
@ -77,12 +80,12 @@ fn update_limits(model: &Model, siv: &mut Cursive) {
}
fn edit_limit(model: &RefCell<Model>, siv: &mut Cursive, id: i32, content: &str) {
info!("on_edit called for id {}", id);
debug!("on_edit called for id {}", id);
let mut model = model.borrow_mut();
let model: &mut Model = &mut *model;
let camera = model.cameras.get_mut(&id).unwrap();
let stream = model.streams.get_mut(&id).unwrap();
let new_value = decode_size(content).ok();
let delta = new_value.unwrap_or(0) - camera.retain.unwrap_or(0);
let delta = new_value.unwrap_or(0) - stream.retain.unwrap_or(0);
let old_errors = model.errors;
if delta != 0 {
let prev_over = model.total_retain > model.fs_capacity;
@ -91,7 +94,6 @@ fn edit_limit(model: &RefCell<Model>, siv: &mut Cursive, id: i32, content: &str)
.unwrap()
.set_content(encode_size(model.total_retain));
let now_over = model.total_retain > model.fs_capacity;
info!("now_over: {}", now_over);
if now_over != prev_over {
model.errors += if now_over { 1 } else { -1 };
siv.find_id::<views::TextView>("total_ok")
@ -99,27 +101,34 @@ fn edit_limit(model: &RefCell<Model>, siv: &mut Cursive, id: i32, content: &str)
.set_content(if now_over { "*" } else { " " });
}
}
if new_value.is_none() != camera.retain.is_none() {
if new_value.is_none() != stream.retain.is_none() {
model.errors += if new_value.is_none() { 1 } else { -1 };
siv.find_id::<views::TextView>(&format!("{}_ok", id))
.unwrap()
.set_content(if new_value.is_none() { "*" } else { " " });
}
camera.retain = new_value;
info!("model.errors = {}", model.errors);
stream.retain = new_value;
debug!("model.errors = {}", model.errors);
if (model.errors == 0) != (old_errors == 0) {
info!("toggling change state: errors={}", model.errors);
trace!("toggling change state: errors={}", model.errors);
siv.find_id::<views::Button>("change")
.unwrap()
.set_enabled(model.errors == 0);
}
}
fn edit_record(model: &RefCell<Model>, id: i32, record: bool) {
let mut model = model.borrow_mut();
let model: &mut Model = &mut *model;
let stream = model.streams.get_mut(&id).unwrap();
stream.record = record;
}
fn confirm_deletion(model: &RefCell<Model>, siv: &mut Cursive, to_delete: i64) {
let typed = siv.find_id::<views::EditView>("confirm")
.unwrap()
.get_content();
info!("confirm, typed: {} vs expected: {}", typed.as_str(), to_delete);
debug!("confirm, typed: {} vs expected: {}", typed.as_str(), to_delete);
if decode_size(typed.as_str()).ok() == Some(to_delete) {
actually_delete(model, siv);
} else {
@ -132,12 +141,16 @@ fn confirm_deletion(model: &RefCell<Model>, siv: &mut Cursive, to_delete: i64) {
fn actually_delete(model: &RefCell<Model>, siv: &mut Cursive) {
let model = &*model.borrow();
let new_limits: Vec<_> =
model.cameras.iter()
.map(|(&id, c)| dir::NewLimit{camera_id: id, limit: c.retain.unwrap()})
model.streams.iter()
.map(|(&id, s)| writer::NewLimit {stream_id: id, limit: s.retain.unwrap()})
.collect();
siv.pop_layer(); // deletion confirmation
siv.pop_layer(); // retention dialog
if let Err(e) = dir::lower_retention(model.dir.clone(), &new_limits[..]) {
{
let mut l = model.db.lock();
l.open_sample_file_dirs(&[model.dir_id]).unwrap(); // TODO: don't unwrap.
}
if let Err(e) = writer::lower_retention(model.db.clone(), model.dir_id, &new_limits[..]) {
siv.add_layer(views::Dialog::text(format!("Unable to delete excess video: {}", e))
.title("Error")
.dismiss_button("Abort"));
@ -150,11 +163,11 @@ fn press_change(model: &Rc<RefCell<Model>>, siv: &mut Cursive) {
if model.borrow().errors > 0 {
return;
}
let to_delete = model.borrow().cameras.values().map(
|c| ::std::cmp::max(c.used - c.retain.unwrap(), 0)).sum();
info!("change press, to_delete={}", to_delete);
let to_delete = model.borrow().streams.values().map(
|s| ::std::cmp::max(s.used - s.retain.unwrap(), 0)).sum();
debug!("change press, to_delete={}", to_delete);
if to_delete > 0 {
let prompt = format!("Some cameras' usage exceeds new limit. Please confirm the amount \
let prompt = format!("Some streams' usage exceeds new limit. Please confirm the amount \
of data to delete by typing it back:\n\n{}", encode_size(to_delete));
let dialog = views::Dialog::around(
views::LinearLayout::vertical()
@ -172,54 +185,163 @@ fn press_change(model: &Rc<RefCell<Model>>, siv: &mut Cursive) {
.title("Confirm deletion");
siv.add_layer(dialog);
} else {
siv.screen_mut().pop_layer();
siv.pop_layer();
update_limits(&model.borrow(), siv);
}
}
pub fn add_dialog(db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>, siv: &mut Cursive) {
pub fn top_dialog(db: &Arc<db::Database>, siv: &mut Cursive) {
siv.add_layer(views::Dialog::around(
views::SelectView::new()
.on_submit({
let db = db.clone();
move |siv, item| match *item {
Some(d) => edit_dir_dialog(&db, siv, d),
None => add_dir_dialog(&db, siv),
}
})
.item("<new sample file dir>".to_string(), None)
.with_all(db.lock()
.sample_file_dirs_by_id()
.iter()
.map(|(&id, d)| (d.path.to_string(), Some(id))))
.full_width())
.dismiss_button("Done")
.title("Edit sample file directories"));
}
fn add_dir_dialog(db: &Arc<db::Database>, siv: &mut Cursive) {
siv.add_layer(
views::Dialog::around(
views::LinearLayout::vertical()
.child(views::TextView::new("path"))
.child(views::EditView::new()
.on_submit({
let db = db.clone();
move |siv, path| add_dir(&db, siv, path)
})
.with_id("path")
.fixed_width(60)))
.button("Add", {
let db = db.clone();
move |siv| {
let path = siv.find_id::<views::EditView>("path").unwrap().get_content();
add_dir(&db, siv, &path)
}
})
.button("Cancel", |siv| siv.pop_layer())
.title("Add sample file directory"));
}
fn add_dir(db: &Arc<db::Database>, siv: &mut Cursive, path: &str) {
if let Err(e) = db.lock().add_sample_file_dir(path.to_owned()) {
siv.add_layer(views::Dialog::text(format!("Unable to add path {}: {}", path, e))
.dismiss_button("Back")
.title("Error"));
return;
}
siv.pop_layer();
// Recreate the edit dialog from scratch; it's easier than adding the new entry.
siv.pop_layer();
top_dialog(db, siv);
}
fn delete_dir_dialog(db: &Arc<db::Database>, siv: &mut Cursive, dir_id: i32) {
siv.add_layer(
views::Dialog::around(
views::TextView::new("Empty (no associated streams)."))
.button("Delete", {
let db = db.clone();
move |siv| {
delete_dir(&db, siv, dir_id)
}
})
.button("Cancel", |siv| siv.pop_layer())
.title("Delete sample file directory"));
}
fn delete_dir(db: &Arc<db::Database>, siv: &mut Cursive, dir_id: i32) {
if let Err(e) = db.lock().delete_sample_file_dir(dir_id) {
siv.add_layer(views::Dialog::text(format!("Unable to delete dir id {}: {}", dir_id, e))
.dismiss_button("Back")
.title("Error"));
return;
}
siv.pop_layer();
// Recreate the edit dialog from scratch; it's easier than adding the new entry.
siv.pop_layer();
top_dialog(db, siv);
}
fn edit_dir_dialog(db: &Arc<db::Database>, siv: &mut Cursive, dir_id: i32) {
let path;
let model = {
let mut cameras = BTreeMap::new();
let mut streams = BTreeMap::new();
let mut total_used = 0;
let mut total_retain = 0;
let fs_capacity;
{
let db = db.lock();
for (&id, camera) in db.cameras_by_id() {
cameras.insert(id, Camera{
label: format!("{}: {}", id, camera.short_name),
used: camera.sample_file_bytes,
retain: Some(camera.retain_bytes),
let mut l = db.lock();
for (&id, s) in l.streams_by_id() {
let c = l.cameras_by_id().get(&s.camera_id).expect("stream without camera");
if s.sample_file_dir_id != Some(dir_id) {
continue;
}
streams.insert(id, Stream {
label: format!("{}: {}: {}", id, c.short_name, s.type_.as_str()),
used: s.sample_file_bytes,
record: s.record,
retain: Some(s.retain_bytes),
});
total_used += camera.sample_file_bytes;
total_retain += camera.retain_bytes;
total_used += s.sample_file_bytes;
total_retain += s.retain_bytes;
}
if streams.is_empty() {
return delete_dir_dialog(db, siv, dir_id);
}
l.open_sample_file_dirs(&[dir_id]).unwrap(); // TODO: don't unwrap.
let dir = l.sample_file_dirs_by_id().get(&dir_id).unwrap();
let stat = dir.get().unwrap().statfs().unwrap();
fs_capacity = stat.f_bsize as i64 * stat.f_bavail as i64 + total_used;
path = dir.path.clone();
}
let stat = dir.statfs().unwrap();
let fs_capacity = stat.f_bsize as i64 * stat.f_bavail as i64 + total_used;
Rc::new(RefCell::new(Model{
dir: dir.clone(),
Rc::new(RefCell::new(Model {
dir_id,
db: db.clone(),
fs_capacity: fs_capacity,
total_used: total_used,
total_retain: total_retain,
fs_capacity,
total_used,
total_retain,
errors: (total_retain > fs_capacity) as isize,
cameras: cameras,
streams,
}))
};
const RECORD_WIDTH: usize = 8;
const BYTES_WIDTH: usize = 22;
let mut list = views::ListView::new();
list.add_child(
"camera",
"stream",
views::LinearLayout::horizontal()
.child(views::TextView::new("usage").fixed_width(25))
.child(views::TextView::new("limit").fixed_width(25)));
for (&id, camera) in &model.borrow().cameras {
.child(views::TextView::new("record").fixed_width(RECORD_WIDTH))
.child(views::TextView::new("usage").fixed_width(BYTES_WIDTH))
.child(views::TextView::new("limit").fixed_width(BYTES_WIDTH)));
for (&id, stream) in &model.borrow().streams {
let mut record_cb = views::Checkbox::new();
record_cb.set_checked(stream.record);
record_cb.set_on_change({
let model = model.clone();
move |_siv, record| edit_record(&model, id, record)
});
list.add_child(
&camera.label,
&stream.label,
views::LinearLayout::horizontal()
.child(views::TextView::new(encode_size(camera.used)).fixed_width(25))
.child(record_cb.fixed_width(RECORD_WIDTH))
.child(views::TextView::new(encode_size(stream.used)).fixed_width(BYTES_WIDTH))
.child(views::EditView::new()
.content(encode_size(camera.retain.unwrap()))
.content(encode_size(stream.retain.unwrap()))
.on_edit({
let model = model.clone();
move |siv, content, _pos| edit_limit(&model, siv, id, content)
@ -228,21 +350,24 @@ pub fn add_dialog(db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>, siv: &m
let model = model.clone();
move |siv, _| press_change(&model, siv)
})
.fixed_width(25))
.fixed_width(20))
.child(views::TextView::new("").with_id(format!("{}_ok", id)).fixed_width(1)));
}
let over = model.borrow().total_retain > model.borrow().fs_capacity;
list.add_child(
"total",
views::LinearLayout::horizontal()
.child(views::TextView::new(encode_size(model.borrow().total_used)).fixed_width(25))
.child(views::DummyView{}.fixed_width(RECORD_WIDTH))
.child(views::TextView::new(encode_size(model.borrow().total_used))
.fixed_width(BYTES_WIDTH))
.child(views::TextView::new(encode_size(model.borrow().total_retain))
.with_id("total_retain").fixed_width(25))
.with_id("total_retain").fixed_width(BYTES_WIDTH))
.child(views::TextView::new(if over { "*" } else { " " }).with_id("total_ok")));
list.add_child(
"filesystem",
views::LinearLayout::horizontal()
.child(views::TextView::new("").fixed_width(25))
.child(views::DummyView{}.fixed_width(3))
.child(views::DummyView{}.fixed_width(20))
.child(views::TextView::new(encode_size(model.borrow().fs_capacity)).fixed_width(25)));
let mut change_button = views::Button::new("Change", {
let model = model.clone();
@ -253,12 +378,12 @@ pub fn add_dialog(db: &Arc<db::Database>, dir: &Arc<dir::SampleFileDir>, siv: &m
.child(views::DummyView.full_width());
buttons.add_child(change_button.with_id("change"));
buttons.add_child(views::DummyView);
buttons.add_child(views::Button::new("Cancel", |siv| siv.screen_mut().pop_layer()));
buttons.add_child(views::Button::new("Cancel", |siv| siv.pop_layer()));
siv.add_layer(
views::Dialog::around(
views::LinearLayout::vertical()
.child(list)
.child(views::DummyView)
.child(buttons))
.title("Edit retention"));
.title(format!("Edit retention for {}", path)));
}

View File

@ -37,16 +37,16 @@ extern crate cursive;
use self::cursive::Cursive;
use self::cursive::views;
use clock;
use db;
use dir;
use error::Error;
use failure::Error;
use regex::Regex;
use std::sync::Arc;
use std::fmt::Write;
use std::str::FromStr;
mod cameras;
mod retention;
mod dirs;
static USAGE: &'static str = r#"
Interactive configuration editor.
@ -61,9 +61,6 @@ Options:
--db-dir=DIR Set the directory holding the SQLite3 index database.
This is typically on a flash device.
[default: /var/lib/moonfire-nvr/db]
--sample-file-dir=DIR Set the directory holding video data.
This is typically on a hard drive.
[default: /var/lib/moonfire-nvr/sample]
"#;
static MULTIPLIERS: [(char, u64); 4] = [
@ -123,28 +120,26 @@ fn decode_size(encoded: &str) -> Result<i64, ()> {
#[derive(Debug, Deserialize)]
struct Args {
flag_db_dir: String,
flag_sample_file_dir: String,
}
pub fn run() -> Result<(), Error> {
let args: Args = super::parse_args(USAGE)?;
let (_db_dir, conn) = super::open_conn(&args.flag_db_dir, super::OpenMode::ReadWrite)?;
let db = Arc::new(db::Database::new(conn)?);
//let dir = Arc::new(dir::Fd::open(&args.flag_sample_file_dir)?);
let dir = dir::SampleFileDir::new(&args.flag_sample_file_dir, db.clone())?;
let clocks = clock::RealClocks {};
let db = Arc::new(db::Database::new(clocks, conn, true)?);
let mut siv = Cursive::new();
//siv.add_global_callback('q', |s| s.quit());
siv.add_layer(views::Dialog::around(
views::SelectView::<fn(&Arc<db::Database>, &Arc<dir::SampleFileDir>, &mut Cursive)>::new()
views::SelectView::<fn(&Arc<db::Database>, &mut Cursive)>::new()
.on_submit({
let db = db.clone();
let dir = dir.clone();
move |siv, item| item(&db, &dir, siv)
move |siv, item| item(&db, siv)
})
.item("Edit cameras".to_string(), cameras::add_dialog)
.item("Edit retention".to_string(), retention::add_dialog))
.item("Directories and retention".to_string(), dirs::top_dialog)
.item("Cameras and streams".to_string(), cameras::top_dialog)
)
.button("Quit", |siv| siv.quit())
.title("Main menu"));

View File

@ -29,7 +29,7 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use db;
use error::Error;
use failure::Error;
static USAGE: &'static str = r#"
Initializes a database.
@ -66,9 +66,7 @@ pub fn run() -> Result<(), Error> {
pragma journal_mode = wal;
pragma page_size = 16384;
"#)?;
let tx = conn.transaction()?;
tx.execute_batch(include_str!("../schema.sql"))?;
tx.commit()?;
db::init(&mut conn)?;
info!("Database initialized.");
Ok(())
}

View File

@ -28,9 +28,9 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use dir;
use db::dir;
use docopt;
use error::Error;
use failure::{Error, Fail};
use libc;
use rusqlite;
use std::path::Path;
@ -75,13 +75,11 @@ enum OpenMode {
/// Locks and opens the database.
/// The returned `dir::Fd` holds the lock and should be kept open as long as the `Connection` is.
fn open_conn(db_dir: &str, mode: OpenMode) -> Result<(dir::Fd, rusqlite::Connection), Error> {
let dir = dir::Fd::open(db_dir)?;
let dir = dir::Fd::open(db_dir, mode == OpenMode::Create)?;
let ro = mode == OpenMode::ReadOnly;
dir.lock(if ro { libc::LOCK_SH } else { libc::LOCK_EX } | libc::LOCK_NB)
.map_err(|e| Error{description: format!("db dir {:?} already in use; can't get {} lock",
db_dir,
if ro { "shared" } else { "exclusive" }),
cause: Some(Box::new(e))})?;
.map_err(|e| e.context(format!("db dir {:?} already in use; can't get {} lock",
db_dir, if ro { "shared" } else { "exclusive" })))?;
let conn = rusqlite::Connection::open_with_flags(
Path::new(&db_dir).join("db"),
match mode {

View File

@ -29,9 +29,9 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use clock;
use db;
use dir;
use error::Error;
use db::{self, dir, writer};
use failure::Error;
use fnv::FnvHashMap;
use futures::{Future, Stream};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
@ -43,9 +43,12 @@ use tokio_signal::unix::{Signal, SIGINT, SIGTERM};
use web;
// These are used in a hack to get the name of the current time zone (e.g. America/Los_Angeles).
// They seem to be correct for Linux and OS X at least.
// They seem to be correct for Linux and macOS at least.
const LOCALTIME_PATH: &'static str = "/etc/localtime";
const ZONEINFO_PATH: &'static str = "/usr/share/zoneinfo/";
const ZONEINFO_PATHS: [&'static str; 2] = [
"/usr/share/zoneinfo/", // Linux, macOS < High Sierra
"/var/db/timezone/zoneinfo/" // macOS High Sierra
];
const USAGE: &'static str = r#"
Usage: moonfire-nvr run [options]
@ -55,9 +58,6 @@ Options:
--db-dir=DIR Set the directory holding the SQLite3 index database.
This is typically on a flash device.
[default: /var/lib/moonfire-nvr/db]
--sample-file-dir=DIR Set the directory holding video data.
This is typically on a hard drive.
[default: /var/lib/moonfire-nvr/sample]
--ui-dir=DIR Set the directory with the user interface files
(.html, .js, etc).
[default: /usr/local/lib/moonfire-nvr/ui]
@ -72,7 +72,6 @@ Options:
#[derive(Debug, Deserialize)]
struct Args {
flag_db_dir: String,
flag_sample_file_dir: String,
flag_http_addr: String,
flag_ui_dir: String,
flag_read_only: bool,
@ -90,50 +89,106 @@ fn setup_shutdown_future(h: &reactor::Handle) -> Box<Future<Item = (), Error = (
fn resolve_zone() -> String {
let p = ::std::fs::read_link(LOCALTIME_PATH).expect("unable to read localtime symlink");
let p = p.to_str().expect("localtime symlink destination must be valid UTF-8");
if !p.starts_with(ZONEINFO_PATH) {
panic!("Expected {} to point to a path within {}; actually points to {}",
LOCALTIME_PATH, ZONEINFO_PATH, p);
for zp in &ZONEINFO_PATHS {
if p.starts_with(zp) {
return p[zp.len()..].into();
}
}
p[ZONEINFO_PATH.len()..].into()
panic!("{} points to unexpected path {}", LOCALTIME_PATH, p)
}
struct Syncer {
dir: Arc<dir::SampleFileDir>,
channel: writer::SyncerChannel<::std::fs::File>,
join: thread::JoinHandle<()>,
}
pub fn run() -> Result<(), Error> {
let args: Args = super::parse_args(USAGE)?;
let clocks = clock::RealClocks {};
let (_db_dir, conn) = super::open_conn(
&args.flag_db_dir,
if args.flag_read_only { super::OpenMode::ReadOnly } else { super::OpenMode::ReadWrite })?;
let db = Arc::new(db::Database::new(conn).unwrap());
let dir = dir::SampleFileDir::new(&args.flag_sample_file_dir, db.clone()).unwrap();
let db = Arc::new(db::Database::new(clocks.clone(), conn, !args.flag_read_only).unwrap());
info!("Database is loaded.");
let s = web::Service::new(db.clone(), dir.clone(), Some(&args.flag_ui_dir),
args.flag_allow_origin, resolve_zone())?;
{
let mut l = db.lock();
let dirs_to_open: Vec<_> =
l.streams_by_id().values().filter_map(|s| s.sample_file_dir_id).collect();
l.open_sample_file_dirs(&dirs_to_open)?;
}
info!("Directories are opened.");
// Start a streamer for each camera.
let s = web::Service::new(db.clone(), Some(&args.flag_ui_dir), args.flag_allow_origin,
resolve_zone())?;
// Start a streamer for each stream.
let shutdown_streamers = Arc::new(AtomicBool::new(false));
let mut streamers = Vec::new();
let syncer = if !args.flag_read_only {
let (syncer_channel, syncer_join) = dir::start_syncer(dir.clone()).unwrap();
let syncers = if !args.flag_read_only {
let l = db.lock();
let cameras = l.cameras_by_id().len();
let env = streamer::Environment{
let mut dirs = FnvHashMap::with_capacity_and_hasher(
l.sample_file_dirs_by_id().len(), Default::default());
let streams = l.streams_by_id().len();
let env = streamer::Environment {
db: &db,
dir: &dir,
clocks: &clock::REAL,
opener: &*stream::FFMPEG,
shutdown: &shutdown_streamers,
};
for (i, (id, camera)) in l.cameras_by_id().iter().enumerate() {
let rotate_offset_sec = streamer::ROTATE_INTERVAL_SEC * i as i64 / cameras as i64;
let mut streamer = streamer::Streamer::new(&env, syncer_channel.clone(), *id, camera,
// Get the directories that need syncers.
for stream in l.streams_by_id().values() {
if let (Some(id), true) = (stream.sample_file_dir_id, stream.record) {
dirs.entry(id).or_insert_with(|| {
let d = l.sample_file_dirs_by_id().get(&id).unwrap();
info!("Starting syncer for path {}", d.path);
d.get().unwrap()
});
}
}
// Then, with the lock dropped, create syncers.
drop(l);
let mut syncers = FnvHashMap::with_capacity_and_hasher(dirs.len(), Default::default());
for (id, dir) in dirs.drain() {
let (channel, join) = writer::start_syncer(db.clone(), id)?;
syncers.insert(id, Syncer {
dir,
channel,
join,
});
}
// Then start up streams.
let l = db.lock();
for (i, (id, stream)) in l.streams_by_id().iter().enumerate() {
if !stream.record {
continue;
}
let camera = l.cameras_by_id().get(&stream.camera_id).unwrap();
let sample_file_dir_id = match stream.sample_file_dir_id {
Some(s) => s,
None => {
warn!("Can't record stream {} ({}/{}) because it has no sample file dir",
id, camera.short_name, stream.type_.as_str());
continue;
},
};
let rotate_offset_sec = streamer::ROTATE_INTERVAL_SEC * i as i64 / streams as i64;
let syncer = syncers.get(&sample_file_dir_id).unwrap();
let mut streamer = streamer::Streamer::new(&env, syncer.dir.clone(),
syncer.channel.clone(), *id, camera, stream,
rotate_offset_sec,
streamer::ROTATE_INTERVAL_SEC);
let name = format!("stream-{}", streamer.short_name());
info!("Starting streamer for {}", streamer.short_name());
let name = format!("s-{}", streamer.short_name());
streamers.push(thread::Builder::new().name(name).spawn(move|| {
streamer.run();
}).expect("can't create thread"));
}
Some((syncer_channel, syncer_join))
drop(l);
Some(syncers)
} else { None };
// Start the web interface.
@ -153,10 +208,14 @@ pub fn run() -> Result<(), Error> {
streamer.join().unwrap();
}
if let Some((syncer_channel, syncer_join)) = syncer {
info!("Shutting down syncer.");
drop(syncer_channel);
syncer_join.join().unwrap();
if let Some(mut ss) = syncers {
// The syncers shut down when all channels to them have been dropped.
// The database maintains one; and `ss` holds one. Drop both.
db.lock().clear_on_flush();
for (_, s) in ss.drain() {
drop(s.channel);
s.join.join().unwrap();
}
}
info!("Exiting.");

View File

@ -28,8 +28,8 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use error::Error;
use recording;
use db::recording;
use failure::Error;
const USAGE: &'static str = r#"
Usage: moonfire-nvr ts <ts>...

View File

@ -33,10 +33,7 @@
/// See `guide/schema.md` for more information.
use db;
use error::Error;
use rusqlite;
mod v0_to_v1;
use failure::Error;
const USAGE: &'static str = r#"
Upgrade to the latest database schema.
@ -48,9 +45,8 @@ Options:
--db-dir=DIR Set the directory holding the SQLite3 index database.
This is typically on a flash device.
[default: /var/lib/moonfire-nvr/db]
--sample-file-dir=DIR Set the directory holding video data.
--sample-file-dir=DIR When upgrading from schema version 1 to 2, the sample file directory.
This is typically on a hard drive.
[default: /var/lib/moonfire-nvr/sample]
--preset-journal=MODE Resets the SQLite journal_mode to the specified mode
prior to the upgrade. The default, delete, is
recommended. off is very dangerous but may be
@ -61,67 +57,21 @@ Options:
--no-vacuum Skips the normal post-upgrade vacuum operation.
"#;
const UPGRADE_NOTES: &'static str =
concat!("upgraded using moonfire-nvr ", env!("CARGO_PKG_VERSION"));
const UPGRADERS: [fn(&rusqlite::Transaction) -> Result<(), Error>; 1] = [
v0_to_v1::run,
];
#[derive(Debug, Deserialize)]
struct Args {
pub struct Args {
flag_db_dir: String,
flag_sample_file_dir: String,
flag_sample_file_dir: Option<String>,
flag_preset_journal: String,
flag_no_vacuum: bool,
}
fn set_journal_mode(conn: &rusqlite::Connection, requested: &str) -> Result<(), Error> {
assert!(!requested.contains(';')); // quick check for accidental sql injection.
let actual = conn.query_row(&format!("pragma journal_mode = {}", requested), &[],
|row| row.get_checked::<_, String>(0))??;
info!("...database now in journal_mode {} (requested {}).", actual, requested);
Ok(())
}
pub fn run() -> Result<(), Error> {
let args: Args = super::parse_args(USAGE)?;
let (_db_dir, mut conn) = super::open_conn(&args.flag_db_dir, super::OpenMode::ReadWrite)?;
{
assert_eq!(UPGRADERS.len(), db::EXPECTED_VERSION as usize);
let old_ver =
conn.query_row("select max(id) from version", &[], |row| row.get_checked(0))??;
if old_ver > db::EXPECTED_VERSION {
return Err(Error::new(format!("Database is at version {}, later than expected {}",
old_ver, db::EXPECTED_VERSION)))?;
} else if old_ver < 0 {
return Err(Error::new(format!("Database is at negative version {}!", old_ver)));
}
info!("Upgrading database from version {} to version {}...", old_ver, db::EXPECTED_VERSION);
set_journal_mode(&conn, &args.flag_preset_journal).unwrap();
for ver in old_ver .. db::EXPECTED_VERSION {
info!("...from version {} to version {}", ver, ver + 1);
let tx = conn.transaction()?;
UPGRADERS[ver as usize](&tx)?;
tx.execute(r#"
insert into version (id, unix_time, notes)
values (?, cast(strftime('%s', 'now') as int32), ?)
"#, &[&(ver + 1), &UPGRADE_NOTES])?;
tx.commit()?;
}
}
// WAL is the preferred journal mode for normal operation; it reduces the number of syncs
// without compromising safety.
set_journal_mode(&conn, "wal").unwrap();
if !args.flag_no_vacuum {
info!("...vacuuming database after upgrade.");
conn.execute_batch(r#"
pragma page_size = 16384;
vacuum;
"#).unwrap();
}
info!("...done.");
Ok(())
db::upgrade::run(&db::upgrade::Args {
flag_sample_file_dir: args.flag_sample_file_dir.as_ref().map(|s| s.as_str()),
flag_preset_journal: &args.flag_preset_journal,
flag_no_vacuum: args.flag_no_vacuum,
}, &mut conn)
}

1730
src/db.rs

File diff suppressed because it is too large Load Diff

View File

@ -1,762 +0,0 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2016 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//! Sample file directory management.
//!
//! This includes opening files for serving, rotating away old files, and saving new files.
use db;
use error::Error;
use libc;
use recording;
use openssl::hash;
use std::cmp;
use std::ffi;
use std::fs;
use std::io::{self, Write};
use std::mem;
use std::os::unix::io::FromRawFd;
use std::sync::{Arc, Mutex};
use std::sync::mpsc;
use std::thread;
use uuid::Uuid;
/// A sample file directory. This is currently a singleton in production. (Maybe in the future
/// Moonfire will be extended to support multiple directories on different spindles.)
///
/// If the directory is used for writing, the `start_syncer` function should be called to start
/// a background thread. This thread manages deleting files and writing new files. It synces the
/// directory and commits these operations to the database in the correct order to maintain the
/// invariants described in `design/schema.md`.
pub struct SampleFileDir {
db: Arc<db::Database>,
/// The open file descriptor for the directory. The worker uses it to create files and sync the
/// directory. Other threads use it to open sample files for reading during video serving.
fd: Fd,
// Lock order: don't acquire mutable.lock() while holding db.lock().
mutable: Mutex<SharedMutableState>,
}
/// A file descriptor associated with a directory (not necessarily the sample file dir).
pub struct Fd(libc::c_int);
impl Drop for Fd {
fn drop(&mut self) {
if unsafe { libc::close(self.0) } < 0 {
let e = io::Error::last_os_error();
warn!("Unable to close sample file dir: {}", e);
}
}
}
impl Fd {
/// Opens the given path as a directory.
pub fn open(path: &str) -> Result<Fd, io::Error> {
let cstring = ffi::CString::new(path)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
let fd = unsafe { libc::open(cstring.as_ptr(), libc::O_DIRECTORY | libc::O_RDONLY, 0) };
if fd < 0 {
return Err(io::Error::last_os_error().into());
}
Ok(Fd(fd))
}
/// Locks the directory with the specified `flock` operation.
pub fn lock(&self, operation: libc::c_int) -> Result<(), io::Error> {
let ret = unsafe { libc::flock(self.0, operation) };
if ret < 0 {
return Err(io::Error::last_os_error().into());
}
Ok(())
}
pub fn statfs(&self) -> Result<libc::statvfs, io::Error> {
unsafe {
let mut stat: libc::statvfs = mem::zeroed();
if libc::fstatvfs(self.0, &mut stat) < 0 {
return Err(io::Error::last_os_error())
}
Ok(stat)
}
}
}
impl SampleFileDir {
pub fn new(path: &str, db: Arc<db::Database>) -> Result<Arc<SampleFileDir>, Error> {
let fd = Fd::open(path)
.map_err(|e| Error::new(format!("unable to open sample file dir {}: {}", path, e)))?;
Ok(Arc::new(SampleFileDir{
db: db,
fd: fd,
mutable: Mutex::new(SharedMutableState{
next_uuid: None,
}),
}))
}
/// Opens the given sample file for reading.
pub fn open_sample_file(&self, uuid: Uuid) -> Result<fs::File, io::Error> {
self.open_int(uuid, libc::O_RDONLY, 0)
}
/// Creates a new writer.
/// Note this doesn't wait for previous rotation to complete; it's assumed the sample file
/// directory has sufficient space for a couple recordings per camera in addition to the
/// cameras' total `retain_bytes`.
///
/// The new recording will continue from `prev` if specified; this should be as returned from
/// a previous `close` call.
pub fn create_writer<'a>(&self, channel: &'a SyncerChannel, prev: Option<PreviousWriter>,
camera_id: i32, video_sample_entry_id: i32)
-> Result<Writer<'a>, Error> {
// Grab the next uuid. Typically one is cached—a sync has usually completed since the last
// writer was created, and syncs ensure `next_uuid` is filled while performing their
// transaction. But if not, perform an extra database transaction to reserve a new one.
let uuid = match self.mutable.lock().unwrap().next_uuid.take() {
Some(u) => u,
None => {
info!("Committing extra transaction because there's no cached uuid");
let mut db = self.db.lock();
let mut tx = db.tx()?;
let u = tx.reserve_sample_file()?;
tx.commit()?;
u
},
};
let f = match self.open_int(uuid, libc::O_WRONLY | libc::O_EXCL | libc::O_CREAT, 0o600) {
Ok(f) => f,
Err(e) => {
self.mutable.lock().unwrap().next_uuid = Some(uuid);
return Err(e.into());
},
};
Writer::open(f, uuid, prev, camera_id, video_sample_entry_id, channel)
}
pub fn statfs(&self) -> Result<libc::statvfs, io::Error> { self.fd.statfs() }
/// Opens a sample file within this directory with the given flags and (if creating) mode.
fn open_int(&self, uuid: Uuid, flags: libc::c_int, mode: libc::c_int)
-> Result<fs::File, io::Error> {
let p = SampleFileDir::get_rel_pathname(uuid);
let fd = unsafe { libc::openat(self.fd.0, p.as_ptr(), flags, mode) };
if fd < 0 {
return Err(io::Error::last_os_error())
}
unsafe { Ok(fs::File::from_raw_fd(fd)) }
}
/// Gets a pathname for a sample file suitable for passing to open or unlink.
fn get_rel_pathname(uuid: Uuid) -> [libc::c_char; 37] {
let mut buf = [0u8; 37];
write!(&mut buf[..36], "{}", uuid.hyphenated()).expect("can't format uuid to pathname buf");
// libc::c_char seems to be i8 on some platforms (Linux/arm) and u8 on others (Linux/amd64).
unsafe { mem::transmute::<[u8; 37], [libc::c_char; 37]>(buf) }
}
/// Unlinks the given sample file within this directory.
fn unlink(fd: &Fd, uuid: Uuid) -> Result<(), io::Error> {
let p = SampleFileDir::get_rel_pathname(uuid);
let res = unsafe { libc::unlinkat(fd.0, p.as_ptr(), 0) };
if res < 0 {
return Err(io::Error::last_os_error())
}
Ok(())
}
/// Syncs the directory itself.
fn sync(&self) -> Result<(), io::Error> {
let res = unsafe { libc::fsync(self.fd.0) };
if res < 0 {
return Err(io::Error::last_os_error())
};
Ok(())
}
}
/// State shared between users of the `SampleFileDirectory` struct and the syncer.
struct SharedMutableState {
next_uuid: Option<Uuid>,
}
/// A command sent to the syncer. These correspond to methods in the `SyncerChannel` struct.
enum SyncerCommand {
AsyncSaveRecording(db::RecordingToInsert, fs::File),
AsyncAbandonRecording(Uuid),
#[cfg(test)]
Flush(mpsc::SyncSender<()>),
}
/// A channel which can be used to send commands to the syncer.
/// Can be cloned to allow multiple threads to send commands.
#[derive(Clone)]
pub struct SyncerChannel(mpsc::Sender<SyncerCommand>);
/// State of the worker thread.
struct Syncer {
dir: Arc<SampleFileDir>,
to_unlink: Vec<Uuid>,
to_mark_deleted: Vec<Uuid>,
}
/// Starts a syncer for the given sample file directory.
/// There should be only one syncer per directory, or 0 if operating in read-only mode.
/// This function will perform the initial rotation synchronously, so that it is finished before
/// file writing starts. Afterward the syncing happens in a background thread.
///
/// Returns a `SyncerChannel` which can be used to send commands (and can be cloned freely) and
/// a `JoinHandle` for the syncer thread. At program shutdown, all `SyncerChannel` clones should be
/// removed and then the handle joined to allow all recordings to be persisted.
pub fn start_syncer(dir: Arc<SampleFileDir>)
-> Result<(SyncerChannel, thread::JoinHandle<()>), Error> {
let to_unlink = dir.db.lock().list_reserved_sample_files()?;
let (snd, rcv) = mpsc::channel();
let mut syncer = Syncer {
dir: dir,
to_unlink: to_unlink,
to_mark_deleted: Vec::new(),
};
syncer.initial_rotation()?;
Ok((SyncerChannel(snd),
thread::Builder::new().name("syncer".into()).spawn(move || syncer.run(rcv)).unwrap()))
}
pub struct NewLimit {
pub camera_id: i32,
pub limit: i64,
}
/// Deletes recordings if necessary to fit within the given new `retain_bytes` limit.
/// Note this doesn't change the limit in the database; it only deletes files.
/// Pass a limit of 0 to delete all recordings associated with a camera.
pub fn lower_retention(dir: Arc<SampleFileDir>, limits: &[NewLimit]) -> Result<(), Error> {
let to_unlink = dir.db.lock().list_reserved_sample_files()?;
let mut syncer = Syncer {
dir: dir,
to_unlink: to_unlink,
to_mark_deleted: Vec::new(),
};
syncer.do_rotation(|db| {
let mut to_delete = Vec::new();
for l in limits {
let before = to_delete.len();
let camera = db.cameras_by_id().get(&l.camera_id)
.ok_or_else(|| Error::new(format!("no such camera {}", l.camera_id)))?;
if l.limit >= camera.sample_file_bytes { continue }
get_rows_to_delete(db, l.camera_id, camera, camera.retain_bytes - l.limit,
&mut to_delete)?;
info!("camera {}, {}->{}, deleting {} rows", camera.short_name,
camera.sample_file_bytes, l.limit, to_delete.len() - before);
}
Ok(to_delete)
})
}
/// Gets rows to delete to bring a camera's disk usage within bounds.
fn get_rows_to_delete(db: &db::LockedDatabase, camera_id: i32,
camera: &db::Camera, extra_bytes_needed: i64,
to_delete: &mut Vec<db::ListOldestSampleFilesRow>) -> Result<(), Error> {
let bytes_needed = camera.sample_file_bytes + extra_bytes_needed - camera.retain_bytes;
let mut bytes_to_delete = 0;
if bytes_needed <= 0 {
debug!("{}: have remaining quota of {}", camera.short_name, -bytes_needed);
return Ok(());
}
let mut n = 0;
db.list_oldest_sample_files(camera_id, |row| {
bytes_to_delete += row.sample_file_bytes as i64;
to_delete.push(row);
n += 1;
bytes_needed > bytes_to_delete // continue as long as more deletions are needed.
})?;
if bytes_needed > bytes_to_delete {
return Err(Error::new(format!("{}: couldn't find enough files to delete: {} left.",
camera.short_name, bytes_needed)));
}
info!("{}: deleting {} bytes in {} recordings ({} bytes needed)",
camera.short_name, bytes_to_delete, n, bytes_needed);
Ok(())
}
impl SyncerChannel {
/// Asynchronously syncs the given writer, closes it, records it into the database, and
/// starts rotation.
fn async_save_recording(&self, recording: db::RecordingToInsert, f: fs::File) {
self.0.send(SyncerCommand::AsyncSaveRecording(recording, f)).unwrap();
}
fn async_abandon_recording(&self, uuid: Uuid) {
self.0.send(SyncerCommand::AsyncAbandonRecording(uuid)).unwrap();
}
/// For testing: flushes the syncer, waiting for all currently-queued commands to complete.
#[cfg(test)]
pub fn flush(&self) {
let (snd, rcv) = mpsc::sync_channel(0);
self.0.send(SyncerCommand::Flush(snd)).unwrap();
rcv.recv().unwrap_err(); // syncer should just drop the channel, closing it.
}
}
impl Syncer {
fn run(&mut self, cmds: mpsc::Receiver<SyncerCommand>) {
loop {
match cmds.recv() {
Err(_) => return, // all senders have closed the channel; shutdown
Ok(SyncerCommand::AsyncSaveRecording(recording, f)) => self.save(recording, f),
Ok(SyncerCommand::AsyncAbandonRecording(uuid)) => self.abandon(uuid),
#[cfg(test)]
Ok(SyncerCommand::Flush(_)) => {}, // just drop the supplied sender, closing it.
};
}
}
/// Rotates files for all cameras and deletes stale reserved uuids from previous runs.
fn initial_rotation(&mut self) -> Result<(), Error> {
self.do_rotation(|db| {
let mut to_delete = Vec::new();
for (camera_id, camera) in db.cameras_by_id() {
get_rows_to_delete(&db, *camera_id, camera, 0, &mut to_delete)?;
}
Ok(to_delete)
})
}
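/// Deletes recordings chosen by `get_rows_to_delete` using a three-step
/// protocol (sketched here from the body below):
///
/// 1. `delete_recordings` in one transaction: the recording rows go away,
///    but their sample file uuids remain reserved.
/// 2. unlink the files and `sync` the directory, making the unlinks durable.
/// 3. `mark_sample_files_deleted` in a second transaction: the reserved
///    uuids are released.
///
/// A crash between steps leaves only reserved uuids behind, which a later
/// `start_syncer` or `lower_retention` picks up via
/// `list_reserved_sample_files`.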
fn do_rotation<F>(&mut self, get_rows_to_delete: F) -> Result<(), Error>
where F: FnOnce(&db::LockedDatabase) -> Result<Vec<db::ListOldestSampleFilesRow>, Error> {
let to_delete = {
let mut db = self.dir.db.lock();
let to_delete = get_rows_to_delete(&*db)?;
let mut tx = db.tx()?;
tx.delete_recordings(&to_delete)?;
tx.commit()?;
to_delete
};
for row in to_delete {
self.to_unlink.push(row.uuid);
}
self.try_unlink();
if !self.to_unlink.is_empty() {
return Err(Error::new(format!("failed to unlink {} sample files",
self.to_unlink.len())));
}
self.dir.sync()?;
{
let mut db = self.dir.db.lock();
let mut tx = db.tx()?;
tx.mark_sample_files_deleted(&self.to_mark_deleted)?;
tx.commit()?;
}
self.to_mark_deleted.clear();
Ok(())
}
/// Saves the given recording and causes rotation to happen.
/// Note that part of rotation is deferred to the next cycle (the next save or program startup)
/// so that there can be only one dir sync and database transaction per save.
fn save(&mut self, recording: db::RecordingToInsert, f: fs::File) {
if let Err(e) = self.save_helper(&recording, f) {
error!("camera {}: will discard recording {} due to error while saving: {}",
recording.camera_id, recording.sample_file_uuid, e);
self.to_unlink.push(recording.sample_file_uuid);
return;
}
}
fn abandon(&mut self, uuid: Uuid) {
self.to_unlink.push(uuid);
self.try_unlink();
}
/// Internal helper for `save`. This is separated out so that the question-mark operator
/// can be used in the many error paths.
fn save_helper(&mut self, recording: &db::RecordingToInsert, f: fs::File)
-> Result<(), Error> {
self.try_unlink();
if !self.to_unlink.is_empty() {
return Err(Error::new(format!("failed to unlink {} files.", self.to_unlink.len())));
}
f.sync_all()?;
self.dir.sync()?;
let mut to_delete = Vec::new();
let mut l = self.dir.mutable.lock().unwrap();
let mut db = self.dir.db.lock();
let mut new_next_uuid = l.next_uuid;
{
let camera =
db.cameras_by_id().get(&recording.camera_id)
.ok_or_else(|| Error::new(format!("no such camera {}", recording.camera_id)))?;
get_rows_to_delete(&db, recording.camera_id, camera,
recording.sample_file_bytes as i64, &mut to_delete)?;
}
let mut tx = db.tx()?;
tx.mark_sample_files_deleted(&self.to_mark_deleted)?;
tx.delete_recordings(&to_delete)?;
if new_next_uuid.is_none() {
new_next_uuid = Some(tx.reserve_sample_file()?);
}
tx.insert_recording(recording)?;
tx.commit()?;
l.next_uuid = new_next_uuid;
self.to_mark_deleted.clear();
self.to_unlink.extend(to_delete.iter().map(|row| row.uuid));
Ok(())
}
/// Tries to unlink all the uuids in `self.to_unlink`. Any which can't be unlinked will
/// be retained in the vec.
fn try_unlink(&mut self) {
let to_mark_deleted = &mut self.to_mark_deleted;
let fd = &self.dir.fd;
self.to_unlink.retain(|uuid| {
if let Err(e) = SampleFileDir::unlink(fd, *uuid) {
if e.kind() == io::ErrorKind::NotFound {
warn!("dir: Sample file {} already deleted!", uuid.hyphenated());
to_mark_deleted.push(*uuid);
false
} else {
warn!("dir: Unable to unlink {}: {}", uuid.hyphenated(), e);
true
}
} else {
to_mark_deleted.push(*uuid);
false
}
});
}
}
/// Single-use struct to write a single recording to disk and commit its metadata to the database.
/// Use `SampleFileDir::create_writer` to create a new writer. `Writer` hands off its state to the
/// syncer when done. It either saves the recording to the database (if I/O errors do not prevent
/// this) or marks it as abandoned so that the syncer will attempt to unlink the file.
pub struct Writer<'a>(Option<InnerWriter<'a>>);
/// The state associated with a `Writer`. The indirection is for the `Drop` trait; `close` moves
/// `f` and `index.video_index` out of the `InnerWriter`, which is not allowed on a struct with
/// a `Drop` trait. To avoid this problem, the real state is surrounded by an `Option`. The
/// `Option` should be `None` only after `close` is called, and thus never in a way visible to
/// callers.
struct InnerWriter<'a> {
syncer_channel: &'a SyncerChannel,
f: fs::File,
index: recording::SampleIndexEncoder,
uuid: Uuid,
corrupt: bool,
hasher: hash::Hasher,
/// The end time of the previous segment in this run, if any.
prev_end: Option<recording::Time>,
/// The start time of this segment, based solely on examining the local clock after frames in
/// this segment were received. Frames can suffer from various kinds of delay (initial
/// buffering, encoding, and network transmission), so this time is set far in the future on
/// construction, given a real value on the first packet, and decreased as less-delayed packets
/// are discovered. See design/time.md for details.
local_start: recording::Time,
adjuster: ClockAdjuster,
camera_id: i32,
video_sample_entry_id: i32,
run_offset: i32,
/// A sample which has been written to disk but not added to `index`. Index writes are one
/// sample behind disk writes because the duration of a sample is the difference between its
/// pts and the next sample's pts. A sample is flushed when the next sample is written, when
/// the writer is closed cleanly (the caller supplies the next pts), or when the writer is
/// closed uncleanly (with a zero duration, which the `.mp4` format allows only at the end).
unflushed_sample: Option<UnflushedSample>,
}
/// Adjusts durations given by the camera to correct its clock frequency error.
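///
/// Worked example (values follow `new` below): a `local_time_delta` of -540
/// yields `every_minus_1` = (60*90,000)/540 - 1 = 9,999 and `ndir` = 1, so
/// one unit is subtracted from every 10,000 units of duration, removing ~540
/// units over a one-minute (5,400,000-unit) recording.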
#[derive(Copy, Clone, Debug)]
struct ClockAdjuster {
/// Every `every_minus_1 + 1` input units, subtract `ndir` from the output.
/// Note i32::max_value() disables adjustment.
every_minus_1: i32,
/// Should be 1 or -1 (unless disabled).
ndir: i32,
/// Keeps accumulated difference from previous values.
cur: i32,
}
impl ClockAdjuster {
fn new(local_time_delta: Option<i64>) -> Self {
// Pick an adjustment rate to correct local_time_delta over the next minute (the
// desired duration of a single recording). Cap the rate at 500 ppm (which corrects
// 2,700/90,000ths of a second over a minute) to prevent noticeably speeding up or slowing
// down playback.
let (every_minus_1, ndir) = match local_time_delta {
Some(d) if d <= -2700 => (1999, 1),
Some(d) if d >= 2700 => (1999, -1),
Some(d) if d < -60 => ((60 * 90000) / -(d as i32) - 1, 1),
Some(d) if d > 60 => ((60 * 90000) / (d as i32) - 1, -1),
_ => (i32::max_value(), 0),
};
ClockAdjuster{
every_minus_1,
ndir,
cur: 0,
}
}
fn adjust(&mut self, mut val: i32) -> i32 {
self.cur += val;
// The "val > self.ndir" here is so that if decreasing durations (ndir == 1), we don't
// cause a duration of 1 to become a duration of 0. It has no effect when increasing
// durations. (There's no danger of a duration of 0 becoming a duration of 1; cur wouldn't
// be newly > self.every_minus_1.)
while self.cur > self.every_minus_1 && val > self.ndir {
val -= self.ndir;
self.cur -= self.every_minus_1 + 1;
}
val
}
}
struct UnflushedSample {
local_time: recording::Time,
pts_90k: i64,
len: i32,
is_key: bool,
}
#[derive(Copy, Clone)]
pub struct PreviousWriter {
end_time: recording::Time,
local_time_delta: recording::Duration,
run_offset: i32,
}
impl<'a> Writer<'a> {
/// Opens the writer; for use by `SampleFileDir` (which should supply `f`).
fn open(f: fs::File, uuid: Uuid, prev: Option<PreviousWriter>, camera_id: i32,
video_sample_entry_id: i32, syncer_channel: &'a SyncerChannel) -> Result<Self, Error> {
Ok(Writer(Some(InnerWriter{
syncer_channel: syncer_channel,
f: f,
index: recording::SampleIndexEncoder::new(),
uuid: uuid,
corrupt: false,
hasher: hash::Hasher::new(hash::MessageDigest::sha1())?,
prev_end: prev.map(|p| p.end_time),
local_start: recording::Time(i64::max_value()),
adjuster: ClockAdjuster::new(prev.map(|p| p.local_time_delta.0)),
camera_id: camera_id,
video_sample_entry_id: video_sample_entry_id,
run_offset: prev.map(|p| p.run_offset + 1).unwrap_or(0),
unflushed_sample: None,
})))
}
/// Writes a new frame to this segment.
/// `local_time` should be the local clock's time as of when this packet was received.
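///
/// A hedged sketch of the one-sample-behind flushing described on
/// `unflushed_sample` (times are illustrative):
///
/// ```text
/// w.write(&pkt1, t1, 0, true)?;     // buffered; duration not yet known.
/// w.write(&pkt2, t2, 3000, false)?; // flushes pkt1 with duration 3000.
/// let prev = w.close(Some(6000))?;  // flushes pkt2 with duration 3000.
/// ```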
pub fn write(&mut self, pkt: &[u8], local_time: recording::Time, pts_90k: i64,
is_key: bool) -> Result<(), Error> {
let w = self.0.as_mut().unwrap();
if let Some(unflushed) = w.unflushed_sample.take() {
let duration = (pts_90k - unflushed.pts_90k) as i32;
if duration <= 0 {
return Err(Error::new(format!("pts not monotonically increasing; got {} then {}",
unflushed.pts_90k, pts_90k)));
}
let duration = w.adjuster.adjust(duration);
w.index.add_sample(duration, unflushed.len, unflushed.is_key);
w.extend_local_start(unflushed.local_time);
}
let mut remaining = pkt;
while !remaining.is_empty() {
let written = match w.f.write(remaining) {
Ok(b) => b,
Err(e) => {
if remaining.len() < pkt.len() {
// Partially written packet. Truncate if possible.
if let Err(e2) = w.f.set_len(w.index.sample_file_bytes as u64) {
error!("After write to {} failed with {}, truncate failed with {}; \
sample file is corrupt.", w.uuid.hyphenated(), e, e2);
w.corrupt = true;
}
}
return Err(Error::from(e));
},
};
remaining = &remaining[written..];
}
w.unflushed_sample = Some(UnflushedSample{
local_time: local_time,
pts_90k: pts_90k,
len: pkt.len() as i32,
is_key: is_key});
w.hasher.update(pkt)?;
Ok(())
}
/// Cleanly closes the writer, using a supplied pts of the next sample for the last sample's
/// duration (if known). If `close` is not called, the `Drop` trait impl will close the writer,
/// swallowing errors and using a zero duration for the last sample.
pub fn close(mut self, next_pts: Option<i64>) -> Result<PreviousWriter, Error> {
self.0.take().unwrap().close(next_pts)
}
}
impl<'a> InnerWriter<'a> {
fn extend_local_start(&mut self, pkt_local_time: recording::Time) {
let new = pkt_local_time - recording::Duration(self.index.total_duration_90k as i64);
self.local_start = cmp::min(self.local_start, new);
}
fn close(mut self, next_pts: Option<i64>) -> Result<PreviousWriter, Error> {
if self.corrupt {
self.syncer_channel.async_abandon_recording(self.uuid);
return Err(Error::new(format!("recording {} is corrupt", self.uuid)));
}
let unflushed =
self.unflushed_sample.take().ok_or_else(|| Error::new("no packets!".to_owned()))?;
let duration = self.adjuster.adjust(match next_pts {
None => 0,
Some(p) => (p - unflushed.pts_90k) as i32,
});
self.index.add_sample(duration, unflushed.len, unflushed.is_key);
self.extend_local_start(unflushed.local_time);
let mut sha1_bytes = [0u8; 20];
sha1_bytes.copy_from_slice(&self.hasher.finish()?[..]);
let start = self.prev_end.unwrap_or(self.local_start);
let end = start + recording::Duration(self.index.total_duration_90k as i64);
let flags = if self.index.has_trailing_zero() { db::RecordingFlags::TrailingZero as i32 }
else { 0 };
let local_start_delta = self.local_start - start;
let recording = db::RecordingToInsert{
camera_id: self.camera_id,
sample_file_bytes: self.index.sample_file_bytes,
time: start .. end,
local_time_delta: local_start_delta,
video_samples: self.index.video_samples,
video_sync_samples: self.index.video_sync_samples,
video_sample_entry_id: self.video_sample_entry_id,
sample_file_uuid: self.uuid,
video_index: self.index.video_index,
sample_file_sha1: sha1_bytes,
run_offset: self.run_offset,
flags: flags,
};
self.syncer_channel.async_save_recording(recording, self.f);
Ok(PreviousWriter{
end_time: end,
local_time_delta: local_start_delta,
run_offset: self.run_offset,
})
}
}
impl<'a> Drop for Writer<'a> {
fn drop(&mut self) {
if let Some(w) = self.0.take() {
// Swallow any error. The caller should drop the Writer without calling close() only
// when there's already been an error, which the caller should have reported; no point
// in complaining again.
let _ = w.close(None);
}
}
}
#[cfg(test)]
mod tests {
use super::ClockAdjuster;
use testutil;
#[test]
fn adjust() {
testutil::init();
// no-ops.
for v in &[None, Some(0), Some(-10), Some(10)] {
let mut a = ClockAdjuster::new(*v);
for _ in 0..1800 {
assert_eq!(3000, a.adjust(3000), "v={:?}", *v);
}
}
// typical, 100 ppm adjustment.
let mut a = ClockAdjuster::new(Some(-540));
let mut total = 0;
for _ in 0..1800 {
let new = a.adjust(3000);
assert!(new == 2999 || new == 3000);
total += new;
}
let expected = 1800*3000 - 540;
assert!(total == expected || total == expected + 1, "total={} vs expected={}",
total, expected);
a = ClockAdjuster::new(Some(540));
let mut total = 0;
for _ in 0..1800 {
let new = a.adjust(3000);
assert!(new == 3000 || new == 3001);
total += new;
}
let expected = 1800*3000 + 540;
assert!(total == expected || total == expected + 1, "total={} vs expected={}",
total, expected);
// capped at 500 ppm (change of 2,700/90,000ths over 1 minute).
a = ClockAdjuster::new(Some(-1_000_000));
total = 0;
for _ in 0..1800 {
let new = a.adjust(3000);
assert!(new == 2998 || new == 2999, "new={}", new);
total += new;
}
let expected = 1800*3000 - 2700;
assert!(total == expected || total == expected + 1, "total={} vs expected={}",
total, expected);
a = ClockAdjuster::new(Some(1_000_000));
total = 0;
for _ in 0..1800 {
let new = a.adjust(3000);
assert!(new == 3001 || new == 3002, "new={}", new);
total += new;
}
let expected = 1800*3000 + 2700;
assert!(total == expected || total == expected + 1, "total={} vs expected={}",
total, expected);
}
}


@ -1,154 +0,0 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2016 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
extern crate rusqlite;
extern crate time;
extern crate uuid;
use core::ops::Deref;
use core::num;
use openssl::error::ErrorStack;
use moonfire_ffmpeg;
use serde_json;
use std::boxed::Box;
use std::convert::From;
use std::error;
use std::error::Error as E;
use std::fmt;
use std::io;
use std::result;
use std::string::String;
#[derive(Debug)]
pub struct Error {
pub description: String,
pub cause: Option<Box<error::Error + Send + Sync>>,
}
impl Error {
pub fn new(description: String) -> Self {
Error{description: description, cause: None }
}
}
pub trait ResultExt<T> {
/// Returns a new `Result` like this one except that errors are of type `Error` and annotated
/// with the given prefix.
fn annotate_err(self, prefix: &'static str) -> Result<T>;
}
impl<T, E> ResultExt<T> for result::Result<T, E> where E: 'static + error::Error + Send + Sync {
fn annotate_err(self, prefix: &'static str) -> Result<T> {
self.map_err(|e| Error{
description: format!("{}: {}", prefix, e.description()),
cause: Some(Box::new(e)),
})
}
}
impl error::Error for Error {
fn description(&self) -> &str { &self.description }
fn cause(&self) -> Option<&error::Error> {
match self.cause {
Some(ref b) => Some(b.deref()),
None => None
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
write!(f, "Error: {}\ncause: {:?}", self.description, self.cause)
}
}
// TODO(slamb): isn't there a "<? implements error::Error>" or some such?
impl From<rusqlite::Error> for Error {
fn from(err: rusqlite::Error) -> Self {
Error{description: String::from(err.description()), cause: Some(Box::new(err))}
}
}
impl From<fmt::Error> for Error {
fn from(err: fmt::Error) -> Self {
Error{description: String::from(err.description()), cause: Some(Box::new(err))}
}
}
impl From<::hyper::Error> for Error {
fn from(err: ::hyper::Error) -> Self {
Error{description: String::from(err.description()), cause: Some(Box::new(err))}
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error{description: String::from(err.description()), cause: Some(Box::new(err))}
}
}
impl From<time::ParseError> for Error {
fn from(err: time::ParseError) -> Self {
Error{description: String::from(err.description()), cause: Some(Box::new(err))}
}
}
impl From<num::ParseIntError> for Error {
fn from(err: num::ParseIntError) -> Self {
Error{description: err.description().to_owned(), cause: Some(Box::new(err))}
}
}
impl From<serde_json::Error> for Error {
fn from(err: serde_json::Error) -> Self {
Error{description: format!("{} ({})", err.description(), err), cause: Some(Box::new(err))}
}
}
impl From<moonfire_ffmpeg::Error> for Error {
fn from(err: moonfire_ffmpeg::Error) -> Self {
Error{description: format!("ffmpeg: {}", err), cause: Some(Box::new(err))}
}
}
impl From<uuid::ParseError> for Error {
fn from(_: uuid::ParseError) -> Self {
Error{description: String::from("UUID parse error"), cause: None}
}
}
impl From<ErrorStack> for Error {
fn from(_: ErrorStack) -> Self {
Error{description: String::from("openssl error"), cause: None}
}
}
pub type Result<T> = result::Result<T, Error>;


@ -41,7 +41,7 @@
//! would be more trouble than it's worth.
use byteorder::{BigEndian, WriteBytesExt};
use error::{Error, Result};
use failure::Error;
use regex::bytes::Regex;
// See ISO/IEC 14496-10 table 7-1 - NAL unit type codes, syntax element categories, and NAL unit
@ -59,8 +59,8 @@ const NAL_UNIT_TYPE_MASK: u8 = 0x1F; // bottom 5 bits of first byte of unit.
///
/// TODO: detect invalid byte streams. For example, several 0x00s not followed by a 0x01, a
/// stream not starting with 0x00 0x00 0x00 0x01, or an empty NAL unit.
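///
/// For example (illustrative bytes), the input
/// `00 00 00 01 67 42 00 1e 00 00 01 68 ce 38 80` would invoke `f` twice:
/// once with `67 42 00 1e` (NAL type 7, an SPS) and once with `68 ce 38 80`
/// (NAL type 8, a PPS).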
fn decode_h264_annex_b<'a, F>(data: &'a [u8], mut f: F) -> Result<()>
where F: FnMut(&'a [u8]) -> Result<()> {
fn decode_h264_annex_b<'a, F>(data: &'a [u8], mut f: F) -> Result<(), Error>
where F: FnMut(&'a [u8]) -> Result<(), Error> {
lazy_static! {
static ref START_CODE: Regex = Regex::new(r"(\x00{2,}\x01)").unwrap();
}
@ -73,21 +73,21 @@ where F: FnMut(&'a [u8]) -> Result<()> {
}
/// Parses Annex B extra data, returning a tuple holding the `sps` and `pps` substrings.
fn parse_annex_b_extra_data(data: &[u8]) -> Result<(&[u8], &[u8])> {
fn parse_annex_b_extra_data(data: &[u8]) -> Result<(&[u8], &[u8]), Error> {
let mut sps = None;
let mut pps = None;
decode_h264_annex_b(data, |unit| {
let nal_type = (unit[0] as u8) & NAL_UNIT_TYPE_MASK;
match nal_type {
NAL_UNIT_SEQ_PARAMETER_SET => { sps = Some(unit); },
NAL_UNIT_PIC_PARAMETER_SET => { pps = Some(unit); },
_ => { return Err(Error::new(format!("Expected SPS and PPS; got type {}", nal_type))); }
NAL_UNIT_SEQ_PARAMETER_SET => sps = Some(unit),
NAL_UNIT_PIC_PARAMETER_SET => pps = Some(unit),
_ => bail!("Expected SPS and PPS; got type {}", nal_type),
};
Ok(())
})?;
match (sps, pps) {
(Some(s), Some(p)) => Ok((s, p)),
_ => Err(Error::new("SPS and PPS must be specified".to_owned())),
_ => bail!("SPS and PPS must be specified"),
}
}
@ -107,7 +107,7 @@ pub struct ExtraData {
impl ExtraData {
/// Parses "extradata" from ffmpeg. This data may be in either Annex B format or AVC format.
pub fn parse(extradata: &[u8], width: u16, height: u16) -> Result<ExtraData> {
pub fn parse(extradata: &[u8], width: u16, height: u16) -> Result<ExtraData, Error> {
let mut sps_and_pps = None;
let need_transform;
let avcc_len = if extradata.starts_with(b"\x00\x00\x00\x01") ||
@ -198,11 +198,9 @@ impl ExtraData {
sample_entry.extend_from_slice(pps);
if sample_entry.len() - avcc_len_pos != avcc_len {
return Err(Error::new(format!("internal error: anticipated AVCConfigurationBox \
length {}, but was actually {}; sps length \
{}, pps length {}",
avcc_len, sample_entry.len() - avcc_len_pos,
sps.len(), pps.len())));
bail!("internal error: anticipated AVCConfigurationBox \
length {}, but was actually {}; sps length {}, pps length {}",
avcc_len, sample_entry.len() - avcc_len_pos, sps.len(), pps.len());
}
sample_entry.len() - before
} else {
@ -211,15 +209,17 @@ impl ExtraData {
};
if sample_entry.len() - avc1_len_pos != avc1_len {
return Err(Error::new(format!("internal error: anticipated AVCSampleEntry length \
{}, but was actually {}; AVCDecoderConfiguration \
length {}", avc1_len, sample_entry.len() - avc1_len_pos,
avc_decoder_config_len)));
bail!("internal error: anticipated AVCSampleEntry length \
{}, but was actually {}; AVCDecoderConfiguration length {}",
avc1_len, sample_entry.len() - avc1_len_pos, avc_decoder_config_len);
}
let rfc6381_codec = rfc6381_codec_from_sample_entry(&sample_entry)?;
let profile_idc = sample_entry[103];
let constraint_flags = sample_entry[104];
let level_idc = sample_entry[105];
let codec = format!("avc1.{:02x}{:02x}{:02x}", profile_idc, constraint_flags, level_idc);
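// For example (illustrative values): profile_idc 0x4d, constraint flags 0x40,
// and level_idc 0x1f yield "avc1.4d401f" (H.264 Main Profile, level 3.1).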
Ok(ExtraData {
sample_entry,
rfc6381_codec,
rfc6381_codec: codec,
width,
height,
need_transform,
@ -227,21 +227,10 @@ impl ExtraData {
}
}
pub fn rfc6381_codec_from_sample_entry(sample_entry: &[u8]) -> Result<String> {
if sample_entry.len() < 99 || &sample_entry[4..8] != b"avc1" ||
&sample_entry[90..94] != b"avcC" {
return Err(Error::new("not a valid AVCSampleEntry".to_owned()));
}
let profile_idc = sample_entry[103];
let constraint_flags_byte = sample_entry[104];
let level_idc = sample_entry[105];
Ok(format!("avc1.{:02x}{:02x}{:02x}", profile_idc, constraint_flags_byte, level_idc))
}
/// Transforms sample data from Annex B format to AVC format. Should be called on samples iff
/// `ExtraData::need_transform` is true. Uses an out parameter `avc_sample` rather than a return
/// so that memory allocations can be reused from sample to sample.
pub fn transform_sample_data(annexb_sample: &[u8], avc_sample: &mut Vec<u8>) -> Result<()> {
pub fn transform_sample_data(annexb_sample: &[u8], avc_sample: &mut Vec<u8>) -> Result<(), Error> {
// See AVCParameterSamples, ISO/IEC 14496-15 section 5.3.2.
avc_sample.clear();
@ -259,7 +248,7 @@ pub fn transform_sample_data(annexb_sample: &[u8], avc_sample: &mut Vec<u8>) ->
#[cfg(test)]
mod tests {
use testutil;
use db::testutil;
const ANNEX_B_TEST_INPUT: [u8; 35] = [
0x00, 0x00, 0x00, 0x01, 0x67, 0x4d, 0x00, 0x1f,


@ -29,11 +29,13 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use db;
use failure::Error;
use serde::ser::{SerializeMap, SerializeSeq, Serializer};
use std::collections::BTreeMap;
use std::ops::Not;
use uuid::Uuid;
#[derive(Debug, Serialize)]
#[derive(Serialize)]
#[serde(rename_all="camelCase")]
pub struct TopLevel<'a> {
pub time_zone_name: &'a str,
@ -41,17 +43,25 @@ pub struct TopLevel<'a> {
// Use a custom serializer which presents the map's values as a sequence and includes the
// "days" attribute or not, according to the bool in the tuple.
#[serde(serialize_with = "TopLevel::serialize_cameras")]
pub cameras: (&'a BTreeMap<i32, db::Camera>, bool),
pub cameras: (&'a db::LockedDatabase, bool),
}
/// JSON serialization wrapper for a single camera when processing `/cameras/` and
/// `/cameras/<uuid>/`. See `design/api.md` for details.
/// JSON serialization wrapper for a single camera when processing `/api/` and
/// `/api/cameras/<uuid>/`. See `design/api.md` for details.
#[derive(Debug, Serialize)]
#[serde(rename_all="camelCase")]
pub struct Camera<'a> {
pub uuid: Uuid,
pub short_name: &'a str,
pub description: &'a str,
#[serde(serialize_with = "Camera::serialize_streams")]
pub streams: [Option<Stream<'a>>; 2],
}
#[derive(Debug, Serialize)]
#[serde(rename_all="camelCase")]
pub struct Stream<'a> {
pub retain_bytes: i64,
pub min_start_time_90k: Option<i64>,
pub max_end_time_90k: Option<i64>,
@ -59,26 +69,54 @@ pub struct Camera<'a> {
pub total_sample_file_bytes: i64,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(serialize_with = "Camera::serialize_days")]
pub days: Option<&'a BTreeMap<db::CameraDayKey, db::CameraDayValue>>,
#[serde(serialize_with = "Stream::serialize_days")]
pub days: Option<&'a BTreeMap<db::StreamDayKey, db::StreamDayValue>>,
}
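// A hypothetical serialized camera (field names follow the camelCase renames
// above; the values and the "main" stream key are illustrative only):
//
// {
//   "uuid": "fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe",
//   "shortName": "driveway",
//   "description": "",
//   "streams": {
//     "main": {
//       "retainBytes": 1073741824,
//       "minStartTime90k": 130888729442361,
//       "maxEndTime90k": 130891395596003,
//       "totalDuration90k": 2666092578,
//       "totalSampleFileBytes": 536870912
//     }
//   }
// }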
impl<'a> Camera<'a> {
pub fn new(c: &'a db::Camera, include_days: bool) -> Self {
Camera{
pub fn wrap(c: &'a db::Camera, db: &'a db::LockedDatabase, include_days: bool) -> Result<Self, Error> {
Ok(Camera {
uuid: c.uuid,
short_name: &c.short_name,
description: &c.description,
retain_bytes: c.retain_bytes,
min_start_time_90k: c.range.as_ref().map(|r| r.start.0),
max_end_time_90k: c.range.as_ref().map(|r| r.end.0),
total_duration_90k: c.duration.0,
total_sample_file_bytes: c.sample_file_bytes,
days: if include_days { Some(&c.days) } else { None },
}
streams: [
Stream::wrap(db, c.streams[0], include_days)?,
Stream::wrap(db, c.streams[1], include_days)?,
],
})
}
fn serialize_days<S>(days: &Option<&BTreeMap<db::CameraDayKey, db::CameraDayValue>>,
fn serialize_streams<S>(streams: &[Option<Stream<'a>>; 2], serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
let mut map = serializer.serialize_map(Some(streams.len()))?;
for (i, s) in streams.iter().enumerate() {
if let &Some(ref s) = s {
map.serialize_key(db::StreamType::from_index(i).expect("invalid stream type index").as_str())?;
map.serialize_value(s)?;
}
}
map.end()
}
}
impl<'a> Stream<'a> {
fn wrap(db: &'a db::LockedDatabase, id: Option<i32>, include_days: bool) -> Result<Option<Self>, Error> {
let id = match id {
Some(id) => id,
None => return Ok(None),
};
let s = db.streams_by_id().get(&id).ok_or_else(|| format_err!("missing stream {}", id))?;
Ok(Some(Stream {
retain_bytes: s.retain_bytes,
min_start_time_90k: s.range.as_ref().map(|r| r.start.0),
max_end_time_90k: s.range.as_ref().map(|r| r.end.0),
total_duration_90k: s.duration.0,
total_sample_file_bytes: s.sample_file_bytes,
days: if include_days { Some(&s.days) } else { None },
}))
}
fn serialize_days<S>(days: &Option<&BTreeMap<db::StreamDayKey, db::StreamDayValue>>,
serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
let days = match *days {
@ -89,7 +127,7 @@ impl<'a> Camera<'a> {
for (k, v) in days {
map.serialize_key(k.as_ref())?;
let bounds = k.bounds();
map.serialize_value(&CameraDayValue{
map.serialize_value(&StreamDayValue{
start_time_90k: bounds.start.0,
end_time_90k: bounds.end.0,
total_duration_90k: v.duration.0,
@ -101,7 +139,7 @@ impl<'a> Camera<'a> {
#[derive(Debug, Serialize)]
#[serde(rename_all="camelCase")]
struct CameraDayValue {
struct StreamDayValue {
pub start_time_90k: i64,
pub end_time_90k: i64,
pub total_duration_90k: i64,
@ -109,12 +147,14 @@ struct CameraDayValue {
impl<'a> TopLevel<'a> {
/// Serializes cameras as a list (rather than a map), optionally including the `days` field.
fn serialize_cameras<S>(cameras: &(&BTreeMap<i32, db::Camera>, bool),
fn serialize_cameras<S>(cameras: &(&db::LockedDatabase, bool),
serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer {
let mut seq = serializer.serialize_seq(Some(cameras.0.len()))?;
for c in cameras.0.values() {
seq.serialize_element(&Camera::new(c, cameras.1))?;
let (db, include_days) = *cameras;
let cs = db.cameras_by_id();
let mut seq = serializer.serialize_seq(Some(cs.len()))?;
for (_, c) in cs {
seq.serialize_element(&Camera::wrap(c, db, include_days).unwrap())?; // TODO: no unwrap.
}
seq.end()
}
@ -134,9 +174,16 @@ pub struct Recording {
pub video_samples: i64,
pub video_sample_entry_sha1: String,
pub start_id: i32,
pub open_id: u32,
#[serde(skip_serializing_if = "Option::is_none")]
pub first_uncommitted: Option<i32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub end_id: Option<i32>,
pub video_sample_entry_width: u16,
pub video_sample_entry_height: u16,
#[serde(skip_serializing_if = "Not::not")]
pub growing: bool,
}


@ -30,22 +30,26 @@
#![cfg_attr(all(feature="nightly", test), feature(test))]
extern crate bytes;
extern crate byteorder;
extern crate core;
extern crate docopt;
extern crate futures;
extern crate futures_cpupool;
#[macro_use] extern crate failure;
extern crate fnv;
extern crate http;
extern crate http_serve;
extern crate hyper;
#[macro_use] extern crate lazy_static;
extern crate libc;
#[macro_use] extern crate log;
extern crate lru_cache;
extern crate reffers;
extern crate rusqlite;
extern crate memmap;
extern crate mime;
extern crate moonfire_base as base;
extern crate moonfire_db as db;
extern crate moonfire_ffmpeg;
extern crate mylog;
extern crate openssl;
@ -61,21 +65,15 @@ extern crate tokio_signal;
extern crate url;
extern crate uuid;
mod clock;
mod coding;
use base::clock as clock;
mod cmds;
mod db;
mod dir;
mod error;
mod h264;
mod json;
mod mp4;
mod recording;
mod slices;
mod stream;
mod streamer;
mod strutil;
#[cfg(test)] mod testutil;
mod web;
/// Commandline usage string. This is in the particular format expected by the `docopt` crate.
@ -141,7 +139,7 @@ fn main() {
h.clone().install().unwrap();
if let Err(e) = { let _a = h.async(); args.arg_command.unwrap().run() } {
error!("{}", e);
error!("{:?}", e);
::std::process::exit(1);
}
info!("Success.");


@ -78,17 +78,19 @@
extern crate time;
use base::strutil;
use bytes::BytesMut;
use byteorder::{BigEndian, ByteOrder, WriteBytesExt};
use db;
use dir;
use error::Error;
use db::recording::{self, TIME_UNITS_PER_SEC};
use db::{self, dir};
use failure::Error;
use futures::stream;
use http;
use http_serve;
use hyper::header;
use memmap;
use openssl::hash;
use parking_lot::{Once, ONCE_INIT};
use recording::{self, TIME_UNITS_PER_SEC};
use reffers::ARefs;
use slices::{self, Body, Chunk, Slices};
use smallvec::SmallVec;
@ -99,7 +101,6 @@ use std::io;
use std::ops::Range;
use std::mem;
use std::sync::Arc;
use strutil;
/// This value should be incremented any time a change is made to this file that causes different
/// bytes to be output for a particular set of `Mp4Builder` options. Incrementing this value will
@ -381,16 +382,13 @@ impl Segment {
self.index_once.call_once(|| {
let index = unsafe { &mut *self.index.get() };
*index = db.lock()
.with_recording_playback(self.s.camera_id, self.s.recording_id,
|playback| self.build_index(playback))
.with_recording_playback(self.s.id, |playback| self.build_index(playback))
.map_err(|e| { error!("Unable to build index for segment: {:?}", e); });
});
let index: &'a _ = unsafe { &*self.index.get() };
match *index {
Ok(ref b) => return Ok(f(&b[..], self.lens())),
Err(()) => {
return Err(Error::new("Unable to build index; see previous error.".to_owned()))
},
Err(()) => bail!("Unable to build index; see previous error."),
}
}
@ -599,7 +597,7 @@ enum SliceType {
impl Slice {
fn new(end: u64, t: SliceType, p: usize) -> Result<Self, Error> {
if end >= (1<<40) || p >= (1<<20) {
return Err(Error::new(format!("end={} p={} too large for Slice", end, p)));
bail!("end={} p={} too large for Slice", end, p);
}
Ok(Slice(end | ((t as u64) << 40) | ((p as u64) << 44)))
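// Bit layout of the packed u64 (per the checks above): bits 0-39 hold `end`,
// bits 40-43 hold the `SliceType`, and bits 44-63 hold `p`.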
@ -629,9 +627,7 @@ impl Slice {
}
let truns =
mp4.0.db.lock()
.with_recording_playback(s.s.camera_id, s.s.recording_id,
|playback| s.truns(playback, pos, len))
.map_err(|e| { Error::new(format!("Unable to build index for segment: {:?}", e)) })?;
.with_recording_playback(s.s.id, |playback| s.truns(playback, pos, len))?;
let truns = ARefs::new(truns);
Ok(truns.map(|t| &t[r.start as usize .. r.end as usize]))
}
@ -760,23 +756,24 @@ impl FileBuilder {
rel_range_90k: Range<i32>) -> Result<(), Error> {
if let Some(prev) = self.segments.last() {
if prev.s.have_trailing_zero() {
return Err(Error::new(format!(
"unable to append recording {}/{} after recording {}/{} with trailing zero",
row.camera_id, row.id, prev.s.camera_id, prev.s.recording_id)));
bail!("unable to append recording {} after recording {} with trailing zero",
row.id, prev.s.id);
}
}
let s = Segment::new(db, &row, rel_range_90k, self.next_frame_num)?;
self.next_frame_num += s.s.frames as u32;
self.segments.push(s);
if !self.video_sample_entries.iter().any(|e| e.id == row.video_sample_entry.id) {
self.video_sample_entries.push(row.video_sample_entry);
if !self.video_sample_entries.iter().any(|e| e.id == row.video_sample_entry_id) {
let vse = db.video_sample_entries_by_id().get(&row.video_sample_entry_id).unwrap();
self.video_sample_entries.push(vse.clone());
}
Ok(())
}
/// Builds the `File`, consuming the builder.
pub fn build(mut self, db: Arc<db::Database>, dir: Arc<dir::SampleFileDir>)
pub fn build(mut self, db: Arc<db::Database>,
dirs_by_stream_id: Arc<::fnv::FnvHashMap<i32, Arc<dir::SampleFileDir>>>)
-> Result<File, Error> {
let mut max_end = None;
let mut etag = hash::Hasher::new(hash::MessageDigest::sha1())?;
@ -809,11 +806,11 @@ impl FileBuilder {
}
// Update the etag to reflect this segment.
let mut data = [0_u8; 24];
let mut data = [0_u8; 28];
let mut cursor = io::Cursor::new(&mut data[..]);
cursor.write_i32::<BigEndian>(s.s.camera_id)?;
cursor.write_i32::<BigEndian>(s.s.recording_id)?;
cursor.write_i64::<BigEndian>(s.s.id.0)?;
cursor.write_i64::<BigEndian>(s.s.start.0)?;
cursor.write_u32::<BigEndian>(s.s.open_id)?;
cursor.write_i32::<BigEndian>(d.start)?;
cursor.write_i32::<BigEndian>(d.end)?;
etag.update(cursor.into_inner())?;
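// 28 bytes per segment: composite id (8) + start time (8) + open id (4) +
// desired range start (4) + desired range end (4).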
@ -838,9 +835,8 @@ impl FileBuilder {
// If the segment is > 4 GiB, the 32-bit trun data offsets are untrustworthy.
// We'd need multiple moof+mdat sequences to support large media segments properly.
if self.body.slices.len() > u32::max_value() as u64 {
return Err(Error::new(format!(
"media segment has length {}, greater than allowed 4 GiB",
self.body.slices.len())));
bail!("media segment has length {}, greater than allowed 4 GiB",
self.body.slices.len());
}
p
@ -876,7 +872,7 @@ impl FileBuilder {
::std::time::Duration::from_secs(max_end as u64);
Ok(File(Arc::new(FileInner {
db,
dir,
dirs_by_stream_id,
segments: self.segments,
slices: self.body.slices,
buf: self.body.buf,
@ -1088,7 +1084,7 @@ impl FileBuilder {
let skip = s.s.desired_range_90k.start - actual_start_90k;
let keep = s.s.desired_range_90k.end - s.s.desired_range_90k.start;
if skip < 0 || keep < 0 {
return Err(Error::new(format!("skip={} keep={} on segment {:#?}", skip, keep, s)));
bail!("skip={} keep={} on segment {:#?}", skip, keep, s);
}
cur_media_time += skip as u64;
if unflushed.segment_duration + unflushed.media_time == cur_media_time {
@ -1418,7 +1414,7 @@ impl BodyState {
struct FileInner {
db: Arc<db::Database>,
dir: Arc<dir::SampleFileDir>,
dirs_by_stream_id: Arc<::fnv::FnvHashMap<i32, Arc<dir::SampleFileDir>>>,
segments: Vec<Segment>,
slices: Slices<Slice>,
buf: Vec<u8>,
@ -1451,11 +1447,10 @@ impl FileInner {
/// happen because nothing should be touching Moonfire NVR's files but itself.
fn get_video_sample_data(&self, i: usize, r: Range<u64>) -> Result<Chunk, Error> {
let s = &self.segments[i];
let uuid = {
self.db.lock().with_recording_playback(s.s.camera_id, s.s.recording_id,
|p| Ok(p.sample_file_uuid))?
};
let f = self.dir.open_sample_file(uuid)?;
let f = self.dirs_by_stream_id
.get(&s.s.id.stream())
.ok_or_else(|| format_err!("{}: stream not found", s.s.id))?
.open_file(s.s.id)?;
let start = s.s.sample_file_range().start + r.start;
let mmap = Box::new(unsafe {
memmap::MmapOptions::new()
@ -1491,20 +1486,21 @@ impl http_serve::Entity for File {
type Chunk = slices::Chunk;
type Body = slices::Body;
fn add_headers(&self, hdrs: &mut header::Headers) {
let mut mime = String::with_capacity(64);
mime.push_str("video/mp4; codecs=\"");
fn add_headers(&self, hdrs: &mut http::header::HeaderMap) {
let mut mime = BytesMut::with_capacity(64);
mime.extend_from_slice(b"video/mp4; codecs=\"");
let mut first = true;
for e in &self.0.video_sample_entries {
if first {
first = false
} else {
mime.push_str(", ");
mime.extend_from_slice(b", ");
}
mime.push_str(&e.rfc6381_codec);
mime.extend_from_slice(e.rfc6381_codec.as_bytes());
}
mime.push('"');
hdrs.set(header::ContentType(mime.parse().unwrap()));
mime.extend_from_slice(b"\"");
hdrs.insert(http::header::CONTENT_TYPE,
http::header::HeaderValue::from_shared(mime.freeze()).unwrap());
}
fn last_modified(&self) -> Option<header::HttpDate> { Some(self.0.last_modified) }
fn etag(&self) -> Option<header::EntityTag> { Some(self.0.etag.clone()) }
@ -1524,24 +1520,23 @@ impl http_serve::Entity for File {
/// to verify the output is byte-for-byte as expected.
#[cfg(test)]
mod tests {
use base::strutil;
use byteorder::{BigEndian, ByteOrder};
use db;
use dir;
use clock::RealClocks;
use db::recording::{self, TIME_UNITS_PER_SEC};
use db::testutil::{self, TestDb, TEST_STREAM_ID};
use db::writer;
use futures::Future;
use futures::Stream as FuturesStream;
use hyper::header;
use openssl::hash;
use recording::{self, TIME_UNITS_PER_SEC};
use http_serve::{self, Entity};
use std::fs;
use std::ops::Range;
use std::path::Path;
use std::sync::Arc;
use std::str;
use strutil;
use super::*;
use stream::{self, Opener, Stream};
use testutil::{self, TestDb, TEST_CAMERA_ID};
fn fill_slice<E: http_serve::Entity>(slice: &mut [u8], e: &E, start: u64) {
let mut p = 0;
@ -1754,7 +1749,7 @@ mod tests {
}
}
fn copy_mp4_to_db(db: &TestDb) {
fn copy_mp4_to_db(db: &TestDb<RealClocks>) {
let mut input =
stream::FFMPEG.open(stream::Source::File("src/testdata/clip.mp4")).unwrap();
@ -1764,8 +1759,9 @@ mod tests {
let video_sample_entry_id = db.db.lock().insert_video_sample_entry(
extra_data.width, extra_data.height, extra_data.sample_entry,
extra_data.rfc6381_codec).unwrap();
let mut output = db.dir.create_writer(&db.syncer_channel, None,
TEST_CAMERA_ID, video_sample_entry_id).unwrap();
let dir = db.dirs_by_stream_id.get(&TEST_STREAM_ID).unwrap();
let mut output = writer::Writer::new(dir, &db.db, &db.syncer_channel, TEST_STREAM_ID,
video_sample_entry_id);
// end_pts is the pts of the end of the most recent frame (start + duration).
// It's needed because dir::Writer calculates a packet's duration from its pts and the
@ -1788,25 +1784,25 @@ mod tests {
pkt.is_key()).unwrap();
end_pts = Some(pts + pkt.duration() as i64);
}
output.close(end_pts).unwrap();
output.close(end_pts);
db.syncer_channel.flush();
}
pub fn create_mp4_from_db(db: Arc<db::Database>, dir: Arc<dir::SampleFileDir>,
pub fn create_mp4_from_db(tdb: &TestDb<RealClocks>,
skip_90k: i32, shorten_90k: i32, include_subtitles: bool) -> File {
let mut builder = FileBuilder::new(Type::Normal);
builder.include_timestamp_subtitle_track(include_subtitles);
let all_time = recording::Time(i64::min_value()) .. recording::Time(i64::max_value());
{
let db = db.lock();
db.list_recordings_by_time(TEST_CAMERA_ID, all_time, |r| {
let db = tdb.db.lock();
db.list_recordings_by_time(TEST_STREAM_ID, all_time, &mut |r| {
let d = r.duration_90k;
assert!(skip_90k + shorten_90k < d);
builder.append(&*db, r, skip_90k .. d - shorten_90k).unwrap();
Ok(())
}).unwrap();
}
builder.build(db, dir).unwrap()
builder.build(tdb.db.clone(), tdb.dirs_by_stream_id.clone()).unwrap()
}
fn write_mp4(mp4: &File, dir: &Path) -> String {
@ -1865,13 +1861,13 @@ mod tests {
/// Makes a `.mp4` file which is only good for exercising the `Slice` logic for producing
/// sample tables that match the supplied encoder.
fn make_mp4_from_encoders(type_: Type, db: &TestDb,
mut encoders: Vec<recording::SampleIndexEncoder>,
fn make_mp4_from_encoders(type_: Type, db: &TestDb<RealClocks>,
mut recordings: Vec<db::RecordingToInsert>,
desired_range_90k: Range<i32>) -> File {
let mut builder = FileBuilder::new(type_);
let mut duration_so_far = 0;
for e in encoders.drain(..) {
let row = db.create_recording_from_encoder(e);
for r in recordings.drain(..) {
let row = db.insert_recording_from_encoder(r);
let d_start = if desired_range_90k.start < duration_so_far { 0 }
else { desired_range_90k.start - duration_so_far };
let d_end = if desired_range_90k.end > duration_so_far + row.duration_90k
@ -1879,23 +1875,24 @@ mod tests {
duration_so_far += row.duration_90k;
builder.append(&db.db.lock(), row, d_start .. d_end).unwrap();
}
builder.build(db.db.clone(), db.dir.clone()).unwrap()
builder.build(db.db.clone(), db.dirs_by_stream_id.clone()).unwrap()
}
/// Tests sample table for a simple video index of all sync frames.
#[test]
fn test_all_sync_frames() {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
let mut r = db::RecordingToInsert::default();
let mut encoder = recording::SampleIndexEncoder::new();
for i in 1..6 {
let duration_90k = 2 * i;
let bytes = 3 * i;
encoder.add_sample(duration_90k, bytes, true);
encoder.add_sample(duration_90k, bytes, true, &mut r);
}
// Time range [2, 2+4+6+8) means the 2nd, 3rd, and 4th samples should be included.
let mp4 = make_mp4_from_encoders(Type::Normal, &db, vec![encoder], 2 .. 2+4+6+8);
let mp4 = make_mp4_from_encoders(Type::Normal, &db, vec![r], 2 .. 2+4+6+8);
let track = find_track(mp4, 1);
assert!(track.edts_cursor.is_none());
let mut cursor = track.stbl_cursor;
@ -1939,17 +1936,18 @@ mod tests {
#[test]
fn test_half_sync_frames() {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
let mut r = db::RecordingToInsert::default();
let mut encoder = recording::SampleIndexEncoder::new();
for i in 1..6 {
let duration_90k = 2 * i;
let bytes = 3 * i;
encoder.add_sample(duration_90k, bytes, (i % 2) == 1);
encoder.add_sample(duration_90k, bytes, (i % 2) == 1, &mut r);
}
// Time range [2+4+6, 2+4+6+8) means the 4th sample should be included.
// The 3rd gets pulled in also because it's a sync frame and the 4th isn't.
let mp4 = make_mp4_from_encoders(Type::Normal, &db, vec![encoder], 2+4+6 .. 2+4+6+8);
let mp4 = make_mp4_from_encoders(Type::Normal, &db, vec![r], 2+4+6 .. 2+4+6+8);
let track = find_track(mp4, 1);
// Examine edts. It should skip the 3rd frame.
@ -2001,17 +1999,19 @@ mod tests {
#[test]
fn test_multi_segment() {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
let mut encoders = Vec::new();
let mut r = db::RecordingToInsert::default();
let mut encoder = recording::SampleIndexEncoder::new();
encoder.add_sample(1, 1, true);
encoder.add_sample(2, 2, false);
encoder.add_sample(3, 3, true);
encoders.push(encoder);
encoder.add_sample(1, 1, true, &mut r);
encoder.add_sample(2, 2, false, &mut r);
encoder.add_sample(3, 3, true, &mut r);
encoders.push(r);
let mut r = db::RecordingToInsert::default();
let mut encoder = recording::SampleIndexEncoder::new();
encoder.add_sample(4, 4, true);
encoder.add_sample(5, 5, false);
encoders.push(encoder);
encoder.add_sample(4, 4, true, &mut r);
encoder.add_sample(5, 5, false, &mut r);
encoders.push(r);
// This should include samples 3 and 4 only, both sync frames.
let mp4 = make_mp4_from_encoders(Type::Normal, &db, encoders, 1+2 .. 1+2+3+4);
@ -2036,15 +2036,17 @@ mod tests {
#[test]
fn test_zero_duration_recording() {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
let mut encoders = Vec::new();
let mut r = db::RecordingToInsert::default();
let mut encoder = recording::SampleIndexEncoder::new();
encoder.add_sample(2, 1, true);
encoder.add_sample(3, 2, false);
encoders.push(encoder);
encoder.add_sample(2, 1, true, &mut r);
encoder.add_sample(3, 2, false, &mut r);
encoders.push(r);
let mut r = db::RecordingToInsert::default();
let mut encoder = recording::SampleIndexEncoder::new();
encoder.add_sample(0, 3, true);
encoders.push(encoder);
encoder.add_sample(0, 3, true, &mut r);
encoders.push(r);
// Multi-segment recording with an edit list, encoding with a zero-duration recording.
let mp4 = make_mp4_from_encoders(Type::Normal, &db, encoders, 1 .. 2+3);
@ -2060,17 +2062,18 @@ mod tests {
#[test]
fn test_media_segment() {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
let mut r = db::RecordingToInsert::default();
let mut encoder = recording::SampleIndexEncoder::new();
for i in 1..6 {
let duration_90k = 2 * i;
let bytes = 3 * i;
encoder.add_sample(duration_90k, bytes, (i % 2) == 1);
encoder.add_sample(duration_90k, bytes, (i % 2) == 1, &mut r);
}
// Time range [2+4+6, 2+4+6+8+1) means the 4th sample and part of the 5th are included.
// The 3rd gets pulled in also because it's a sync frame and the 4th isn't.
let mp4 = make_mp4_from_encoders(Type::MediaSegment, &db, vec![encoder],
let mp4 = make_mp4_from_encoders(Type::MediaSegment, &db, vec![r],
2+4+6 .. 2+4+6+8+1);
let mut cursor = BoxCursor::new(mp4);
cursor.down();
@ -2102,9 +2105,9 @@ mod tests {
#[test]
fn test_round_trip() {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
copy_mp4_to_db(&db);
let mp4 = create_mp4_from_db(db.db.clone(), db.dir.clone(), 0, 0, false);
let mp4 = create_mp4_from_db(&db, 0, 0, false);
let new_filename = write_mp4(&mp4, db.tmpdir.path());
compare_mp4s(&new_filename, 0, 0);
@ -2113,18 +2116,19 @@ mod tests {
// combine ranges from the new format with ranges from the old format.
let sha1 = digest(&mp4);
assert_eq!("1e5331e8371bd97ac3158b3a86494abc87cdc70e", strutil::hex(&sha1[..]));
const EXPECTED_ETAG: &'static str = "c56ef7eb3b4a713ceafebc3dc7958bd9e62a2fae";
const EXPECTED_ETAG: &'static str = "04298efb2df0cc45a6cea65dfdf2e817a3b42ca8";
assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
drop(db.syncer_channel);
db.db.lock().clear_on_flush();
db.syncer_join.join().unwrap();
}
#[test]
fn test_round_trip_with_subtitles() {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
copy_mp4_to_db(&db);
let mp4 = create_mp4_from_db(db.db.clone(), db.dir.clone(), 0, 0, true);
let mp4 = create_mp4_from_db(&db, 0, 0, true);
let new_filename = write_mp4(&mp4, db.tmpdir.path());
compare_mp4s(&new_filename, 0, 0);
@ -2133,18 +2137,19 @@ mod tests {
// combine ranges from the new format with ranges from the old format.
let sha1 = digest(&mp4);
assert_eq!("de382684a471f178e4e3a163762711b0653bfd83", strutil::hex(&sha1[..]));
const EXPECTED_ETAG: &'static str = "3bdc2c8ce521df50155d0ca4d7497ada448fa7c3";
const EXPECTED_ETAG: &'static str = "16a4f6348560c3de0d149675dccba21ef7906be3";
assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
drop(db.syncer_channel);
db.db.lock().clear_on_flush();
db.syncer_join.join().unwrap();
}
#[test]
fn test_round_trip_with_edit_list() {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
copy_mp4_to_db(&db);
let mp4 = create_mp4_from_db(db.db.clone(), db.dir.clone(), 1, 0, false);
let mp4 = create_mp4_from_db(&db, 1, 0, false);
let new_filename = write_mp4(&mp4, db.tmpdir.path());
compare_mp4s(&new_filename, 1, 0);
@ -2153,18 +2158,19 @@ mod tests {
// combine ranges from the new format with ranges from the old format.
let sha1 = digest(&mp4);
assert_eq!("d655945f94e18e6ed88a2322d27522aff6f76403", strutil::hex(&sha1[..]));
const EXPECTED_ETAG: &'static str = "3986d3bd9b866c3455fb7359fb134aa2d9107af7";
const EXPECTED_ETAG: &'static str = "80e418b029e81aa195f90aa6b806015a5030e5be";
assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
drop(db.syncer_channel);
db.db.lock().clear_on_flush();
db.syncer_join.join().unwrap();
}
#[test]
fn test_round_trip_with_shorten() {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
copy_mp4_to_db(&db);
let mp4 = create_mp4_from_db(db.db.clone(), db.dir.clone(), 0, 1, false);
let mp4 = create_mp4_from_db(&db, 0, 1, false);
let new_filename = write_mp4(&mp4, db.tmpdir.path());
compare_mp4s(&new_filename, 0, 1);
@ -2173,9 +2179,10 @@ mod tests {
// combine ranges from the new format with ranges from the old format.
let sha1 = digest(&mp4);
assert_eq!("e0d28ddf08e24575a82657b1ce0b2da73f32fd88", strutil::hex(&sha1[..]));
const EXPECTED_ETAG: &'static str = "9e789398c9a71ca834fec8fbc55b389f99d12dda";
const EXPECTED_ETAG: &'static str = "5bfea0f20108a7c5b77ef1e21d82ef2abc29540f";
assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
drop(db.syncer_channel);
db.db.lock().clear_on_flush();
db.syncer_join.join().unwrap();
}
}
@ -2185,15 +2192,16 @@ mod bench {
extern crate reqwest;
extern crate test;
use base::clock::RealClocks;
use db::recording;
use db::testutil::{self, TestDb};
use futures::Stream;
use futures::future;
use hyper;
use http_serve;
use recording;
use reffers::ARefs;
use self::test::Bencher;
use super::tests::create_mp4_from_db;
use testutil::{self, TestDb};
use url::Url;
/// An HTTP server for benchmarking.
@ -2210,9 +2218,9 @@ mod bench {
impl BenchServer {
fn new() -> BenchServer {
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
testutil::add_dummy_recordings_to_db(&db.db, 60);
let mp4 = create_mp4_from_db(db.db.clone(), db.dir.clone(), 0, 0, false);
let mp4 = create_mp4_from_db(&db, 0, 0, false);
let p = mp4.0.initial_sample_byte_pos;
let (tx, rx) = ::std::sync::mpsc::channel();
::std::thread::spawn(move || {
@ -2252,14 +2260,14 @@ mod bench {
#[bench]
fn build_index(b: &mut Bencher) {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
testutil::add_dummy_recordings_to_db(&db.db, 1);
let db = db.db.lock();
let segment = {
let all_time = recording::Time(i64::min_value()) .. recording::Time(i64::max_value());
let mut row = None;
db.list_recordings_by_time(testutil::TEST_CAMERA_ID, all_time, |r| {
db.list_recordings_by_time(testutil::TEST_STREAM_ID, all_time, &mut |r| {
row = Some(r);
Ok(())
}).unwrap();
@ -2267,7 +2275,7 @@ mod bench {
let rel_range_90k = 0 .. row.duration_90k;
super::Segment::new(&db, &row, rel_range_90k, 1).unwrap()
};
db.with_recording_playback(segment.s.camera_id, segment.s.recording_id, |playback| {
db.with_recording_playback(segment.s.id, |playback| {
let v = segment.build_index(playback).unwrap(); // warm.
b.bytes = v.len() as u64; // define the benchmark performance in terms of output bytes.
b.iter(|| segment.build_index(playback).unwrap());
@ -2303,10 +2311,10 @@ mod bench {
#[bench]
fn mp4_construction(b: &mut Bencher) {
testutil::init();
let db = TestDb::new();
let db = TestDb::new(RealClocks {});
testutil::add_dummy_recordings_to_db(&db.db, 60);
b.iter(|| {
create_mp4_from_db(db.db.clone(), db.dir.clone(), 0, 0, false);
create_mp4_from_db(&db, 0, 0, false);
});
}
}


@ -1,205 +0,0 @@
-- This file is part of Moonfire NVR, a security camera digital video recorder.
-- Copyright (C) 2016 Scott Lamb <slamb@slamb.org>
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- In addition, as a special exception, the copyright holders give
-- permission to link the code of portions of this program with the
-- OpenSSL library under certain conditions as described in each
-- individual source file, and distribute linked combinations including
-- the two.
--
-- You must obey the GNU General Public License in all respects for all
-- of the code used other than OpenSSL. If you modify file(s) with this
-- exception, you may extend this exception to your version of the
-- file(s), but you are not obligated to do so. If you do not wish to do
-- so, delete this exception statement from your version. If you delete
-- this exception statement from all source files in the program, then
-- also delete it here.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
--
-- schema.sql: SQLite3 database schema for Moonfire NVR.
-- See also design/schema.md.
-- This table tracks the schema version.
-- There is one row for the initial database creation (inserted below, after the
-- create statements) and one for each upgrade procedure (if any).
create table version (
id integer primary key,
-- The unix time as of the creation/upgrade, as determined by
-- cast(strftime('%s', 'now') as int).
unix_time integer not null,
-- Optional notes on the creation/upgrade; could include the binary version.
notes text
);
create table camera (
id integer primary key,
uuid blob unique not null check (length(uuid) = 16),
-- A short name of the camera, used in log messages.
short_name text not null,
-- A short description of the camera.
description text,
-- The host (or IP address) to use in rtsp:// URLs when accessing the camera.
host text,
-- The username to use when accessing the camera.
-- If empty, no username or password will be supplied.
username text,
-- The password to use when accessing the camera.
password text,
-- The path (starting with "/") to use in rtsp:// URLs to reference this
-- camera's "main" (full-quality) video stream.
main_rtsp_path text,
-- The path (starting with "/") to use in rtsp:// URLs to reference this
-- camera's "sub" (low-bandwidth) video stream.
sub_rtsp_path text,
-- The number of bytes of video to retain, excluding the currently-recording
-- file. Older files will be deleted as necessary to stay within this limit.
retain_bytes integer not null check (retain_bytes >= 0),
-- The low 32 bits of the next recording id to assign for this camera.
-- Typically this is the maximum current recording + 1, but it does
-- not decrease if that recording is deleted.
next_recording_id integer not null check (next_recording_id >= 0)
);
-- Each row represents a single completed recorded segment of video.
-- Recordings are typically ~60 seconds; never more than 5 minutes.
create table recording (
-- The high 32 bits of composite_id are taken from the camera's id, which
-- improves locality. The low 32 bits are taken from the camera's
-- next_recording_id (which should be post-incremented in the same
-- transaction). It'd be simpler to use a "without rowid" table and separate
-- fields to make up the primary key, but
-- <https://www.sqlite.org/withoutrowid.html> points out that "without rowid"
-- is not appropriate when the average row size is in excess of 50 bytes.
-- recording_cover rows (which match this id format) are typically 1--5 KiB.
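-- For example (hypothetical ids), camera 5's recording 42 has
-- composite_id (5 << 32) | 42 = 21474836522.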
composite_id integer primary key,
-- This field is redundant with id above, but used to enforce the reference
-- constraint and to structure the recording_start_time index.
camera_id integer not null references camera (id),
-- The offset of this recording within a run. 0 means this was the first
-- recording made from a RTSP session. The start of the run has id
-- (id-run_offset).
run_offset integer not null,
-- flags is a bitmask:
--
-- * 1, or "trailing zero", indicates that this recording is the last in a
-- stream. As the duration of a sample is not known until the next sample
-- is received, the final sample in this recording will have duration 0.
flags integer not null,
sample_file_bytes integer not null check (sample_file_bytes > 0),
-- The starting time of the recording, in 90 kHz units since
-- 1970-01-01 00:00:00 UTC. Currently on initial connection, this is taken
-- from the local system time; on subsequent recordings, it exactly
-- matches the previous recording's end time.
start_time_90k integer not null check (start_time_90k > 0),
-- The duration of the recording, in 90 kHz units.
duration_90k integer not null
check (duration_90k >= 0 and duration_90k < 5*60*90000),
-- The number of 90 kHz units the local system time is ahead of the
-- recording; negative numbers indicate the local system time is behind
-- the recording. Large absolute values would indicate that the local time
-- has jumped during recording or that the local time and camera time
-- frequencies do not match.
local_time_delta_90k integer not null,
video_samples integer not null check (video_samples > 0),
video_sync_samples integer not null check (video_sync_samples > 0),
video_sample_entry_id integer references video_sample_entry (id),
check (composite_id >> 32 = camera_id)
);
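-- A minimal sketch (commented out; not part of the schema) of the composite_id
-- layout above, assuming a hypothetical camera 1 and recording 42:
--   select (1 << 32) | 42;                 -- compose: 4294967338
--   select composite_id >> 32,             -- decompose: camera id
--          composite_id & 0xffffffff       -- decompose: recording id
--     from recording;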
create index recording_cover on recording (
-- Typical queries use "where camera_id = ? order by start_time_90k".
camera_id,
start_time_90k,
-- These fields are not used for ordering; they cover most queries so
-- that only database verification and actual viewing of recordings need
-- to consult the underlying row.
duration_90k,
video_samples,
video_sync_samples,
video_sample_entry_id,
sample_file_bytes,
run_offset,
flags
);
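-- A minimal sketch (commented out; not part of the schema) of a typical query
-- answered from this covering index alone; the camera id and time bounds are
-- hypothetical:
--   select start_time_90k, duration_90k, sample_file_bytes
--     from recording
--     where camera_id = 1 and start_time_90k between 0 and 9000000
--     order by start_time_90k;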
-- Large fields for a recording which are not needed when simply listing all
-- of the recordings in a given range. In particular, when serving a byte
-- range within a .mp4 file, the recording_playback row is needed only for the
-- recording(s) corresponding to that particular byte range; the recording
-- rows suffice for all other recordings in the .mp4.
create table recording_playback (
-- See description on recording table.
composite_id integer primary key references recording (composite_id),
-- The binary representation of the sample file's uuid. The canonical text
-- representation of this uuid is the filename within the sample file dir.
sample_file_uuid blob not null check (length(sample_file_uuid) = 16),
-- The sha1 hash of the contents of the sample file.
sample_file_sha1 blob not null check (length(sample_file_sha1) = 20),
-- See design/schema.md#video_index for a description of this field.
video_index blob not null check (length(video_index) > 0)
);
-- Files in the sample file directory which may be present but should simply be
-- discarded on startup. (Recordings which were never completed or have been
-- marked for deletion.)
create table reserved_sample_files (
uuid blob primary key check (length(uuid) = 16),
state integer not null -- 0 (writing) or 1 (deleted)
) without rowid;
-- A concrete box derived from an ISO/IEC 14496-12 section 8.5.2
-- VisualSampleEntry box. Describes the codec, width, height, etc.
create table video_sample_entry (
id integer primary key,
-- A SHA-1 hash of |data|.
sha1 blob unique not null check (length(sha1) = 20),
-- The width and height in pixels; must match values within
-- |data|.
width integer not null check (width > 0),
height integer not null check (height > 0),
-- The serialized box, including the leading length and box type (avc1 in
-- the case of H.264).
data blob not null check (length(data) > 86)
);
insert into version (id, unix_time, notes)
values (1, cast(strftime('%s', 'now') as int), 'db creation');
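-- A hypothetical example (commented out) of the one-row-per-upgrade pattern
-- described above; an upgrade procedure would append, e.g.:
--   insert into version (id, unix_time, notes)
--       values (2, cast(strftime('%s', 'now') as int), 'upgrade to version 2');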

View File

@@ -30,10 +30,10 @@
//! Tools for implementing a `http_serve::Entity` body composed from many "slices".
use error::Error;
use reffers::ARefs;
use failure::Error;
use futures::stream;
use futures::Stream;
use reffers::ARefs;
use std::fmt;
use std::ops::Range;
@@ -96,9 +96,8 @@ impl<S> Slices<S> where S: Slice {
/// Appends the given slice, which must have end > the Slice's current len.
pub fn append(&mut self, slice: S) -> Result<(), Error> {
if slice.end() <= self.len {
return Err(Error::new(
format!("end {} <= len {} while adding slice {:?} to slices:\n{:?}",
slice.end(), self.len, slice, self)));
bail!("end {} <= len {} while adding slice {:?} to slices:\n{:?}",
slice.end(), self.len, slice, self);
}
self.len = slice.end();
self.slices.push(slice);
@@ -152,11 +151,11 @@ impl<S> Slices<S> where S: Slice {
#[cfg(test)]
mod tests {
use db::testutil;
use futures::{Future, Stream};
use futures::stream;
use std::ops::Range;
use super::{Slice, Slices};
use testutil;
#[derive(Debug, Eq, PartialEq)]
pub struct FakeChunk {

View File

@@ -28,7 +28,7 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use error::Error;
use failure::Error;
use h264;
use moonfire_ffmpeg;
use std::os::raw::c_char;
@@ -129,7 +129,7 @@ impl Opener<FfmpegStream> for Ffmpeg {
}
let video_i = match video_i {
Some(i) => i,
None => { return Err(Error::new("no video stream".to_owned())) },
None => bail!("no video stream"),
};
let mut stream = FfmpegStream{
@@ -156,13 +156,12 @@ impl Stream for FfmpegStream {
let video = self.input.streams().get(self.video_i);
let tb = video.time_base();
if tb.num != 1 || tb.den != 90000 {
return Err(Error::new(format!("video stream has timebase {}/{}; expected 1/90000",
tb.num, tb.den)));
bail!("video stream has timebase {}/{}; expected 1/90000", tb.num, tb.den);
}
let codec = video.codec();
let codec_id = codec.codec_id();
if !codec_id.is_h264() {
return Err(Error::new(format!("stream's video codec {:?} is not h264", codec_id)));
bail!("stream's video codec {:?} is not h264", codec_id);
}
h264::ExtraData::parse(codec.extradata(), codec.width() as u16, codec.height() as u16)
}

View File

@@ -29,11 +29,9 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use clock::{Clocks, TimerGuard};
use db::{Camera, Database};
use dir;
use error::Error;
use db::{Camera, Database, Stream, dir, recording, writer};
use failure::Error;
use h264;
use recording;
use std::result::Result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
@@ -43,55 +41,45 @@ use time;
pub static ROTATE_INTERVAL_SEC: i64 = 60;
/// Common state that can be used by multiple `Streamer` instances.
pub struct Environment<'a, 'b, C, S> where C: 'a + Clocks, S: 'a + stream::Stream {
pub clocks: &'a C,
pub struct Environment<'a, 'b, C, S> where C: Clocks + Clone, S: 'a + stream::Stream {
pub opener: &'a stream::Opener<S>,
pub db: &'b Arc<Database>,
pub dir: &'b Arc<dir::SampleFileDir>,
pub db: &'b Arc<Database<C>>,
pub shutdown: &'b Arc<AtomicBool>,
}
pub struct Streamer<'a, C, S> where C: 'a + Clocks, S: 'a + stream::Stream {
pub struct Streamer<'a, C, S> where C: Clocks + Clone, S: 'a + stream::Stream {
shutdown: Arc<AtomicBool>,
// State below is only used by the thread in Run.
rotate_offset_sec: i64,
rotate_interval_sec: i64,
db: Arc<Database>,
db: Arc<Database<C>>,
dir: Arc<dir::SampleFileDir>,
syncer_channel: dir::SyncerChannel,
clocks: &'a C,
syncer_channel: writer::SyncerChannel<::std::fs::File>,
opener: &'a stream::Opener<S>,
camera_id: i32,
stream_id: i32,
short_name: String,
url: String,
redacted_url: String,
}
struct WriterState<'a> {
writer: dir::Writer<'a>,
/// Seconds since epoch at which to next rotate.
rotate: i64,
}
impl<'a, C, S> Streamer<'a, C, S> where C: 'a + Clocks, S: 'a + stream::Stream {
pub fn new<'b>(env: &Environment<'a, 'b, C, S>, syncer_channel: dir::SyncerChannel,
camera_id: i32, c: &Camera, rotate_offset_sec: i64,
impl<'a, C, S> Streamer<'a, C, S> where C: 'a + Clocks + Clone, S: 'a + stream::Stream {
pub fn new<'b>(env: &Environment<'a, 'b, C, S>, dir: Arc<dir::SampleFileDir>,
syncer_channel: writer::SyncerChannel<::std::fs::File>,
stream_id: i32, c: &Camera, s: &Stream, rotate_offset_sec: i64,
rotate_interval_sec: i64) -> Self {
Streamer{
Streamer {
shutdown: env.shutdown.clone(),
rotate_offset_sec: rotate_offset_sec,
rotate_interval_sec: rotate_interval_sec,
db: env.db.clone(),
dir: env.dir.clone(),
dir,
syncer_channel: syncer_channel,
clocks: env.clocks,
opener: env.opener,
camera_id: camera_id,
short_name: c.short_name.to_owned(),
url: format!("rtsp://{}:{}@{}{}", c.username, c.password, c.host, c.main_rtsp_path),
redacted_url: format!("rtsp://{}:redacted@{}{}", c.username, c.host, c.main_rtsp_path),
stream_id: stream_id,
short_name: format!("{}-{}", c.short_name, s.type_.as_str()),
url: format!("rtsp://{}:{}@{}{}", c.username, c.password, c.host, s.rtsp_path),
redacted_url: format!("rtsp://{}:redacted@{}{}", c.username, c.host, s.rtsp_path),
}
}
@@ -101,8 +89,8 @@ impl<'a, C, S> Streamer<'a, C, S> where C: 'a + Clocks, S: 'a + stream::Stream {
while !self.shutdown.load(Ordering::SeqCst) {
if let Err(e) = self.run_once() {
let sleep_time = time::Duration::seconds(1);
warn!("{}: sleeping for {:?} after error: {}", self.short_name, sleep_time, e);
self.clocks.sleep(sleep_time);
warn!("{}: sleeping for {:?} after error: {:?}", self.short_name, sleep_time, e);
self.db.clocks().sleep(sleep_time);
}
}
info!("{}: shutting down", self.short_name);
@@ -110,51 +98,55 @@ impl<'a, C, S> Streamer<'a, C, S> where C: 'a + Clocks, S: 'a + stream::Stream {
fn run_once(&mut self) -> Result<(), Error> {
info!("{}: Opening input: {}", self.short_name, self.redacted_url);
let clocks = self.db.clocks();
let mut stream = {
let _t = TimerGuard::new(self.clocks, || format!("opening {}", self.redacted_url));
let _t = TimerGuard::new(&clocks, || format!("opening {}", self.redacted_url));
self.opener.open(stream::Source::Rtsp(&self.url))?
};
let realtime_offset = self.clocks.realtime() - self.clocks.monotonic();
let realtime_offset = self.db.clocks().realtime() - clocks.monotonic();
// TODO: verify width/height.
let extra_data = stream.get_extra_data()?;
let video_sample_entry_id = {
let _t = TimerGuard::new(self.clocks, || "inserting video sample entry");
let _t = TimerGuard::new(&clocks, || "inserting video sample entry");
self.db.lock().insert_video_sample_entry(extra_data.width, extra_data.height,
extra_data.sample_entry,
extra_data.rfc6381_codec)?
};
debug!("{}: video_sample_entry_id={}", self.short_name, video_sample_entry_id);
let mut seen_key_frame = false;
let mut state: Option<WriterState> = None;
// Seconds since epoch at which to next rotate.
let mut rotate: Option<i64> = None;
let mut transformed = Vec::new();
let mut prev = None;
let mut w = writer::Writer::new(&self.dir, &self.db, &self.syncer_channel, self.stream_id,
video_sample_entry_id);
while !self.shutdown.load(Ordering::SeqCst) {
let pkt = {
let _t = TimerGuard::new(self.clocks, || "getting next packet");
let _t = TimerGuard::new(&clocks, || "getting next packet");
stream.get_next()?
};
let pts = pkt.pts().ok_or_else(|| Error::new("packet with no pts".to_owned()))?;
let pts = pkt.pts().ok_or_else(|| format_err!("packet with no pts"))?;
if !seen_key_frame && !pkt.is_key() {
continue;
} else if !seen_key_frame {
debug!("{}: have first key frame", self.short_name);
seen_key_frame = true;
}
let frame_realtime = self.clocks.monotonic() + realtime_offset;
let frame_realtime = clocks.monotonic() + realtime_offset;
let local_time = recording::Time::new(frame_realtime);
state = if let Some(s) = state {
if frame_realtime.sec > s.rotate && pkt.is_key() {
rotate = if let Some(r) = rotate {
if frame_realtime.sec > r && pkt.is_key() {
trace!("{}: write on normal rotation", self.short_name);
let _t = TimerGuard::new(self.clocks, || "closing writer");
prev = Some(s.writer.close(Some(pts))?);
let _t = TimerGuard::new(&clocks, || "closing writer");
w.close(Some(pts));
None
} else {
Some(s)
Some(r)
}
} else { None };
let mut s = match state {
Some(s) => s,
let r = match rotate {
Some(r) => r,
None => {
let sec = frame_realtime.sec;
let r = sec - (sec % self.rotate_interval_sec) + self.rotate_offset_sec;
@@ -164,20 +156,14 @@ impl<'a, C, S> Streamer<'a, C, S> where C: 'a + Clocks, S: 'a + stream::Stream {
// the one after, so that it's longer than usual rather than shorter than
// usual. This ensures there's plenty of frame times to use when calculating
// the start time.
let r = r + if prev.is_none() { self.rotate_interval_sec } else { 0 };
let _t = TimerGuard::new(self.clocks, || "creating writer");
let w = self.dir.create_writer(&self.syncer_channel, prev, self.camera_id,
video_sample_entry_id)?;
WriterState{
writer: w,
rotate: r,
}
let r = r + if w.previously_opened()? { 0 } else { self.rotate_interval_sec };
let _t = TimerGuard::new(&clocks, || "creating writer");
r
},
};
let orig_data = match pkt.data() {
Some(d) => d,
None => return Err(Error::new("packet has no data".to_owned())),
None => bail!("packet has no data"),
};
let transformed_data = if extra_data.need_transform {
h264::transform_sample_data(orig_data, &mut transformed)?;
@@ -185,14 +171,14 @@ impl<'a, C, S> Streamer<'a, C, S> where C: 'a + Clocks, S: 'a + stream::Stream {
} else {
orig_data
};
let _t = TimerGuard::new(self.clocks,
let _t = TimerGuard::new(&clocks,
|| format!("writing {} bytes", transformed_data.len()));
s.writer.write(transformed_data, local_time, pts, pkt.is_key())?;
state = Some(s);
w.write(transformed_data, local_time, pts, pkt.is_key())?;
rotate = Some(r);
}
if let Some(s) = state {
let _t = TimerGuard::new(self.clocks, || "closing writer");
s.writer.close(None)?;
if rotate.is_some() {
let _t = TimerGuard::new(&clocks, || "closing writer");
w.close(None);
}
Ok(())
}
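// A minimal sketch (not from the commit) of the rotation-boundary arithmetic
// used in run_once above; `interval` and `offset` stand in for
// rotate_interval_sec and rotate_offset_sec. With interval 60 and offset 0,
// a frame at sec = 125 maps to the boundary 120.
fn rotation_boundary(sec: i64, interval: i64, offset: i64) -> i64 {
    // Align down to the previous interval boundary, then apply the offset.
    sec - (sec % interval) + offset
}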
@@ -201,16 +187,17 @@ impl<'a, C, S> Streamer<'a, C, S> where C: 'a + Clocks, S: 'a + stream::Stream {
#[cfg(test)]
mod tests {
use clock::{self, Clocks};
use db;
use error::Error;
use db::{self, CompositeId};
use db::recording;
use db::testutil;
use failure::Error;
use h264;
use moonfire_ffmpeg;
use recording;
use parking_lot::Mutex;
use std::cmp;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use stream::{self, Opener, Stream};
use testutil;
use time;
struct ProxyingStream<'a> {
@@ -292,7 +279,7 @@ mod tests {
stream::Source::Rtsp(url) => assert_eq!(url, &self.expected_url),
stream::Source::File(_) => panic!("expected rtsp url"),
};
let mut l = self.streams.lock().unwrap();
let mut l = self.streams.lock();
match l.pop() {
Some(stream) => {
trace!("MockOpener returning next stream");
@@ -301,7 +288,7 @@ mod tests {
None => {
trace!("MockOpener shutting down");
self.shutdown.store(true, Ordering::SeqCst);
Err(Error::new("done".to_owned()))
bail!("done")
},
}
}
@@ -314,8 +301,8 @@ mod tests {
is_key: bool,
}
fn get_frames(db: &db::LockedDatabase, camera_id: i32, recording_id: i32) -> Vec<Frame> {
db.with_recording_playback(camera_id, recording_id, |rec| {
fn get_frames(db: &db::LockedDatabase, id: CompositeId) -> Vec<Frame> {
db.with_recording_playback(id, |rec| {
let mut it = recording::SampleIndexIterator::new();
let mut frames = Vec::new();
while it.next(&rec.video_index).unwrap() {
@@ -346,23 +333,23 @@ mod tests {
streams: Mutex::new(vec![stream]),
shutdown: Arc::new(AtomicBool::new(false)),
};
let db = testutil::TestDb::new();
let env = super::Environment{
clocks: &clocks,
let db = testutil::TestDb::new(clocks.clone());
let env = super::Environment {
opener: &opener,
db: &db.db,
dir: &db.dir,
shutdown: &opener.shutdown,
};
let mut stream;
{
let l = db.db.lock();
let camera = l.cameras_by_id().get(&testutil::TEST_CAMERA_ID).unwrap();
stream = super::Streamer::new(&env, db.syncer_channel.clone(), testutil::TEST_CAMERA_ID,
camera, 0, 3);
let s = l.streams_by_id().get(&testutil::TEST_STREAM_ID).unwrap();
let dir = db.dirs_by_stream_id.get(&testutil::TEST_STREAM_ID).unwrap().clone();
stream = super::Streamer::new(&env, dir, db.syncer_channel.clone(),
testutil::TEST_STREAM_ID, camera, s, 0, 3);
}
stream.run();
assert!(opener.streams.lock().unwrap().is_empty());
assert!(opener.streams.lock().is_empty());
db.syncer_channel.flush();
let db = db.db.lock();
@@ -370,7 +357,7 @@ mod tests {
// 3-second boundaries (such as 2016-04-26 00:00:03), rotation happens somewhat later:
// * the first rotation is always skipped
// * the second rotation is deferred until a key frame.
assert_eq!(get_frames(&db, testutil::TEST_CAMERA_ID, 1), &[
assert_eq!(get_frames(&db, CompositeId::new(testutil::TEST_STREAM_ID, 1)), &[
Frame{start_90k: 0, duration_90k: 90379, is_key: true},
Frame{start_90k: 90379, duration_90k: 89884, is_key: false},
Frame{start_90k: 180263, duration_90k: 89749, is_key: false},
@@ -380,20 +367,20 @@ mod tests {
Frame{start_90k: 540015, duration_90k: 90021, is_key: false}, // pts_time 6.0001...
Frame{start_90k: 630036, duration_90k: 89958, is_key: false},
]);
assert_eq!(get_frames(&db, testutil::TEST_CAMERA_ID, 2), &[
assert_eq!(get_frames(&db, CompositeId::new(testutil::TEST_STREAM_ID, 2)), &[
Frame{start_90k: 0, duration_90k: 90011, is_key: true},
Frame{start_90k: 90011, duration_90k: 0, is_key: false},
]);
let mut recordings = Vec::new();
db.list_recordings_by_id(testutil::TEST_CAMERA_ID, 1..3, |r| {
db.list_recordings_by_id(testutil::TEST_STREAM_ID, 1..3, &mut |r| {
recordings.push(r);
Ok(())
}).unwrap();
assert_eq!(2, recordings.len());
assert_eq!(1, recordings[0].id);
assert_eq!(1, recordings[0].id.recording());
assert_eq!(recording::Time(128700575999999), recordings[0].start);
assert_eq!(0, recordings[0].flags);
assert_eq!(2, recordings[1].id);
assert_eq!(2, recordings[1].id.recording());
assert_eq!(recording::Time(128700576719993), recordings[1].start);
assert_eq!(db::RecordingFlags::TrailingZero as i32, recordings[1].flags);
}

View File

@@ -30,20 +30,22 @@
extern crate hyper;
use base::strutil;
use core::borrow::Borrow;
use core::str::FromStr;
use db;
use dir::SampleFileDir;
use error::Error;
use db::{self, recording};
use db::dir::SampleFileDir;
use failure::Error;
use fnv::FnvHashMap;
use futures::{future, stream};
use futures_cpupool;
use json;
use http;
use http_serve;
use hyper::header::{self, Header};
use hyper::server::{self, Request, Response};
use mime;
use mp4;
use recording;
use reffers::ARefs;
use regex::Regex;
use serde_json;
@@ -54,25 +56,25 @@ use std::fs;
use std::ops::Range;
use std::path::PathBuf;
use std::sync::Arc;
use strutil;
use url::form_urlencoded;
use uuid::Uuid;
lazy_static! {
/// Regex used to parse the `s` query parameter to `view.mp4`.
/// As described in `design/api.md`, this is of the form
/// `START_ID[-END_ID][.[REL_START_TIME]-[REL_END_TIME]]`.
static ref SEGMENTS_RE: Regex = Regex::new(r"^(\d+)(-\d+)?(?:\.(\d+)?-(\d+)?)?$").unwrap();
/// `START_ID[-END_ID][@OPEN_ID][.[REL_START_TIME]-[REL_END_TIME]]`.
static ref SEGMENTS_RE: Regex =
Regex::new(r"^(\d+)(-\d+)?(@\d+)?(?:\.(\d+)?-(\d+)?)?$").unwrap();
}
enum Path {
TopLevel, // "/api/"
InitSegment([u8; 20]), // "/api/init/<sha1>.mp4"
Camera(Uuid), // "/api/cameras/<uuid>/"
CameraRecordings(Uuid), // "/api/cameras/<uuid>/recordings"
CameraViewMp4(Uuid), // "/api/cameras/<uuid>/view.mp4"
CameraViewMp4Segment(Uuid), // "/api/cameras/<uuid>/view.m4s"
Static, // "<other path>"
TopLevel, // "/api/"
InitSegment([u8; 20]), // "/api/init/<sha1>.mp4"
Camera(Uuid), // "/api/cameras/<uuid>/"
StreamRecordings(Uuid, db::StreamType), // "/api/cameras/<uuid>/<type>/recordings"
StreamViewMp4(Uuid, db::StreamType), // "/api/cameras/<uuid>/<type>/view.mp4"
StreamViewMp4Segment(Uuid, db::StreamType), // "/api/cameras/<uuid>/<type>/view.m4s"
Static, // "<other path>"
NotFound,
}
@@ -101,18 +103,33 @@ fn decode_path(path: &str) -> Path {
None => { return Path::NotFound; },
Some(s) => s,
};
let (uuid, path) = path.split_at(slash);
let uuid = &path[0 .. slash];
let path = &path[slash+1 .. ];
// TODO(slamb): require uuid to be in canonical format.
let uuid = match Uuid::parse_str(uuid) {
Ok(u) => u,
Err(_) => { return Path::NotFound },
};
if path.is_empty() {
return Path::Camera(uuid);
}
let slash = match path.find('/') {
None => { return Path::NotFound; },
Some(s) => s,
};
let (type_, path) = path.split_at(slash);
let type_ = match db::StreamType::parse(type_) {
None => { return Path::NotFound; },
Some(t) => t,
};
match path {
"/" => Path::Camera(uuid),
"/recordings" => Path::CameraRecordings(uuid),
"/view.mp4" => Path::CameraViewMp4(uuid),
"/view.m4s" => Path::CameraViewMp4Segment(uuid),
"/recordings" => Path::StreamRecordings(uuid, type_),
"/view.mp4" => Path::StreamViewMp4(uuid, type_),
"/view.m4s" => Path::StreamViewMp4Segment(uuid, type_),
_ => Path::NotFound,
}
}
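// A minimal illustration (not from the commit) of the decoding above, with a
// hypothetical <uuid> and the stream type written informally:
//   "/api/cameras/<uuid>/"                => Path::Camera(uuid)
//   "/api/cameras/<uuid>/main/recordings" => Path::StreamRecordings(uuid, main)
//   "/api/cameras/<uuid>/sub/view.m4s"    => Path::StreamViewMp4Segment(uuid, sub)
//   anything else                         => Path::NotFound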
@@ -120,6 +137,7 @@ fn decode_path(path: &str) -> Path {
#[derive(Debug, Eq, PartialEq)]
struct Segments {
ids: Range<i32>,
open_id: Option<u32>,
start_time: i64,
end_time: Option<i64>,
}
@@ -129,17 +147,21 @@ impl Segments {
let caps = SEGMENTS_RE.captures(input).ok_or(())?;
let ids_start = i32::from_str(caps.get(1).unwrap().as_str()).map_err(|_| ())?;
let ids_end = match caps.get(2) {
Some(e) => i32::from_str(&e.as_str()[1..]).map_err(|_| ())?,
Some(m) => i32::from_str(&m.as_str()[1..]).map_err(|_| ())?,
None => ids_start,
} + 1;
let open_id = match caps.get(3) {
Some(m) => Some(u32::from_str(&m.as_str()[1..]).map_err(|_| ())?),
None => None,
};
if ids_start < 0 || ids_end <= ids_start {
return Err(());
}
let start_time = caps.get(3).map_or(Ok(0), |m| i64::from_str(m.as_str())).map_err(|_| ())?;
let start_time = caps.get(4).map_or(Ok(0), |m| i64::from_str(m.as_str())).map_err(|_| ())?;
if start_time < 0 {
return Err(());
}
let end_time = match caps.get(4) {
let end_time = match caps.get(5) {
Some(v) => {
let e = i64::from_str(v.as_str()).map_err(|_| ())?;
if e <= start_time {
@@ -149,10 +171,11 @@ impl Segments {
},
None => None
};
Ok(Segments{
Ok(Segments {
ids: ids_start .. ids_end,
start_time: start_time,
end_time: end_time,
open_id,
start_time,
end_time,
})
}
}
@@ -162,13 +185,13 @@ impl Segments {
/// The files themselves are opened on every request so they can be changed during development.
#[derive(Debug)]
struct UiFile {
mime: mime::Mime,
mime: http::header::HeaderValue,
path: PathBuf,
}
struct ServiceInner {
db: Arc<db::Database>,
dir: Arc<SampleFileDir>,
dirs_by_stream_id: Arc<FnvHashMap<i32, Arc<SampleFileDir>>>,
ui_files: HashMap<String, UiFile>,
allow_origin: Option<header::AccessControlAllowOrigin>,
pool: futures_cpupool::CpuPool,
@@ -201,7 +224,7 @@ impl ServiceInner {
let db = self.db.lock();
serde_json::to_writer(&mut w, &json::TopLevel {
time_zone_name: &self.time_zone_name,
cameras: (db.cameras_by_id(), days),
cameras: (&db, days),
})?;
}
Ok(resp)
@@ -212,13 +235,13 @@ impl ServiceInner {
if let Some(mut w) = http_serve::streaming_body(&req, &mut resp).build() {
let db = self.db.lock();
let camera = db.get_camera(uuid)
.ok_or_else(|| Error::new("no such camera".to_owned()))?;
serde_json::to_writer(&mut w, &json::Camera::new(camera, true))?
.ok_or_else(|| format_err!("no such camera {}", uuid))?;
serde_json::to_writer(&mut w, &json::Camera::wrap(camera, &db, true)?)?
};
Ok(resp)
}
fn camera_recordings(&self, req: &Request, uuid: Uuid)
fn stream_recordings(&self, req: &Request, uuid: Uuid, type_: db::StreamType)
-> Result<Response<slices::Body>, Error> {
let (r, split) = {
let mut time = recording::Time(i64::min_value()) .. recording::Time(i64::max_value());
@@ -240,19 +263,25 @@
{
let db = self.db.lock();
let camera = db.get_camera(uuid)
.ok_or_else(|| Error::new("no such camera".to_owned()))?;
db.list_aggregated_recordings(camera.id, r, split, |row| {
.ok_or_else(|| format_err!("no such camera {}", uuid))?;
let stream_id = camera.streams[type_.index()]
.ok_or_else(|| format_err!("no such stream {}/{}", uuid, type_))?;
db.list_aggregated_recordings(stream_id, r, split, &mut |row| {
let end = row.ids.end - 1; // in api, ids are inclusive.
let vse = db.video_sample_entries_by_id().get(&row.video_sample_entry_id).unwrap();
out.recordings.push(json::Recording {
start_id: row.ids.start,
end_id: if end == row.ids.start + 1 { None } else { Some(end) },
end_id: if end == row.ids.start { None } else { Some(end) },
start_time_90k: row.time.start.0,
end_time_90k: row.time.end.0,
sample_file_bytes: row.sample_file_bytes,
open_id: row.open_id,
first_uncommitted: row.first_uncommitted,
video_samples: row.video_samples,
video_sample_entry_width: row.video_sample_entry.width,
video_sample_entry_height: row.video_sample_entry.height,
video_sample_entry_sha1: strutil::hex(&row.video_sample_entry.sha1),
video_sample_entry_width: vse.width,
video_sample_entry_height: vse.height,
video_sample_entry_sha1: strutil::hex(&vse.sha1),
growing: row.growing,
});
Ok(())
})?;
@@ -267,33 +296,34 @@ impl ServiceInner {
fn init_segment(&self, sha1: [u8; 20], req: &Request) -> Result<Response<slices::Body>, Error> {
let mut builder = mp4::FileBuilder::new(mp4::Type::InitSegment);
let db = self.db.lock();
for ent in db.video_sample_entries() {
for ent in db.video_sample_entries_by_id().values() {
if ent.sha1 == sha1 {
builder.append_video_sample_entry(ent.clone());
let mp4 = builder.build(self.db.clone(), self.dir.clone())?;
let mp4 = builder.build(self.db.clone(), self.dirs_by_stream_id.clone())?;
return Ok(http_serve::serve(mp4, req));
}
}
self.not_found()
}
fn camera_view_mp4(&self, uuid: Uuid, type_: mp4::Type, query: Option<&str>, req: &Request)
-> Result<Response<slices::Body>, Error> {
let camera_id = {
fn stream_view_mp4(&self, req: &Request, uuid: Uuid, stream_type_: db::StreamType,
mp4_type_: mp4::Type) -> Result<Response<slices::Body>, Error> {
let stream_id = {
let db = self.db.lock();
let camera = db.get_camera(uuid)
.ok_or_else(|| Error::new("no such camera".to_owned()))?;
camera.id
.ok_or_else(|| format_err!("no such camera {}", uuid))?;
camera.streams[stream_type_.index()]
.ok_or_else(|| format_err!("no such stream {}/{}", uuid, stream_type_))?
};
let mut builder = mp4::FileBuilder::new(type_);
if let Some(q) = query {
let mut builder = mp4::FileBuilder::new(mp4_type_);
if let Some(q) = req.uri().query() {
for (key, value) in form_urlencoded::parse(q.as_bytes()) {
let (key, value) = (key.borrow(), value.borrow());
match key {
"s" => {
let s = Segments::parse(value).map_err(
|_| Error::new(format!("invalid s parameter: {}", value)))?;
debug!("camera_view_mp4: appending s={:?}", s);
|_| format_err!("invalid s parameter: {}", value))?;
debug!("stream_view_mp4: appending s={:?}", s);
let mut est_segments = (s.ids.end - s.ids.start) as usize;
if let Some(end) = s.end_time {
// There should be roughly ceil((end - start) /
@@ -310,19 +340,26 @@ impl ServiceInner {
let db = self.db.lock();
let mut prev = None;
let mut cur_off = 0;
db.list_recordings_by_id(camera_id, s.ids.clone(), |r| {
db.list_recordings_by_id(stream_id, s.ids.clone(), &mut |r| {
let recording_id = r.id.recording();
if let Some(o) = s.open_id {
if r.open_id != o {
bail!("recording {} has open id {}, requested {}",
r.id, r.open_id, o);
}
}
// Check for missing recordings.
match prev {
None if r.id == s.ids.start => {},
None => return Err(Error::new(format!("no such recording {}/{}",
camera_id, s.ids.start))),
Some(id) if r.id != id + 1 => {
return Err(Error::new(format!("no such recording {}/{}",
camera_id, id + 1)));
None if recording_id == s.ids.start => {},
None => bail!("no such recording {}/{}", stream_id, s.ids.start),
Some(id) if r.id.recording() != id + 1 => {
bail!("no such recording {}/{}", stream_id, id + 1);
},
_ => {},
};
prev = Some(r.id);
prev = Some(recording_id);
// Add a segment for the relevant part of the recording, if any.
let end_time = s.end_time.unwrap_or(i64::max_value());
@@ -331,11 +368,11 @@ impl ServiceInner {
let start = cmp::max(0, s.start_time - cur_off);
let end = cmp::min(d, end_time - cur_off);
let times = start as i32 .. end as i32;
debug!("...appending recording {}/{} with times {:?} (out of dur {})",
r.camera_id, r.id, times, d);
debug!("...appending recording {} with times {:?} \
(out of dur {})", r.id, times, d);
builder.append(&db, r, start as i32 .. end as i32)?;
} else {
debug!("...skipping recording {}/{} dur {}", r.camera_id, r.id, d);
debug!("...skipping recording {} dur {}", r.id, d);
}
cur_off += d;
Ok(())
@@ -344,28 +381,25 @@ impl ServiceInner {
// Check for missing recordings.
match prev {
Some(id) if s.ids.end != id + 1 => {
return Err(Error::new(format!("no such recording {}/{}",
camera_id, s.ids.end - 1)));
bail!("no such recording {}/{}", stream_id, s.ids.end - 1);
},
None => {
return Err(Error::new(format!("no such recording {}/{}",
camera_id, s.ids.start)));
bail!("no such recording {}/{}", stream_id, s.ids.start);
},
_ => {},
};
if let Some(end) = s.end_time {
if end > cur_off {
return Err(Error::new(
format!("end time {} is beyond specified recordings", end)));
bail!("end time {} is beyond specified recordings", end);
}
}
},
"ts" => builder.include_timestamp_subtitle_track(value == "true"),
_ => return Err(Error::new(format!("parameter {} not understood", key))),
_ => bail!("parameter {} not understood", key),
}
};
}
let mp4 = builder.build(self.db.clone(), self.dir.clone())?;
let mp4 = builder.build(self.db.clone(), self.dirs_by_stream_id.clone())?;
Ok(http_serve::serve(mp4, req))
}
@@ -375,7 +409,9 @@ impl ServiceInner {
Some(s) => s,
};
let f = fs::File::open(&s.path)?;
let e = http_serve::ChunkedReadFile::new(f, Some(self.pool.clone()), s.mime.clone())?;
let mut hdrs = http::HeaderMap::new();
hdrs.insert(http::header::CONTENT_TYPE, s.mime.clone());
let e = http_serve::ChunkedReadFile::new(f, Some(self.pool.clone()), hdrs)?;
Ok(http_serve::serve(e, &req))
}
}
@@ -384,20 +420,36 @@ impl ServiceInner {
pub struct Service(Arc<ServiceInner>);
impl Service {
pub fn new(db: Arc<db::Database>, dir: Arc<SampleFileDir>, ui_dir: Option<&str>,
allow_origin: Option<String>, zone: String) -> Result<Self, Error> {
pub fn new(db: Arc<db::Database>, ui_dir: Option<&str>, allow_origin: Option<String>,
zone: String) -> Result<Self, Error> {
let mut ui_files = HashMap::new();
if let Some(d) = ui_dir {
Service::fill_ui_files(d, &mut ui_files);
}
debug!("UI files: {:#?}", ui_files);
let dirs_by_stream_id = {
let l = db.lock();
let mut d =
FnvHashMap::with_capacity_and_hasher(l.streams_by_id().len(), Default::default());
for (&id, s) in l.streams_by_id().iter() {
let dir_id = match s.sample_file_dir_id {
Some(d) => d,
None => continue,
};
d.insert(id, l.sample_file_dirs_by_id()
.get(&dir_id)
.unwrap()
.get()?);
}
Arc::new(d)
};
let allow_origin = match allow_origin {
None => None,
Some(o) => Some(header::AccessControlAllowOrigin::parse_header(&header::Raw::from(o))?),
};
Ok(Service(Arc::new(ServiceInner {
db,
dir,
dirs_by_stream_id,
ui_files,
allow_origin,
pool: futures_cpupool::Builder::new().pool_size(1).name_prefix("static").create(),
@@ -423,13 +475,12 @@ impl Service {
},
};
let (p, mime) = match e.file_name().to_str() {
Some(n) if n == "index.html" => ("/".to_owned(), mime::TEXT_HTML),
Some(n) if n.ends_with(".html") => (format!("/{}", n), mime::TEXT_HTML),
Some(n) if n.ends_with(".ico") => (format!("/{}", n),
"image/vnd.microsoft.icon".parse().unwrap()),
Some(n) if n.ends_with(".js") => (format!("/{}", n), mime::TEXT_JAVASCRIPT),
Some(n) if n.ends_with(".map") => (format!("/{}", n), mime::TEXT_JAVASCRIPT),
Some(n) if n.ends_with(".png") => (format!("/{}", n), mime::IMAGE_PNG),
Some(n) if n == "index.html" => ("/".to_owned(), "text/html"),
Some(n) if n.ends_with(".html") => (format!("/{}", n), "text/html"),
Some(n) if n.ends_with(".ico") => (format!("/{}", n), "image/vnd.microsoft.icon"),
Some(n) if n.ends_with(".js") => (format!("/{}", n), "text/javascript"),
Some(n) if n.ends_with(".map") => (format!("/{}", n), "text/javascript"),
Some(n) if n.ends_with(".png") => (format!("/{}", n), "image/png"),
Some(n) => {
warn!("UI directory file {:?} has unknown extension; skipping", n);
continue;
@@ -441,7 +492,7 @@ impl Service {
},
};
files.insert(p, UiFile {
mime,
mime: http::header::HeaderValue::from_static(mime),
path: e.path(),
});
}
@@ -460,12 +511,12 @@ impl server::Service for Service {
Path::InitSegment(sha1) => self.0.init_segment(sha1, &req),
Path::TopLevel => self.0.top_level(&req),
Path::Camera(uuid) => self.0.camera(&req, uuid),
Path::CameraRecordings(uuid) => self.0.camera_recordings(&req, uuid),
Path::CameraViewMp4(uuid) => {
self.0.camera_view_mp4(uuid, mp4::Type::Normal, req.uri().query(), &req)
Path::StreamRecordings(uuid, type_) => self.0.stream_recordings(&req, uuid, type_),
Path::StreamViewMp4(uuid, type_) => {
self.0.stream_view_mp4(&req, uuid, type_, mp4::Type::Normal)
},
Path::CameraViewMp4Segment(uuid) => {
self.0.camera_view_mp4(uuid, mp4::Type::MediaSegment, req.uri().query(), &req)
Path::StreamViewMp4Segment(uuid, type_) => {
self.0.stream_view_mp4(&req, uuid, type_, mp4::Type::MediaSegment)
},
Path::NotFound => self.0.not_found(),
Path::Static => self.0.static_file(&req),
@@ -484,27 +535,31 @@ impl server::Service for Service {
#[cfg(test)]
mod tests {
use db::testutil;
use super::Segments;
use testutil;
#[test]
fn test_segments() {
testutil::init();
assert_eq!(Segments{ids: 1..2, start_time: 0, end_time: None},
assert_eq!(Segments{ids: 1..2, open_id: None, start_time: 0, end_time: None},
Segments::parse("1").unwrap());
assert_eq!(Segments{ids: 1..2, start_time: 26, end_time: None},
assert_eq!(Segments{ids: 1..2, open_id: Some(42), start_time: 0, end_time: None},
Segments::parse("1@42").unwrap());
assert_eq!(Segments{ids: 1..2, open_id: None, start_time: 26, end_time: None},
Segments::parse("1.26-").unwrap());
assert_eq!(Segments{ids: 1..2, start_time: 0, end_time: Some(42)},
assert_eq!(Segments{ids: 1..2, open_id: Some(42), start_time: 26, end_time: None},
Segments::parse("1@42.26-").unwrap());
assert_eq!(Segments{ids: 1..2, open_id: None, start_time: 0, end_time: Some(42)},
Segments::parse("1.-42").unwrap());
assert_eq!(Segments{ids: 1..2, start_time: 26, end_time: Some(42)},
assert_eq!(Segments{ids: 1..2, open_id: None, start_time: 26, end_time: Some(42)},
Segments::parse("1.26-42").unwrap());
assert_eq!(Segments{ids: 1..6, start_time: 0, end_time: None},
assert_eq!(Segments{ids: 1..6, open_id: None, start_time: 0, end_time: None},
Segments::parse("1-5").unwrap());
assert_eq!(Segments{ids: 1..6, start_time: 26, end_time: None},
assert_eq!(Segments{ids: 1..6, open_id: None, start_time: 26, end_time: None},
Segments::parse("1-5.26-").unwrap());
assert_eq!(Segments{ids: 1..6, start_time: 0, end_time: Some(42)},
assert_eq!(Segments{ids: 1..6, open_id: None, start_time: 0, end_time: Some(42)},
Segments::parse("1-5.-42").unwrap());
assert_eq!(Segments{ids: 1..6, start_time: 26, end_time: Some(42)},
assert_eq!(Segments{ids: 1..6, open_id: None, start_time: 26, end_time: Some(42)},
Segments::parse("1-5.26-42").unwrap());
}
}
@@ -514,9 +569,9 @@ mod bench {
extern crate reqwest;
extern crate test;
use db::testutil::{self, TestDb};
use hyper;
use self::test::Bencher;
use testutil::{self, TestDb};
use uuid::Uuid;
struct Server {
@@ -526,14 +581,13 @@ mod bench {
impl Server {
fn new() -> Server {
let db = TestDb::new();
let db = TestDb::new(::base::clock::RealClocks {});
let test_camera_uuid = db.test_camera_uuid;
testutil::add_dummy_recordings_to_db(&db.db, 1440);
let (tx, rx) = ::std::sync::mpsc::channel();
::std::thread::spawn(move || {
let addr = "127.0.0.1:0".parse().unwrap();
let (db, dir) = (db.db.clone(), db.dir.clone());
let service = super::Service::new(db.clone(), dir.clone(), None, None,
let service = super::Service::new(db.db.clone(), None, None,
"".to_owned()).unwrap();
let server = hyper::server::Http::new()
.bind(&addr, move || Ok(service.clone()))
@@ -554,10 +608,10 @@
}
#[bench]
fn serve_camera_recordings(b: &mut Bencher) {
fn serve_stream_recordings(b: &mut Bencher) {
testutil::init();
let server = &*SERVER;
let url = reqwest::Url::parse(&format!("{}/api/cameras/{}/recordings", server.base_url,
let url = reqwest::Url::parse(&format!("{}/api/cameras/{}/main/recordings", server.base_url,
server.test_camera_uuid)).unwrap();
let mut buf = Vec::new();
let client = reqwest::Client::new();

View File

@@ -52,18 +52,18 @@ import './assets/index.css';
import 'jquery-ui/ui/widgets/tooltip';
import Camera from './lib/models/Camera';
import CameraView from './lib/views/CameraView';
import CalendarView from './lib/views/CalendarView';
import VideoDialogView from './lib/views/VideoDialogView';
import NVRSettingsView from './lib/views/NVRSettingsView';
import CheckboxGroupView from './lib/views/CheckboxGroupView';
import RecordingFormatter from './lib/support/RecordingFormatter';
import StreamSelectorView from './lib/views/StreamSelectorView';
import StreamView from './lib/views/StreamView';
import TimeFormatter from './lib/support/TimeFormatter';
import TimeStamp90kFormatter from './lib/support/TimeStamp90kFormatter';
import MoonfireAPI from './lib/MoonfireAPI';
const api = new MoonfireAPI();
let cameraViews = null; // CameraView objects
let streamViews = null; // StreamView objects
let calendarView = null; // CalendarView object
/**
@@ -118,15 +118,17 @@ function newTimeFormat(format) {
*
* @param {NVRSettings} nvrSettingsView NVRSettingsView in effect
* @param {object} camera Object for the camera
* @param {String} streamType "main" or "sub"
* @param {object} range Range Object
* @param {object} recording Recording object
* @return {void}
*/
function onSelectVideo(nvrSettingsView, camera, range, recording) {
function onSelectVideo(nvrSettingsView, camera, streamType, range, recording) {
console.log('Recording clicked: ', recording);
const trimmedRange = recording.range90k(nvrSettingsView.trim ? range : null);
const url = api.videoPlayUrl(
camera.uuid,
streamType,
recording,
trimmedRange,
nvrSettingsView.timeStampTrack
@@ -146,7 +148,7 @@ function onSelectVideo(nvrSettingsView, camera, range, recording) {
}
/**
* Fetch camera view data for a given date/time range.
* Fetch stream view data for a given date/time range.
*
* @param {Range90k} selectedRange Desired time range
* @param {Number} videoLength Desired length of video segments, or Infinity
@@ -161,28 +163,29 @@ function fetch(selectedRange, videoLength) {
' to ' +
selectedRange.formatTimeStamp90k(selectedRange.endTime90k)
);
for (let cameraView of cameraViews) {
for (let streamView of streamViews) {
let url = api.recordingsUrl(
cameraView.camera.uuid,
streamView.camera.uuid,
streamView.streamType,
selectedRange.startTime90k,
selectedRange.endTime90k,
videoLength
);
if (cameraView.recordingsReq !== null) {
if (streamView.recordingsReq !== null) {
/*
* If another request is already in flight, the settings must have
* changed; abort it to make room for this new request, which reflects
* the changed situation.
*/
cameraView.recordingsReq.abort();
streamView.recordingsReq.abort();
}
cameraView.delayedShowLoading(500);
streamView.delayedShowLoading(500);
let r = api.request(url);
cameraView.recordingsUrl = url;
cameraView.recordingsReq = r;
cameraView.recordingsRange = selectedRange.range90k();
streamView.recordingsUrl = url;
streamView.recordingsReq = r;
streamView.recordingsRange = selectedRange.range90k();
r.always(function() {
cameraView.recordingsReq = null;
streamView.recordingsReq = null;
});
r
.then(function(data /* , status, req */) {
@@ -191,10 +194,10 @@ function fetch(selectedRange, videoLength) {
return b.startId - a.startId;
});
console.log(
'Fetched results for "%s" > updating recordings',
cameraView.camera.shortName
'Fetched results for "%s-%s" > updating recordings',
streamView.camera.shortName, streamView.streamType
);
cameraView.recordingsJSON = data.recordings;
streamView.recordingsJSON = data.recordings;
})
.catch(function(data, status, err) {
console.error(url, ' load failed: ', status, ': ', err);
@@ -207,7 +210,7 @@
*
* Sets the following globals:
* zone - timezone from data received
* cameraViews - array of views, one per camera
* streamViews - array of views, one per stream
*
* Builds the dom for the left side controllers
*
@@ -221,56 +224,56 @@ function onReceivedCameras(data) {
nvrSettingsView.onVideoLengthChange = (vl) =>
fetch(calendarView.selectedRange, vl);
nvrSettingsView.onTimeFormatChange = (format) =>
cameraViews.forEach((view) => (view.timeFormat = format));
streamViews.forEach((view) => (view.timeFormat = format));
nvrSettingsView.onTrimChange = (t) =>
cameraViews.forEach((view) => (view.trimmed = t));
streamViews.forEach((view) => (view.trimmed = t));
newTimeFormat(nvrSettingsView.timeFormatString);
calendarView = new CalendarView({timeZone: timeFormatter.tz});
calendarView.onRangeChange = (selectedRange) =>
fetch(selectedRange, nvrSettingsView.videoLength);
const camerasParent = $('#cameras');
const streamsParent = $('#streams');
const videos = $('#videos');
cameraViews = data.cameras.map((cameraJson) => {
streamViews = [];
let streamSelectorCameras = [];
for (const cameraJson of data.cameras) {
const camera = new Camera(cameraJson);
const cv = new CameraView(
camera,
new RecordingFormatter(timeFormatter.formatStr, timeFormatter.tz),
nvrSettingsView.trim,
videos
);
cv.onRecordingClicked = (recordingModel) => {
console.log('Recording clicked', recordingModel);
onSelectVideo(
nvrSettingsView,
camera,
calendarView.selectedRange,
recordingModel
);
};
return cv;
});
// Create camera enable checkboxes
const cameraCheckBoxes = new CheckboxGroupView(
cameraViews.map((cv) => ({
id: cv.camera.uuid,
checked: true,
text: cv.camera.shortName,
camView: cv,
})),
camerasParent
);
cameraCheckBoxes.onCheckChange = (groupEl) => {
groupEl.camView.enabled = groupEl.checked;
calendarView.initializeWith(cameraViews);
let cameraStreams = {};
Object.keys(camera.streams).forEach((streamType) => {
const sv = new StreamView(
camera,
streamType,
new RecordingFormatter(timeFormatter.formatStr, timeFormatter.tz),
nvrSettingsView.trim,
videos);
sv.onRecordingClicked = (recordingModel) => {
console.log('Recording clicked', recordingModel);
onSelectVideo(
nvrSettingsView,
camera,
streamType,
calendarView.selectedRange,
recordingModel
);
};
streamViews.push(sv);
cameraStreams[streamType] = sv;
});
streamSelectorCameras.push({
camera: camera,
streamViews: cameraStreams,
});
};
calendarView.initializeWith(cameraViews);
// Create stream enable checkboxes
const streamSelector =
new StreamSelectorView(streamSelectorCameras, streamsParent);
streamSelector.onChange = () => calendarView.initializeWith(streamViews);
calendarView.initializeWith(streamViews);
console.log('Loaded: ' + cameraViews.length + ' camera views');
console.log('Loaded: ' + streamViews.length + ' stream views');
}
/**

View File

@@ -7,8 +7,9 @@
<body>
<div id="nav">
<form action="#">
<fieldset id="cameras">
<legend>Cameras</legend>
<fieldset>
<legend>Streams</legend>
<table id="streams"></table>
</fieldset>
<fieldset id="datetime">
<legend>Date &amp; Time Range</legend>
@@ -73,4 +74,4 @@
</div>
<table id="videos"></table>
</body>
</html>

View File

@@ -77,13 +77,14 @@ export default class MoonfireAPI {
* URL that will cause the matching recordings of a camera stream to be returned.
*
* @param {String} cameraUUID UUID for the camera
* @param {String} streamType "main" or "sub"
* @param {String} start90k Timestamp for beginning of range of interest
* @param {String} end90k Timestamp for end of range of interest
* @param {String} split90k Desired maximum size of segments returned, or
* Infinity for infinite range
* @return {String} Constructed url
*/
recordingsUrl(cameraUUID, start90k, end90k, split90k = Infinity) {
recordingsUrl(cameraUUID, streamType, start90k, end90k, split90k = Infinity) {
const query = {
startTime90k: start90k,
endTime90k: end90k,
@@ -92,7 +93,7 @@
query.split90k = split90k;
}
return this._builder.makeUrl(
'cameras/' + cameraUUID + '/recordings',
'cameras/' + cameraUUID + '/' + streamType + '/recordings',
query
);
}
@@ -101,16 +102,21 @@
* URL that will playback a video segment.
*
* @param {String} cameraUUID UUID for the camera from whence comes the video
* @param {String} streamType "main" or "sub"
* @param {Recording} recording Recording model object
* @param {Range90k} trimmedRange Range restricting segments
* @param {Boolean} timestampTrack True if track should be timestamped
* @return {String} Constructed url
*/
videoPlayUrl(cameraUUID, recording, trimmedRange, timestampTrack = true) {
videoPlayUrl(cameraUUID, streamType, recording, trimmedRange,
timestampTrack = true) {
let sParam = recording.startId;
if (recording.endId !== undefined) {
sParam += '-' + recording.endId;
}
if (recording.firstUncommitted !== undefined) {
sParam += '@' + recording.openId; // disambiguate.
}
let rel = '';
if (recording.startTime90k < trimmedRange.startTime90k) {
rel += trimmedRange.startTime90k - recording.startTime90k;
@@ -118,6 +124,9 @@
rel += '-';
if (recording.endTime90k > trimmedRange.endTime90k) {
rel += trimmedRange.endTime90k - recording.startTime90k;
} else if (recording.growing !== undefined) {
// View just the portion described by recording.
rel += recording.endTime90k - recording.startTime90k;
}
if (rel !== '-') {
sParam += '.' + rel;
@@ -126,7 +135,8 @@
s: sParam,
ts: timestampTrack,
});
return this._builder.makeUrl('cameras/' + cameraUUID + '/view.mp4', {
return this._builder.makeUrl('cameras/' + cameraUUID + '/' + streamType +
'/view.mp4', {
s: sParam,
ts: timestampTrack,
});
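// A minimal sketch (all values hypothetical, not from the commit): recordings
// 1-5 with firstUncommitted set under open id 42, trimmed to times 26-42,
// produce a URL like
//   cameras/<uuid>/main/view.mp4?s=1-5@42.26-42&ts=true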

View File

@@ -31,7 +31,7 @@
// along with this program. If not, see <http://www.gnu.org/licenses/>.
import JsonWrapper from './JsonWrapper';
import Range90k from './Range90k';
import Stream from './Stream';
/**
* Camera JSON wrapper.
@ -44,89 +44,29 @@ export default class Camera extends JsonWrapper {
*/
constructor(cameraJson) {
super(cameraJson);
this.streams_ = {};
Object.keys(cameraJson.streams).forEach((streamType) => {
this.streams_[streamType] = new Stream(cameraJson.streams[streamType]);
});
}
/**
* Get camera uuid.
*
* @return {String} Camera's uuid
*/
/** @return {String} */
get uuid() {
return this.json.uuid;
}
/**
* Get camera's short name.
*
* @return {String} Name of the camera
*/
/** @return {String} */
get shortName() {
return this.json.shortName;
}
/**
* Get camera's description.
*
* @return {String} Camera's description
*/
/** @return {String} */
get description() {
return this.json.description;
}
/**
* Get the maximum amount of storage allowed to be used for the camera's video
* samples.
*
* @return {Number} Amount in bytes
*/
get retainBytes() {
return this.json.retainBytes;
}
/**
* Get a Range90K object representing the range encompassing all available
* video samples for the camera.
*
* This range does not mean every second of the range has video!
*
* @return {Range90k} The camera's available recordings range
*/
get range90k() {
return new Range90k(
this.json.minStartTime90k,
this.json.maxEndTime90k,
this.json.totalDuration90k
);
}
/**
* Get the total amount of storage currently taken up by the camera's video
* samples.
*
* @return {Number} Amount in bytes
*/
get totalSampleFileBytes() {
return this.json.totalSampleFileBytes;
}
/**
* Get the list of the camera's days for which there are video samples.
*
* The result is a Map with dates as keys (in YYYY-MM-DD format) and each
* value is a Range90k object for that day. Here too, the range does not
* mean every second in the range has video, but presence of an entry for
* a day does mean there is at least one (however short) video segment
* available.
*
* @return {Map} Dates are keys, values are Range90K objects.
*/
get days() {
return new Map(
Object.entries(this.json.days).map(function(t) {
let [k, v] = t;
v = new Range90k(v.startTime90k, v.endTime90k, v.totalDuration90k);
return [k, v];
})
);
/** @return {Object.<string, Stream>} */
get streams() {
return this.streams_;
}
}

View File

@@ -46,24 +46,31 @@ export default class Recording extends JsonWrapper {
super(recordingJson);
}
/**
* Get recording's startId.
*
* @return {String} startId for recording
*/
/** @return {Number} */
get startId() {
return this.json.startId;
}
/**
* Get recording's endId.
*
* @return {String} endId for recording
*/
/** @return {Number} */
get endId() {
return this.json.endId;
}
/** @return {Number} */
get openId() {
return this.json.openId;
}
/** @return {Number} or undefined */
get firstUncommitted() {
return this.json.firstUncommitted;
}
/** @return {Boolean} or undefined */
get growing() {
return this.json.growing;
}
/**
* Return start time of recording in 90k units.
* @return {Number} Time in units of 90k parts of a second

105
ui-src/lib/models/Stream.js Normal file
View File

@@ -0,0 +1,105 @@
// vim: set et sw=2 ts=2:
//
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2018 Dolf Starreveld <dolf@starreveld.com>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
import JsonWrapper from './JsonWrapper';
import Range90k from './Range90k';
/**
* Stream JSON wrapper.
*/
export default class Stream extends JsonWrapper {
/**
* Construct from JSON.
*
* @param {JSON} streamJson JSON for single stream.
*/
constructor(streamJson) {
super(streamJson);
}
/**
* Get the maximum amount of storage allowed to be used for the stream's video
* samples.
*
* @return {Number} Amount in bytes
*/
get retainBytes() {
return this.json.retainBytes;
}
/**
* Get a Range90K object representing the range encompassing all available
* video samples for the stream.
*
* This range does not mean every second of the range has video!
*
* @return {Range90k} The stream's available recordings range
*/
get range90k() {
return new Range90k(
this.json.minStartTime90k,
this.json.maxEndTime90k,
this.json.totalDuration90k
);
}
/**
* Get the total amount of storage currently taken up by the stream's video
* samples.
*
* @return {Number} Amount in bytes
*/
get totalSampleFileBytes() {
return this.json.totalSampleFileBytes;
}
/**
* Get the list of the stream's days for which there are video samples.
*
* The result is a Map with dates as keys (in YYYY-MM-DD format) and each
* value is a Range90k object for that day. Here too, the range does not
* mean every second in the range has video, but presence of an entry for
* a day does mean there is at least one (however short) video segment
* available.
*
* @return {Map} Dates are keys, values are Range90K objects.
*/
get days() {
return new Map(
Object.entries(this.json.days).map(function(t) {
let [k, v] = t;
v = new Range90k(v.startTime90k, v.endTime90k, v.totalDuration90k);
return [k, v];
})
);
}
}
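// A minimal usage sketch (assumes a hypothetical Camera already built from
// the server's JSON):
//   const stream = camera.streams['main'];
//   for (const day of stream.days.keys()) {
//     console.log('have video on', day); // e.g. '2018-04-26'
//   }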

View File

@@ -38,7 +38,7 @@ import TimeStamp90kFormatter from '../support/TimeStamp90kFormatter';
import Time90kParser from '../support/Time90kParser';
/**
* Find the earliest and latest dates from an array of CameraView
* Find the earliest and latest dates from an array of StreamView
* objects.
*
* Each stream view has a "days" property, whose keys identify days with
@@ -47,20 +47,20 @@ import Time90kParser from '../support/Time90kParser';
*
* "days" for camera views that are not enabled are ignored.
*
* @param {[Iterable]} cameraViews Camera views to look into
* @param {[Iterable]} streamViews Stream views to look into
* @return {[Set, String, String]} Array with set of all dates, and
* earliest and latest dates
*/
function minMaxDates(cameraViews) {
function minMaxDates(streamViews) {
/*
* Produce a set with all dates, across all enabled streams, that
* have at least one recording available (allDates).
*/
const allDates = new Set(
[].concat(
...cameraViews
...streamViews
.filter((v) => v.enabled)
.map((v) => Array.from(v.camera.days.keys()))
.map((v) => Array.from(v.stream.days.keys()))
)
);
return [
@@ -137,7 +137,7 @@ export default class CalendarView {
this._minDateStr = null;
this._maxDateStr = null;
this._singleDateStr = null;
this._cameraViews = null;
this._streamViews = null;
this._rangeChangedHandler = null;
}
@@ -329,12 +329,12 @@
* This is necessary as the stream views ultimately define the limits on
* the date pickers.
*
* @param {Iterable} cameraViews Collection of camera views
* @param {Iterable} streamViews Collection of stream views
*/
initializeWith(cameraViews) {
this._cameraViews = cameraViews;
initializeWith(streamViews) {
this._streamViews = streamViews;
[this._availableDates, this._minDateStr, this._maxDateStr] = minMaxDates(
cameraViews
streamViews
);
this._configureDatePickers();

View File

@@ -1,113 +0,0 @@
// vim: set et sw=2 ts=2:
//
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2018 Dolf Starreveld <dolf@starreveld.com>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
import $ from 'jquery';
/**
* Class to handle a group of (related) checkboxes.
*
* Each checkbox is managed through a simple object containing properties:
* - id: {String} Id (some unique value within the group)
* - selector: {String} jQuery compatible selector to find the dom element
* - checked: {Boolean} Value for checkbox
* - jq: {jQuery} jQuery element for the checkbox, or null if not found
*
* A handler can be called if a checkbox changes value.
*/
export default class CheckboxGroupView {
/**
* Construct the setup for the checkboxes.
*
* The passed group array should contain individual maps describing each
* checkbox. The maps should contain:
* - id
* - selector: optional. If not provided #id will be used
* - checked: Initial value for checkbox, default true
* - text: Text for the checkbox label (not generated if empty)
*
* @param {Array} group Array of maps, one for each checkbox
* @param {jQuery} parent jQuery parent element to append to
*/
constructor(group = [], parent = null) {
this._group = group.slice(); // Copy
this._group.forEach((element) => {
// If parent specified, create and append
if (parent) {
let cb = `<input type="checkbox" id="${element.id}" name="${
element.id
}">`;
if (element.text) {
cb += `<label for="${element.id}">${element.text}</label>`;
}
parent.append($(cb + '<br/>'));
}
const jq = $(element.selector || `#${element.id}`);
element.jq = jq;
if (jq !== null) {
jq.prop('checked', element.checked || true);
jq.change((e) => {
if (this._checkChangeHandler) {
element.checked = e.target.checked;
this._checkChangeHandler(element);
}
});
}
});
this._checkChangeHandler = null;
}
/**
* Get the checkbox object for the specified checkbox.
*
* The checkbox is looked up by the specified id or selector, which must
* match what was specified during construction.
*
* @param {String} idOrSelector Identifying string
* @return {Object} Object for checkbox, or null if not found
*/
checkBox(idOrSelector) {
return this._group.find(
(el) => el.id === idOrSelector || el.selector === idOrSelector
);
}
/**
* Set a handler for checkbox changes.
*
* Handler will be called with same result as would be found by checkBox().
*
* @param {Function} handler function (checkbox)
*/
set onCheckChange(handler) {
this._checkChangeHandler = handler;
}
}
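
For orientation, a minimal usage sketch of the class above; the container selector, checkbox ids, label text, and handler body are hypothetical, and only the CheckboxGroupView API itself comes from this file.

import $ from 'jquery';
import CheckboxGroupView from './CheckboxGroupView';

// Create two labeled checkboxes under a (hypothetical) #prefs container.
const prefs = new CheckboxGroupView(
  [
    {id: 'trim', text: 'Trim recording ranges', checked: true},
    {id: 'ts', text: 'Show timestamps', checked: false},
  ],
  $('#prefs')
);
// The handler receives the same object that checkBox() returns.
prefs.onCheckChange = (cb) => console.log(cb.id, 'is now', cb.checked);
console.log(prefs.checkBox('trim').checked); // true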

View File

@ -73,18 +73,19 @@ export default class RecordingsView {
* Construct display from camera data and use supplied formatter.
*
* @param {Camera} camera camera object (immutable)
* @param {String} streamType "main" or "sub"
* @param {RecordingFormatter} recordingFormatter Desired formatter
* @param {Boolean} trimmed True if the display should include trimmed ranges
* @param {jQuery} parent Parent to which new DOM is attached, or null
*/
constructor(camera, recordingFormatter, trimmed = false, parent = null) {
constructor(camera, streamType, recordingFormatter, trimmed = false,
parent = null) {
this._cameraName = camera.shortName;
this._cameraRange = camera.range90k;
this._formatter = recordingFormatter;
this._element = $(`tab-${camera.uuid}`); // Might not be there initially
if (this._element.length == 0) {
this._element = this._createElement(camera.uuid, camera.shortName);
}
const id = `tab-${camera.uuid}-${streamType}`;
this._element = this._createElement(id, camera.shortName, streamType);
this._trimmed = trimmed;
this._recordings = null;
this._recordingsRange = null;
@ -100,12 +101,14 @@ export default class RecordingsView {
*
* @param {String} id DOM id for the main element
* @param {String} cameraName Name of the corresponding camera
* @param {String} streamType "main" or "sub"
* @return {jQuery} Partial DOM as jQuery object
*/
_createElement(id, cameraName) {
_createElement(id, cameraName, streamType) {
const tab = $('<tbody>').attr('id', id);
tab.append(
$('<tr class="name">').append($('<th colspan=6/>').text(cameraName)),
$('<tr class="name">').append($('<th colspan=6/>')
.text(cameraName + ' ' + streamType)),
$('<tr class="hdr">').append(
$(
_columnOrder

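For context, a sketch of a call site matching the widened constructor above; `camera`, `formatter`, and `parent` are stand-ins for values the real caller already has, not names taken from this commit.

// Hypothetical caller: one RecordingsView per stream type of a camera.
const views = {};
for (const streamType of ['main', 'sub']) {
  views[streamType] =
    new RecordingsView(camera, streamType, formatter, false, parent);
}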
View File

@ -0,0 +1,99 @@
// vim: set et sw=2 ts=2:
//
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2018 Dolf Starreveld <dolf@starreveld.com>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
import $ from 'jquery';
const allStreamTypes = ['main', 'sub'];
/**
* View for selecting the enabled streams.
*
* This displays a table with a camera per row and stream type per column.
 * It propagates the enabled status onto the corresponding stream views. It
 * also calls the optional onChange handler on any change.
*/
export default class StreamSelectorView {
/**
* @param {Array} cameras An element for each camera with
* - camera: a {Camera}
* - streamViews: a map of stream type to {StreamView}
* @param {jQuery} parent jQuery parent element to append to
*/
constructor(cameras, parent) {
this._cameras = cameras;
if (cameras.length !== 0) {
// Add a header row.
let hdr = $('<tr/>').append($('<th/>'));
for (const streamType of allStreamTypes) {
hdr.append($('<th/>').text(streamType));
}
parent.append(hdr);
}
this._cameras.forEach((c) => {
let row = $('<tr/>').append($('<td>').text(c.camera.shortName));
let firstStreamType = true;
for (const streamType of allStreamTypes) {
const streamView = c.streamViews[streamType];
if (streamView === undefined) {
row.append('<td/>');
} else {
const id = 'cam-' + c.camera.uuid + '-' + streamType;
let cb = $('<input type="checkbox">').attr('name', id).attr('id', id);
// Only the first stream type for each camera should be checked
// initially.
cb.prop('checked', firstStreamType);
streamView.enabled = firstStreamType;
firstStreamType = false;
cb.change((e) => {
streamView.enabled = e.target.checked;
if (this._onChangeHandler) {
this._onChangeHandler();
}
});
row.append($('<td/>').append(cb));
}
}
parent.append(row);
});
this._onChangeHandler = null;
}
/** @param {function()} handler a handler to run after toggling a stream */
set onChange(handler) {
this._onChangeHandler = handler;
}
}
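
A sketch of how this selector might be wired together with the stream views; `cameras`, `formatter`, `recordingsParent`, `selectorParent`, and `redrawCalendar` are all assumed names used for illustration only.

import StreamView from './StreamView';
import StreamSelectorView from './StreamSelectorView';

const cameraViews = cameras.map((camera) => {
  // Build a StreamView only for the stream types this camera actually has.
  const streamViews = {};
  for (const type of ['main', 'sub']) {
    if (camera.streams[type] !== undefined) {
      streamViews[type] =
        new StreamView(camera, type, formatter, false, recordingsParent);
    }
  }
  return {camera: camera, streamViews: streamViews};
});
const selector = new StreamSelectorView(cameraViews, selectorParent);
selector.onChange = () => redrawCalendar();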

View File

@ -33,29 +33,29 @@
import RecordingsView from './RecordingsView';
/**
 * Class handling a camera view.
*
* A camera view consists of a list of available recording segments for
* playback.
* Stream view: a list of available recording segments for playback.
*/
export default class CameraView {
export default class StreamView {
/**
* Construct the view.
*
* @param {Camera} cameraModel Model object for camera
* @param {String} streamType "main" or "sub"
* @param {[type]} recordingFormatter Formatter to be used by recordings
 * @param {[type]} trimmed True if recording ranges should be trimmed
 * @param {[type]} recordingsParent Parent element to attach to (or null)
*/
constructor(
cameraModel,
streamType,
recordingFormatter,
trimmed,
recordingsParent = null
) {
this.camera = cameraModel;
this.streamType = streamType;
this.stream = cameraModel.streams[streamType];
this.recordingsView = new RecordingsView(
this.camera,
this.streamType,
recordingFormatter,
trimmed,
recordingsParent
@ -82,11 +82,8 @@ export default class CameraView {
set enabled(enabled) {
this._enabled = enabled;
this.recordingsView.show = enabled;
console.log(
'Camera %s %s',
this.camera.shortName,
this.enabled ? 'enabled' : 'disabled'
);
console.log('Stream %s-%s %s', this.camera.shortName, this.streamType,
this.enabled ? 'enabled' : 'disabled');
}
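  // Illustrative usage only, not part of this commit (`camera`, `formatter`,
  // and `parent` are assumed names):
  //   const view = new StreamView(camera, 'sub', formatter, false, parent);
  //   view.enabled = true; // shows the recordings view and logs
  //                        // "Stream <shortName>-sub enabled"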
/**