mirror of
https://github.com/scottlamb/moonfire-nvr.git
synced 2025-11-10 05:59:44 -05:00
restructure into "server" and "ui" subdirs
Besides being clearer about what belongs to which, this helps with Docker caching: the server and ui parts are only rebuilt when their respective subdirectories change. This is extended a bit further by making the webpack build independent of the target architecture, and by adding cache dirs so that parts of the server and ui build processes can be reused when layer-wide caching fails.
This commit is contained in:
43
server/db/Cargo.toml
Normal file
43
server/db/Cargo.toml
Normal file
@@ -0,0 +1,43 @@
|
||||
[package]
|
||||
name = "moonfire-db"
|
||||
version = "0.6.0"
|
||||
authors = ["Scott Lamb <slamb@slamb.org>"]
|
||||
readme = "../README.md"
|
||||
edition = "2018"
|
||||
|
||||
[features]
|
||||
nightly = []
|
||||
|
||||
[lib]
|
||||
path = "lib.rs"
|
||||
|
||||
[dependencies]
|
||||
base = { package = "moonfire-base", path = "../base" }
|
||||
base64 = "0.13.0"
|
||||
blake3 = "0.3.7"
|
||||
byteorder = "1.0"
|
||||
cstr = "0.2.5"
|
||||
failure = "0.1.1"
|
||||
fnv = "1.0"
|
||||
h264-reader = { git = "https://github.com/dholroyd/h264-reader" }
|
||||
hashlink = "0.6.0"
|
||||
lazy_static = "1.0"
|
||||
libc = "0.2"
|
||||
libpasta = { git = "https://github.com/scottlamb/libpasta", branch = "pr-deps" }
|
||||
log = "0.4"
|
||||
mylog = { git = "https://github.com/scottlamb/mylog" }
|
||||
nix = "0.19.0"
|
||||
odds = { version = "0.4.0", features = ["std-vec"] }
|
||||
parking_lot = { version = "0.11.1", features = [] }
|
||||
prettydiff = "0.3.1"
|
||||
protobuf = { git = "https://github.com/stepancheg/rust-protobuf" }
|
||||
ring = "0.16.2"
|
||||
rusqlite = "0.24.1"
|
||||
smallvec = "1.0"
|
||||
tempdir = "0.3"
|
||||
time = "0.1"
|
||||
uuid = { version = "0.8", features = ["std", "v4"] }
|
||||
itertools = "0.9.0"
|
||||
|
||||
[build-dependencies]
|
||||
protobuf-codegen-pure = { git = "https://github.com/stepancheg/rust-protobuf" }
|
||||
1062
server/db/auth.rs
Normal file
1062
server/db/auth.rs
Normal file
File diff suppressed because it is too large
Load Diff
41
server/db/build.rs
Normal file
41
server/db/build.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2019-2020 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
Ok(protobuf_codegen_pure::Codegen::new()
|
||||
.out_dir(std::env::var("OUT_DIR")?)
|
||||
.inputs(&["proto/schema.proto"])
|
||||
.include("proto")
|
||||
.customize(protobuf_codegen_pure::Customize {
|
||||
gen_mod_rs: Some(true),
|
||||
..Default::default()
|
||||
})
|
||||
.run()?)
|
||||
}
|
||||
331
server/db/check.rs
Normal file
331
server/db/check.rs
Normal file
@@ -0,0 +1,331 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2020 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Subcommand to check the database and sample file dir for errors.
|
||||
|
||||
use crate::compare;
|
||||
use crate::db::{self, CompositeId, FromSqlUuid};
|
||||
use crate::dir;
|
||||
use crate::raw;
|
||||
use crate::recording;
|
||||
use failure::Error;
|
||||
use fnv::FnvHashMap;
|
||||
use log::{info, error};
|
||||
use nix::fcntl::AtFlags;
|
||||
use rusqlite::params;
|
||||
use crate::schema;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
|
||||
/// Options for the database/sample-file-dir check.
pub struct Options {
    /// If true, compare each sample file's on-disk length against the
    /// database's byte count; otherwise file lengths are not examined.
    pub compare_lens: bool,
}
|
||||
|
||||
/// Runs the check: verifies the schema matches a freshly-initialized database,
/// then cross-checks every sample file directory, stream, and recording,
/// logging (via `error!`) any inconsistencies found. Returns `Err` only on
/// operational failures (SQL errors, I/O errors), not on found inconsistencies.
pub fn run(conn: &rusqlite::Connection, opts: &Options) -> Result<(), Error> {
    // Compare schemas.
    {
        let mut expected = rusqlite::Connection::open_in_memory()?;
        db::init(&mut expected)?;
        if let Some(diffs) = compare::get_diffs("actual", conn, "expected", &expected)? {
            println!("{}", &diffs);
        } else {
            println!("Schema is as expected.");
        }
        info!("Done comparing schemas.");
    }

    let db_uuid = raw::get_db_uuid(&conn)?;

    // Scan directories.
    let mut streams_by_dir: FnvHashMap<i32, Dir> = FnvHashMap::default();
    {
        let mut dir_stmt = conn.prepare(r#"
            select d.id, d.path, d.uuid, d.last_complete_open_id, o.uuid
            from sample_file_dir d left join open o on (d.last_complete_open_id = o.id)
        "#)?;
        let mut garbage_stmt = conn.prepare_cached(
            "select composite_id from garbage where sample_file_dir_id = ?")?;
        let mut rows = dir_stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            // Build the expected DirMeta for this directory so opening it can
            // verify the on-disk metadata matches the database.
            let mut meta = schema::DirMeta::default();
            let dir_id: i32 = row.get(0)?;
            let dir_path: String = row.get(1)?;
            let dir_uuid: FromSqlUuid = row.get(2)?;
            let open_id = row.get(3)?;
            let open_uuid: FromSqlUuid = row.get(4)?;
            meta.db_uuid.extend_from_slice(&db_uuid.as_bytes()[..]);
            meta.dir_uuid.extend_from_slice(&dir_uuid.0.as_bytes()[..]);
            {
                let o = meta.last_complete_open.set_default();
                o.id = open_id;
                o.uuid.extend_from_slice(&open_uuid.0.as_bytes()[..]);
            }

            // Open the directory (checking its metadata) and hold it open (for the lock).
            let dir = dir::SampleFileDir::open(&dir_path, &meta)?;
            let mut streams = read_dir(&dir, opts)?;

            // Mark recordings which the database says are awaiting deletion.
            let mut rows = garbage_stmt.query(params![dir_id])?;
            while let Some(row) = rows.next()? {
                let id = CompositeId(row.get(0)?);
                let s = streams.entry(id.stream()).or_insert_with(Stream::default);
                s.entry(id.recording()).or_insert_with(Recording::default).garbage_row = true;
            }
            streams_by_dir.insert(dir_id, streams);
        }
    }

    // Scan known streams.
    {
        let mut stmt = conn.prepare(r#"
            select id, sample_file_dir_id from stream where sample_file_dir_id is not null
        "#)?;
        let mut rows = stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            let stream_id = row.get(0)?;
            let dir_id = row.get(1)?;
            // Remove this stream's entry so that after the loop, only
            // directory contents not claimed by any stream remain.
            let stream = match streams_by_dir.get_mut(&dir_id) {
                None => Stream::default(),
                Some(d) => d.remove(&stream_id).unwrap_or_else(Stream::default),
            };
            compare_stream(conn, stream_id, opts, stream)?;
        }
    }

    // Expect the rest to have only garbage.
    for (&dir_id, streams) in &streams_by_dir {
        for (&stream_id, stream) in streams {
            for (&recording_id, r) in stream {
                let id = CompositeId::new(stream_id, recording_id);
                if r.recording_row.is_some() || r.playback_row.is_some() ||
                   r.integrity_row || !r.garbage_row {
                    error!("dir {} recording {} for unknown stream: {:#?}", dir_id, id, r);
                }
            }
        }
    }

    Ok(())
}
|
||||
|
||||
/// Summary statistics for one recording, derivable both from the `recording`
/// row and (independently) from the `recording_playback` index; the two are
/// compared for equality in `compare_stream`.
#[derive(Debug, Eq, PartialEq)]
struct RecordingSummary {
    // Total sample file bytes.
    bytes: u64,
    video_samples: i32,
    video_sync_samples: i32,
    // Duration; in 90 kHz units, judging by the `*_90k` columns it is read
    // from — TODO confirm.
    media_duration: i32,
    flags: i32,
}
|
||||
|
||||
/// Everything the check has learned about a single recording, gathered from
/// the sample file directory and from four database tables.
#[derive(Debug, Default)]
struct Recording {
    /// Present iff there is a file. When `args.compare_lens` is true, the length; otherwise 0.
    file: Option<u64>,

    /// Iff a `recording` row is present, a `RecordingSummary` from those fields.
    recording_row: Option<RecordingSummary>,

    /// Iff a `recording_playback` row is present, a `RecordingSummary` computed from the index.
    /// This should match the recording row.
    playback_row: Option<RecordingSummary>,

    /// True iff a `recording_integrity` row is present.
    integrity_row: bool,

    /// True iff a `garbage` row is present.
    garbage_row: bool,
}
|
||||
|
||||
/// Map of recording id (within a stream) to what's known about that recording.
type Stream = FnvHashMap<i32, Recording>;

/// Map of stream id to that stream's recordings, within one sample file directory.
type Dir = FnvHashMap<i32, Stream>;
|
||||
|
||||
/// Computes a `RecordingSummary` by walking the recording's sample index.
/// Fails if the index itself is malformed.
fn summarize_index(video_index: &[u8]) -> Result<RecordingSummary, Error> {
    let mut it = recording::SampleIndexIterator::new();
    let mut media_duration = 0;
    let mut video_samples = 0;
    let mut video_sync_samples = 0;
    let mut bytes = 0;
    while it.next(video_index)? {
        bytes += it.bytes as u64;
        media_duration += it.duration_90k;
        video_samples += 1;
        video_sync_samples += it.is_key() as i32;
    }
    Ok(RecordingSummary {
        bytes,
        video_samples,
        video_sync_samples,
        media_duration,
        // A zero-duration final sample marks a trailing-zero recording.
        flags: if it.duration_90k == 0 { db::RecordingFlags::TrailingZero as i32 } else { 0 },
    })
}
|
||||
|
||||
/// Reads through the given sample file directory.
/// Logs unexpected files and creates a hash map of the files found there.
/// If `opts.compare_lens` is set, the values are lengths; otherwise they're insignificant.
fn read_dir(d: &dir::SampleFileDir, opts: &Options) -> Result<Dir, Error> {
    let mut dir = Dir::default();
    let mut d = d.opendir()?;
    // Raw fd kept for fstatat-ing entries relative to the directory.
    let fd = d.as_raw_fd();
    for e in d.iter() {
        let e = e?;
        let f = e.file_name();
        // Skip the expected non-recording entries.
        match f.to_bytes() {
            b"." | b".." | b"meta" => continue,
            _ => {},
        };
        let id = match dir::parse_id(f.to_bytes()) {
            Ok(id) => id,
            Err(_) => {
                error!("sample file directory contains file {:?} which isn't an id", f);
                continue;
            }
        };
        let len = if opts.compare_lens {
            nix::sys::stat::fstatat(fd, f, AtFlags::empty())?.st_size as u64
        } else { 0 };
        let stream = dir.entry(id.stream()).or_insert_with(Stream::default);
        stream.entry(id.recording()).or_insert_with(Recording::default).file = Some(len);
    }
    Ok(dir)
}
|
||||
|
||||
/// Looks through a known stream for errors.
///
/// Fills in `stream` (already seeded with directory-scan and garbage-row
/// state) from the `recording`, `recording_playback`, and
/// `recording_integrity` tables, then cross-checks every recording,
/// logging each inconsistency via `error!`.
fn compare_stream(conn: &rusqlite::Connection, stream_id: i32, opts: &Options,
                  mut stream: Stream) -> Result<(), Error> {
    // Composite-id range covering every possible recording of this stream.
    let start = CompositeId::new(stream_id, 0);
    let end = CompositeId::new(stream_id, i32::max_value());

    // recording row.
    {
        let mut stmt = conn.prepare_cached(r#"
            select
              composite_id,
              flags,
              sample_file_bytes,
              wall_duration_90k + media_duration_delta_90k,
              video_samples,
              video_sync_samples
            from
              recording
            where
              composite_id between ? and ?
        "#)?;
        let mut rows = stmt.query(params![start.0, end.0])?;
        while let Some(row) = rows.next()? {
            let id = CompositeId(row.get(0)?);
            let s = RecordingSummary {
                flags: row.get(1)?,
                bytes: row.get::<_, i64>(2)? as u64,
                media_duration: row.get(3)?,
                video_samples: row.get(4)?,
                video_sync_samples: row.get(5)?,
            };
            stream.entry(id.recording())
                  .or_insert_with(Recording::default)
                  .recording_row = Some(s);
        }
    }

    // recording_playback row.
    {
        let mut stmt = conn.prepare_cached(r#"
            select
              composite_id,
              video_index
            from
              recording_playback
            where
              composite_id between ? and ?
        "#)?;
        let mut rows = stmt.query(params![start.0, end.0])?;
        while let Some(row) = rows.next()? {
            let id = CompositeId(row.get(0)?);
            let video_index: Vec<u8> = row.get(1)?;
            // A bad index is logged but doesn't abort the check of other rows.
            let s = match summarize_index(&video_index) {
                Ok(s) => s,
                Err(e) => {
                    error!("id {} has bad video_index: {}", id, e);
                    continue;
                },
            };
            stream.entry(id.recording())
                  .or_insert_with(Recording::default)
                  .playback_row = Some(s);
        }
    }

    // recording_integrity row.
    {
        let mut stmt = conn.prepare_cached(r#"
            select
              composite_id
            from
              recording_integrity
            where
              composite_id between ? and ?
        "#)?;
        let mut rows = stmt.query(params![start.0, end.0])?;
        while let Some(row) = rows.next()? {
            let id = CompositeId(row.get(0)?);
            stream.entry(id.recording())
                  .or_insert_with(Recording::default)
                  .integrity_row = true;
        }
    }

    // Cross-check each recording's gathered state.
    for (&id, recording) in &stream {
        let id = CompositeId::new(stream_id, id);
        let r = match recording.recording_row {
            Some(ref r) => r,
            None => {
                // Without a recording row, only a lone garbage row is legitimate.
                if !recording.garbage_row || recording.playback_row.is_some() ||
                   recording.integrity_row {
                    error!("Missing recording row for {}: {:#?}", id, recording);
                }
                continue;
            },
        };
        match recording.playback_row {
            Some(ref p) => {
                if r != p {
                    error!("Recording {} summary doesn't match video_index: {:#?}", id, recording);
                }
            },
            None => error!("Recording {} missing playback row: {:#?}", id, recording),
        }
        match recording.file {
            Some(len) => if opts.compare_lens && r.bytes != len {
                error!("Recording {} length mismatch: {:#?}", id, recording);
            },
            None => error!("Recording {} missing file: {:#?}", id, recording),
        }
    }

    Ok(())
}
|
||||
192
server/db/coding.rs
Normal file
192
server/db/coding.rs
Normal file
@@ -0,0 +1,192 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2016 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Binary encoding/decoding.
|
||||
|
||||
/// Zigzag-encodes a signed integer, as in [protocol buffer
/// encoding](https://developers.google.com/protocol-buffers/docs/encoding#types). Uses the low bit
/// to indicate signedness (1 = negative, 0 = non-negative).
#[inline(always)]
pub fn zigzag32(i: i32) -> u32 {
    // `i << 1` wraps the sign bit away; `i >> 31` is an arithmetic shift giving
    // an all-ones mask for negative inputs, which the xor uses to flip the bits.
    ((i << 1) ^ (i >> 31)) as u32
}
|
||||
|
||||
/// Zigzag-decodes to a signed integer.
/// See `zigzag`.
#[inline(always)]
pub fn unzigzag32(i: u32) -> i32 {
    // The low bit selects sign: when set, negate via an all-ones xor mask.
    let sign_mask = ((i & 1) as i32).wrapping_neg();
    ((i >> 1) as i32) ^ sign_mask
}
|
||||
|
||||
/// Decodes a base-128 varint (at most 32 bits of payload) from `data`
/// starting at byte index `i`.
///
/// On success, returns the decoded value and the index just past the varint.
/// Returns `Err(())` on buffer underrun or if the value doesn't fit in 32 bits.
#[inline(always)]
pub fn decode_varint32(data: &[u8], i: usize) -> Result<(u32, usize), ()> {
    // Hand-unrolled fast paths for 1-, 2-, and 3-byte encodings; this aids
    // branch prediction. Anything longer (or truncated) goes to the
    // out-of-line robust loop.
    if let Some(&b0) = data.get(i) {
        if b0 & 0x80 == 0 {
            return Ok((b0 as u32, i + 1));
        }
        if let Some(&b1) = data.get(i + 1) {
            if b1 & 0x80 == 0 {
                return Ok((((b0 & 0x7f) as u32) | ((b1 as u32) << 7), i + 2));
            }
            if let Some(&b2) = data.get(i + 2) {
                if b2 & 0x80 == 0 {
                    let v = ((b0 & 0x7f) as u32)
                            | (((b1 & 0x7f) as u32) << 7)
                            | ((b2 as u32) << 14);
                    return Ok((v, i + 3));
                }
            }
        }
    }
    decode_varint32_slow(data, i)
}

/// Robust (but slower) varint decoding loop; see `decode_varint32`.
#[cold]
fn decode_varint32_slow(data: &[u8], mut i: usize) -> Result<(u32, usize), ()> {
    let mut out: u32 = 0;
    let mut shift = 0;
    loop {
        let &b = data.get(i).ok_or(())?;
        // At shift 28 only the low 4 bits fit in a u32; anything more
        // (including a further continuation bit) is an overflow.
        if shift == 28 && (b & 0xf0) != 0 {
            return Err(());
        }
        out |= ((b & 0x7f) as u32) << shift;
        i += 1;
        if b & 0x80 == 0 {
            return Ok((out, i));
        }
        shift += 7;
    }
}
|
||||
|
||||
/// Appends the base-128 varint encoding of `i` (1 to 5 bytes) to `data`.
pub fn append_varint32(i: u32, data: &mut Vec<u8>) {
    // Emit 7 bits per byte, least-significant group first, setting the
    // continuation bit (0x80) on every byte except the last.
    let mut v = i;
    while v >= 0x80 {
        data.push((v as u8 & 0x7f) | 0x80);
        v >>= 7;
    }
    data.push(v as u8);
}
|
||||
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_zigzag() {
        // (decoded, encoded) pairs covering zero, small values, and both
        // i32 extremes; encode and decode must round-trip each way.
        let cases: [(i32, u32); 6] = [
            (0, 0),
            (-1, 1),
            (1, 2),
            (-2, 3),
            (2147483647, 4294967294),
            (-2147483648, 4294967295),
        ];
        for &(decoded, encoded) in &cases {
            assert_eq!(zigzag32(decoded), encoded);
            assert_eq!(unzigzag32(encoded), decoded);
        }
    }

    #[test]
    fn test_correct_varints() {
        let cases: [(u32, &[u8]); 5] = [
            (1, b"\x01"),
            (257, b"\x81\x02"),
            (49409, b"\x81\x82\x03"),
            (8438017, b"\x81\x82\x83\x04"),
            (1350615297, b"\x81\x82\x83\x84\x05"),
        ];
        for &(decoded, encoded) in &cases {
            // Encoding into an empty buffer...
            let mut out = Vec::new();
            append_varint32(decoded, &mut out);
            assert_eq!(&out[..], encoded);

            // ...and appending to a non-empty one.
            let mut expected = vec![b'x'];
            expected.extend_from_slice(encoded);
            out.clear();
            out.push(b'x');
            append_varint32(decoded, &mut out);
            assert_eq!(out, expected);

            // Decoding from the beginning of a buffer...
            assert_eq!(decode_varint32(encoded, 0).unwrap(),
                       (decoded, encoded.len()));

            // ...and from the middle of one.
            expected.push(b'x');
            assert_eq!(decode_varint32(&expected, 1).unwrap(),
                       (decoded, encoded.len() + 1));
        }
    }

    #[test]
    fn test_bad_varints() {
        let cases: [&[u8]; 7] = [
            // buffer underruns
            b"",
            b"\x80",
            b"\x80\x80",
            b"\x80\x80\x80",
            b"\x80\x80\x80\x80",

            // 32-bit overflows
            b"\x80\x80\x80\x80\x80",
            b"\x80\x80\x80\x80\x80\x00",
        ];
        for (i, encoded) in cases.iter().enumerate() {
            assert!(decode_varint32(encoded, 0).is_err(), "while on test {}", i);
        }
    }
}
|
||||
182
server/db/compare.rs
Normal file
182
server/db/compare.rs
Normal file
@@ -0,0 +1,182 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2020 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use failure::Error;
|
||||
use prettydiff::diff_slice;
|
||||
use rusqlite::params;
|
||||
use std::fmt::Write;
|
||||
|
||||
/// One row of SQLite's `pragma table_info`, describing a table column.
#[derive(Debug, PartialEq)]
struct Column {
    cid: u32,
    name: String,
    type_: String,
    notnull: bool,
    dflt_value: rusqlite::types::Value,
    pk: u32,
}

// Display delegates to Debug so `prettydiff::diff_slice` (which requires
// Display) prints the full struct.
impl std::fmt::Display for Column {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self)
    }
}
|
||||
|
||||
/// One row of SQLite's `pragma index_list`, describing an index on a table.
#[derive(Debug, Eq, PartialEq)]
struct Index {
    seq: u32,
    name: String,
    unique: bool,
    origin: String,
    partial: bool,
}

// Display delegates to Debug so `prettydiff::diff_slice` (which requires
// Display) prints the full struct.
impl std::fmt::Display for Index {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self)
    }
}
|
||||
|
||||
/// One row of SQLite's `pragma index_info`, describing a column within an index.
#[derive(Debug, Eq, PartialEq)]
struct IndexColumn {
    seqno: u32,
    cid: u32,
    name: String,
}

// Display delegates to Debug so `prettydiff::diff_slice` (which requires
// Display) prints the full struct.
impl std::fmt::Display for IndexColumn {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self)
    }
}
|
||||
|
||||
/// Returns a sorted vec of table names in the given connection.
/// SQLite's internal `sqlite_*` tables are excluded.
fn get_tables(c: &rusqlite::Connection) -> Result<Vec<String>, rusqlite::Error> {
    c.prepare(r#"
        select
          name
        from
          sqlite_master
        where
          type = 'table' and
          name not like 'sqlite_%'
        order by name
    "#)?
        .query_map(params![], |r| r.get(0))?
        .collect()
}
|
||||
|
||||
/// Returns a vec of columns in the given table, via `pragma table_info`.
fn get_table_columns(c: &rusqlite::Connection, table: &str)
                     -> Result<Vec<Column>, rusqlite::Error> {
    // Note that placeholders aren't allowed for these pragmas. Just assume sane table names
    // (no escaping). "select * from pragma_..." syntax would be nicer but requires SQLite
    // 3.16.0 (2017-01-02). Ubuntu 16.04 Xenial (still used on Travis CI) has an older SQLite.
    c.prepare(&format!("pragma table_info(\"{}\")", table))?
        .query_map(params![], |r| Ok(Column {
            cid: r.get(0)?,
            name: r.get(1)?,
            type_: r.get(2)?,
            notnull: r.get(3)?,
            dflt_value: r.get(4)?,
            pk: r.get(5)?,
        }))?
        .collect()
}
|
||||
|
||||
/// Returns a vec of indices associated with the given table, via `pragma index_list`.
fn get_indices(c: &rusqlite::Connection, table: &str) -> Result<Vec<Index>, rusqlite::Error> {
    // See note at get_tables_columns about placeholders.
    c.prepare(&format!("pragma index_list(\"{}\")", table))?
        .query_map(params![], |r| Ok(Index {
            seq: r.get(0)?,
            name: r.get(1)?,
            unique: r.get(2)?,
            origin: r.get(3)?,
            partial: r.get(4)?,
        }))?
        .collect()
}
|
||||
|
||||
/// Returns a vec of all the columns in the given index, via `pragma index_info`.
fn get_index_columns(c: &rusqlite::Connection, index: &str)
                     -> Result<Vec<IndexColumn>, rusqlite::Error> {
    // See note at get_tables_columns about placeholders.
    c.prepare(&format!("pragma index_info(\"{}\")", index))?
        .query_map(params![], |r| Ok(IndexColumn {
            seqno: r.get(0)?,
            cid: r.get(1)?,
            name: r.get(2)?,
        }))?
        .collect()
}
|
||||
|
||||
/// Compares the schemas (tables, columns, indices, index columns) of two open
/// databases, labeled `n1` and `n2` for the human-readable output.
///
/// Returns `Ok(None)` when the schemas match, `Ok(Some(description))` with a
/// diff-formatted description of every mismatch otherwise.
pub fn get_diffs(n1: &str, c1: &rusqlite::Connection, n2: &str, c2: &rusqlite::Connection)
                 -> Result<Option<String>, Error> {
    let mut diffs = String::new();

    // Compare table list.
    let tables1 = get_tables(c1)?;
    let tables2 = get_tables(c2)?;
    if tables1 != tables2 {
        write!(&mut diffs, "table list mismatch, {} vs {}:\n{}",
               n1, n2, diff_slice(&tables1, &tables2))?;
    }

    // Compare columns and indices for each table.
    // Note: only tables present in `c1` are examined here; a table present
    // only in `c2` is reported by the table-list comparison above.
    for t in &tables1 {
        let columns1 = get_table_columns(c1, &t)?;
        let columns2 = get_table_columns(c2, &t)?;
        if columns1 != columns2 {
            write!(&mut diffs, "table {:?} column, {} vs {}:\n{}",
                   t, n1, n2, diff_slice(&columns1, &columns2))?;
        }

        // Sort by name: `pragma index_list` ordering isn't significant.
        let mut indices1 = get_indices(c1, &t)?;
        let mut indices2 = get_indices(c2, &t)?;
        indices1.sort_by(|a, b| a.name.cmp(&b.name));
        indices2.sort_by(|a, b| a.name.cmp(&b.name));
        if indices1 != indices2 {
            write!(&mut diffs, "table {:?} indices, {} vs {}:\n{}",
                   t, n1, n2, diff_slice(&indices1, &indices2))?;
        }

        for i in &indices1 {
            let ic1 = get_index_columns(c1, &i.name)?;
            let ic2 = get_index_columns(c2, &i.name)?;
            if ic1 != ic2 {
                write!(&mut diffs, "table {:?} index {:?} columns {} vs {}:\n{}",
                       t, i, n1, n2, diff_slice(&ic1, &ic2))?;
            }
        }
    }

    Ok(if diffs.is_empty() { None } else { Some(diffs) })
}
|
||||
2558
server/db/db.rs
Normal file
2558
server/db/db.rs
Normal file
File diff suppressed because it is too large
Load Diff
358
server/db/dir.rs
Normal file
358
server/db/dir.rs
Normal file
@@ -0,0 +1,358 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2018 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Sample file directory management.
|
||||
//!
|
||||
//! This includes opening files for serving, rotating away old files, and saving new files.
|
||||
|
||||
use crate::coding;
|
||||
use crate::db::CompositeId;
|
||||
use crate::schema;
|
||||
use cstr::cstr;
|
||||
use failure::{Error, Fail, bail, format_err};
|
||||
use log::warn;
|
||||
use protobuf::Message;
|
||||
use nix::{NixPath, fcntl::{FlockArg, OFlag}, sys::stat::Mode};
|
||||
use nix::sys::statvfs::Statvfs;
|
||||
use std::ffi::CStr;
|
||||
use std::fs;
|
||||
use std::io::{Read, Write};
|
||||
use std::os::unix::io::{AsRawFd, RawFd};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// The fixed length of a directory's `meta` file.
|
||||
///
|
||||
/// See DirMeta comments within proto/schema.proto for more explanation.
|
||||
const FIXED_DIR_META_LEN: usize = 512;
|
||||
|
||||
/// A sample file directory. Typically one per physical disk drive.
|
||||
///
|
||||
/// If the directory is used for writing, the `start_syncer` function should be called to start
|
||||
/// a background thread. This thread manages deleting files and writing new files. It synces the
|
||||
/// directory and commits these operations to the database in the correct order to maintain the
|
||||
/// invariants described in `design/schema.md`.
|
||||
#[derive(Debug)]
|
||||
pub struct SampleFileDir {
|
||||
/// The open file descriptor for the directory. The worker uses it to create files and sync the
|
||||
/// directory. Other threads use it to open sample files for reading during video serving.
|
||||
pub(crate) fd: Fd,
|
||||
}
|
||||
|
||||
pub(crate) struct CompositeIdPath([u8; 17]);
|
||||
|
||||
impl CompositeIdPath {
|
||||
pub(crate) fn from(id: CompositeId) -> Self {
|
||||
let mut buf = [0u8; 17];
|
||||
write!(&mut buf[..16], "{:016x}", id.0).expect("can't format id to pathname buf");
|
||||
CompositeIdPath(buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl NixPath for CompositeIdPath {
|
||||
fn is_empty(&self) -> bool { false }
|
||||
fn len(&self) -> usize { 16 }
|
||||
|
||||
fn with_nix_path<T, F>(&self, f: F) -> Result<T, nix::Error>
|
||||
where F: FnOnce(&CStr) -> T {
|
||||
let p = CStr::from_bytes_with_nul(&self.0[..]).expect("no interior nuls");
|
||||
Ok(f(p))
|
||||
}
|
||||
}
|
||||
|
||||
/// A file descriptor associated with a directory (not necessarily the sample file dir).
|
||||
#[derive(Debug)]
|
||||
pub struct Fd(std::os::unix::io::RawFd);
|
||||
|
||||
impl std::os::unix::io::AsRawFd for Fd {
|
||||
fn as_raw_fd(&self) -> std::os::unix::io::RawFd { self.0 }
|
||||
}
|
||||
|
||||
impl Drop for Fd {
|
||||
fn drop(&mut self) {
|
||||
if let Err(e) = nix::unistd::close(self.0) {
|
||||
warn!("Unable to close sample file dir: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Fd {
|
||||
/// Opens the given path as a directory.
|
||||
pub fn open<P: ?Sized + NixPath>(path: &P, mkdir: bool) -> Result<Fd, nix::Error> {
|
||||
if mkdir {
|
||||
match nix::unistd::mkdir(path, nix::sys::stat::Mode::S_IRWXU) {
|
||||
Ok(()) | Err(nix::Error::Sys(nix::errno::Errno::EEXIST)) => {},
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
let fd = nix::fcntl::open(path, OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?;
|
||||
Ok(Fd(fd))
|
||||
}
|
||||
|
||||
pub(crate) fn sync(&self) -> Result<(), nix::Error> {
|
||||
nix::unistd::fsync(self.0)
|
||||
}
|
||||
|
||||
/// Locks the directory with the specified `flock` operation.
|
||||
pub fn lock(&self, arg: FlockArg) -> Result<(), nix::Error> {
|
||||
nix::fcntl::flock(self.0, arg)
|
||||
}
|
||||
|
||||
pub fn statfs(&self) -> Result<nix::sys::statvfs::Statvfs, nix::Error> {
|
||||
nix::sys::statvfs::fstatvfs(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads `dir`'s metadata. If none is found, returns an empty proto.
|
||||
pub(crate) fn read_meta(dir: &Fd) -> Result<schema::DirMeta, Error> {
|
||||
let mut meta = schema::DirMeta::default();
|
||||
let mut f = match crate::fs::openat(dir.0, cstr!("meta"), OFlag::O_RDONLY, Mode::empty()) {
|
||||
Err(e) => {
|
||||
if e == nix::Error::Sys(nix::errno::Errno::ENOENT) {
|
||||
return Ok(meta);
|
||||
}
|
||||
return Err(e.into());
|
||||
},
|
||||
Ok(f) => f,
|
||||
};
|
||||
let mut data = Vec::new();
|
||||
f.read_to_end(&mut data)?;
|
||||
let (len, pos) = coding::decode_varint32(&data, 0)
|
||||
.map_err(|_| format_err!("Unable to decode varint length in meta file"))?;
|
||||
if data.len() != FIXED_DIR_META_LEN || len as usize + pos > FIXED_DIR_META_LEN {
|
||||
bail!("Expected a {}-byte file with a varint length of a DirMeta message; got \
|
||||
a {}-byte file with length {}", FIXED_DIR_META_LEN, data.len(), len);
|
||||
}
|
||||
let data = &data[pos..pos+len as usize];
|
||||
let mut s = protobuf::CodedInputStream::from_bytes(&data);
|
||||
meta.merge_from(&mut s).map_err(|e| e.context("Unable to parse metadata proto: {}"))?;
|
||||
Ok(meta)
|
||||
}
|
||||
|
||||
/// Write `dir`'s metadata, clobbering existing data.
|
||||
pub(crate) fn write_meta(dirfd: RawFd, meta: &schema::DirMeta) -> Result<(), Error> {
|
||||
let mut data = meta.write_length_delimited_to_bytes().expect("proto3->vec is infallible");
|
||||
if data.len() > FIXED_DIR_META_LEN {
|
||||
bail!("Length-delimited DirMeta message requires {} bytes, over limit of {}",
|
||||
data.len(), FIXED_DIR_META_LEN);
|
||||
}
|
||||
data.resize(FIXED_DIR_META_LEN, 0); // pad to required length.
|
||||
let mut f = crate::fs::openat(dirfd, cstr!("meta"), OFlag::O_CREAT | OFlag::O_WRONLY,
|
||||
Mode::S_IRUSR | Mode::S_IWUSR)?;
|
||||
let stat = f.metadata()?;
|
||||
if stat.len() == 0 {
|
||||
// Need to sync not only the data but also the file metadata and dirent.
|
||||
f.write_all(&data)?;
|
||||
f.sync_all()?;
|
||||
nix::unistd::fsync(dirfd)?;
|
||||
} else if stat.len() == FIXED_DIR_META_LEN as u64 {
|
||||
// Just syncing the data will suffice; existing metadata and dirent are fine.
|
||||
f.write_all(&data)?;
|
||||
f.sync_data()?;
|
||||
} else {
|
||||
bail!("Existing meta file is {}-byte; expected {}", stat.len(), FIXED_DIR_META_LEN);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl SampleFileDir {
|
||||
/// Opens the directory using the given metadata.
|
||||
///
|
||||
/// `db_meta.in_progress_open` should be filled if the directory should be opened in read/write
|
||||
/// mode; absent in read-only mode.
|
||||
pub fn open(path: &str, db_meta: &schema::DirMeta)
|
||||
-> Result<Arc<SampleFileDir>, Error> {
|
||||
let read_write = db_meta.in_progress_open.is_some();
|
||||
let s = SampleFileDir::open_self(path, false)?;
|
||||
s.fd.lock(if read_write {
|
||||
FlockArg::LockExclusiveNonblock
|
||||
} else {
|
||||
FlockArg::LockSharedNonblock
|
||||
})?;
|
||||
let dir_meta = read_meta(&s.fd)?;
|
||||
if !SampleFileDir::consistent(db_meta, &dir_meta) {
|
||||
let serialized =
|
||||
db_meta.write_length_delimited_to_bytes().expect("proto3->vec is infallible");
|
||||
bail!("metadata mismatch.\ndb: {:#?}\ndir: {:#?}\nserialized db: {:#?}",
|
||||
db_meta, &dir_meta, &serialized);
|
||||
}
|
||||
if db_meta.in_progress_open.is_some() {
|
||||
s.write_meta(db_meta)?;
|
||||
}
|
||||
Ok(s)
|
||||
}
|
||||
|
||||
/// Returns true if the existing directory and database metadata are consistent; the directory
|
||||
/// is then openable.
|
||||
pub(crate) fn consistent(db_meta: &schema::DirMeta, dir_meta: &schema::DirMeta) -> bool {
|
||||
if dir_meta.db_uuid != db_meta.db_uuid { return false; }
|
||||
if dir_meta.dir_uuid != db_meta.dir_uuid { return false; }
|
||||
|
||||
if db_meta.last_complete_open.is_some() &&
|
||||
(db_meta.last_complete_open != dir_meta.last_complete_open &&
|
||||
db_meta.last_complete_open != dir_meta.in_progress_open) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if db_meta.last_complete_open.is_none() && dir_meta.last_complete_open.is_some() {
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn create(path: &str, db_meta: &schema::DirMeta)
|
||||
-> Result<Arc<SampleFileDir>, Error> {
|
||||
let s = SampleFileDir::open_self(path, true)?;
|
||||
s.fd.lock(FlockArg::LockExclusiveNonblock)?;
|
||||
let old_meta = read_meta(&s.fd)?;
|
||||
|
||||
// Verify metadata. We only care that it hasn't been completely opened.
|
||||
// Partial opening by this or another database is fine; we won't overwrite anything.
|
||||
if old_meta.last_complete_open.is_some() {
|
||||
bail!("Can't create dir at path {}: is already in use:\n{:?}", path, old_meta);
|
||||
}
|
||||
if !s.is_empty()? {
|
||||
bail!("Can't create dir at path {} with existing files", path);
|
||||
}
|
||||
s.write_meta(db_meta)?;
|
||||
Ok(s)
|
||||
}
|
||||
|
||||
pub(crate) fn opendir(&self) -> Result<nix::dir::Dir, nix::Error> {
|
||||
nix::dir::Dir::openat(self.fd.as_raw_fd(), ".", OFlag::O_DIRECTORY | OFlag::O_RDONLY,
|
||||
Mode::empty())
|
||||
}
|
||||
|
||||
/// Determines if the directory is empty, aside form metadata.
|
||||
pub(crate) fn is_empty(&self) -> Result<bool, Error> {
|
||||
let mut dir = self.opendir()?;
|
||||
for e in dir.iter() {
|
||||
let e = e?;
|
||||
match e.file_name().to_bytes() {
|
||||
b"." | b".." => continue,
|
||||
b"meta" => continue, // existing metadata is fine.
|
||||
_ => return Ok(false),
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn open_self(path: &str, create: bool) -> Result<Arc<SampleFileDir>, Error> {
|
||||
let fd = Fd::open(path, create)
|
||||
.map_err(|e| format_err!("unable to open sample file dir {}: {}", path, e))?;
|
||||
Ok(Arc::new(SampleFileDir {
|
||||
fd,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Opens the given sample file for reading.
|
||||
pub fn open_file(&self, composite_id: CompositeId) -> Result<fs::File, nix::Error> {
|
||||
let p = CompositeIdPath::from(composite_id);
|
||||
crate::fs::openat(self.fd.0, &p, OFlag::O_RDONLY, Mode::empty())
|
||||
}
|
||||
|
||||
pub fn create_file(&self, composite_id: CompositeId) -> Result<fs::File, nix::Error> {
|
||||
let p = CompositeIdPath::from(composite_id);
|
||||
crate::fs::openat(self.fd.0, &p, OFlag::O_WRONLY | OFlag::O_EXCL | OFlag::O_CREAT,
|
||||
Mode::S_IRUSR | Mode::S_IWUSR)
|
||||
}
|
||||
|
||||
pub(crate) fn write_meta(&self, meta: &schema::DirMeta) -> Result<(), Error> {
|
||||
write_meta(self.fd.0, meta)
|
||||
}
|
||||
|
||||
pub fn statfs(&self) -> Result<Statvfs, nix::Error> { self.fd.statfs() }
|
||||
|
||||
/// Unlinks the given sample file within this directory.
|
||||
pub(crate) fn unlink_file(&self, id: CompositeId) -> Result<(), nix::Error> {
|
||||
let p = CompositeIdPath::from(id);
|
||||
nix::unistd::unlinkat(Some(self.fd.0), &p, nix::unistd::UnlinkatFlags::NoRemoveDir)
|
||||
}
|
||||
|
||||
/// Syncs the directory itself.
|
||||
pub(crate) fn sync(&self) -> Result<(), nix::Error> {
|
||||
self.fd.sync()
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses a composite id filename.
|
||||
///
|
||||
/// These are exactly 16 bytes, lowercase hex.
|
||||
pub(crate) fn parse_id(id: &[u8]) -> Result<CompositeId, ()> {
|
||||
if id.len() != 16 {
|
||||
return Err(());
|
||||
}
|
||||
let mut v: u64 = 0;
|
||||
for i in 0..16 {
|
||||
v = (v << 4) | match id[i] {
|
||||
b @ b'0'..=b'9' => b - b'0',
|
||||
b @ b'a'..=b'f' => b - b'a' + 10,
|
||||
_ => return Err(()),
|
||||
} as u64;
|
||||
}
|
||||
Ok(CompositeId(v as i64))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn parse_id() {
|
||||
use super::parse_id;
|
||||
assert_eq!(parse_id(b"0000000000000000").unwrap().0, 0);
|
||||
assert_eq!(parse_id(b"0000000100000002").unwrap().0, 0x0000000100000002);
|
||||
parse_id(b"").unwrap_err();
|
||||
parse_id(b"meta").unwrap_err();
|
||||
parse_id(b"0").unwrap_err();
|
||||
parse_id(b"000000010000000x").unwrap_err();
|
||||
}
|
||||
|
||||
/// Ensures that a DirMeta with all fields filled fits within the maximum size.
|
||||
#[test]
|
||||
fn max_len_meta() {
|
||||
let mut meta = schema::DirMeta::new();
|
||||
let fake_uuid = &[0u8; 16][..];
|
||||
meta.db_uuid.extend_from_slice(fake_uuid);
|
||||
meta.dir_uuid.extend_from_slice(fake_uuid);
|
||||
{
|
||||
let o = meta.last_complete_open.set_default();
|
||||
o.id = u32::max_value();
|
||||
o.uuid.extend_from_slice(fake_uuid);
|
||||
}
|
||||
{
|
||||
let o = meta.in_progress_open.set_default();
|
||||
o.id = u32::max_value();
|
||||
o.uuid.extend_from_slice(fake_uuid);
|
||||
}
|
||||
let data = meta.write_length_delimited_to_bytes().expect("proto3->vec is infallible");
|
||||
assert!(data.len() <= FIXED_DIR_META_LEN, "{} vs {}", data.len(), FIXED_DIR_META_LEN);
|
||||
}
|
||||
}
|
||||
40
server/db/fs.rs
Normal file
40
server/db/fs.rs
Normal file
@@ -0,0 +1,40 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2019 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::os::unix::io::{FromRawFd, RawFd};
|
||||
use nix::NixPath;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
pub fn openat<P: ?Sized + NixPath>(dirfd: RawFd, path: &P, oflag: OFlag, mode: Mode)
|
||||
-> Result<std::fs::File, nix::Error> {
|
||||
let fd = nix::fcntl::openat(dirfd, path, oflag, mode)?;
|
||||
Ok(unsafe { std::fs::File::from_raw_fd(fd) })
|
||||
}
|
||||
56
server/db/lib.rs
Normal file
56
server/db/lib.rs
Normal file
@@ -0,0 +1,56 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2018-2020 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#![cfg_attr(all(feature="nightly", test), feature(test))]
|
||||
|
||||
pub mod auth;
|
||||
pub mod check;
|
||||
mod coding;
|
||||
mod compare;
|
||||
pub mod db;
|
||||
pub mod dir;
|
||||
mod fs;
|
||||
mod proto {
|
||||
include!(concat!(env!("OUT_DIR"), "/mod.rs"));
|
||||
}
|
||||
mod raw;
|
||||
pub mod recording;
|
||||
use proto::schema;
|
||||
pub mod signal;
|
||||
pub mod upgrade;
|
||||
pub mod writer;
|
||||
|
||||
// This is only for #[cfg(test)], but it's also used by the dependent crate, and it appears that
|
||||
// #[cfg(test)] is not passed on to dependencies.
|
||||
pub mod testutil;
|
||||
|
||||
pub use crate::db::*;
|
||||
pub use crate::schema::Permissions;
|
||||
pub use crate::signal::Signal;
|
||||
101
server/db/proto/schema.proto
Normal file
101
server/db/proto/schema.proto
Normal file
@@ -0,0 +1,101 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2018 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
// Metadata stored in sample file dirs as "<dir>/meta". This is checked
|
||||
// against the metadata stored within the database to detect inconsistencies
|
||||
// between the directory and database, such as those described in
|
||||
// design/schema.md.
|
||||
//
|
||||
// As of schema version 4, the overall file format is as follows: a
|
||||
// varint-encoded length, followed by a serialized DirMeta message, followed
|
||||
// by NUL bytes padding to a total length of 512 bytes. This message never
|
||||
// exceeds that length.
|
||||
//
|
||||
// The goal of this format is to allow atomically rewriting a meta file
|
||||
// in-place. I hope that on modern OSs and hardware, a single-sector
|
||||
// rewrite is atomic, though POSIX frustratingly doesn't seem to guarantee
|
||||
// this. There's some discussion of that here:
|
||||
// <https://stackoverflow.com/a/2068608/23584>. At worst, there's a short
|
||||
// window during which the meta file can be corrupted. As the file's purpose
|
||||
// is to check for inconsistencies, it can be reconstructed if you assume no
|
||||
// inconsistency exists.
|
||||
//
|
||||
// Schema version 3 wrote a serialized DirMeta message with no length or
|
||||
// padding, and renamed new meta files over the top of old. This scheme
|
||||
// requires extra space while opening the directory. If the filesystem is
|
||||
// completely full, it requires freeing space manually, an undocumented and
|
||||
// error-prone administrator procedure.
|
||||
message DirMeta {
|
||||
// A uuid associated with the database, in binary form. dir_uuid is strictly
|
||||
// more powerful, but it improves diagnostics to know if the directory
|
||||
// belongs to the expected database at all or not.
|
||||
bytes db_uuid = 1;
|
||||
|
||||
// A uuid associated with the directory itself.
|
||||
bytes dir_uuid = 2;
|
||||
|
||||
// Corresponds to an entry in the `open` database table.
|
||||
message Open {
|
||||
uint32 id = 1;
|
||||
bytes uuid = 2;
|
||||
}
|
||||
|
||||
// The last open that was known to be recorded in the database as completed.
|
||||
// Absent if this has never happened. Note this can backtrack in exactly one
|
||||
// scenario: when deleting the directory, after all associated files have
|
||||
// been deleted, last_complete_open can be moved to in_progress_open.
|
||||
Open last_complete_open = 3;
|
||||
|
||||
// The last run which is in progress, if different from last_complete_open.
|
||||
// This may or may not have been recorded in the database, but it's
|
||||
// guaranteed that no data has yet been written by this open.
|
||||
Open in_progress_open = 4;
|
||||
}
|
||||
|
||||
// Permissions to perform actions, currently all simple bools.
|
||||
//
|
||||
// These indicate actions which may be unnecessary in some contexts. Some
|
||||
// basic access - like listing the cameras - is currently always allowed.
|
||||
// See design/api.md for a description of what requires these permissions.
|
||||
//
|
||||
// These are used in a few contexts:
|
||||
// * a session - affects what can be done when using that session to
|
||||
// authenticate.
|
||||
// * a user - when a new session is created, it inherits these permissions.
|
||||
// * on the commandline - to specify what permissions are available for
|
||||
// unauthenticated access.
|
||||
message Permissions {
|
||||
bool view_video = 1;
|
||||
bool read_camera_configs = 2;
|
||||
|
||||
bool update_signals = 3;
|
||||
}
|
||||
398
server/db/raw.rs
Normal file
398
server/db/raw.rs
Normal file
@@ -0,0 +1,398 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2018-2020 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Raw database access: SQLite statements which do not touch any cached state.
|
||||
|
||||
use crate::db::{self, CompositeId, FromSqlUuid};
|
||||
use failure::{Error, ResultExt, bail};
|
||||
use fnv::FnvHashSet;
|
||||
use crate::recording;
|
||||
use rusqlite::{named_params, params};
|
||||
use std::ops::Range;
|
||||
use uuid::Uuid;
|
||||
|
||||
// Note: the magic number "27000000" below is recording::MAX_RECORDING_DURATION.
|
||||
const LIST_RECORDINGS_BY_TIME_SQL: &'static str = r#"
|
||||
select
|
||||
recording.composite_id,
|
||||
recording.run_offset,
|
||||
recording.flags,
|
||||
recording.start_time_90k,
|
||||
recording.wall_duration_90k,
|
||||
recording.media_duration_delta_90k,
|
||||
recording.sample_file_bytes,
|
||||
recording.video_samples,
|
||||
recording.video_sync_samples,
|
||||
recording.video_sample_entry_id,
|
||||
recording.open_id
|
||||
from
|
||||
recording
|
||||
where
|
||||
stream_id = :stream_id and
|
||||
recording.start_time_90k > :start_time_90k - 27000000 and
|
||||
recording.start_time_90k < :end_time_90k and
|
||||
recording.start_time_90k + recording.wall_duration_90k > :start_time_90k
|
||||
order by
|
||||
recording.start_time_90k
|
||||
"#;
|
||||
|
||||
const LIST_RECORDINGS_BY_ID_SQL: &'static str = r#"
|
||||
select
|
||||
recording.composite_id,
|
||||
recording.run_offset,
|
||||
recording.flags,
|
||||
recording.start_time_90k,
|
||||
recording.wall_duration_90k,
|
||||
recording.media_duration_delta_90k,
|
||||
recording.sample_file_bytes,
|
||||
recording.video_samples,
|
||||
recording.video_sync_samples,
|
||||
recording.video_sample_entry_id,
|
||||
recording.open_id,
|
||||
recording.prev_media_duration_90k,
|
||||
recording.prev_runs
|
||||
from
|
||||
recording
|
||||
where
|
||||
:start <= composite_id and
|
||||
composite_id < :end
|
||||
order by
|
||||
recording.composite_id
|
||||
"#;
|
||||
|
||||
const STREAM_MIN_START_SQL: &'static str = r#"
|
||||
select
|
||||
start_time_90k
|
||||
from
|
||||
recording
|
||||
where
|
||||
stream_id = :stream_id
|
||||
order by start_time_90k limit 1
|
||||
"#;
|
||||
|
||||
const STREAM_MAX_START_SQL: &'static str = r#"
|
||||
select
|
||||
start_time_90k,
|
||||
wall_duration_90k
|
||||
from
|
||||
recording
|
||||
where
|
||||
stream_id = :stream_id
|
||||
order by start_time_90k desc;
|
||||
"#;
|
||||
|
||||
const LIST_OLDEST_RECORDINGS_SQL: &'static str = r#"
|
||||
select
|
||||
composite_id,
|
||||
start_time_90k,
|
||||
wall_duration_90k,
|
||||
sample_file_bytes
|
||||
from
|
||||
recording
|
||||
where
|
||||
:start <= composite_id and
|
||||
composite_id < :end
|
||||
order by
|
||||
composite_id
|
||||
"#;
|
||||
|
||||
/// Lists the specified recordings in ascending order by start time, passing them to a supplied
|
||||
/// function. Given that the function is called with the database lock held, it should be quick.
|
||||
pub(crate) fn list_recordings_by_time(
|
||||
conn: &rusqlite::Connection, stream_id: i32, desired_time: Range<recording::Time>,
|
||||
f: &mut dyn FnMut(db::ListRecordingsRow) -> Result<(), Error>) -> Result<(), Error> {
|
||||
let mut stmt = conn.prepare_cached(LIST_RECORDINGS_BY_TIME_SQL)?;
|
||||
let rows = stmt.query_named(named_params!{
|
||||
":stream_id": stream_id,
|
||||
":start_time_90k": desired_time.start.0,
|
||||
":end_time_90k": desired_time.end.0,
|
||||
})?;
|
||||
list_recordings_inner(rows, false, f)
|
||||
}
|
||||
|
||||
/// Lists the specified recordings in ascending order by id.
|
||||
pub(crate) fn list_recordings_by_id(
|
||||
conn: &rusqlite::Connection, stream_id: i32, desired_ids: Range<i32>,
|
||||
f: &mut dyn FnMut(db::ListRecordingsRow) -> Result<(), Error>) -> Result<(), Error> {
|
||||
let mut stmt = conn.prepare_cached(LIST_RECORDINGS_BY_ID_SQL)?;
|
||||
let rows = stmt.query_named(named_params!{
|
||||
":start": CompositeId::new(stream_id, desired_ids.start).0,
|
||||
":end": CompositeId::new(stream_id, desired_ids.end).0,
|
||||
})?;
|
||||
list_recordings_inner(rows, true, f)
|
||||
}
|
||||
|
||||
fn list_recordings_inner(mut rows: rusqlite::Rows, include_prev: bool,
|
||||
f: &mut dyn FnMut(db::ListRecordingsRow) -> Result<(), Error>)
|
||||
-> Result<(), Error> {
|
||||
while let Some(row) = rows.next()? {
|
||||
let wall_duration_90k = row.get(4)?;
|
||||
let media_duration_delta_90k: i32 = row.get(5)?;
|
||||
f(db::ListRecordingsRow {
|
||||
id: CompositeId(row.get(0)?),
|
||||
run_offset: row.get(1)?,
|
||||
flags: row.get(2)?,
|
||||
start: recording::Time(row.get(3)?),
|
||||
wall_duration_90k,
|
||||
media_duration_90k: wall_duration_90k + media_duration_delta_90k,
|
||||
sample_file_bytes: row.get(6)?,
|
||||
video_samples: row.get(7)?,
|
||||
video_sync_samples: row.get(8)?,
|
||||
video_sample_entry_id: row.get(9)?,
|
||||
open_id: row.get(10)?,
|
||||
prev_media_duration_and_runs: match include_prev {
|
||||
false => None,
|
||||
true => Some((recording::Duration(row.get(11)?), row.get(12)?)),
|
||||
},
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn get_db_uuid(conn: &rusqlite::Connection) -> Result<Uuid, Error> {
|
||||
Ok(conn.query_row("select uuid from meta", params![], |row| -> rusqlite::Result<Uuid> {
|
||||
let uuid: FromSqlUuid = row.get(0)?;
|
||||
Ok(uuid.0)
|
||||
})?)
|
||||
}
|
||||
|
||||
/// Inserts the specified recording (for use from `try_flush` only).
///
/// Writes one row to each of the `recording`, `recording_integrity`, and `recording_playback`
/// tables inside the caller's open transaction `tx`; the caller is responsible for committing.
/// `o` supplies the current database open id; `id` is the already-allocated composite id.
pub(crate) fn insert_recording(tx: &rusqlite::Transaction, o: &db::Open, id: CompositeId,
                               r: &db::RecordingToInsert) -> Result<(), Error> {
    let mut stmt = tx.prepare_cached(r#"
        insert into recording (composite_id, stream_id, open_id, run_offset, flags,
                               sample_file_bytes, start_time_90k, prev_media_duration_90k,
                               prev_runs, wall_duration_90k, media_duration_delta_90k,
                               video_samples, video_sync_samples, video_sample_entry_id)
                       values (:composite_id, :stream_id, :open_id, :run_offset, :flags,
                               :sample_file_bytes, :start_time_90k, :prev_media_duration_90k,
                               :prev_runs, :wall_duration_90k, :media_duration_delta_90k,
                               :video_samples, :video_sync_samples, :video_sample_entry_id)
    "#).with_context(|e| format!("can't prepare recording insert: {}", e))?;
    stmt.execute_named(named_params!{
        ":composite_id": id.0,
        ":stream_id": i64::from(id.stream()),
        ":open_id": o.id,
        ":run_offset": r.run_offset,
        ":flags": r.flags,
        ":sample_file_bytes": r.sample_file_bytes,
        ":start_time_90k": r.start.0,
        ":wall_duration_90k": r.wall_duration_90k,
        // The schema stores the (small) delta between media and wall duration rather than
        // the media duration itself.
        ":media_duration_delta_90k": r.media_duration_90k - r.wall_duration_90k,
        ":prev_media_duration_90k": r.prev_media_duration.0,
        ":prev_runs": r.prev_runs,
        ":video_samples": r.video_samples,
        ":video_sync_samples": r.video_sync_samples,
        ":video_sample_entry_id": r.video_sample_entry_id,
    }).with_context(|e| format!("unable to insert recording for recording {} {:#?}: {}",
                                id, r, e))?;

    let mut stmt = tx.prepare_cached(r#"
        insert into recording_integrity (composite_id, local_time_delta_90k, sample_file_blake3)
                                 values (:composite_id, :local_time_delta_90k, :sample_file_blake3)
    "#).with_context(|e| format!("can't prepare recording_integrity insert: {}", e))?;
    let blake3 = r.sample_file_blake3.as_ref().map(|b| &b[..]);
    // The local time delta is only meaningful after the first recording of a run;
    // store null for run starts.
    let delta = match r.run_offset {
        0 => None,
        _ => Some(r.local_time_delta.0),
    };
    stmt.execute_named(named_params!{
        ":composite_id": id.0,
        ":local_time_delta_90k": delta,
        ":sample_file_blake3": blake3,
    }).with_context(|e| format!("unable to insert recording_integrity for {:#?}: {}", r, e))?;

    let mut stmt = tx.prepare_cached(r#"
        insert into recording_playback (composite_id, video_index)
                                values (:composite_id, :video_index)
    "#).with_context(|e| format!("can't prepare recording_playback insert: {}", e))?;
    stmt.execute_named(named_params!{
        ":composite_id": id.0,
        ":video_index": &r.video_index,
    }).with_context(|e| format!("unable to insert recording_playback for {:#?}: {}", r, e))?;

    Ok(())
}
|
||||
|
||||
/// Transfers the given recording range from the `recording` and associated tables to the `garbage`
/// table. `sample_file_dir_id` is assumed to be correct.
///
/// Returns the number of recordings which were deleted.
///
/// The row counts of the three deletes are cross-checked against the garbage insert count;
/// a mismatch indicates database corruption or a logic error and fails the transaction.
pub(crate) fn delete_recordings(tx: &rusqlite::Transaction, sample_file_dir_id: i32,
                                ids: Range<CompositeId>)
                                -> Result<usize, Error> {
    let mut insert = tx.prepare_cached(r#"
        insert into garbage (sample_file_dir_id, composite_id)
        select
          :sample_file_dir_id,
          composite_id
        from
          recording
        where
          :start <= composite_id and
          composite_id < :end
    "#)?;
    let mut del_playback = tx.prepare_cached(r#"
        delete from recording_playback
        where
          :start <= composite_id and
          composite_id < :end
    "#)?;
    let mut del_integrity = tx.prepare_cached(r#"
        delete from recording_integrity
        where
          :start <= composite_id and
          composite_id < :end
    "#)?;
    let mut del_main = tx.prepare_cached(r#"
        delete from recording
        where
          :start <= composite_id and
          composite_id < :end
    "#)?;
    // Record the sample files as garbage first so they are never orphaned: a crash
    // between statements leaves extra garbage rows, not unreferenced files.
    let n = insert.execute_named(named_params!{
        ":sample_file_dir_id": sample_file_dir_id,
        ":start": ids.start.0,
        ":end": ids.end.0,
    })?;
    let p = named_params!{
        ":start": ids.start.0,
        ":end": ids.end.0,
    };
    let n_playback = del_playback.execute_named(p)?;
    if n_playback != n {
        bail!("inserted {} garbage rows but deleted {} recording_playback rows!", n, n_playback);
    }
    let n_integrity = del_integrity.execute_named(p)?;
    if n_integrity > n { // fewer is okay; recording_integrity is optional.
        bail!("inserted {} garbage rows but deleted {} recording_integrity rows!", n, n_integrity);
    }
    let n_main = del_main.execute_named(p)?;
    if n_main != n {
        bail!("inserted {} garbage rows but deleted {} recording rows!", n, n_main);
    }
    Ok(n)
}
|
||||
|
||||
/// Marks the given sample files as deleted. This shouldn't be called until the files have
|
||||
/// been `unlink()`ed and the parent directory `fsync()`ed.
|
||||
pub(crate) fn mark_sample_files_deleted(tx: &rusqlite::Transaction, ids: &[CompositeId])
|
||||
-> Result<(), Error> {
|
||||
if ids.is_empty() { return Ok(()); }
|
||||
let mut stmt = tx.prepare_cached("delete from garbage where composite_id = ?")?;
|
||||
for &id in ids {
|
||||
let changes = stmt.execute(params![id.0])?;
|
||||
if changes != 1 {
|
||||
// panic rather than return error. Errors get retried indefinitely, but there's no
|
||||
// recovery from this condition.
|
||||
//
|
||||
// Tempting to just consider logging error and moving on, but this represents a logic
|
||||
// flaw, so complain loudly. The freshly deleted file might still be referenced in the
|
||||
// recording table.
|
||||
panic!("no garbage row for {}", id);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Gets the time range of recordings for the given stream.
///
/// Returns `Ok(None)` when the stream has no recordings at all. The range's end is the
/// latest end time of any recording, which (because recordings may overlap in wall time)
/// is not necessarily the end of the recording with the latest start.
pub(crate) fn get_range(conn: &rusqlite::Connection, stream_id: i32)
                        -> Result<Option<Range<recording::Time>>, Error> {
    // The minimum is straightforward, taking advantage of the start_time_90k index.
    let mut stmt = conn.prepare_cached(STREAM_MIN_START_SQL)?;
    let mut rows = stmt.query_named(named_params!{":stream_id": stream_id})?;
    let min_start = match rows.next()? {
        Some(row) => recording::Time(row.get(0)?),
        None => return Ok(None),
    };

    // There was a minimum, so there should be a maximum too. Calculating it is less
    // straightforward because recordings could overlap. All recordings starting in the
    // last MAX_RECORDING_DURATION must be examined in order to take advantage of the
    // start_time_90k index.
    let mut stmt = conn.prepare_cached(STREAM_MAX_START_SQL)?;
    let mut rows = stmt.query_named(named_params!{":stream_id": stream_id})?;
    let mut maxes_opt = None;
    // Rows arrive in descending start-time order; scan until further rows can't possibly
    // extend the maximum end time.
    while let Some(row) = rows.next()? {
        let row_start = recording::Time(row.get(0)?);
        let row_duration: i64 = row.get(1)?;
        let row_end = recording::Time(row_start.0 + row_duration);
        let maxes = match maxes_opt {
            None => row_start .. row_end,
            Some(Range{start: s, end: e}) => s .. ::std::cmp::max(e, row_end),
        };
        // Any earlier-starting recording is bounded by MAX_RECORDING_WALL_DURATION, so
        // once starts fall this far back they can no longer affect the maximum end.
        if row_start.0 <= maxes.start.0 - recording::MAX_RECORDING_WALL_DURATION {
            break;
        }
        maxes_opt = Some(maxes);
    }
    let max_end = match maxes_opt {
        Some(Range{start: _, end: e}) => e,
        None => bail!("missing max for stream {} which had min {}", stream_id, min_start),
    };
    Ok(Some(min_start .. max_end))
}
|
||||
|
||||
/// Lists all garbage ids for the given sample file directory.
|
||||
pub(crate) fn list_garbage(conn: &rusqlite::Connection, dir_id: i32)
|
||||
-> Result<FnvHashSet<CompositeId>, Error> {
|
||||
let mut garbage = FnvHashSet::default();
|
||||
let mut stmt = conn.prepare_cached(
|
||||
"select composite_id from garbage where sample_file_dir_id = ?")?;
|
||||
let mut rows = stmt.query(&[&dir_id])?;
|
||||
while let Some(row) = rows.next()? {
|
||||
garbage.insert(CompositeId(row.get(0)?));
|
||||
}
|
||||
Ok(garbage)
|
||||
}
|
||||
|
||||
/// Lists the oldest recordings for a stream, starting with the given id.
/// `f` should return true as long as further rows are desired.
///
/// Iteration is bounded to the single stream encoded in `start`: the SQL range ends at
/// the first composite id of the next stream.
pub(crate) fn list_oldest_recordings(conn: &rusqlite::Connection, start: CompositeId,
                                     f: &mut dyn FnMut(db::ListOldestRecordingsRow) -> bool)
                                     -> Result<(), Error> {
    let mut stmt = conn.prepare_cached(LIST_OLDEST_RECORDINGS_SQL)?;
    let mut rows = stmt.query_named(named_params!{
        ":start": start.0,
        // First id of the following stream, i.e. an exclusive upper bound for this stream.
        ":end": CompositeId::new(start.stream() + 1, 0).0,
    })?;
    while let Some(row) = rows.next()? {
        let should_continue = f(db::ListOldestRecordingsRow {
            id: CompositeId(row.get(0)?),
            start: recording::Time(row.get(1)?),
            wall_duration_90k: row.get(2)?,
            sample_file_bytes: row.get(3)?,
        });
        if !should_continue {
            break;
        }
    }
    Ok(())
}
|
||||
592
server/db/recording.rs
Normal file
592
server/db/recording.rs
Normal file
@@ -0,0 +1,592 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2016-2020 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use crate::coding::{append_varint32, decode_varint32, unzigzag32, zigzag32};
use crate::db;
use failure::{Error, bail};
use log::trace;
use std::convert::TryFrom;
use std::ops::Range;

pub use base::time::TIME_UNITS_PER_SEC;

/// The target wall duration of a single recording (one minute in 90 kHz units).
pub const DESIRED_RECORDING_WALL_DURATION: i64 = 60 * TIME_UNITS_PER_SEC;

/// The hard upper bound on a single recording's wall duration (five minutes in 90 kHz
/// units); `get_range`'s overlap scan relies on this bound.
pub const MAX_RECORDING_WALL_DURATION: i64 = 5 * 60 * TIME_UNITS_PER_SEC;

pub use base::time::Time;
pub use base::time::Duration;
|
||||
|
||||
/// Converts from a wall time offset into a recording to a media time offset or vice versa.
///
/// `from_off_90k` must not exceed `from_duration_90k` (debug-asserted). A zero
/// `from_duration_90k` maps everything to zero rather than dividing by zero.
pub fn rescale(from_off_90k: i32, from_duration_90k: i32, to_duration_90k: i32) -> i32 {
    debug_assert!(from_off_90k <= from_duration_90k,
                  "from_off_90k={} from_duration_90k={} to_duration_90k={}",
                  from_off_90k, from_duration_90k, to_duration_90k);
    if from_duration_90k == 0 {
        return 0; // avoid a divide by zero.
    }

    // Widen to i64 because the intermediate product may overflow i32. The max wall
    // time is recording::MAX_RECORDING_WALL_DURATION; the max media duration should be
    // roughly the same (design limit of 500 ppm correction), so the final quotient
    // fits within i32.
    let scaled = i64::from(from_off_90k) * i64::from(to_duration_90k)
                 / i64::from(from_duration_90k);
    i32::try_from(scaled)
        .map_err(|_| format!("rescale overflow: {} * {} / {} > i32::max_value()",
                             from_off_90k, to_duration_90k, from_duration_90k))
        .unwrap()
}
|
||||
|
||||
/// An iterator through a sample index.
/// Initially invalid; call `next()` before each read.
#[derive(Clone, Copy, Debug)]
pub struct SampleIndexIterator {
    /// The index byte position of the next sample to read (low 31 bits) and if the current
    /// sample is a key frame (high bit).
    i_and_is_key: u32,

    /// The starting data byte position of this sample within the segment.
    pub pos: i32,

    /// The starting time of this sample within the segment (in 90 kHz units).
    pub start_90k: i32,

    /// The duration of this sample (in 90 kHz units).
    pub duration_90k: i32,

    /// The byte length of this frame.
    pub bytes: i32,

    /// The byte length of the last frame of the "other" type: if this one is key, the last
    /// non-key; if this one is non-key, the last key.
    bytes_other: i32,
}
|
||||
|
||||
impl SampleIndexIterator {
    /// Returns a fresh iterator positioned before the first sample; `next` must be
    /// called before the public fields are meaningful.
    pub fn new() -> SampleIndexIterator {
        SampleIndexIterator{i_and_is_key: 0,
                            pos: 0,
                            start_90k: 0,
                            duration_90k: 0,
                            bytes: 0,
                            bytes_other: 0}
    }

    /// Advances to the next sample in `data`, returning `Ok(false)` at a clean end of
    /// index and `Err` on a malformed index (bad varint, negative duration, non-positive
    /// frame length, or a zero-duration frame anywhere but the end).
    pub fn next(&mut self, data: &[u8]) -> Result<bool, Error> {
        // Commit the previous sample's size/duration before decoding the next entry.
        self.pos += self.bytes;
        self.start_90k += self.duration_90k;
        let i = (self.i_and_is_key & 0x7FFF_FFFF) as usize;
        if i == data.len() {
            return Ok(false)
        }
        // Each index entry is two varints: (duration delta << 1 | is_key) and bytes delta.
        let (raw1, i1) = match decode_varint32(data, i) {
            Ok(tuple) => tuple,
            Err(()) => bail!("bad varint 1 at offset {}", i),
        };
        let (raw2, i2) = match decode_varint32(data, i1) {
            Ok(tuple) => tuple,
            Err(()) => bail!("bad varint 2 at offset {}", i1),
        };
        let duration_90k_delta = unzigzag32(raw1 >> 1);
        self.duration_90k += duration_90k_delta;
        if self.duration_90k < 0 {
            bail!("negative duration {} after applying delta {}",
                  self.duration_90k, duration_90k_delta);
        }
        if self.duration_90k == 0 && data.len() > i2 {
            bail!("zero duration only allowed at end; have {} bytes left", data.len() - i2);
        }
        // Capture the previous key/non-key lengths before overwriting is_key below;
        // the bytes delta is relative to the previous frame of the *same* type.
        let (prev_bytes_key, prev_bytes_nonkey) = match self.is_key() {
            true => (self.bytes, self.bytes_other),
            false => (self.bytes_other, self.bytes),
        };
        self.i_and_is_key = (i2 as u32) | (((raw1 & 1) as u32) << 31);
        let bytes_delta = unzigzag32(raw2);
        if self.is_key() {
            self.bytes = prev_bytes_key + bytes_delta;
            self.bytes_other = prev_bytes_nonkey;
        } else {
            self.bytes = prev_bytes_nonkey + bytes_delta;
            self.bytes_other = prev_bytes_key;
        }
        if self.bytes <= 0 {
            bail!("non-positive bytes {} after applying delta {} to key={} frame at ts {}",
                  self.bytes, bytes_delta, self.is_key(), self.start_90k);
        }
        Ok(true)
    }

    /// Returns true if the current sample is a key (sync) frame.
    #[inline]
    pub fn is_key(&self) -> bool { (self.i_and_is_key & 0x8000_0000) != 0 }
}
|
||||
|
||||
/// Encodes a sample index incrementally, one frame at a time. The durations and frame
/// lengths are delta-encoded (relative to the previous frame of the same key/non-key
/// type), matching what `SampleIndexIterator` decodes.
#[derive(Debug)]
pub struct SampleIndexEncoder {
    // Previous frame's duration, for delta encoding.
    prev_duration_90k: i32,
    // Previous key frame's byte length, for delta encoding.
    prev_bytes_key: i32,
    // Previous non-key frame's byte length, for delta encoding.
    prev_bytes_nonkey: i32,
}
|
||||
|
||||
impl SampleIndexEncoder {
    /// Returns an encoder with no frames yet added.
    pub fn new() -> Self {
        SampleIndexEncoder {
            prev_duration_90k: 0,
            prev_bytes_key: 0,
            prev_bytes_nonkey: 0,
        }
    }

    /// Appends one frame to `r`'s `video_index`, also updating its running totals
    /// (media duration, byte count, sample/sync-sample counts).
    pub fn add_sample(&mut self, duration_90k: i32, bytes: i32, is_key: bool,
                      r: &mut db::RecordingToInsert) {
        let duration_delta = duration_90k - self.prev_duration_90k;
        self.prev_duration_90k = duration_90k;
        r.media_duration_90k += duration_90k;
        r.sample_file_bytes += bytes;
        r.video_samples += 1;
        // Delta against the previous frame of the same type (key vs. non-key), since
        // those lengths tend to be similar and thus compress well.
        let bytes_delta = bytes - if is_key {
            let prev = self.prev_bytes_key;
            r.video_sync_samples += 1;
            self.prev_bytes_key = bytes;
            prev
        } else {
            let prev = self.prev_bytes_nonkey;
            self.prev_bytes_nonkey = bytes;
            prev
        };
        // Entry format decoded by SampleIndexIterator::next: two zigzag varints, the
        // first carrying the is_key flag in its low bit.
        append_varint32((zigzag32(duration_delta) << 1) | (is_key as u32), &mut r.video_index);
        append_varint32(zigzag32(bytes_delta), &mut r.video_index);
    }
}
|
||||
|
||||
/// A segment represents a view of some or all of a single recording, starting from a key frame.
/// This struct is not specific to a container format; for `.mp4`s, it's wrapped in a
/// `mp4::Segment`. Other container/transport formats could be supported in a similar manner.
#[derive(Debug)]
pub struct Segment {
    pub id: db::CompositeId,
    pub open_id: u32,

    /// An iterator positioned at the beginning of the segment, or `None`. Most segments are
    /// positioned at the beginning of the recording, so this is an optional box to shrink a long
    /// list of segments. `None` is equivalent to `SampleIndexIterator::new()`.
    begin: Option<Box<SampleIndexIterator>>,
    /// Ending byte offset of the segment's data within the sample file.
    pub file_end: i32,

    pub frames: u16,
    pub key_frames: u16,
    /// Low 31 bits: the video sample entry id. High bit: whether the segment ends with a
    /// zero-duration trailing frame. See `video_sample_entry_id` / `have_trailing_zero`.
    video_sample_entry_id_and_trailing_zero: i32,
}
|
||||
|
||||
impl Segment {
    /// Creates a segment.
    ///
    /// `desired_media_range_90k` represents the desired range of the segment relative to the start
    /// of the recording, in media time units.
    ///
    /// The actual range will start at the most recent acceptable frame's start at or before the
    /// desired start time. If `start_at_key` is true, only key frames are acceptable; otherwise
    /// any frame is. The caller is responsible for skipping over the undesired prefix, perhaps
    /// with an edit list in the case of a `.mp4`.
    ///
    /// The actual range will end at the first frame after the desired range (unless the desired
    /// range extends beyond the recording). Likewise, the caller is responsible for trimming the
    /// final frame's duration if desired.
    pub fn new(db: &db::LockedDatabase,
               recording: &db::ListRecordingsRow,
               desired_media_range_90k: Range<i32>,
               start_at_key: bool) -> Result<Segment, Error> {
        // Start with values describing the entire recording; the slow path below
        // overwrites them when only a sub-range is wanted.
        let mut self_ = Segment {
            id: recording.id,
            open_id: recording.open_id,
            begin: None,
            file_end: recording.sample_file_bytes,
            frames: recording.video_samples as u16,
            key_frames: recording.video_sync_samples as u16,
            video_sample_entry_id_and_trailing_zero:
                recording.video_sample_entry_id |
                ((((recording.flags & db::RecordingFlags::TrailingZero as i32) != 0) as i32) << 31),
        };

        if desired_media_range_90k.start > desired_media_range_90k.end ||
           desired_media_range_90k.end > recording.media_duration_90k {
            bail!("desired media range [{}, {}) invalid for recording of length {}",
                  desired_media_range_90k.start, desired_media_range_90k.end,
                  recording.media_duration_90k);
        }

        if desired_media_range_90k.start == 0 &&
           desired_media_range_90k.end == recording.media_duration_90k {
            // Fast path. Existing entry is fine.
            trace!("recording::Segment::new fast path, recording={:#?}", recording);
            return Ok(self_)
        }

        // Slow path. Need to iterate through the index.
        trace!("recording::Segment::new slow path, desired_media_range_90k={:?}, recording={:#?}",
               desired_media_range_90k, recording);
        db.with_recording_playback(self_.id, &mut |playback| {
            let mut begin = Box::new(SampleIndexIterator::new());
            let data = &(&playback).video_index;
            let mut it = SampleIndexIterator::new();
            if !it.next(data)? {
                bail!("no index");
            }
            if !it.is_key() {
                bail!("not key frame");
            }

            // Stop when hitting a frame with this start time.
            // Going until the end of the recording is special-cased because there can be a trailing
            // frame of zero duration. It's unclear exactly how this should be handled, but let's
            // include it for consistency with the fast path. It'd be bizarre to have it included or
            // not based on desired_media_range_90k.start.
            let end_90k = if desired_media_range_90k.end == recording.media_duration_90k {
                i32::max_value()
            } else {
                desired_media_range_90k.end
            };

            loop {
                if it.start_90k <= desired_media_range_90k.start &&
                   (!start_at_key || it.is_key()) {
                    // new start candidate; reset the counts to start over from here.
                    *begin = it;
                    self_.frames = 0;
                    self_.key_frames = 0;
                }
                if it.start_90k >= end_90k && self_.frames > 0 {
                    break;
                }
                self_.frames += 1;
                self_.key_frames += it.is_key() as u16;
                if !it.next(data)? {
                    break;
                }
            }
            self_.begin = Some(begin);
            self_.file_end = it.pos;
            // Recompute the trailing-zero bit: it applies to this segment only if the
            // segment actually includes the recording's zero-duration final frame.
            self_.video_sample_entry_id_and_trailing_zero =
                recording.video_sample_entry_id |
                (((it.duration_90k == 0) as i32) << 31);
            Ok(())
        })?;
        Ok(self_)
    }

    /// Returns the video sample entry id (with the trailing-zero flag masked off).
    pub fn video_sample_entry_id(&self) -> i32 {
        self.video_sample_entry_id_and_trailing_zero & 0x7FFFFFFF
    }

    /// Returns true if the segment ends with a zero-duration trailing frame.
    pub fn have_trailing_zero(&self) -> bool { self.video_sample_entry_id_and_trailing_zero < 0 }

    /// Returns the byte range within the sample file of data associated with this segment.
    pub fn sample_file_range(&self) -> Range<u64> {
        self.begin.as_ref().map(|b| b.pos as u64).unwrap_or(0) .. self.file_end as u64
    }

    /// Returns the actual media start time. As described in `new`, this can be less than the
    /// desired media start time if there is no key frame at the right position.
    pub fn actual_start_90k(&self) -> i32 { self.begin.as_ref().map(|b| b.start_90k).unwrap_or(0) }

    /// Iterates through each frame in the segment.
    /// Must be called without the database lock held; retrieves video index from the cache.
    pub fn foreach<F>(&self, playback: &db::RecordingPlayback, mut f: F) -> Result<(), Error>
    where F: FnMut(&SampleIndexIterator) -> Result<(), Error> {
        trace!("foreach on recording {}: {} frames, actual_start_90k: {}",
               self.id, self.frames, self.actual_start_90k());
        let data = &(&playback).video_index;
        let mut it = match self.begin {
            Some(ref b) => **b,
            None => {
                // Fast-path segment covering the whole recording: re-derive the first
                // position and validate the index's first frame.
                let mut it = SampleIndexIterator::new();
                if !it.next(data)? {
                    bail!("recording {} has no frames", self.id);
                }
                if !it.is_key() {
                    bail!("recording {} doesn't start with key frame", self.id);
                }
                it
            }
        };
        let mut have_frame = true;
        let mut key_frame = 0;

        for i in 0 .. self.frames {
            if !have_frame {
                bail!("recording {}: expected {} frames, found only {}", self.id, self.frames, i+1);
            }
            if it.is_key() {
                key_frame += 1;
                if key_frame > self.key_frames {
                    bail!("recording {}: more than expected {} key frames",
                          self.id, self.key_frames);
                }
            }

            // Note: this inner loop avoids ? for performance. Don't change these lines without
            // reading https://github.com/rust-lang/rust/issues/37939 and running
            // mp4::bench::build_index.
            if let Err(e) = f(&it) {
                return Err(e);
            }
            have_frame = match it.next(data) {
                Err(e) => return Err(e),
                Ok(hf) => hf,
            };
        }
        if key_frame < self.key_frames {
            bail!("recording {}: expected {} key frames, found only {}",
                  self.id, self.key_frames, key_frame);
        }
        Ok(())
    }

    /// Returns true if this starts with a non-key frame.
    pub fn starts_with_nonkey(&self) -> bool {
        match self.begin {
            Some(ref b) => !b.is_key(),

            // Fast-path case, in which this holds an entire recording. They always start with a
            // key frame.
            None => false,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use base::clock::RealClocks;
    use super::*;
    use crate::testutil::{self, TestDb};

    /// Tests encoding the example from design/schema.md.
    #[test]
    fn test_encode_example() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut e = SampleIndexEncoder::new();
        e.add_sample(10, 1000, true, &mut r);
        e.add_sample(9, 10, false, &mut r);
        e.add_sample(11, 15, false, &mut r);
        e.add_sample(10, 12, false, &mut r);
        e.add_sample(10, 1050, true, &mut r);
        assert_eq!(r.video_index, b"\x29\xd0\x0f\x02\x14\x08\x0a\x02\x05\x01\x64");
        assert_eq!(10 + 9 + 11 + 10 + 10, r.media_duration_90k);
        assert_eq!(5, r.video_samples);
        assert_eq!(2, r.video_sync_samples);
    }

    /// Tests a round trip from `SampleIndexEncoder` to `SampleIndexIterator`.
    #[test]
    fn test_round_trip() {
        testutil::init();
        #[derive(Debug, PartialEq, Eq)]
        struct Sample {
            duration_90k: i32,
            bytes: i32,
            is_key: bool,
        }
        let samples = [
            Sample{duration_90k: 10, bytes: 30000, is_key: true},
            Sample{duration_90k: 9, bytes: 1000, is_key: false},
            Sample{duration_90k: 11, bytes: 1100, is_key: false},
            Sample{duration_90k: 18, bytes: 31000, is_key: true},
            Sample{duration_90k: 0, bytes: 1000, is_key: false},
        ];
        let mut r = db::RecordingToInsert::default();
        let mut e = SampleIndexEncoder::new();
        for sample in &samples {
            e.add_sample(sample.duration_90k, sample.bytes, sample.is_key, &mut r);
        }
        let mut it = SampleIndexIterator::new();
        for sample in &samples {
            assert!(it.next(&r.video_index).unwrap());
            assert_eq!(sample,
                       &Sample{duration_90k: it.duration_90k,
                               bytes: it.bytes,
                               is_key: it.is_key()});
        }
        assert!(!it.next(&r.video_index).unwrap());
    }

    /// Tests that `SampleIndexIterator` spots several classes of errors.
    /// TODO: test and fix overflow cases.
    #[test]
    fn test_iterator_errors() {
        testutil::init();
        struct Test {
            encoded: &'static [u8],
            err: &'static str,
        }
        let tests = [
            Test{encoded: b"\x80", err: "bad varint 1 at offset 0"},
            Test{encoded: b"\x00\x80", err: "bad varint 2 at offset 1"},
            Test{encoded: b"\x00\x02\x00\x00",
                 err: "zero duration only allowed at end; have 2 bytes left"},
            Test{encoded: b"\x02\x02",
                 err: "negative duration -1 after applying delta -1"},
            Test{encoded: b"\x04\x00",
                 err: "non-positive bytes 0 after applying delta 0 to key=false frame at ts 0"},
        ];
        for test in &tests {
            let mut it = SampleIndexIterator::new();
            assert_eq!(it.next(test.encoded).unwrap_err().to_string(), test.err);
        }
    }

    /// Collects `f(it)` for every frame of `segment`, for concise assertions below.
    fn get_frames<F, T>(db: &db::Database, segment: &Segment, f: F) -> Vec<T>
    where F: Fn(&SampleIndexIterator) -> T {
        let mut v = Vec::new();
        db.lock().with_recording_playback(segment.id, &mut |playback| {
            segment.foreach(playback, |it| { v.push(f(it)); Ok(()) })
        }).unwrap();
        v
    }

    /// Tests that a `Segment` correctly can clip at the beginning and end.
    /// This is a simpler case; all sync samples means we can start on any frame.
    #[test]
    fn test_segment_clipping_with_all_sync() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        for i in 1..6 {
            let duration_90k = 2 * i;
            let bytes = 3 * i;
            encoder.add_sample(duration_90k, bytes, true, &mut r);
        }
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        // Time range [2, 2 + 4 + 6 + 8) means the 2nd, 3rd, 4th samples should be
        // included.
        let segment = Segment::new(&db.db.lock(), &row, 2 .. 2+4+6+8, true).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.duration_90k), &[4, 6, 8]);
    }

    /// Half sync frames means starting from the last sync frame <= desired point.
    #[test]
    fn test_segment_clipping_with_half_sync() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        for i in 1..6 {
            let duration_90k = 2 * i;
            let bytes = 3 * i;
            encoder.add_sample(duration_90k, bytes, (i % 2) == 1, &mut r);
        }
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        // Time range [2 + 4 + 6, 2 + 4 + 6 + 8) means the 4th sample should be included.
        // The 3rd also gets pulled in because it is a sync frame and the 4th is not.
        let segment = Segment::new(&db.db.lock(), &row, 2+4+6 .. 2+4+6+8, true).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.duration_90k), &[6, 8]);
    }

    /// Tests that the zero-duration trailing frame is included when clipping to the end.
    #[test]
    fn test_segment_clipping_with_trailing_zero() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        encoder.add_sample(1, 1, true, &mut r);
        encoder.add_sample(1, 2, true, &mut r);
        encoder.add_sample(0, 3, true, &mut r);
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        let segment = Segment::new(&db.db.lock(), &row, 1 .. 2, true).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.bytes), &[2, 3]);
    }

    /// Even if the desired duration is 0, there should still be a frame.
    #[test]
    fn test_segment_zero_desired_duration() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        encoder.add_sample(1, 1, true, &mut r);
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        let segment = Segment::new(&db.db.lock(), &row, 0 .. 0, true).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.bytes), &[1]);
    }

    /// Test a `Segment` which uses the whole recording.
    /// This takes a fast path which skips scanning the index in `new()`.
    #[test]
    fn test_segment_fast_path() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        for i in 1..6 {
            let duration_90k = 2 * i;
            let bytes = 3 * i;
            encoder.add_sample(duration_90k, bytes, (i % 2) == 1, &mut r);
        }
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        let segment = Segment::new(&db.db.lock(), &row, 0 .. 2+4+6+8+10, true).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.duration_90k), &[2, 4, 6, 8, 10]);
    }

    /// Tests the fast path with a zero-duration trailing frame present.
    #[test]
    fn test_segment_fast_path_with_trailing_zero() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        encoder.add_sample(1, 1, true, &mut r);
        encoder.add_sample(1, 2, true, &mut r);
        encoder.add_sample(0, 3, true, &mut r);
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        let segment = Segment::new(&db.db.lock(), &row, 0 .. 2, true).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.bytes), &[1, 2, 3]);
    }

    // TODO: test segment error cases involving mismatch between row frames/key_frames and index.
}
|
||||
|
||||
#[cfg(all(test, feature="nightly"))]
mod bench {
    extern crate test;

    use super::*;

    /// Benchmarks the decoder, which is performance-critical for .mp4 serving.
    #[bench]
    fn bench_decoder(b: &mut test::Bencher) {
        let data = include_bytes!("testdata/video_sample_index.bin");
        b.bytes = data.len() as u64;
        b.iter(|| {
            let mut it = SampleIndexIterator::new();
            while it.next(data).unwrap() {}
            // Expected final position/timestamp for this fixture; guards against the
            // optimizer eliding the loop and against decoder regressions.
            assert_eq!(30104460, it.pos);
            assert_eq!(5399985, it.start_90k);
        });
    }
}
|
||||
523
server/db/schema.sql
Normal file
523
server/db/schema.sql
Normal file
@@ -0,0 +1,523 @@
|
||||
-- This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
-- Copyright (C) 2016-2020 The Moonfire NVR Authors
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU General Public License as published by
|
||||
-- the Free Software Foundation, either version 3 of the License, or
|
||||
-- (at your option) any later version.
|
||||
--
|
||||
-- In addition, as a special exception, the copyright holders give
|
||||
-- permission to link the code of portions of this program with the
|
||||
-- OpenSSL library under certain conditions as described in each
|
||||
-- individual source file, and distribute linked combinations including
|
||||
-- the two.
|
||||
--
|
||||
-- You must obey the GNU General Public License in all respects for all
|
||||
-- of the code used other than OpenSSL. If you modify file(s) with this
|
||||
-- exception, you may extend this exception to your version of the
|
||||
-- file(s), but you are not obligated to do so. If you do not wish to do
|
||||
-- so, delete this exception statement from your version. If you delete
|
||||
-- this exception statement from all source files in the program, then
|
||||
-- also delete it here.
|
||||
--
|
||||
-- This program is distributed in the hope that it will be useful,
|
||||
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
-- GNU General Public License for more details.
|
||||
--
|
||||
-- You should have received a copy of the GNU General Public License
|
||||
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
--
|
||||
-- schema.sql: SQLite3 database schema for Moonfire NVR.
|
||||
-- See also design/schema.md.
|
||||
|
||||
-- Database metadata. There should be exactly one row in this table.
|
||||
create table meta (
|
||||
uuid blob not null check (length(uuid) = 16),
|
||||
|
||||
-- The maximum number of entries in the signal_state table. If an update
|
||||
-- causes this to be exceeded, older times will be garbage collected to stay
|
||||
-- within the limit.
|
||||
max_signal_changes integer check (max_signal_changes >= 0)
|
||||
);
|
||||
|
||||
-- This table tracks the schema version.
|
||||
-- There is one row for the initial database creation (inserted below, after the
|
||||
-- create statements) and one for each upgrade procedure (if any).
|
||||
create table version (
|
||||
id integer primary key,
|
||||
|
||||
-- The unix time as of the creation/upgrade, as determined by
|
||||
-- cast(strftime('%s', 'now') as int).
|
||||
unix_time integer not null,
|
||||
|
||||
-- Optional notes on the creation/upgrade; could include the binary version.
|
||||
notes text
|
||||
);
|
||||
|
||||
-- Tracks every time the database has been opened in read/write mode.
|
||||
-- This is used to ensure directories are in sync with the database (see
|
||||
-- schema.proto:DirMeta), to disambiguate uncommitted recordings, and
|
||||
-- potentially to understand time problems.
|
||||
create table open (
|
||||
id integer primary key,
|
||||
uuid blob unique not null check (length(uuid) = 16),
|
||||
|
||||
-- Information about when / how long the database was open. These may be all
|
||||
-- null, for example in the open that represents all information written
|
||||
-- prior to database version 3.
|
||||
|
||||
-- System time when the database was opened, in 90 kHz units since
|
||||
-- 1970-01-01 00:00:00Z excluding leap seconds.
|
||||
start_time_90k integer,
|
||||
|
||||
-- System time when the database was closed or (on crash) last flushed.
|
||||
end_time_90k integer,
|
||||
|
||||
-- How long the database was open. This is end_time_90k - start_time_90k if
|
||||
-- there were no time steps or leap seconds during this time.
|
||||
duration_90k integer
|
||||
);
|
||||
|
||||
create table sample_file_dir (
|
||||
id integer primary key,
|
||||
path text unique not null,
|
||||
uuid blob unique not null check (length(uuid) = 16),
|
||||
|
||||
-- The last (read/write) open of this directory which fully completed.
|
||||
-- See schema.proto:DirMeta for a more complete description.
|
||||
last_complete_open_id integer references open (id)
|
||||
);
|
||||
|
||||
create table camera (
  id integer primary key,
  uuid blob unique not null check (length(uuid) = 16),

  -- A short name of the camera, used in log messages.
  short_name text not null,

  -- A short description of the camera.
  description text,

  -- The host part of the http:// URL when accessing ONVIF, optionally
  -- including ":<port>". Eg with ONVIF host "192.168.1.110:85", the full URL
  -- of the device management service will be
  -- "http://192.168.1.110:85/device_service".
  onvif_host text,

  -- The username to use when accessing the camera.
  -- If empty, no username or password will be supplied.
  username text,

  -- The password to use when accessing the camera.
  password text
);
|
||||
|
||||
create table stream (
|
||||
id integer primary key,
|
||||
camera_id integer not null references camera (id),
|
||||
sample_file_dir_id integer references sample_file_dir (id),
|
||||
type text not null check (type in ('main', 'sub')),
|
||||
|
||||
-- If record is true, the stream should start recording when moonfire
|
||||
-- starts. If false, no new recordings will be made, but old recordings
|
||||
-- will not be deleted.
|
||||
record integer not null check (record in (1, 0)),
|
||||
|
||||
-- The rtsp:// URL to use for this stream, excluding username and password.
|
||||
-- (Those are taken from the camera row's respective fields.)
|
||||
rtsp_url text not null,
|
||||
|
||||
-- The number of bytes of video to retain, excluding the currently-recording
|
||||
-- file. Older files will be deleted as necessary to stay within this limit.
|
||||
retain_bytes integer not null check (retain_bytes >= 0),
|
||||
|
||||
-- Flush the database when the first instant of completed recording is this
|
||||
-- many seconds old. A value of 0 means that every completed recording will
|
||||
-- cause an immediate flush. Higher values may allow flushes to be combined,
|
||||
-- reducing SSD write cycles. For example, if all streams have a flush_if_sec
|
||||
-- >= x sec, there will be:
|
||||
--
|
||||
-- * at most one flush per x sec in total
|
||||
-- * at most x sec of completed but unflushed recordings per stream.
|
||||
-- * at most x completed but unflushed recordings per stream, in the worst
|
||||
-- case where a recording instantly fails, waits the 1-second retry delay,
|
||||
-- then fails again, forever.
|
||||
flush_if_sec integer not null,
|
||||
|
||||
-- The total number of recordings ever created on this stream, including
|
||||
-- deleted ones. This is used for assigning the next recording id.
|
||||
cum_recordings integer not null check (cum_recordings >= 0),
|
||||
|
||||
-- The total media duration of all recordings ever created on this stream.
|
||||
cum_media_duration_90k integer not null check (cum_media_duration_90k >= 0),
|
||||
|
||||
-- The total number of runs (recordings with run_offset = 0) ever created
|
||||
-- on this stream.
|
||||
cum_runs integer not null check (cum_runs >= 0),
|
||||
|
||||
unique (camera_id, type)
|
||||
);
|
||||
|
||||
-- Each row represents a single completed recorded segment of video.
|
||||
-- Recordings are typically ~60 seconds; never more than 5 minutes.
|
||||
create table recording (
|
||||
-- The high 32 bits of composite_id are taken from the stream's id, which
|
||||
-- improves locality. The low 32 bits are taken from the stream's
|
||||
-- cum_recordings (which should be post-incremented in the same
|
||||
-- transaction). It'd be simpler to use a "without rowid" table and separate
|
||||
-- fields to make up the primary key, but
|
||||
-- <https://www.sqlite.org/withoutrowid.html> points out that "without
|
||||
-- rowid" is not appropriate when the average row size is in excess of 50
|
||||
-- bytes. recording_cover rows (which match this id format) are typically
|
||||
-- 1--5 KiB.
|
||||
composite_id integer primary key,
|
||||
|
||||
-- The open in which this was committed to the database. For a given
|
||||
-- composite_id, only one recording will ever be committed to the database,
|
||||
-- but in-memory state may reflect a recording which never gets committed.
|
||||
-- This field allows disambiguation in etags and such.
|
||||
open_id integer not null references open (id),
|
||||
|
||||
-- This field is redundant with composite_id above, but used to enforce the
|
||||
-- reference constraint and to structure the recording_start_time index.
|
||||
stream_id integer not null references stream (id),
|
||||
|
||||
-- The offset of this recording within a run. 0 means this was the first
|
||||
-- recording made from a RTSP session. The start of the run has composite_id
|
||||
-- (composite_id-run_offset).
|
||||
run_offset integer not null,
|
||||
|
||||
-- flags is a bitmask:
|
||||
--
|
||||
-- * 1, or "trailing zero", indicates that this recording is the last in a
|
||||
-- stream. As the duration of a sample is not known until the next sample
|
||||
-- is received, the final sample in this recording will have duration 0.
|
||||
flags integer not null,
|
||||
|
||||
sample_file_bytes integer not null check (sample_file_bytes > 0),
|
||||
|
||||
-- The starting time of the recording, in 90 kHz units since
|
||||
-- 1970-01-01 00:00:00 UTC excluding leap seconds. Currently on initial
|
||||
-- connection, this is taken from the local system time; on subsequent
|
||||
-- recordings in a run, it exactly matches the previous recording's end
|
||||
-- time.
|
||||
start_time_90k integer not null check (start_time_90k > 0),
|
||||
|
||||
-- The total duration of all previous recordings on this stream. This is
|
||||
-- returned in API requests and may be helpful for timestamps in a HTML
|
||||
-- MediaSourceExtensions SourceBuffer.
|
||||
prev_media_duration_90k integer not null
|
||||
check (prev_media_duration_90k >= 0),
|
||||
|
||||
-- The total number of previous runs (rows in which run_offset = 0).
|
||||
prev_runs integer not null check (prev_runs >= 0),
|
||||
|
||||
-- The wall-time duration of the recording, in 90 kHz units. This is the
|
||||
-- "corrected" duration.
|
||||
wall_duration_90k integer not null
|
||||
check (wall_duration_90k >= 0 and wall_duration_90k < 5*60*90000),
|
||||
|
||||
-- TODO: comment.
|
||||
media_duration_delta_90k integer not null,
|
||||
|
||||
video_samples integer not null check (video_samples > 0),
|
||||
video_sync_samples integer not null check (video_sync_samples > 0),
|
||||
video_sample_entry_id integer references video_sample_entry (id),
|
||||
|
||||
check (composite_id >> 32 = stream_id)
|
||||
);
|
||||
|
||||
create index recording_cover on recording (
|
||||
-- Typical queries use "where stream_id = ? order by start_time_90k".
|
||||
stream_id,
|
||||
start_time_90k,
|
||||
|
||||
-- These fields are not used for ordering; they cover most queries so
|
||||
-- that only database verification and actual viewing of recordings need
|
||||
-- to consult the underlying row.
|
||||
open_id,
|
||||
wall_duration_90k,
|
||||
media_duration_delta_90k,
|
||||
video_samples,
|
||||
video_sync_samples,
|
||||
video_sample_entry_id,
|
||||
sample_file_bytes,
|
||||
run_offset,
|
||||
flags
|
||||
);
|
||||
|
||||
-- Fields which are only needed to check/correct database integrity problems
|
||||
-- (such as incorrect timestamps).
|
||||
create table recording_integrity (
|
||||
-- See description on recording table.
|
||||
composite_id integer primary key references recording (composite_id),
|
||||
|
||||
-- The number of 90 kHz units the local system's monotonic clock has
|
||||
-- advanced more than the stated duration of recordings in a run since the
|
||||
-- first recording ended. Negative numbers indicate the local system time is
|
||||
-- behind the recording.
|
||||
--
|
||||
-- The first recording of a run (that is, one with run_offset=0) has null
|
||||
-- local_time_delta_90k because errors are assumed to
|
||||
-- be the result of initial buffering rather than frequency mismatch.
|
||||
--
|
||||
-- This value should be near 0 even on long runs in which the camera's clock
|
||||
-- and local system's clock frequency differ because each recording's delta
|
||||
-- is used to correct the durations of the next (up to 500 ppm error).
|
||||
local_time_delta_90k integer,
|
||||
|
||||
-- The number of 90 kHz units the local system's monotonic clock had
|
||||
-- advanced since the database was opened, as of the start of recording.
|
||||
-- TODO: fill this in!
|
||||
local_time_since_open_90k integer,
|
||||
|
||||
-- The difference between start_time_90k+duration_90k and a wall clock
|
||||
-- timestamp captured at end of this recording. This is meaningful for all
|
||||
-- recordings in a run, even the initial one (run_offset=0), because
|
||||
-- start_time_90k is derived from the wall time as of when recording
|
||||
-- starts, not when it ends.
|
||||
-- TODO: fill this in!
|
||||
wall_time_delta_90k integer,
|
||||
|
||||
-- The (possibly truncated) raw blake3 hash of the contents of the sample
|
||||
-- file.
|
||||
sample_file_blake3 blob check (length(sample_file_blake3) <= 32)
|
||||
);
|
||||
|
||||
-- Large fields for a recording which are needed only for playback.
-- In particular, when serving a byte range within a .mp4 file, the
-- recording_playback row is needed for the recording(s) corresponding to that
-- particular byte range, but the recording rows suffice for all other
-- recordings in the .mp4.
create table recording_playback (
  -- See description on recording table.
  composite_id integer primary key references recording (composite_id),

  -- See design/schema.md#video_index for a description of this field.
  video_index blob not null check (length(video_index) > 0)

  -- audio_index could be added here in the future.
);
|
||||
|
||||
-- Files which are to be deleted (may or may not still exist).
|
||||
-- Note that besides these files, for each stream, any recordings >= its
|
||||
-- cum_recordings should be discarded on startup.
|
||||
create table garbage (
|
||||
-- This is _mostly_ redundant with composite_id, which contains the stream
|
||||
-- id and thus a linkage to the sample file directory. Listing it here
|
||||
-- explicitly means that streams can be deleted without losing the
|
||||
-- association of garbage to directory.
|
||||
sample_file_dir_id integer not null references sample_file_dir (id),
|
||||
|
||||
-- See description on recording table.
|
||||
composite_id integer not null,
|
||||
|
||||
-- Organize the table first by directory, as that's how it will be queried.
|
||||
primary key (sample_file_dir_id, composite_id)
|
||||
) without rowid;
|
||||
|
||||
-- A concrete box derived from an ISO/IEC 14496-12 section 8.5.2
-- VisualSampleEntry box. Describes the codec, width, height, etc.
create table video_sample_entry (
  id integer primary key,

  -- The width and height in pixels; must match values within
  -- `sample_entry_bytes`.
  width integer not null check (width > 0),
  height integer not null check (height > 0),

  -- The codec in RFC-6381 format, such as "avc1.4d001f".
  rfc6381_codec text not null,

  -- The serialized box, including the leading length and box type (avcC in
  -- the case of H.264).
  data blob not null check (length(data) > 86),

  -- Pixel aspect ratio, if known. As defined in ISO/IEC 14496-12 section
  -- 12.1.4.
  pasp_h_spacing integer not null default 1 check (pasp_h_spacing > 0),
  pasp_v_spacing integer not null default 1 check (pasp_v_spacing > 0)
);
|
||||
|
||||
create table user (
|
||||
id integer primary key,
|
||||
username unique not null,
|
||||
|
||||
-- Bitwise mask of flags:
|
||||
-- 1: disabled. If set, no method of authentication for this user will succeed.
|
||||
flags integer not null,
|
||||
|
||||
-- If set, a hash for password authentication, as generated by `libpasta::hash_password`.
|
||||
password_hash text,
|
||||
|
||||
-- A counter which increments with every password reset or clear.
|
||||
password_id integer not null default 0,
|
||||
|
||||
-- Updated lazily on database flush; reset when password_id is incremented.
|
||||
-- This could be used to automatically disable the password on hitting a threshold.
|
||||
password_failure_count integer not null default 0,
|
||||
|
||||
-- If set, a Unix UID that is accepted for authentication when using HTTP over
|
||||
-- a Unix domain socket. (Additionally, the UID running Moonfire NVR can authenticate
|
||||
-- as anyone; there's no point in trying to do otherwise.) This might be an easy
|
||||
-- bootstrap method once configuration happens through a web UI rather than text UI.
|
||||
unix_uid integer,
|
||||
|
||||
-- Permissions available for newly created tokens or when authenticating via
|
||||
-- unix_uid above. A serialized "Permissions" protobuf.
|
||||
permissions blob not null default X''
|
||||
);
|
||||
|
||||
-- A single session, whether for browser or robot use.
|
||||
-- These map at the HTTP layer to an "s" cookie (exact format described
|
||||
-- elsewhere), which holds the session id and an encrypted sequence number for
|
||||
-- replay protection.
|
||||
create table user_session (
|
||||
-- The session id is a 48-byte blob. This is the unsalted Blake3 (32 bytes)
|
||||
-- of the unencoded session id. Much like `password_hash`, a hash is used here
|
||||
-- so that a leaked database backup can't be trivially used to steal
|
||||
-- credentials.
|
||||
session_id_hash blob primary key not null,
|
||||
|
||||
user_id integer references user (id) not null,
|
||||
|
||||
-- A 32-byte random number. Used to derive keys for the replay protection
|
||||
-- and CSRF tokens.
|
||||
seed blob not null,
|
||||
|
||||
-- A bitwise mask of flags, currently all properties of the HTTP cookie
|
||||
-- used to hold the session:
|
||||
-- 1: HttpOnly
|
||||
-- 2: Secure
|
||||
-- 4: SameSite=Lax
|
||||
-- 8: SameSite=Strict - 4 must also be set.
|
||||
flags integer not null,
|
||||
|
||||
-- The domain of the HTTP cookie used to store this session. The outbound
|
||||
-- `Set-Cookie` header never specifies a scope, so this matches the `Host:` of
|
||||
-- the inbound HTTP request (minus the :port, if any was specified).
|
||||
domain text,
|
||||
|
||||
-- An editable description which might describe the device/program which uses
|
||||
-- this session, such as "Chromebook", "iPhone", or "motion detection worker".
|
||||
description text,
|
||||
|
||||
creation_password_id integer, -- the id it was created from, if created via password
|
||||
creation_time_sec integer not null, -- sec since epoch
|
||||
creation_user_agent text, -- User-Agent header from inbound HTTP request.
|
||||
creation_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket.
|
||||
|
||||
revocation_time_sec integer, -- sec since epoch
|
||||
revocation_user_agent text, -- User-Agent header from inbound HTTP request.
|
||||
revocation_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket/no peer.
|
||||
|
||||
-- A value indicating the reason for revocation, with optional additional
|
||||
-- text detail. Enumeration values:
|
||||
-- 0: logout link clicked (i.e. from within the session itself)
|
||||
-- 1: obsoleted by a change in hashing algorithm (eg schema 5->6 upgrade)
|
||||
--
|
||||
-- This might be extended for a variety of other reasons:
|
||||
-- x: user revoked (while authenticated in another way)
|
||||
-- x: password change invalidated all sessions created with that password
|
||||
-- x: expired (due to fixed total time or time inactive)
|
||||
-- x: evicted (due to too many sessions)
|
||||
-- x: suspicious activity
|
||||
revocation_reason integer,
|
||||
revocation_reason_detail text,
|
||||
|
||||
-- Information about requests which used this session, updated lazily on database flush.
|
||||
last_use_time_sec integer, -- sec since epoch
|
||||
last_use_user_agent text, -- User-Agent header from inbound HTTP request.
|
||||
last_use_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket.
|
||||
use_count not null default 0,
|
||||
|
||||
-- Permissions associated with this token; a serialized "Permissions" protobuf.
|
||||
permissions blob not null default X''
|
||||
) without rowid;
|
||||
|
||||
create index user_session_uid on user_session (user_id);
|
||||
|
||||
create table signal (
|
||||
id integer primary key,
|
||||
|
||||
-- a uuid describing the originating object, such as the uuid of the camera
|
||||
-- for built-in motion detection. There will be a JSON interface for adding
|
||||
-- events; it will require this UUID to be supplied. An external uuid might
|
||||
-- indicate "my house security system's zone 23".
|
||||
source_uuid blob not null check (length(source_uuid) = 16),
|
||||
|
||||
-- a uuid describing the type of event. A registry (TBD) will list built-in
|
||||
-- supported types, such as "Hikvision on-camera motion detection", or
|
||||
-- "ONVIF on-camera motion detection". External programs can use their own
|
||||
-- uuids, such as "Elk security system watcher".
|
||||
type_uuid blob not null check (length(type_uuid) = 16),
|
||||
|
||||
-- a short human-readable description of the event to use in mouseovers or event
|
||||
-- lists, such as "driveway motion" or "front door open".
|
||||
short_name not null,
|
||||
|
||||
unique (source_uuid, type_uuid)
|
||||
);
|
||||
|
||||
-- e.g. "moving/still", "disarmed/away/stay", etc.
|
||||
-- TODO: just do a protobuf for each type? might be simpler, more flexible.
|
||||
create table signal_type_enum (
|
||||
type_uuid blob not null check (length(type_uuid) = 16),
|
||||
value integer not null check (value > 0 and value < 16),
|
||||
name text not null,
|
||||
|
||||
-- true/1 iff this signal value should be considered "motion" for directly associated cameras.
|
||||
motion int not null check (motion in (0, 1)) default 0,
|
||||
|
||||
color text
|
||||
);
|
||||
|
||||
-- Associations between event sources and cameras.
-- For example, if two cameras have overlapping fields of view, they might be
-- configured such that each camera is associated with both its own motion and
-- the other camera's motion.
create table signal_camera (
  signal_id integer references signal (id),
  camera_id integer references camera (id),

  -- type:
  --
  -- 0 means direct association, as if the event source is the camera's own
  -- motion detection. Here are a couple ways this could be used:
  --
  -- * when viewing the camera, hotkeys to go to the start of the next or
  --   previous event should respect this event.
  -- * a list of events might include the recordings associated with the
  --   camera in the same timespan.
  --
  -- 1 means indirect association. A screen associated with the camera should
  -- give some indication of this event, but there should be no assumption
  -- that the camera will have a direct view of the event. For example, all
  -- cameras might be indirectly associated with a doorknob press. Cameras at
  -- the back of the house shouldn't be expected to have a direct view of this
  -- event, but motion events shortly afterward might warrant extra scrutiny.
  type integer not null,

  primary key (signal_id, camera_id)
) without rowid;
|
||||
|
||||
-- Changes to signals as of a given timestamp.
|
||||
create table signal_change (
|
||||
-- Event time, in 90 kHz units since 1970-01-01 00:00:00Z excluding leap seconds.
|
||||
time_90k integer primary key,
|
||||
|
||||
-- Changes at this timestamp.
|
||||
--
|
||||
-- A blob of varints representing a list of
|
||||
-- (signal number - next allowed, state) pairs, where signal number is
|
||||
-- non-decreasing. For example,
|
||||
-- input signals: 1 3 200 (must be sorted)
|
||||
-- delta: 1 1 196 (must be non-negative)
|
||||
-- states: 1 1 2
|
||||
-- varint: \x01 \x01 \x01 \x01 \xc4 \x01 \x02
|
||||
changes blob not null
|
||||
);
|
||||
|
||||
insert into version (id, unix_time, notes)
|
||||
values (6, cast(strftime('%s', 'now') as int), 'db creation');
|
||||
835
server/db/signal.rs
Normal file
835
server/db/signal.rs
Normal file
@@ -0,0 +1,835 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2019 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use base::bail_t;
|
||||
use crate::coding;
|
||||
use crate::db::FromSqlUuid;
|
||||
use crate::recording;
|
||||
use failure::{Error, bail, format_err};
|
||||
use fnv::FnvHashMap;
|
||||
use log::debug;
|
||||
use rusqlite::{Connection, Transaction, params};
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::collections::btree_map::Entry;
|
||||
use std::ops::Range;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// All state associated with signals. This is the entry point to this module.
pub(crate) struct State {
    /// All signals, keyed by signal id.
    signals_by_id: BTreeMap<u32, Signal>,

    /// All types with known states. Note that currently there's no requirement an entry here
    /// exists for every `type_` specified in a `Signal`, and there's an implied `0` (unknown)
    /// state for every `Type`.
    types_by_uuid: FnvHashMap<Uuid, Type>,

    /// The state of all signals at each change, keyed by the time of the change.
    points_by_time: BTreeMap<recording::Time, Point>,

    /// Times which need to be flushed to the database.
    /// These either have a matching `points_by_time` entry or represent a removal.
    dirty_by_time: BTreeSet<recording::Time>,

    /// Upper bound on the number of `signal_change` rows, read from `meta.max_signal_changes`;
    /// `None` means unlimited.
    max_signal_changes: Option<i64>,
}
|
||||
|
||||
/// Representation of all signals at a point in time.
|
||||
/// Each point matches a `signal_change` table row (when flushed). However, the in-memory
|
||||
/// representation keeps not only the changes as of that time but also the complete prior state.
|
||||
#[derive(Default)]
|
||||
struct Point {
|
||||
/// All data associated with the point.
|
||||
///
|
||||
/// `data[0..changes_off]` represents previous state (immediately prior to this point).
|
||||
/// `data[changes_off..]` represents the changes at this point.
|
||||
///
|
||||
/// This representation could be 8 bytes shorter on 64-bit platforms by using a u32 for the
|
||||
/// lengths, but this would require some unsafe code.
|
||||
///
|
||||
/// The serialized form stored here must always be valid.
|
||||
data: Box<[u8]>,
|
||||
changes_off: usize,
|
||||
}
|
||||
|
||||
impl Point {
|
||||
/// Creates a new point from `prev` and `changes`.
|
||||
///
|
||||
/// The caller is responsible for validation. In particular, `changes` must be a valid
|
||||
/// serialized form.
|
||||
fn new(prev: &BTreeMap<u32, u16>, changes: &[u8]) -> Self {
|
||||
let mut data = Vec::with_capacity(3 * prev.len() + changes.len());
|
||||
append_serialized(prev, &mut data);
|
||||
let changes_off = data.len();
|
||||
data.extend(changes);
|
||||
Point {
|
||||
data: data.into_boxed_slice(),
|
||||
changes_off,
|
||||
}
|
||||
}
|
||||
|
||||
fn swap(&mut self, other: &mut Point) {
|
||||
std::mem::swap(&mut self.data, &mut other.data);
|
||||
std::mem::swap(&mut self.changes_off, &mut other.changes_off);
|
||||
}
|
||||
|
||||
/// Returns an iterator over state as of immediately before this point.
|
||||
fn prev(&self) -> PointDataIterator {
|
||||
PointDataIterator::new(&self.data[0..self.changes_off])
|
||||
}
|
||||
|
||||
/// Returns an iterator over changes in this point.
|
||||
fn changes(&self) -> PointDataIterator {
|
||||
PointDataIterator::new(&self.data[self.changes_off..])
|
||||
}
|
||||
|
||||
/// Returns a mapping of signals to states immediately after this point.
|
||||
fn after(&self) -> BTreeMap<u32, u16> {
|
||||
let mut after = BTreeMap::new();
|
||||
let mut it = self.prev();
|
||||
while let Some((signal, state)) = it.next().expect("in-mem prev is valid") {
|
||||
after.insert(signal, state);
|
||||
}
|
||||
let mut it = self.changes();
|
||||
while let Some((signal, state)) = it.next().expect("in-mem changes is valid") {
|
||||
if state == 0 {
|
||||
after.remove(&signal);
|
||||
} else {
|
||||
after.insert(signal, state);
|
||||
}
|
||||
}
|
||||
after
|
||||
}
|
||||
}
|
||||
|
||||
/// Appends a serialized form of `from` into `to`.
|
||||
///
|
||||
/// `from` must be an iterator of `(signal, state)` with signal numbers in monotonically increasing
|
||||
/// order.
|
||||
fn append_serialized<'a, I>(from: I, to: &mut Vec<u8>)
|
||||
where I: IntoIterator<Item = (&'a u32, &'a u16)> {
|
||||
let mut next_allowed = 0;
|
||||
for (&signal, &state) in from.into_iter() {
|
||||
assert!(signal >= next_allowed);
|
||||
coding::append_varint32(signal - next_allowed, to);
|
||||
coding::append_varint32(state as u32, to);
|
||||
next_allowed = signal + 1;
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize(from: &BTreeMap<u32, u16>) -> Vec<u8> {
|
||||
let mut to = Vec::with_capacity(3 * from.len());
|
||||
append_serialized(from, &mut to);
|
||||
to
|
||||
}
|
||||
|
||||
struct PointDataIterator<'a> {
|
||||
data: &'a [u8],
|
||||
cur_pos: usize,
|
||||
cur_signal: u32,
|
||||
}
|
||||
|
||||
impl<'a> PointDataIterator<'a> {
    /// Creates an iterator over `data`, a serialized list of
    /// (signal delta, state) varint pairs.
    fn new(data: &'a [u8]) -> Self {
        PointDataIterator {
            data,
            cur_pos: 0,
            cur_signal: 0,
        }
    }

    /// Returns an error, `None`, or `Some((signal, state))`.
    /// Note that errors should be impossible on in-memory data; this returns `Result` for
    /// validating blobs as they're read from the database.
    fn next(&mut self) -> Result<Option<(u32, u16)>, Error> {
        if self.cur_pos == self.data.len() {
            return Ok(None);
        }
        // Each entry is two varints: the delta from the previous signal
        // number + 1 (deltas are non-negative, so signals strictly increase)
        // followed by the state.
        let (signal_delta, p) = coding::decode_varint32(self.data, self.cur_pos)
            .map_err(|()| format_err!("varint32 decode failure; data={:?} pos={}",
                                      self.data, self.cur_pos))?;
        let (state, p) = coding::decode_varint32(self.data, p)
            .map_err(|()| format_err!("varint32 decode failure; data={:?} pos={}",
                                      self.data, p))?;
        let signal = self.cur_signal.checked_add(signal_delta)
            .ok_or_else(|| format_err!("signal overflow: {} + {}",
                                       self.cur_signal, signal_delta))?;
        // States are stored in memory as u16; reject anything wider.
        if state > u16::max_value() as u32 {
            bail!("state overflow: {}", state);
        }
        // Only advance the cursor once both varints have decoded cleanly.
        self.cur_pos = p;
        self.cur_signal = signal + 1;
        Ok(Some((signal, state as u16)))
    }

    /// Consumes the iterator, collecting all remaining entries into a map.
    fn to_map(mut self) -> Result<BTreeMap<u32, u16>, Error> {
        let mut out = BTreeMap::new();
        while let Some((signal, state)) = self.next()? {
            out.insert(signal, state);
        }
        Ok(out)
    }
}
|
||||
|
||||
/// Representation of a `signal_camera` row.
/// `signal_id` is implied by the `Signal` which owns this struct.
#[derive(Debug)]
pub struct SignalCamera {
    pub camera_id: i32,
    pub type_: SignalCameraType,
}

/// Representation of the `type` field in a `signal_camera` row.
#[derive(Debug)]
pub enum SignalCameraType {
    Direct = 0,
    Indirect = 1,
}

/// A single state change row, as passed to the callback of `State::list_changes_by_time`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct ListStateChangesRow {
    pub when: recording::Time,
    pub signal: u32,
    pub state: u16,  // 0 means the signal becomes unknown.
}
|
||||
|
||||
impl State {
|
||||
    /// Loads all in-memory signal state (signals, types, points, camera associations)
    /// from the given database connection.
    pub fn init(conn: &Connection) -> Result<Self, Error> {
        let max_signal_changes: Option<i64> =
            conn.query_row("select max_signal_changes from meta", params![], |row| row.get(0))?;
        let mut signals_by_id = State::init_signals(conn)?;
        // Signals must exist before their camera associations can be attached.
        State::fill_signal_cameras(conn, &mut signals_by_id)?;
        Ok(State {
            max_signal_changes,
            signals_by_id,
            types_by_uuid: State::init_types(conn)?,
            points_by_time: State::init_points(conn)?,
            dirty_by_time: BTreeSet::new(),  // nothing to flush yet.
        })
    }
|
||||
|
||||
    /// Calls `f` for each signal state change with `when` in `desired_time`, preceded by
    /// the full state in effect just before `desired_time.start` (if any), reported at the
    /// earlier point's time.
    pub fn list_changes_by_time(
        &self, desired_time: Range<recording::Time>, f: &mut dyn FnMut(&ListStateChangesRow)) {

        // First find the state immediately before. If it exists, include it.
        if let Some((&when, p)) = self.points_by_time.range(..desired_time.start).next_back() {
            for (&signal, &state) in &p.after() {
                f(&ListStateChangesRow {
                    when,
                    signal,
                    state,
                });
            }
        }

        // Then include changes up to (but not including) the end time.
        for (&when, p) in self.points_by_time.range(desired_time.clone()) {
            let mut it = p.changes();
            while let Some((signal, state)) = it.next().expect("in-mem changes is valid") {
                f(&ListStateChangesRow {
                    when,
                    signal,
                    state,
                });
            }
        }
    }
|
||||
|
||||
    /// Applies the given signal states over the half-open time range `when`.
    ///
    /// `signals` must be sorted ascending and match `states` in length; both are checked
    /// by `update_signals_validate` before any mutation occurs.
    pub fn update_signals(
        &mut self, when: Range<recording::Time>, signals: &[u32], states: &[u16])
        -> Result<(), base::Error> {
        // Do input validation before any mutation.
        self.update_signals_validate(signals, states)?;

        // Follow the std::ops::Range convention of considering a range empty if its start >= end.
        // Bailing early in the empty case isn't just an optimization; apply_observation_end would
        // be incorrect otherwise.
        if when.end <= when.start {
            return Ok(());
        }

        // Apply the end before the start so that the `prev` state can be examined.
        self.update_signals_end(when.end, signals, states);
        self.update_signals_start(when.start, signals, states);
        self.update_signals_middle(when, signals, states);

        // May have added points above; enforce max_signal_changes.
        self.gc();
        Ok(())
    }
|
||||
|
||||
/// Performs garbage collection if the number of points exceeds `max_signal_changes`.
|
||||
fn gc(&mut self) {
|
||||
let max = match self.max_signal_changes {
|
||||
None => return,
|
||||
Some(m) if m < 0 => 0 as usize,
|
||||
Some(m) if m > (isize::max_value() as i64) => return,
|
||||
Some(m) => m as usize,
|
||||
};
|
||||
let to_remove = match self.points_by_time.len().checked_sub(max) {
|
||||
None => return,
|
||||
Some(p) => p,
|
||||
};
|
||||
debug!("Performing signal GC: have {} points, want only {}, so removing {}",
|
||||
self.points_by_time.len(), max, to_remove);
|
||||
|
||||
let remove: smallvec::SmallVec<[recording::Time; 4]> =
|
||||
self.points_by_time.keys().take(to_remove).map(|p| *p).collect();
|
||||
|
||||
for p in &remove {
|
||||
self.points_by_time.remove(p);
|
||||
self.dirty_by_time.insert(*p);
|
||||
}
|
||||
}
|
||||
|
||||
    /// Helper for `update_signals` to do validation.
    ///
    /// Checks that `signals`/`states` match in length, that signals are strictly increasing,
    /// that each signal exists, and that each non-zero state is defined for the signal's type.
    fn update_signals_validate(&self, signals: &[u32], states: &[u16]) -> Result<(), base::Error> {
        if signals.len() != states.len() {
            bail_t!(InvalidArgument, "signals and states must have same length");
        }
        let mut next_allowed = 0u32;
        for (&signal, &state) in signals.iter().zip(states) {
            // Strictly increasing: each signal must be >= previous + 1.
            if signal < next_allowed {
                bail_t!(InvalidArgument, "signals must be monotonically increasing");
            }
            match self.signals_by_id.get(&signal) {
                None => bail_t!(InvalidArgument, "unknown signal {}", signal),
                Some(ref s) => {
                    // A signal whose type has no enum rows has no valid non-zero states.
                    let empty = Vec::new();
                    let states = self.types_by_uuid.get(&s.type_)
                        .map(|t| &t.states)
                        .unwrap_or(&empty);
                    // State 0 ("unknown") is always allowed; others must be declared.
                    // binary_search is valid because init_types loads states ordered by value.
                    if state != 0 && states.binary_search_by_key(&state, |s| s.value).is_err() {
                        bail_t!(FailedPrecondition, "signal {} specifies unknown state {}",
                                signal, state);
                    }
                },
            }
            next_allowed = signal + 1;
        }
        Ok(())
    }
|
||||
|
||||
    /// Helper for `update_signals` to apply the end point.
    ///
    /// Ensures there is a point at `end` whose `prev` reflects the desired update and whose
    /// `changes` revert it, so state after `end` is unaffected. No-ops (and avoids dirtying
    /// the database) when the update wouldn't alter anything.
    fn update_signals_end(&mut self, end: recording::Time, signals: &[u32], states: &[u16]) {
        let mut prev;
        let mut changes = BTreeMap::<u32, u16>::new();
        if let Some((&t, ref mut p)) = self.points_by_time.range_mut(..=end).next_back() {
            if t == end {
                // Already have a point at end. Adjust it. prev starts unchanged...
                prev = p.prev().to_map().expect("in-mem prev is valid");

                // ...and then prev and changes are altered to reflect the desired update.
                State::update_signals_end_maps(signals, states, &mut prev, &mut changes);

                // If this doesn't alter the new state, don't dirty the database.
                if changes.is_empty() {
                    return;
                }

                // Any existing changes should still be applied. They win over reverting to prev.
                let mut it = p.changes();
                while let Some((signal, state)) = it.next().expect("in-mem changes is valid") {
                    changes.entry(signal).and_modify(|e| *e = state).or_insert(state);
                }
                self.dirty_by_time.insert(t);
                p.swap(&mut Point::new(&prev, &serialize(&changes)));
                return;
            }

            // Don't have a point at end, but do have previous state.
            prev = p.after();
        } else {
            // No point at or before end. Start from scratch (all signals unknown).
            prev = BTreeMap::new();
        }

        // Create a new end point if necessary.
        State::update_signals_end_maps(signals, states, &mut prev, &mut changes);
        if changes.is_empty() {
            return;
        }
        self.dirty_by_time.insert(end);
        self.points_by_time.insert(end, Point::new(&prev, &serialize(&changes)));
    }
|
||||
|
||||
/// Helper for `update_signals_end`. Adjusts `prev` (the state prior to the end point) to
|
||||
/// reflect the desired update (in `signals` and `states`). Adjusts `changes` (changes to
|
||||
/// execute at the end point) to undo the change.
|
||||
fn update_signals_end_maps(signals: &[u32], states: &[u16], prev: &mut BTreeMap<u32, u16>,
|
||||
changes: &mut BTreeMap<u32, u16>) {
|
||||
for (&signal, &state) in signals.iter().zip(states) {
|
||||
match prev.entry(signal) {
|
||||
Entry::Vacant(e) => {
|
||||
changes.insert(signal, 0);
|
||||
e.insert(state);
|
||||
},
|
||||
Entry::Occupied(mut e) => {
|
||||
if state == 0 {
|
||||
changes.insert(signal, *e.get());
|
||||
e.remove();
|
||||
} else if *e.get() != state {
|
||||
changes.insert(signal, *e.get());
|
||||
*e.get_mut() = state;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper for `update_signals` to apply the start point.
|
||||
fn update_signals_start(&mut self, start: recording::Time, signals: &[u32], states: &[u16]) {
|
||||
let prev;
|
||||
if let Some((&t, ref mut p)) = self.points_by_time.range_mut(..=start).next_back() {
|
||||
if t == start {
|
||||
// Reuse existing point at start.
|
||||
prev = p.prev().to_map().expect("in-mem prev is valid");
|
||||
let mut changes = p.changes().to_map().expect("in-mem changes is valid");
|
||||
let mut dirty = false;
|
||||
for (&signal, &state) in signals.iter().zip(states) {
|
||||
match changes.entry(signal) {
|
||||
Entry::Occupied(mut e) => {
|
||||
if *e.get() != state {
|
||||
dirty = true;
|
||||
if state == *prev.get(&signal).unwrap_or(&0) {
|
||||
e.remove();
|
||||
} else {
|
||||
*e.get_mut() = state;
|
||||
}
|
||||
}
|
||||
},
|
||||
Entry::Vacant(e) => {
|
||||
if signal != 0 {
|
||||
dirty = true;
|
||||
e.insert(state);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
if dirty {
|
||||
p.swap(&mut Point::new(&prev, &serialize(&changes)));
|
||||
self.dirty_by_time.insert(start);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Create new point at start, using state from previous point.
|
||||
prev = p.after();
|
||||
} else {
|
||||
// Create new point at start, from scratch.
|
||||
prev = BTreeMap::new();
|
||||
}
|
||||
|
||||
let mut changes = BTreeMap::new();
|
||||
for (&signal, &state) in signals.iter().zip(states) {
|
||||
if state != *prev.get(&signal).unwrap_or(&0) {
|
||||
changes.insert(signal, state);
|
||||
}
|
||||
}
|
||||
|
||||
if changes.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
self.dirty_by_time.insert(start);
|
||||
self.points_by_time.insert(start, Point::new(&prev, &serialize(&changes)));
|
||||
}
|
||||
|
||||
    /// Helper for `update_signals` to apply all points in `(when.start, when.end)`.
    ///
    /// Interior points' `prev` is rewritten to reflect the update, and any of their changes
    /// touching the updated signals are discarded (the update wins over old interior changes).
    /// Points left with no changes are deleted entirely.
    fn update_signals_middle(&mut self, when: Range<recording::Time>, signals: &[u32],
                             states: &[u16]) {
        let mut to_delete = Vec::new();
        // Exclusive of when.start: that point was already handled by update_signals_start.
        let after_start = recording::Time(when.start.0+1);
        for (&t, ref mut p) in self.points_by_time.range_mut(after_start..when.end) {
            let mut prev = p.prev().to_map().expect("in-mem prev is valid");

            // Update prev to reflect desired update.
            for (&signal, &state) in signals.iter().zip(states) {
                match prev.entry(signal) {
                    Entry::Occupied(mut e) => {
                        if state == 0 {
                            e.remove_entry();
                        } else if *e.get() != state {
                            *e.get_mut() = state;
                        }
                    },
                    Entry::Vacant(e) => {
                        if state != 0 {
                            e.insert(state);
                        }
                    }
                }
            }

            // Trim changes to omit any change to signals.
            // Re-serialize the kept entries directly, recomputing deltas as entries drop out.
            let mut changes = Vec::with_capacity(3*signals.len());
            let mut it = p.changes();
            let mut next_allowed = 0;
            let mut dirty = false;
            while let Some((signal, state)) = it.next().expect("in-memory changes is valid") {
                // binary_search is valid: update_signals_validate enforces sorted signals.
                if signals.binary_search(&signal).is_ok() { // discard.
                    dirty = true;
                } else { // keep.
                    assert!(signal >= next_allowed);
                    coding::append_varint32(signal - next_allowed, &mut changes);
                    coding::append_varint32(state as u32, &mut changes);
                    next_allowed = signal + 1;
                }
            }
            if changes.is_empty() {
                to_delete.push(t);
            } else {
                p.swap(&mut Point::new(&prev, &changes));
            }
            if dirty {
                self.dirty_by_time.insert(t);
            }
        }

        // Delete any points with no more changes.
        for &t in &to_delete {
            self.points_by_time.remove(&t).expect("point exists");
        }
    }
|
||||
|
||||
    /// Flushes all pending database changes to the given transaction.
    ///
    /// The caller is expected to call `post_flush` afterward if the transaction is
    /// successfully committed. No mutations should happen between these calls.
    pub fn flush(&mut self, tx: &Transaction) -> Result<(), Error> {
        let mut i_stmt = tx.prepare(r#"
            insert or replace into signal_change (time_90k, changes) values (?, ?)
        "#)?;
        let mut d_stmt = tx.prepare(r#"
            delete from signal_change where time_90k = ?
        "#)?;
        for &t in &self.dirty_by_time {
            match self.points_by_time.entry(t) {
                Entry::Occupied(ref e) => {
                    // Dirty point still exists: upsert its serialized changes blob.
                    let p = e.get();
                    i_stmt.execute(params![
                        t.0,
                        &p.data[p.changes_off..],
                    ])?;
                },
                Entry::Vacant(_) => {
                    // Dirty point was removed in memory (e.g. by gc): delete its row.
                    d_stmt.execute(params![t.0])?;
                },
            }
        }
        Ok(())
    }

    /// Marks that the previous `flush` was completed successfully.
    ///
    /// See notes there.
    pub fn post_flush(&mut self) {
        self.dirty_by_time.clear();
    }
|
||||
|
||||
    /// Reads the `signal` table into a map keyed by signal id. `cameras` is left empty here
    /// and filled in later by `fill_signal_cameras`.
    fn init_signals(conn: &Connection) -> Result<BTreeMap<u32, Signal>, Error> {
        let mut signals = BTreeMap::new();
        let mut stmt = conn.prepare(r#"
            select
                id,
                source_uuid,
                type_uuid,
                short_name
            from
                signal
        "#)?;
        let mut rows = stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            let id = row.get(0)?;
            let source: FromSqlUuid = row.get(1)?;
            let type_: FromSqlUuid = row.get(2)?;
            signals.insert(id, Signal {
                id,
                source: source.0,
                type_: type_.0,
                short_name: row.get(3)?,
                cameras: Vec::new(),
            });
        }
        Ok(signals)
    }

    /// Reads the `signal_change` table into a map of `Point`s keyed by time.
    ///
    /// Each point's prior state is reconstructed by replaying all change blobs in time
    /// order (hence the `order by time_90k`), accumulating them into `cur`.
    fn init_points(conn: &Connection) -> Result<BTreeMap<recording::Time, Point>, Error> {
        let mut stmt = conn.prepare(r#"
            select
                time_90k,
                changes
            from
                signal_change
            order by time_90k
        "#)?;
        let mut rows = stmt.query(params![])?;
        let mut points = BTreeMap::new();
        let mut cur = BTreeMap::new();  // latest signal -> state, where state != 0
        while let Some(row) = rows.next()? {
            let time_90k = recording::Time(row.get(0)?);
            let changes = row.get_raw_checked(1)?.as_blob()?;
            let mut it = PointDataIterator::new(changes);
            while let Some((signal, state)) = it.next()? {
                if state == 0 {
                    cur.remove(&signal);
                } else {
                    cur.insert(signal, state);
                }
            }
            // `cur` currently holds the state *after* this point... but Point::new takes the
            // state prior plus the changes; the changes blob is replayed on top internally.
            points.insert(time_90k, Point::new(&cur, changes));
        }
        Ok(points)
    }
|
||||
|
||||
    /// Fills the `cameras` field of the `Signal` structs within the supplied `signals`.
    ///
    /// Fails if a `signal_camera` row references a signal id absent from `signals`.
    fn fill_signal_cameras(conn: &Connection, signals: &mut BTreeMap<u32, Signal>)
                           -> Result<(), Error> {
        let mut stmt = conn.prepare(r#"
            select
                signal_id,
                camera_id,
                type
            from
                signal_camera
            order by signal_id, camera_id
        "#)?;
        let mut rows = stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            let signal_id = row.get(0)?;
            let s = signals.get_mut(&signal_id)
                .ok_or_else(|| format_err!("signal_camera row for unknown signal id {}",
                                           signal_id))?;
            let type_ = row.get(2)?;
            s.cameras.push(SignalCamera {
                camera_id: row.get(1)?,
                // Integer column maps onto the SignalCameraType discriminants.
                type_: match type_ {
                    0 => SignalCameraType::Direct,
                    1 => SignalCameraType::Indirect,
                    _ => bail!("unknown signal_camera type {}", type_),
                },
            });
        }
        Ok(())
    }

    /// Reads the `signal_type_enum` table into a map of `Type`s keyed by type uuid.
    /// States within each type are sorted by value (via the `order by`), as required by
    /// the binary search in `update_signals_validate`.
    fn init_types(conn: &Connection) -> Result<FnvHashMap<Uuid, Type>, Error> {
        let mut types = FnvHashMap::default();
        let mut stmt = conn.prepare(r#"
            select
                type_uuid,
                value,
                name,
                motion,
                color
            from
                signal_type_enum
            order by type_uuid, value
        "#)?;
        let mut rows = stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            let type_: FromSqlUuid = row.get(0)?;
            types.entry(type_.0).or_insert_with(Type::default).states.push(TypeState {
                value: row.get(1)?,
                name: row.get(2)?,
                motion: row.get(3)?,
                color: row.get(4)?,
            });
        }
        Ok(types)
    }
|
||||
|
||||
pub fn signals_by_id(&self) -> &BTreeMap<u32, Signal> { &self.signals_by_id }
|
||||
pub fn types_by_uuid(&self) -> &FnvHashMap<Uuid, Type> { & self.types_by_uuid }
|
||||
}
|
||||
|
||||
/// Representation of a `signal` row.
#[derive(Debug)]
pub struct Signal {
    pub id: u32,
    pub source: Uuid,
    // uuid of the signal's type; look up in `State::types_by_uuid`.
    pub type_: Uuid,
    pub short_name: String,

    /// The cameras this signal is associated with. Sorted by camera id, which is unique.
    pub cameras: Vec<SignalCamera>,
}

/// Representation of a `signal_type_enum` row.
/// `type_uuid` is implied by the `Type` which owns this struct.
#[derive(Debug)]
pub struct TypeState {
    pub value: u16,
    pub name: String,
    pub motion: bool,
    pub color: String,
}

/// Representation of a signal type; currently this just gathers together the TypeStates.
#[derive(Debug, Default)]
pub struct Type {
    /// The possible states associated with this type. They are sorted by value, which is unique.
    pub states: Vec<TypeState>,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use crate::{db, testutil};
    use rusqlite::Connection;
    use super::*;

    /// Decodes a known example blob, exercising the delta decoding in `PointDataIterator`.
    #[test]
    fn test_point_data_it() {
        // Example taken from the .sql file.
        let data = b"\x01\x01\x01\x01\xc4\x01\x02";
        let mut it = super::PointDataIterator::new(data);
        assert_eq!(it.next().unwrap(), Some((1, 1)));
        assert_eq!(it.next().unwrap(), Some((3, 1)));
        assert_eq!(it.next().unwrap(), Some((200, 2)));
        assert_eq!(it.next().unwrap(), None);
    }

    /// A freshly initialized database should report no state changes at all.
    #[test]
    fn test_empty_db() {
        testutil::init();
        let mut conn = Connection::open_in_memory().unwrap();
        db::init(&mut conn).unwrap();
        let s = State::init(&conn).unwrap();
        s.list_changes_by_time(recording::Time::min_value() .. recording::Time::max_value(),
                               &mut |_r| panic!("no changes expected"));
    }

    /// Exercises a full update / list / flush / reload cycle, then repeats it to hit the
    /// `max_signal_changes` limit and verify garbage collection of the oldest point.
    #[test]
    fn round_trip() {
        testutil::init();
        let mut conn = Connection::open_in_memory().unwrap();
        db::init(&mut conn).unwrap();
        // Two signals of the same type, and a low change limit to force GC later.
        conn.execute_batch(r#"
            update meta set max_signal_changes = 2;

            insert into signal (id, source_uuid, type_uuid, short_name)
                        values (1, x'1B3889C0A59F400DA24C94EBEB19CC3A',
                                x'EE66270FD9C648198B339720D4CBCA6B', 'a'),
                               (2, x'A4A73D9A53424EBCB9F6366F1E5617FA',
                                x'EE66270FD9C648198B339720D4CBCA6B', 'b');

            insert into signal_type_enum (type_uuid, value, name, motion, color)
               values (x'EE66270FD9C648198B339720D4CBCA6B', 1, 'still', 0, 'black'),
                      (x'EE66270FD9C648198B339720D4CBCA6B', 2, 'moving', 1, 'red');
        "#).unwrap();
        let mut s = State::init(&conn).unwrap();
        s.list_changes_by_time(recording::Time::min_value() .. recording::Time::max_value(),
                               &mut |_r| panic!("no changes expected"));
        const START: recording::Time = recording::Time(140067462600000); // 2019-04-26T11:59:00
        const NOW: recording::Time = recording::Time(140067468000000); // 2019-04-26T12:00:00
        s.update_signals(START..NOW, &[1, 2], &[2, 1]).unwrap();
        let mut rows = Vec::new();

        // Expect the states to take effect at START and revert to unknown (0) at NOW.
        const EXPECTED: &[ListStateChangesRow] = &[
            ListStateChangesRow {
                when: START,
                signal: 1,
                state: 2,
            },
            ListStateChangesRow {
                when: START,
                signal: 2,
                state: 1,
            },
            ListStateChangesRow {
                when: NOW,
                signal: 1,
                state: 0,
            },
            ListStateChangesRow {
                when: NOW,
                signal: 2,
                state: 0,
            },
        ];

        s.list_changes_by_time(recording::Time::min_value() .. recording::Time::max_value(),
                               &mut |r| rows.push(*r));
        assert_eq!(&rows[..], EXPECTED);

        {
            let tx = conn.transaction().unwrap();
            s.flush(&tx).unwrap();
            tx.commit().unwrap();
        }

        // Reload from the database; the same changes should come back.
        drop(s);
        let mut s = State::init(&conn).unwrap();
        rows.clear();
        s.list_changes_by_time(recording::Time::min_value() .. recording::Time::max_value(),
                               &mut |r| rows.push(*r));
        assert_eq!(&rows[..], EXPECTED);

        // Go through it again. This time, hit the max number of signals, forcing START to be
        // dropped.
        const SOON: recording::Time = recording::Time(140067473400000); // 2019-04-26T12:01:00
        s.update_signals(NOW..SOON, &[1, 2], &[1, 2]).unwrap();
        rows.clear();
        const EXPECTED2: &[ListStateChangesRow] = &[
            ListStateChangesRow {
                when: NOW,
                signal: 1,
                state: 1,
            },
            ListStateChangesRow {
                when: NOW,
                signal: 2,
                state: 2,
            },
            ListStateChangesRow {
                when: SOON,
                signal: 1,
                state: 0,
            },
            ListStateChangesRow {
                when: SOON,
                signal: 2,
                state: 0,
            },
        ];
        s.list_changes_by_time(recording::Time::min_value() .. recording::Time::max_value(),
                               &mut |r| rows.push(*r));
        assert_eq!(&rows[..], EXPECTED2);

        {
            let tx = conn.transaction().unwrap();
            s.flush(&tx).unwrap();
            tx.commit().unwrap();
        }
        // Reload once more; the GC'd state should also round-trip.
        drop(s);
        let s = State::init(&conn).unwrap();
        rows.clear();
        s.list_changes_by_time(recording::Time::min_value() .. recording::Time::max_value(),
                               &mut |r| rows.push(*r));
        assert_eq!(&rows[..], EXPECTED2);
    }
}
|
||||
BIN
server/db/testdata/avc1
vendored
Normal file
BIN
server/db/testdata/avc1
vendored
Normal file
Binary file not shown.
BIN
server/db/testdata/video_sample_index.bin
vendored
Normal file
BIN
server/db/testdata/video_sample_index.bin
vendored
Normal file
Binary file not shown.
206
server/db/testutil.rs
Normal file
206
server/db/testutil.rs
Normal file
@@ -0,0 +1,206 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2016-2020 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use base::clock::Clocks;
|
||||
use crate::db;
|
||||
use crate::dir;
|
||||
use fnv::FnvHashMap;
|
||||
use mylog;
|
||||
use rusqlite;
|
||||
use std::env;
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use tempdir::TempDir;
|
||||
use time;
|
||||
use uuid::Uuid;
|
||||
use crate::writer;
|
||||
|
||||
// Guards the one-time global test setup performed in `init` below.
static INIT: parking_lot::Once = parking_lot::Once::new();

/// id of the camera created by `TestDb::new` below.
pub const TEST_CAMERA_ID: i32 = 1;
pub const TEST_STREAM_ID: i32 = 1;

// A canned `avc1` sample entry blob for tests.
// NOTE(review): presumably a valid ISO BMFF avc1 box for a 1920x1080 stream — confirm
// against the mp4 code that consumes it.
pub const TEST_VIDEO_SAMPLE_ENTRY_DATA: &[u8] =
    b"\x00\x00\x00\x7D\x61\x76\x63\x31\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\
    \x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x80\x04\x38\x00\x48\x00\x00\x00\x48\x00\x00\x00\x00\
    \x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
    \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\xFF\xFF\x00\x00\x00\x27\x61\x76\
    \x63\x43\x01\x4D\x00\x2A\xFF\xE1\x00\x10\x67\x4D\x00\x2A\x95\xA8\x1E\x00\x89\xF9\x66\xE0\x20\
    \x20\x20\x40\x01\x00\x04\x68\xEE\x3C\x80";
|
||||
|
||||
/// Performs global initialization for tests.
///    * set up logging. (Note the output can be confusing unless `RUST_TEST_THREADS=1` is set in
///      the program's environment prior to running.)
///    * set `TZ=America/Los_Angeles` so that tests that care about calendar time get the expected
///      results regardless of machine setup.)
///    * use a fast but insecure password hashing format.
pub fn init() {
    // Once::call_once ensures this body runs exactly once across all tests.
    INIT.call_once(|| {
        let h = mylog::Builder::new()
            .set_spec(&::std::env::var("MOONFIRE_LOG").unwrap_or("info".to_owned()))
            .build();
        h.install().unwrap();
        env::set_var("TZ", "America/Los_Angeles");
        time::tzset();
        crate::auth::set_test_config();
    });
}
|
||||
|
||||
/// A complete test fixture: in-memory database, one camera/stream, a temporary sample
/// file directory, and a running syncer. Constructed via `TestDb::new`.
pub struct TestDb<C: Clocks + Clone> {
    pub db: Arc<db::Database<C>>,
    pub dirs_by_stream_id: Arc<FnvHashMap<i32, Arc<dir::SampleFileDir>>>,
    pub syncer_channel: writer::SyncerChannel<::std::fs::File>,
    pub syncer_join: thread::JoinHandle<()>,
    // Held so the temporary directory lives as long as the fixture.
    pub tmpdir: TempDir,
    pub test_camera_uuid: Uuid,
}
|
||||
|
||||
impl<C: Clocks + Clone> TestDb<C> {
    /// Creates a test database with one camera.
    pub fn new(clocks: C) -> Self {
        Self::new_with_flush_if_sec(clocks, 0)
    }

    /// As `new`, but with a configurable `flush_if_sec` on the test stream.
    pub(crate) fn new_with_flush_if_sec(clocks: C, flush_if_sec: i64) -> Self {
        let tmpdir = TempDir::new("moonfire-nvr-test").unwrap();

        let mut conn = rusqlite::Connection::open_in_memory().unwrap();
        db::init(&mut conn).unwrap();
        let db = Arc::new(db::Database::new(clocks, conn, true).unwrap());
        let (test_camera_uuid, sample_file_dir_id);
        let path = tmpdir.path().to_str().unwrap().to_owned();
        let dir;
        {
            // Scope the lock so it's released before starting the syncer below.
            let mut l = db.lock();
            sample_file_dir_id = l.add_sample_file_dir(path.to_owned()).unwrap();
            // The first camera added is expected to get TEST_CAMERA_ID.
            assert_eq!(TEST_CAMERA_ID, l.add_camera(db::CameraChange {
                short_name: "test camera".to_owned(),
                description: "".to_owned(),
                onvif_host: "test-camera".to_owned(),
                username: "foo".to_owned(),
                password: "bar".to_owned(),
                streams: [
                    db::StreamChange {
                        sample_file_dir_id: Some(sample_file_dir_id),
                        rtsp_url: "rtsp://test-camera/main".to_owned(),
                        record: true,
                        flush_if_sec,
                    },
                    Default::default(),  // second stream left unconfigured.
                ],
            }).unwrap());
            test_camera_uuid = l.cameras_by_id().get(&TEST_CAMERA_ID).unwrap().uuid;
            l.update_retention(&[db::RetentionChange {
                stream_id: TEST_STREAM_ID,
                new_record: true,
                new_limit: 1048576,
            }]).unwrap();
            dir = l.sample_file_dirs_by_id().get(&sample_file_dir_id).unwrap().get().unwrap();
        }
        let mut dirs_by_stream_id = FnvHashMap::default();
        dirs_by_stream_id.insert(TEST_STREAM_ID, dir.clone());
        let (syncer_channel, syncer_join) =
            writer::start_syncer(db.clone(), sample_file_dir_id).unwrap();
        TestDb {
            db,
            dirs_by_stream_id: Arc::new(dirs_by_stream_id),
            syncer_channel,
            syncer_join,
            tmpdir,
            test_camera_uuid,
        }
    }

    /// Creates a recording with a fresh `RecordingToInsert` row which has been touched only by
    /// a `SampleIndexEncoder`. Fills in a video sample entry id and such to make it valid.
    /// There will no backing sample file, so it won't be possible to generate a full `.mp4`.
    pub fn insert_recording_from_encoder(&self, r: db::RecordingToInsert)
                                         -> db::ListRecordingsRow {
        use crate::recording::{self, TIME_UNITS_PER_SEC};
        let mut db = self.db.lock();
        let video_sample_entry_id = db.insert_video_sample_entry(db::VideoSampleEntryToInsert {
            width: 1920,
            height: 1080,
            pasp_h_spacing: 1,
            pasp_v_spacing: 1,
            data: [0u8; 100].to_vec(),
            rfc6381_codec: "avc1.000000".to_owned(),
        }).unwrap();
        // Fill in the fields the encoder doesn't set; keep the rest from `r`.
        let (id, _) = db.add_recording(TEST_STREAM_ID, db::RecordingToInsert {
            start: recording::Time(1430006400i64 * TIME_UNITS_PER_SEC),
            video_sample_entry_id,
            wall_duration_90k: r.media_duration_90k,
            ..r
        }).unwrap();
        db.mark_synced(id).unwrap();
        db.flush("create_recording_from_encoder").unwrap();
        // Read the freshly flushed row back out for the caller.
        let mut row = None;
        db.list_recordings_by_id(TEST_STREAM_ID, id.recording() .. id.recording()+1,
                                 &mut |r| { row = Some(r); Ok(()) }).unwrap();
        row.unwrap()
    }
}
|
||||
|
||||
// For benchmarking
/// Inserts `num` consecutive dummy recordings (sharing one canned sample index) into the
/// test stream, then flushes. Each recording immediately follows the previous one.
#[cfg(feature="nightly")]
pub fn add_dummy_recordings_to_db(db: &db::Database, num: usize) {
    use crate::recording::{self, TIME_UNITS_PER_SEC};
    let mut data = Vec::new();
    data.extend_from_slice(include_bytes!("testdata/video_sample_index.bin"));
    let mut db = db.lock();
    let video_sample_entry_id = db.insert_video_sample_entry(db::VideoSampleEntryToInsert {
        width: 1920,
        height: 1080,
        pasp_h_spacing: 1,
        pasp_v_spacing: 1,
        data: [0u8; 100].to_vec(),
        rfc6381_codec: "avc1.000000".to_owned(),
    }).unwrap();
    // Template row; start/run_offset are advanced per iteration below.
    let mut recording = db::RecordingToInsert {
        sample_file_bytes: 30104460,
        start: recording::Time(1430006400i64 * TIME_UNITS_PER_SEC),
        media_duration_90k: 5399985,
        wall_duration_90k: 5399985,
        video_samples: 1800,
        video_sync_samples: 60,
        video_sample_entry_id: video_sample_entry_id,
        video_index: data,
        run_offset: 0,
        ..Default::default()
    };
    for _ in 0..num {
        let (id, _) = db.add_recording(TEST_STREAM_ID, recording.clone()).unwrap();
        // Next recording starts exactly when this one ends, continuing the same run.
        recording.start += recording::Duration(recording.wall_duration_90k as i64);
        recording.run_offset += 1;
        db.mark_synced(id).unwrap();
    }
    db.flush("add_dummy_recordings_to_db").unwrap();
}
|
||||
308
server/db/upgrade/mod.rs
Normal file
308
server/db/upgrade/mod.rs
Normal file
@@ -0,0 +1,308 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2016-2020 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Upgrades the database schema.
|
||||
///
|
||||
/// See `guide/schema.md` for more information.
|
||||
|
||||
use crate::db;
|
||||
use failure::{Error, bail};
|
||||
use log::info;
|
||||
use std::ffi::CStr;
|
||||
use std::io::Write;
|
||||
use nix::NixPath;
|
||||
use rusqlite::params;
|
||||
use uuid::Uuid;
|
||||
|
||||
mod v0_to_v1;
|
||||
mod v1_to_v2;
|
||||
mod v2_to_v3;
|
||||
mod v3_to_v4;
|
||||
mod v4_to_v5;
|
||||
mod v5_to_v6;
|
||||
|
||||
const UPGRADE_NOTES: &'static str =
|
||||
concat!("upgraded using moonfire-db ", env!("CARGO_PKG_VERSION"));
|
||||
|
||||
#[derive(Debug)]
pub struct Args<'a> {
    /// The sample file directory, when available. Not used directly in this
    /// module; presumably consumed by individual `vN_to_vN+1` steps that
    /// rename/delete files — confirm against those modules.
    pub sample_file_dir: Option<&'a std::path::Path>,

    /// SQLite journal mode to set for the duration of the upgrade
    /// (e.g. "delete"); passed verbatim to `set_journal_mode`.
    pub preset_journal: &'a str,

    /// If true, skip the post-upgrade `vacuum` in `run`.
    pub no_vacuum: bool,
}
|
||||
|
||||
fn set_journal_mode(conn: &rusqlite::Connection, requested: &str) -> Result<(), Error> {
|
||||
assert!(!requested.contains(';')); // quick check for accidental sql injection.
|
||||
let actual = conn.query_row(&format!("pragma journal_mode = {}", requested), params![],
|
||||
|row| row.get::<_, String>(0))?;
|
||||
info!("...database now in journal_mode {} (requested {}).", actual, requested);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn upgrade(args: &Args, target_ver: i32, conn: &mut rusqlite::Connection) -> Result<(), Error> {
|
||||
let upgraders = [
|
||||
v0_to_v1::run,
|
||||
v1_to_v2::run,
|
||||
v2_to_v3::run,
|
||||
v3_to_v4::run,
|
||||
v4_to_v5::run,
|
||||
v5_to_v6::run,
|
||||
];
|
||||
|
||||
{
|
||||
assert_eq!(upgraders.len(), db::EXPECTED_VERSION as usize);
|
||||
let old_ver =
|
||||
conn.query_row("select max(id) from version", params![],
|
||||
|row| row.get(0))?;
|
||||
if old_ver > db::EXPECTED_VERSION {
|
||||
bail!("Database is at version {}, later than expected {}",
|
||||
old_ver, db::EXPECTED_VERSION);
|
||||
} else if old_ver < 0 {
|
||||
bail!("Database is at negative version {}!", old_ver);
|
||||
}
|
||||
info!("Upgrading database from version {} to version {}...", old_ver, target_ver);
|
||||
set_journal_mode(&conn, args.preset_journal)?;
|
||||
for ver in old_ver .. target_ver {
|
||||
info!("...from version {} to version {}", ver, ver + 1);
|
||||
let tx = conn.transaction()?;
|
||||
upgraders[ver as usize](&args, &tx)?;
|
||||
tx.execute(r#"
|
||||
insert into version (id, unix_time, notes)
|
||||
values (?, cast(strftime('%s', 'now') as int32), ?)
|
||||
"#, params![ver + 1, UPGRADE_NOTES])?;
|
||||
tx.commit()?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn run(args: &Args, conn: &mut rusqlite::Connection) -> Result<(), Error> {
|
||||
db::set_integrity_pragmas(conn)?;
|
||||
upgrade(args, db::EXPECTED_VERSION, conn)?;
|
||||
|
||||
// WAL is the preferred journal mode for normal operation; it reduces the number of syncs
|
||||
// without compromising safety.
|
||||
set_journal_mode(&conn, "wal")?;
|
||||
if !args.no_vacuum {
|
||||
info!("...vacuuming database after upgrade.");
|
||||
conn.execute_batch(r#"
|
||||
pragma page_size = 16384;
|
||||
vacuum;
|
||||
"#)?;
|
||||
}
|
||||
info!("...done.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// A uuid-based path, as used in version 0 and version 1 schemas.
/// 37 bytes: 36 for the hyphenated uuid text plus one trailing NUL byte
/// (the array is zero-initialized in `from` and only the first 36 bytes
/// are written), so the whole buffer forms a valid C string.
struct UuidPath([u8; 37]);
|
||||
|
||||
impl UuidPath {
    /// Builds the path from a uuid by formatting its hyphenated form
    /// (exactly 36 ASCII characters) into the first 36 bytes of the buffer.
    /// The final byte stays 0, serving as the NUL terminator that
    /// `with_nix_path` relies on.
    pub(crate) fn from(uuid: Uuid) -> Self {
        let mut buf = [0u8; 37];
        write!(&mut buf[..36], "{}", uuid.to_hyphenated_ref())
            .expect("can't format uuid to pathname buf");
        UuidPath(buf)
    }
}
|
||||
|
||||
impl NixPath for UuidPath {
    // A uuid path is never empty; len excludes the trailing NUL, matching
    // C string conventions.
    fn is_empty(&self) -> bool { false }
    fn len(&self) -> usize { 36 }

    fn with_nix_path<T, F>(&self, f: F) -> Result<T, nix::Error>
    where F: FnOnce(&CStr) -> T {
        // The expect can't fire: `from` leaves byte 36 as the only NUL
        // (a hyphenated uuid is ASCII hex digits and '-').
        let p = CStr::from_bytes_with_nul(&self.0[..]).expect("no interior nuls");
        Ok(f(p))
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use crate::compare;
    use crate::testutil;
    use failure::ResultExt;
    use fnv::FnvHashMap;
    use super::*;

    /// An anamorphic avc1 sample entry (320x240 with aspect information only
    /// inside the codec data, presumably its SPS — confirm); the v5->v6
    /// upgrade is expected to assign it pasp (4, 3) per the asserts below.
    const BAD_ANAMORPHIC_VIDEO_SAMPLE_ENTRY: &[u8] =
        b"\x00\x00\x00\x84\x61\x76\x63\x31\x00\x00\
          \x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
          \x00\x00\x00\x00\x00\x00\x01\x40\x00\xf0\x00\x48\x00\x00\x00\x48\
          \x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\
          \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
          \x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\xff\xff\x00\x00\x00\x2e\
          \x61\x76\x63\x43\x01\x4d\x40\x1e\xff\xe1\x00\x17\x67\x4d\x40\x1e\
          \x9a\x66\x0a\x0f\xff\x35\x01\x01\x01\x40\x00\x00\xfa\x00\x00\x03\
          \x01\xf4\x01\x01\x00\x04\x68\xee\x3c\x80";

    /// A 704x480 anamorphic entry; the v5->v6 upgrade is expected to assign
    /// it pasp (40, 33) per the asserts below.
    const GOOD_ANAMORPHIC_VIDEO_SAMPLE_ENTRY: &[u8] =
        b"\x00\x00\x00\x9f\x61\x76\x63\x31\x00\x00\x00\x00\x00\x00\x00\x01\
          \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
          \x02\xc0\x01\xe0\x00\x48\x00\x00\x00\x48\x00\x00\x00\x00\x00\x00\
          \x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
          \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
          \x00\x00\x00\x18\xff\xff\x00\x00\x00\x49\x61\x76\x63\x43\x01\x64\
          \x00\x16\xff\xe1\x00\x31\x67\x64\x00\x16\xac\x1b\x1a\x80\xb0\x3d\
          \xff\xff\x00\x28\x00\x21\x6e\x0c\x0c\x0c\x80\x00\x01\xf4\x00\x00\
          \x27\x10\x74\x30\x07\xd0\x00\x07\xa1\x25\xde\x5c\x68\x60\x0f\xa0\
          \x00\x0f\x42\x4b\xbc\xb8\x50\x01\x00\x05\x68\xee\x38\x30\x00";

    /// Opens a fresh in-memory database with the same pragmas the upgrade
    /// path would see.
    fn new_conn() -> Result<rusqlite::Connection, Error> {
        let conn = rusqlite::Connection::open_in_memory()?;
        conn.execute("pragma foreign_keys = on", params![])?;
        conn.execute("pragma fullfsync = on", params![])?;
        conn.execute("pragma synchronous = 2", params![])?;
        Ok(conn)
    }

    /// Compares the schema of `c` against a database freshly created from
    /// `fresh_sql`, panicking with a diff if they differ.
    fn compare(c: &rusqlite::Connection, ver: i32, fresh_sql: &str) -> Result<(), Error> {
        let fresh = new_conn()?;
        fresh.execute_batch(fresh_sql)?;
        if let Some(diffs) = compare::get_diffs("upgraded", &c, "fresh", &fresh)? {
            panic!("Version {}: differences found:\n{}", ver, diffs);
        }
        Ok(())
    }

    /// Upgrades and compares schemas.
    /// Doesn't (yet) compare any actual data.
    #[test]
    fn upgrade_and_compare() -> Result<(), Error> {
        testutil::init();
        let tmpdir = tempdir::TempDir::new("moonfire-nvr-test")?;
        // Seed a v0 database with one camera, four sample entries, three
        // recordings, and two reserved (garbage) sample files.
        let mut upgraded = new_conn()?;
        upgraded.execute_batch(include_str!("v0.sql"))?;
        upgraded.execute_batch(r#"
            insert into camera (id, uuid, short_name, description, host, username, password,
                                main_rtsp_path, sub_rtsp_path, retain_bytes)
                        values (1, zeroblob(16), 'test camera', 'desc', 'host', 'user', 'pass',
                                'main', 'sub', 42);
        "#)?;
        upgraded.execute(r#"
            insert into video_sample_entry (id, sha1, width, height, data)
                values (1, X'0000000000000000000000000000000000000000', 1920, 1080, ?);
        "#, params![testutil::TEST_VIDEO_SAMPLE_ENTRY_DATA])?;
        upgraded.execute(r#"
            insert into video_sample_entry (id, sha1, width, height, data)
                values (2, X'0000000000000000000000000000000000000001', 320, 240, ?);
        "#, params![BAD_ANAMORPHIC_VIDEO_SAMPLE_ENTRY])?;
        upgraded.execute(r#"
            insert into video_sample_entry (id, sha1, width, height, data)
                values (3, X'0000000000000000000000000000000000000002', 704, 480, ?);
        "#, params![GOOD_ANAMORPHIC_VIDEO_SAMPLE_ENTRY])?;
        // Entry 4 is identical to 3 but never referenced by a recording; the
        // final assert checks it gets dropped during upgrade.
        upgraded.execute(r#"
            insert into video_sample_entry (id, sha1, width, height, data)
                values (4, X'0000000000000000000000000000000000000003', 704, 480, ?);
        "#, params![GOOD_ANAMORPHIC_VIDEO_SAMPLE_ENTRY])?;
        upgraded.execute_batch(r#"
            insert into recording (id, camera_id, sample_file_bytes, start_time_90k, duration_90k,
                                   local_time_delta_90k, video_samples, video_sync_samples,
                                   video_sample_entry_id, sample_file_uuid, sample_file_sha1,
                                   video_index)
                           values (1, 1, 42, 140063580000000, 90000, 0, 1, 1, 1,
                                   X'E69D45E8CBA64DC1BA2ECB1585983A10', zeroblob(20), X'00'),
                                  (2, 1, 42, 140063580090000, 90000, 0, 1, 1, 2,
                                   X'94DE8484FF874A5295D488C8038A0312', zeroblob(20), X'00'),
                                  (3, 1, 42, 140063580180000, 90000, 0, 1, 1, 3,
                                   X'C94D4D0B533746059CD40B29039E641E', zeroblob(20), X'00');
            insert into reserved_sample_files values (X'51EF700C933E4197AAE4EE8161E94221', 0),
                                                     (X'E69D45E8CBA64DC1BA2ECB1585983A10', 1);
        "#)?;
        // Sample files on disk: one per recording (hyphenated-uuid names, as
        // in the v0/v1 schemas) plus one garbage file.
        let rec1 = tmpdir.path().join("e69d45e8-cba6-4dc1-ba2e-cb1585983a10");
        let rec2 = tmpdir.path().join("94de8484-ff87-4a52-95d4-88c8038a0312");
        let rec3 = tmpdir.path().join("c94d4d0b-5337-4605-9cd4-0b29039e641e");
        let garbage = tmpdir.path().join("51ef700c-933e-4197-aae4-ee8161e94221");
        std::fs::File::create(&rec1)?;
        std::fs::File::create(&rec2)?;
        std::fs::File::create(&rec3)?;
        std::fs::File::create(&garbage)?;

        // Step through each version; versions marked None are transitional
        // and have no frozen schema snapshot to compare against.
        for (ver, fresh_sql) in &[(1, Some(include_str!("v1.sql"))),
                                  (2, None), // transitional; don't compare schemas.
                                  (3, Some(include_str!("v3.sql"))),
                                  (4, None), // transitional; don't compare schemas.
                                  (5, Some(include_str!("v5.sql"))),
                                  (6, Some(include_str!("../schema.sql")))] {
            upgrade(&Args {
                sample_file_dir: Some(&tmpdir.path()),
                preset_journal: "delete",
                no_vacuum: false,
            }, *ver, &mut upgraded).context(format!("upgrading to version {}", ver))?;
            if let Some(f) = fresh_sql {
                compare(&upgraded, *ver, f)?;
            }
            if *ver == 3 {
                // Check that the garbage files is cleaned up properly, but also add it back
                // to simulate a bug prior to 433be217. The v5 upgrade should take care of
                // anything left over.
                assert!(!garbage.exists());
                std::fs::File::create(&garbage)?;
            }
            if *ver == 6 {
                // Check that the pasp was set properly.
                let mut stmt = upgraded.prepare(r#"
                    select
                      id,
                      pasp_h_spacing,
                      pasp_v_spacing
                    from
                      video_sample_entry
                "#)?;
                let mut rows = stmt.query(params![])?;
                let mut pasp_by_id = FnvHashMap::default();
                while let Some(row) = rows.next()? {
                    let id: i32 = row.get(0)?;
                    let pasp_h_spacing: i32 = row.get(1)?;
                    let pasp_v_spacing: i32 = row.get(2)?;
                    pasp_by_id.insert(id, (pasp_h_spacing, pasp_v_spacing));
                }
                assert_eq!(pasp_by_id.get(&1), Some(&(1, 1)));
                assert_eq!(pasp_by_id.get(&2), Some(&(4, 3)));
                assert_eq!(pasp_by_id.get(&3), Some(&(40, 33)));

                // No recording references this video_sample_entry, so it gets dropped on upgrade.
                assert_eq!(pasp_by_id.get(&4), None);
            }
        }

        // Check that recording files get renamed (to the composite-id form).
        assert!(!rec1.exists());
        assert!(tmpdir.path().join("0000000100000001").exists());

        // Check that garbage files get cleaned up.
        assert!(!garbage.exists());

        Ok(())
    }
}
|
||||
==== new file: server/db/upgrade/v0.sql (159 lines) ====
|
||||
-- This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
-- Copyright (C) 2016 The Moonfire NVR Authors
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU General Public License as published by
|
||||
-- the Free Software Foundation, either version 3 of the License, or
|
||||
-- (at your option) any later version.
|
||||
--
|
||||
-- In addition, as a special exception, the copyright holders give
|
||||
-- permission to link the code of portions of this program with the
|
||||
-- OpenSSL library under certain conditions as described in each
|
||||
-- individual source file, and distribute linked combinations including
|
||||
-- the two.
|
||||
--
|
||||
-- You must obey the GNU General Public License in all respects for all
|
||||
-- of the code used other than OpenSSL. If you modify file(s) with this
|
||||
-- exception, you may extend this exception to your version of the
|
||||
-- file(s), but you are not obligated to do so. If you do not wish to do
|
||||
-- so, delete this exception statement from your version. If you delete
|
||||
-- this exception statement from all source files in the program, then
|
||||
-- also delete it here.
|
||||
--
|
||||
-- This program is distributed in the hope that it will be useful,
|
||||
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
-- GNU General Public License for more details.
|
||||
--
|
||||
-- You should have received a copy of the GNU General Public License
|
||||
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
--
|
||||
-- schema.sql: SQLite3 database schema for Moonfire NVR.
-- See also design/schema.md.
--
-- NOTE(review): this is the frozen version-0 schema, kept verbatim for
-- upgrade testing (it is executed as the starting state in upgrade tests).
-- Do not "fix" it to match newer conventions.

--pragma journal_mode = wal;

-- This table tracks the schema version.
-- There is one row for the initial database creation (inserted below, after the
-- create statements) and one for each upgrade procedure (if any).
create table version (
  id integer primary key,

  -- The unix time as of the creation/upgrade, as determined by
  -- cast(strftime('%s', 'now') as int).
  unix_time integer not null,

  -- Optional notes on the creation/upgrade; could include the binary version.
  notes text
);

create table camera (
  id integer primary key,
  uuid blob unique,-- not null check (length(uuid) = 16),

  -- A short name of the camera, used in log messages.
  short_name text,-- not null,

  -- A short description of the camera.
  description text,

  -- The host (or IP address) to use in rtsp:// URLs when accessing the camera.
  host text,

  -- The username to use when accessing the camera.
  -- If empty, no username or password will be supplied.
  username text,

  -- The password to use when accessing the camera.
  password text,

  -- The path (starting with "/") to use in rtsp:// URLs to reference this
  -- camera's "main" (full-quality) video stream.
  main_rtsp_path text,

  -- The path (starting with "/") to use in rtsp:// URLs to reference this
  -- camera's "sub" (low-bandwidth) video stream.
  sub_rtsp_path text,

  -- The number of bytes of video to retain, excluding the currently-recording
  -- file. Older files will be deleted as necessary to stay within this limit.
  retain_bytes integer not null check (retain_bytes >= 0)
);

-- Each row represents a single completed recorded segment of video.
-- Recordings are typically ~60 seconds; never more than 5 minutes.
create table recording (
  id integer primary key,
  camera_id integer references camera (id) not null,

  sample_file_bytes integer not null check (sample_file_bytes > 0),

  -- The starting time of the recording, in 90 kHz units since
  -- 1970-01-01 00:00:00 UTC. Currently on initial connection, this is taken
  -- from the local system time; on subsequent recordings, it exactly
  -- matches the previous recording's end time.
  start_time_90k integer not null check (start_time_90k > 0),

  -- The duration of the recording, in 90 kHz units.
  duration_90k integer not null
      check (duration_90k >= 0 and duration_90k < 5*60*90000),

  -- The number of 90 kHz units the local system time is ahead of the
  -- recording; negative numbers indicate the local system time is behind
  -- the recording. Large values would indicate that the local time has jumped
  -- during recording or that the local time and camera time frequencies do
  -- not match.
  local_time_delta_90k integer not null,

  video_samples integer not null check (video_samples > 0),
  -- NOTE(review): this check references video_samples rather than
  -- video_sync_samples; kept as-is to preserve the historical schema.
  video_sync_samples integer not null check (video_samples > 0),
  video_sample_entry_id integer references video_sample_entry (id),

  sample_file_uuid blob not null check (length(sample_file_uuid) = 16),
  sample_file_sha1 blob not null check (length(sample_file_sha1) = 20),
  video_index blob not null check (length(video_index) > 0)
);

create index recording_cover on recording (
  -- Typical queries use "where camera_id = ? order by start_time_90k (desc)?".
  camera_id,
  start_time_90k,

  -- These fields are not used for ordering; they cover most queries so
  -- that only database verification and actual viewing of recordings need
  -- to consult the underlying row.
  duration_90k,
  video_samples,
  video_sync_samples,
  video_sample_entry_id,
  sample_file_bytes
);

-- Files in the sample file directory which may be present but should simply be
-- discarded on startup. (Recordings which were never completed or have been
-- marked for completion.)
create table reserved_sample_files (
  uuid blob primary key check (length(uuid) = 16),
  state integer not null -- 0 (writing) or 1 (deleted)
) without rowid;

-- A concrete box derived from a ISO/IEC 14496-12 section 8.5.2
-- VisualSampleEntry box. Describes the codec, width, height, etc.
create table video_sample_entry (
  id integer primary key,

  -- A SHA-1 hash of |bytes|.
  sha1 blob unique not null check (length(sha1) = 20),

  -- The width and height in pixels; must match values within
  -- |sample_entry_bytes|.
  width integer not null check (width > 0),
  height integer not null check (height > 0),

  -- The serialized box, including the leading length and box type (avcC in
  -- the case of H.264).
  data blob not null check (length(data) > 86)
);

insert into version (id, unix_time, notes)
            values (0, cast(strftime('%s', 'now') as int), 'db creation');
|
||||
==== new file: server/db/upgrade/v0_to_v1.rs (233 lines) ====
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2016 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Upgrades a version 0 schema to a version 1 schema.
|
||||
|
||||
use crate::db;
|
||||
use crate::recording;
|
||||
use failure::Error;
|
||||
use log::warn;
|
||||
use rusqlite::params;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Runs the v0 -> v1 upgrade inside the caller's transaction: rebuilds
/// `camera` and `recording` with the v1 shapes (composite ids, run offsets,
/// flags, separate `recording_playback`), then migrates the data.
pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
    // These create statements match the schema.sql when version 1 was the latest.
    // NOTE(review): the `video_sync_samples ... check (video_samples > 0)`
    // line below looks like it checks the wrong column, but it must stay
    // byte-identical to the frozen v1 schema (v1.sql) or the upgrade test's
    // schema comparison will fail — confirm against v1.sql before touching.
    tx.execute_batch(r#"
        alter table camera rename to old_camera;
        create table camera (
          id integer primary key,
          uuid blob unique not null check (length(uuid) = 16),
          short_name text not null,
          description text,
          host text,
          username text,
          password text,
          main_rtsp_path text,
          sub_rtsp_path text,
          retain_bytes integer not null check (retain_bytes >= 0),
          next_recording_id integer not null check (next_recording_id >= 0)
        );
        alter table recording rename to old_recording;
        drop index recording_cover;
        create table recording (
          composite_id integer primary key,
          camera_id integer not null references camera (id),
          run_offset integer not null,
          flags integer not null,
          sample_file_bytes integer not null check (sample_file_bytes > 0),
          start_time_90k integer not null check (start_time_90k > 0),
          duration_90k integer not null
              check (duration_90k >= 0 and duration_90k < 5*60*90000),
          local_time_delta_90k integer not null,
          video_samples integer not null check (video_samples > 0),
          video_sync_samples integer not null check (video_samples > 0),
          video_sample_entry_id integer references video_sample_entry (id),
          check (composite_id >> 32 = camera_id)
        );
        create index recording_cover on recording (
          camera_id,
          start_time_90k,
          duration_90k,
          video_samples,
          video_sync_samples,
          video_sample_entry_id,
          sample_file_bytes,
          run_offset,
          flags
        );
        create table recording_playback (
          composite_id integer primary key references recording (composite_id),
          sample_file_uuid blob not null check (length(sample_file_uuid) = 16),
          sample_file_sha1 blob not null check (length(sample_file_sha1) = 20),
          video_index blob not null check (length(video_index) > 0)
        );
        insert into camera
        select
          id,
          uuid,
          short_name,
          description,
          host,
          username,
          password,
          main_rtsp_path,
          sub_rtsp_path,
          retain_bytes,
          1 as next_recording_id
        from
          old_camera;
    "#)?;
    // Migrate recordings, then write each camera's computed next_recording_id.
    let camera_state = fill_recording(tx)?;
    update_camera(tx, camera_state)?;
    // Old tables are only dropped after the data has been copied out.
    tx.execute_batch(r#"
        drop table old_recording;
        drop table old_camera;
    "#)?;
    Ok(())
}
|
||||
|
||||
/// Per-camera state accumulated while `fill_recording` scans `old_recording`
/// rows, used to assign composite ids and run offsets.
struct CameraState {
    /// tuple of (run_start_id, next_start_90k). `None` when no recording has
    /// been seen yet for this camera or the previous one ended its run
    /// (its index had a trailing zero duration).
    current_run: Option<(i64, i64)>,

    /// As in the `next_recording_id` field of the `camera` table.
    next_recording_id: i32,
}
|
||||
|
||||
fn has_trailing_zero(video_index: &[u8]) -> Result<bool, Error> {
|
||||
let mut it = recording::SampleIndexIterator::new();
|
||||
while it.next(video_index)? {}
|
||||
Ok(it.duration_90k == 0)
|
||||
}
|
||||
|
||||
/// Fills the `recording` and `recording_playback` tables from `old_recording`, returning
/// the `camera_state` map for use by a following call to `fill_cameras`.
fn fill_recording(tx: &rusqlite::Transaction) -> Result<HashMap<i32, CameraState>, Error> {
    let mut select = tx.prepare(r#"
        select
          camera_id,
          sample_file_bytes,
          start_time_90k,
          duration_90k,
          local_time_delta_90k,
          video_samples,
          video_sync_samples,
          video_sample_entry_id,
          sample_file_uuid,
          sample_file_sha1,
          video_index,
          id
        from
          old_recording
    "#)?;
    let mut insert1 = tx.prepare(r#"
        insert into recording values (:composite_id, :camera_id, :run_offset, :flags,
                                      :sample_file_bytes, :start_time_90k, :duration_90k,
                                      :local_time_delta_90k, :video_samples, :video_sync_samples,
                                      :video_sample_entry_id)
    "#)?;
    let mut insert2 = tx.prepare(r#"
        insert into recording_playback values (:composite_id, :sample_file_uuid, :sample_file_sha1,
                                               :video_index)
    "#)?;
    let mut rows = select.query(params![])?;
    let mut camera_state: HashMap<i32, CameraState> = HashMap::new();
    while let Some(row) = rows.next()? {
        let camera_id: i32 = row.get(0)?;
        // Shadowing: from here on, `camera_state` is this camera's entry.
        let camera_state = camera_state.entry(camera_id).or_insert_with(|| {
            CameraState{
                current_run: None,
                next_recording_id: 1,
            }
        });
        // v1 composite id: camera id in the high 32 bits, per-camera
        // recording id in the low 32.
        let composite_id = ((camera_id as i64) << 32) | (camera_state.next_recording_id as i64);
        camera_state.next_recording_id += 1;
        let sample_file_bytes: i32 = row.get(1)?;
        let start_time_90k: i64 = row.get(2)?;
        let duration_90k: i32 = row.get(3)?;
        let local_time_delta_90k: i64 = row.get(4)?;
        let video_samples: i32 = row.get(5)?;
        let video_sync_samples: i32 = row.get(6)?;
        let video_sample_entry_id: i32 = row.get(7)?;
        let sample_file_uuid: db::FromSqlUuid = row.get(8)?;
        let sample_file_sha1: Vec<u8> = row.get(9)?;
        let video_index: Vec<u8> = row.get(10)?;
        let old_id: i32 = row.get(11)?;
        // A corrupt index is logged and treated as non-trailing-zero rather
        // than aborting the whole upgrade.
        let trailing_zero = has_trailing_zero(&video_index).unwrap_or_else(|e| {
            // NOTE(review): `& 0xFFFF` masks only 16 bits although the
            // recording id occupies the low 32, so this log line may show a
            // truncated id — confirm whether that's intentional.
            warn!("recording {}/{} (sample file {}, formerly recording {}) has corrupt \
                   video_index: {}",
                  camera_id, composite_id & 0xFFFF, sample_file_uuid.0, old_id, e);
            false
        });
        // A recording continues the current run only if it starts exactly
        // where the previous one ended; otherwise it starts a new run.
        let run_id = match camera_state.current_run {
            Some((run_id, expected_start)) if expected_start == start_time_90k => run_id,
            _ => composite_id,
        };
        insert1.execute_named(&[
            (":composite_id", &composite_id),
            (":camera_id", &camera_id),
            (":run_offset", &(composite_id - run_id)),
            (":flags", &(if trailing_zero { db::RecordingFlags::TrailingZero as i32 } else { 0 })),
            (":sample_file_bytes", &sample_file_bytes),
            (":start_time_90k", &start_time_90k),
            (":duration_90k", &duration_90k),
            (":local_time_delta_90k", &local_time_delta_90k),
            (":video_samples", &video_samples),
            (":video_sync_samples", &video_sync_samples),
            (":video_sample_entry_id", &video_sample_entry_id),
        ])?;
        insert2.execute_named(&[
            (":composite_id", &composite_id),
            (":sample_file_uuid", &&sample_file_uuid.0.as_bytes()[..]),
            (":sample_file_sha1", &sample_file_sha1),
            (":video_index", &video_index),
        ])?;
        // A trailing zero duration ends the run; otherwise the next recording
        // is expected to start at this one's end time.
        camera_state.current_run = if trailing_zero {
            None
        } else {
            Some((run_id, start_time_90k + duration_90k as i64))
        };
    }
    Ok(camera_state)
}
|
||||
|
||||
fn update_camera(tx: &rusqlite::Transaction, camera_state: HashMap<i32, CameraState>)
|
||||
-> Result<(), Error> {
|
||||
let mut stmt = tx.prepare(r#"
|
||||
update camera set next_recording_id = :next_recording_id where id = :id
|
||||
"#)?;
|
||||
for (ref id, ref state) in &camera_state {
|
||||
stmt.execute_named(&[
|
||||
(":id", &id),
|
||||
(":next_recording_id", &state.next_recording_id),
|
||||
])?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
==== new file: server/db/upgrade/v1.sql (205 lines) ====
|
||||
-- This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
-- Copyright (C) 2016 The Moonfire NVR Authors
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU General Public License as published by
|
||||
-- the Free Software Foundation, either version 3 of the License, or
|
||||
-- (at your option) any later version.
|
||||
--
|
||||
-- In addition, as a special exception, the copyright holders give
|
||||
-- permission to link the code of portions of this program with the
|
||||
-- OpenSSL library under certain conditions as described in each
|
||||
-- individual source file, and distribute linked combinations including
|
||||
-- the two.
|
||||
--
|
||||
-- You must obey the GNU General Public License in all respects for all
|
||||
-- of the code used other than OpenSSL. If you modify file(s) with this
|
||||
-- exception, you may extend this exception to your version of the
|
||||
-- file(s), but you are not obligated to do so. If you do not wish to do
|
||||
-- so, delete this exception statement from your version. If you delete
|
||||
-- this exception statement from all source files in the program, then
|
||||
-- also delete it here.
|
||||
--
|
||||
-- This program is distributed in the hope that it will be useful,
|
||||
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
-- GNU General Public License for more details.
|
||||
--
|
||||
-- You should have received a copy of the GNU General Public License
|
||||
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
--
|
||||
-- schema.sql: SQLite3 database schema for Moonfire NVR.
|
||||
-- See also design/schema.md.
|
||||
|
||||
-- This table tracks the schema version.
-- There is one row for the initial database creation (inserted below, after the
-- create statements) and one for each upgrade procedure (if any).
create table version (
  -- 1 for the initial creation (see the insert at the end of this file).
  id integer primary key,

  -- The unix time as of the creation/upgrade, as determined by
  -- cast(strftime('%s', 'now') as int).
  unix_time integer not null,

  -- Optional notes on the creation/upgrade; could include the binary version.
  notes text
);
|
||||
|
||||
create table camera (
  -- Internal integer id; also forms the high 32 bits of
  -- recording.composite_id (see the recording table below).
  id integer primary key,

  -- A unique identifier for the camera (16-byte binary uuid).
  uuid blob unique not null check (length(uuid) = 16),

  -- A short name of the camera, used in log messages.
  short_name text not null,

  -- A short description of the camera.
  description text,

  -- The host (or IP address) to use in rtsp:// URLs when accessing the camera.
  host text,

  -- The username to use when accessing the camera.
  -- If empty, no username or password will be supplied.
  username text,

  -- The password to use when accessing the camera.
  password text,

  -- The path (starting with "/") to use in rtsp:// URLs to reference this
  -- camera's "main" (full-quality) video stream.
  main_rtsp_path text,

  -- The path (starting with "/") to use in rtsp:// URLs to reference this
  -- camera's "sub" (low-bandwidth) video stream.
  sub_rtsp_path text,

  -- The number of bytes of video to retain, excluding the currently-recording
  -- file. Older files will be deleted as necessary to stay within this limit.
  retain_bytes integer not null check (retain_bytes >= 0),

  -- The low 32 bits of the next recording id to assign for this camera.
  -- Typically this is the maximum current recording + 1, but it does
  -- not decrease if that recording is deleted.
  next_recording_id integer not null check (next_recording_id >= 0)
);
|
||||
|
||||
-- Each row represents a single completed recorded segment of video.
-- Recordings are typically ~60 seconds; never more than 5 minutes.
create table recording (
  -- The high 32 bits of composite_id are taken from the camera's id, which
  -- improves locality. The low 32 bits are taken from the camera's
  -- next_recording_id (which should be post-incremented in the same
  -- transaction). It'd be simpler to use a "without rowid" table and separate
  -- fields to make up the primary key, but
  -- <https://www.sqlite.org/withoutrowid.html> points out that "without rowid"
  -- is not appropriate when the average row size is in excess of 50 bytes.
  -- recording_cover rows (which match this id format) are typically 1--5 KiB.
  composite_id integer primary key,

  -- This field is redundant with id above, but used to enforce the reference
  -- constraint and to structure the recording_start_time index.
  camera_id integer not null references camera (id),

  -- The offset of this recording within a run. 0 means this was the first
  -- recording made from an RTSP session. The start of the run has id
  -- (id-run_offset).
  run_offset integer not null,

  -- flags is a bitmask:
  --
  -- * 1, or "trailing zero", indicates that this recording is the last in a
  --   stream. As the duration of a sample is not known until the next sample
  --   is received, the final sample in this recording will have duration 0.
  flags integer not null,

  -- The size of the recording's sample file, in bytes.
  sample_file_bytes integer not null check (sample_file_bytes > 0),

  -- The starting time of the recording, in 90 kHz units since
  -- 1970-01-01 00:00:00 UTC. Currently on initial connection, this is taken
  -- from the local system time; on subsequent recordings, it exactly
  -- matches the previous recording's end time.
  start_time_90k integer not null check (start_time_90k > 0),

  -- The duration of the recording, in 90 kHz units.
  duration_90k integer not null
      check (duration_90k >= 0 and duration_90k < 5*60*90000),

  -- The number of 90 kHz units the local system time is ahead of the
  -- recording; negative numbers indicate the local system time is behind
  -- the recording. Large absolute values would indicate that the local time
  -- has jumped during recording or that the local time and camera time
  -- frequencies do not match.
  local_time_delta_90k integer not null,

  -- Sample counts and the video_sample_entry row describing the codec
  -- configuration used for these samples.
  video_samples integer not null check (video_samples > 0),
  video_sync_samples integer not null check (video_sync_samples > 0),
  video_sample_entry_id integer references video_sample_entry (id),

  -- Enforce the id layout described on composite_id above.
  check (composite_id >> 32 = camera_id)
);
|
||||
|
||||
-- Covering index used when listing recordings per camera in time order.
create index recording_cover on recording (
  -- Typical queries use "where camera_id = ? order by start_time_90k".
  camera_id,
  start_time_90k,

  -- These fields are not used for ordering; they cover most queries so
  -- that only database verification and actual viewing of recordings need
  -- to consult the underlying row.
  duration_90k,
  video_samples,
  video_sync_samples,
  video_sample_entry_id,
  sample_file_bytes,
  run_offset,
  flags
);
|
||||
|
||||
-- Large fields for a recording which are not needed when simply listing all
-- of the recordings in a given range. In particular, when serving a byte
-- range within a .mp4 file, the recording_playback row is needed only for the
-- recording(s) corresponding to that particular byte range, but the
-- recording rows suffice for all other recordings in the .mp4.
create table recording_playback (
  -- See description on recording table.
  composite_id integer primary key references recording (composite_id),

  -- The binary representation of the sample file's uuid. The canonical text
  -- representation of this uuid is the filename within the sample file dir.
  sample_file_uuid blob not null check (length(sample_file_uuid) = 16),

  -- The sha1 hash of the contents of the sample file.
  sample_file_sha1 blob not null check (length(sample_file_sha1) = 20),

  -- See design/schema.md#video_index for a description of this field.
  video_index blob not null check (length(video_index) > 0)
);
|
||||
|
||||
-- Files in the sample file directory which may be present but should simply be
-- discarded on startup. (Recordings which were never completed or have been
-- marked for deletion.)
create table reserved_sample_files (
  -- 16-byte binary uuid; its text form is the filename within the dir.
  uuid blob primary key check (length(uuid) = 16),
  state integer not null  -- 0 (writing) or 1 (deleted)
) without rowid;
|
||||
|
||||
-- A concrete box derived from an ISO/IEC 14496-12 section 8.5.2
-- VisualSampleEntry box. Describes the codec, width, height, etc.
create table video_sample_entry (
  id integer primary key,

  -- A SHA-1 hash of |data|.
  sha1 blob unique not null check (length(sha1) = 20),

  -- The width and height in pixels; must match values within
  -- |data|.
  width integer not null check (width > 0),
  height integer not null check (height > 0),

  -- The serialized box, including the leading length and box type (avcC in
  -- the case of H.264).
  data blob not null check (length(data) > 86)
);
|
||||
|
||||
-- Record the initial database creation as schema version 1.
insert into version (id, unix_time, notes)
    values (1, cast(strftime('%s', 'now') as int), 'db creation');
|
||||
409
server/db/upgrade/v1_to_v2.rs
Normal file
409
server/db/upgrade/v1_to_v2.rs
Normal file
@@ -0,0 +1,409 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2018 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Upgrades a version 1 schema to a version 2 schema.
|
||||
|
||||
use crate::dir;
|
||||
use failure::{Error, bail, format_err};
|
||||
use nix::fcntl::{FlockArg, OFlag};
|
||||
use nix::sys::stat::Mode;
|
||||
use rusqlite::params;
|
||||
use crate::schema::DirMeta;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Runs the schema version 1 -> 2 upgrade within `tx`.
///
/// Requires `--sample-file-dir`. The directory is opened, exclusively locked
/// (non-blocking), and verified against the v1 database's contents; fresh
/// db/open/dir uuids are generated and written both into the database and as
/// metadata in the directory; finally the v1 tables are renamed aside and
/// their contents copied into the new v2 schema.
pub fn run(args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
    let sample_file_path =
        args.sample_file_dir
            .ok_or_else(|| format_err!("--sample-file-dir required when upgrading from \
                                        schema version 1 to 2."))?;

    // Take an exclusive lock so no other process mutates the dir mid-upgrade,
    // then confirm the dir's contents match the database (catches typos).
    let mut d = nix::dir::Dir::open(sample_file_path, OFlag::O_DIRECTORY | OFlag::O_RDONLY,
                                    Mode::empty())?;
    nix::fcntl::flock(d.as_raw_fd(), FlockArg::LockExclusiveNonblock)?;
    verify_dir_contents(sample_file_path, &mut d, tx)?;

    // These create statements match the schema.sql when version 2 was the latest.
    tx.execute_batch(r#"
        create table meta (
          uuid blob not null check (length(uuid) = 16)
        );
        create table open (
          id integer primary key,
          uuid blob unique not null check (length(uuid) = 16),
          start_time_90k integer,
          end_time_90k integer,
          duration_90k integer
        );
        create table sample_file_dir (
          id integer primary key,
          path text unique not null,
          uuid blob unique not null check (length(uuid) = 16),
          last_complete_open_id integer references open (id)
        );
        create table user (
          id integer primary key,
          username unique not null,
          flags integer not null,
          password_hash text,
          password_id integer not null default 0,
          password_failure_count integer not null default 0,
          unix_uid integer
        );
        create table user_session (
          session_id_hash blob primary key not null,
          user_id integer references user (id) not null,
          seed blob not null,
          flags integer not null,
          domain text,
          description text,
          creation_password_id integer,
          creation_time_sec integer not null,
          creation_user_agent text,
          creation_peer_addr blob,
          revocation_time_sec integer,
          revocation_user_agent text,
          revocation_peer_addr blob,
          revocation_reason integer,
          revocation_reason_detail text,
          last_use_time_sec integer,
          last_use_user_agent text,
          last_use_peer_addr blob,
          use_count not null default 0
        ) without rowid;
        create index user_session_uid on user_session (user_id);
    "#)?;
    // Generate fresh identities: one for the database, one for this "open"
    // (process lifetime), and one for the sample file directory.
    let db_uuid = ::uuid::Uuid::new_v4();
    let db_uuid_bytes = &db_uuid.as_bytes()[..];
    tx.execute("insert into meta (uuid) values (?)", params![db_uuid_bytes])?;
    let open_uuid = ::uuid::Uuid::new_v4();
    let open_uuid_bytes = &open_uuid.as_bytes()[..];
    tx.execute("insert into open (uuid) values (?)", params![open_uuid_bytes])?;
    // id of the "open" row just inserted.
    let open_id = tx.last_insert_rowid() as u32;
    let dir_uuid = ::uuid::Uuid::new_v4();
    let dir_uuid_bytes = &dir_uuid.as_bytes()[..];

    // Write matching metadata to the directory.
    let mut meta = DirMeta::default();
    {
        meta.db_uuid.extend_from_slice(db_uuid_bytes);
        meta.dir_uuid.extend_from_slice(dir_uuid_bytes);
        let open = meta.last_complete_open.set_default();
        open.id = open_id;
        open.uuid.extend_from_slice(&open_uuid_bytes);
    }
    dir::write_meta(d.as_raw_fd(), &meta)?;

    // SQLite stores the dir path as text, so it must be valid UTF-8.
    let sample_file_path = sample_file_path.to_str()
        .ok_or_else(|| format_err!("sample file dir {} is not a valid string",
                                   sample_file_path.display()))?;
    tx.execute(r#"
        insert into sample_file_dir (path, uuid, last_complete_open_id)
                             values (?,    ?,    ?)
    "#, params![sample_file_path, dir_uuid_bytes, open_id])?;

    // Rename the v1 tables aside, create the v2 tables, copy the cameras
    // across, and split each camera into its per-stream rows.
    tx.execute_batch(r#"
        drop table reserved_sample_files;
        alter table camera rename to old_camera;
        alter table recording rename to old_recording;
        alter table video_sample_entry rename to old_video_sample_entry;
        drop index recording_cover;

        create table camera (
          id integer primary key,
          uuid blob unique not null check (length(uuid) = 16),
          short_name text not null,
          description text,
          host text,
          username text,
          password text
        );

        create table stream (
          id integer primary key,
          camera_id integer not null references camera (id),
          sample_file_dir_id integer references sample_file_dir (id),
          type text not null check (type in ('main', 'sub')),
          record integer not null check (record in (1, 0)),
          rtsp_path text not null,
          retain_bytes integer not null check (retain_bytes >= 0),
          flush_if_sec integer not null,
          next_recording_id integer not null check (next_recording_id >= 0),
          unique (camera_id, type)
        );

        create table recording (
          composite_id integer primary key,
          open_id integer not null,
          stream_id integer not null references stream (id),
          run_offset integer not null,
          flags integer not null,
          sample_file_bytes integer not null check (sample_file_bytes > 0),
          start_time_90k integer not null check (start_time_90k > 0),
          duration_90k integer not null
              check (duration_90k >= 0 and duration_90k < 5*60*90000),
          video_samples integer not null check (video_samples > 0),
          video_sync_samples integer not null check (video_sync_samples > 0),
          video_sample_entry_id integer references video_sample_entry (id),
          check (composite_id >> 32 = stream_id)
        );

        create index recording_cover on recording (
          stream_id,
          start_time_90k,
          open_id,
          duration_90k,
          video_samples,
          video_sync_samples,
          video_sample_entry_id,
          sample_file_bytes,
          run_offset,
          flags
        );

        create table recording_integrity (
          composite_id integer primary key references recording (composite_id),
          local_time_delta_90k integer,
          local_time_since_open_90k integer,
          wall_time_delta_90k integer,
          sample_file_sha1 blob check (length(sample_file_sha1) <= 20)
        );

        create table video_sample_entry (
          id integer primary key,
          sha1 blob unique not null check (length(sha1) = 20),
          width integer not null check (width > 0),
          height integer not null check (height > 0),
          rfc6381_codec text not null,
          data blob not null check (length(data) > 86)
        );

        create table garbage (
          sample_file_dir_id integer references sample_file_dir (id),
          composite_id integer,
          primary key (sample_file_dir_id, composite_id)
        ) without rowid;

        insert into camera
        select
          id,
          uuid,
          short_name,
          description,
          host,
          username,
          password
        from old_camera;

        -- Insert main streams using the same id as the camera, to ease changing recordings.
        insert into stream
        select
          old_camera.id,
          old_camera.id,
          sample_file_dir.id,
          'main',
          1,
          old_camera.main_rtsp_path,
          old_camera.retain_bytes,
          0,
          old_camera.next_recording_id
        from
          old_camera cross join sample_file_dir;

        -- Insert sub stream (if path is non-empty) using any id.
        insert into stream (camera_id, sample_file_dir_id, type, record, rtsp_path,
                            retain_bytes, flush_if_sec, next_recording_id)
        select
          old_camera.id,
          sample_file_dir.id,
          'sub',
          0,
          old_camera.sub_rtsp_path,
          0,
          90,
          1
        from
          old_camera cross join sample_file_dir
        where
          old_camera.sub_rtsp_path != '';
    "#)?;

    // Add the new video_sample_entry rows, before inserting the recordings referencing them.
    fix_video_sample_entry(tx)?;

    // Copy recordings across (stamping each with the new open id) and seed
    // recording_integrity from the old per-recording fields.
    tx.execute_batch(r#"
        insert into recording
        select
          r.composite_id,
          r.camera_id,
          o.id,
          r.run_offset,
          r.flags,
          r.sample_file_bytes,
          r.start_time_90k,
          r.duration_90k,
          r.video_samples,
          r.video_sync_samples,
          r.video_sample_entry_id
        from
          old_recording r cross join open o;

        insert into recording_integrity (composite_id, local_time_delta_90k, sample_file_sha1)
        select
          r.composite_id,
          case when r.run_offset > 0 then local_time_delta_90k else null end,
          p.sample_file_sha1
        from
          old_recording r join recording_playback p on (r.composite_id = p.composite_id);
    "#)?;

    Ok(())
}
|
||||
|
||||
/// Ensures the sample file directory has the expected contents.
/// Among other problems, this catches a fat-fingered `--sample-file-dir`.
/// The expected contents are:
///
/// * required: recording uuids.
/// * optional: reserved sample file uuids.
/// * optional: meta and meta-tmp from half-completed update attempts.
/// * forbidden: anything else.
///
/// As a side effect, deletes any file matching a reserved uuid (see the
/// comment in the loop below).
fn verify_dir_contents(sample_file_path: &std::path::Path, dir: &mut nix::dir::Dir,
                       tx: &rusqlite::Transaction) -> Result<(), Error> {
    // Build a set of the uuids found in the directory, pre-sized using the
    // database's total row count.
    let n: i64 = tx.query_row(r#"
        select
          a.c + b.c
        from
          (select count(*) as c from recording) a,
          (select count(*) as c from reserved_sample_files) b;
    "#, params![], |r| r.get(0))?;
    let mut files = ::fnv::FnvHashSet::with_capacity_and_hasher(n as usize, Default::default());
    for e in dir.iter() {
        let e = e?;
        let f = e.file_name();
        match f.to_bytes() {
            b"." | b".." => continue,
            b"meta" | b"meta-tmp" => {
                // Ignore metadata files. These might be from a half-finished update attempt.
                // If the directory is actually an in-use >v3 format, other contents won't match.
                continue;
            },
            _ => {},
        };
        // Anything else must be a filename in canonical (hyphenated) uuid form.
        let s = match f.to_str() {
            Ok(s) => s,
            Err(_) => bail!("unexpected file {:?} in {:?}", f, sample_file_path),
        };
        let uuid = match Uuid::parse_str(s) {
            Ok(u) => u,
            Err(_) => bail!("unexpected file {:?} in {:?}", f, sample_file_path),
        };
        if s != uuid.to_hyphenated_ref().to_string() {  // non-canonical form.
            bail!("unexpected file {:?} in {:?}", f, sample_file_path);
        }
        files.insert(uuid);
    }

    // Iterate through the database and check that everything has a matching file.
    {
        let mut stmt = tx.prepare(r"select sample_file_uuid from recording_playback")?;
        let mut rows = stmt.query(params![])?;
        while let Some(row) = rows.next()? {
            let uuid: crate::db::FromSqlUuid = row.get(0)?;
            if !files.remove(&uuid.0) {
                bail!("{} is missing from dir {}!", uuid.0, sample_file_path.display());
            }
        }
    }

    // Reserved files are allowed to exist but are not required to.
    let mut stmt = tx.prepare(r"select uuid from reserved_sample_files")?;
    let mut rows = stmt.query(params![])?;
    while let Some(row) = rows.next()? {
        let uuid: crate::db::FromSqlUuid = row.get(0)?;
        if files.remove(&uuid.0) {
            // Also remove the garbage file. For historical reasons (version 2 was originally
            // defined as not having a garbage table so still is), do this here rather than with
            // the other path manipulations in v2_to_v3.rs. There's no harm anyway in deleting
            // a garbage file so if the upgrade transaction fails this is still a valid and complete
            // version 1 database.
            let p = super::UuidPath::from(uuid.0);
            nix::unistd::unlinkat(Some(dir.as_raw_fd()), &p,
                                  nix::unistd::UnlinkatFlags::NoRemoveDir)?;
        }
    }

    // Any uuid-named file not claimed by the database is an error.
    if !files.is_empty() {
        bail!("{} unexpected sample file uuids in dir {}: {:?}!",
              files.len(), sample_file_path.display(), files);
    }
    Ok(())
}
|
||||
|
||||
fn fix_video_sample_entry(tx: &rusqlite::Transaction) -> Result<(), Error> {
|
||||
let mut select = tx.prepare(r#"
|
||||
select
|
||||
id,
|
||||
sha1,
|
||||
width,
|
||||
height,
|
||||
data
|
||||
from
|
||||
old_video_sample_entry
|
||||
"#)?;
|
||||
let mut insert = tx.prepare(r#"
|
||||
insert into video_sample_entry values (:id, :sha1, :width, :height, :rfc6381_codec, :data)
|
||||
"#)?;
|
||||
let mut rows = select.query(params![])?;
|
||||
while let Some(row) = rows.next()? {
|
||||
let data: Vec<u8> = row.get(4)?;
|
||||
insert.execute_named(&[
|
||||
(":id", &row.get::<_, i32>(0)?),
|
||||
(":sha1", &row.get::<_, Vec<u8>>(1)?),
|
||||
(":width", &row.get::<_, i32>(2)?),
|
||||
(":height", &row.get::<_, i32>(3)?),
|
||||
(":rfc6381_codec", &rfc6381_codec_from_sample_entry(&data)?),
|
||||
(":data", &data),
|
||||
])?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// This previously lived in h264.rs. As of version 1, H.264 is the only supported codec.
|
||||
fn rfc6381_codec_from_sample_entry(sample_entry: &[u8]) -> Result<String, Error> {
|
||||
if sample_entry.len() < 99 || &sample_entry[4..8] != b"avc1" ||
|
||||
&sample_entry[90..94] != b"avcC" {
|
||||
bail!("not a valid AVCSampleEntry");
|
||||
}
|
||||
let profile_idc = sample_entry[103];
|
||||
let constraint_flags_byte = sample_entry[104];
|
||||
let level_idc = sample_entry[105];
|
||||
Ok(format!("avc1.{:02x}{:02x}{:02x}", profile_idc, constraint_flags_byte, level_idc))
|
||||
}
|
||||
118
server/db/upgrade/v2_to_v3.rs
Normal file
118
server/db/upgrade/v2_to_v3.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2018 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Upgrades a version 2 schema to a version 3 schema.
|
||||
/// Note that a version 2 schema is never actually used; so we know the upgrade from version 1 was
|
||||
/// completed, and possibly an upgrade from 2 to 3 is half-finished.
|
||||
|
||||
use crate::db::{self, FromSqlUuid};
|
||||
use crate::dir;
|
||||
use failure::Error;
|
||||
use crate::schema;
|
||||
use rusqlite::params;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Opens the sample file dir.
|
||||
///
|
||||
/// Makes a couple simplifying assumptions valid for version 2:
|
||||
/// * there's only one dir.
|
||||
/// * it has a last completed open.
|
||||
fn open_sample_file_dir(tx: &rusqlite::Transaction) -> Result<Arc<dir::SampleFileDir>, Error> {
|
||||
let (p, s_uuid, o_id, o_uuid, db_uuid): (String, FromSqlUuid, i32, FromSqlUuid, FromSqlUuid) =
|
||||
tx.query_row(r#"
|
||||
select
|
||||
s.path, s.uuid, s.last_complete_open_id, o.uuid, m.uuid
|
||||
from
|
||||
sample_file_dir s
|
||||
join open o on (s.last_complete_open_id = o.id)
|
||||
cross join meta m
|
||||
"#, params![], |row| {
|
||||
Ok((row.get(0)?,
|
||||
row.get(1)?,
|
||||
row.get(2)?,
|
||||
row.get(3)?,
|
||||
row.get(4)?))
|
||||
})?;
|
||||
let mut meta = schema::DirMeta::default();
|
||||
meta.db_uuid.extend_from_slice(&db_uuid.0.as_bytes()[..]);
|
||||
meta.dir_uuid.extend_from_slice(&s_uuid.0.as_bytes()[..]);
|
||||
{
|
||||
let open = meta.last_complete_open.set_default();
|
||||
open.id = o_id as u32;
|
||||
open.uuid.extend_from_slice(&o_uuid.0.as_bytes()[..]);
|
||||
}
|
||||
dir::SampleFileDir::open(&p, &meta)
|
||||
}
|
||||
|
||||
/// Runs the schema version 2 -> 3 upgrade within `tx`: renames each sample
/// file on disk from its uuid-based path to its composite-id-based path, then
/// shrinks `recording_playback` to just the video index and drops the `old_*`
/// tables left over from the 1 -> 2 upgrade.
pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
    let d = open_sample_file_dir(&tx)?;
    let mut stmt = tx.prepare(r#"
        select
          composite_id,
          sample_file_uuid
        from
          recording_playback
    "#)?;
    let mut rows = stmt.query(params![])?;
    while let Some(row) = rows.next()? {
        let id = db::CompositeId(row.get(0)?);
        let sample_file_uuid: FromSqlUuid = row.get(1)?;
        let from_path = super::UuidPath::from(sample_file_uuid.0);
        let to_path = crate::dir::CompositeIdPath::from(id);
        // Rename within the same dir fd. A missing source is tolerated so a
        // previously-interrupted upgrade attempt can be resumed.
        if let Err(e) = nix::fcntl::renameat(Some(d.fd.as_raw_fd()), &from_path,
                                             Some(d.fd.as_raw_fd()), &to_path) {
            if e == nix::Error::Sys(nix::errno::Errno::ENOENT) {
                continue;  // assume it was already moved.
            }
            Err(e)?;
        }
    }

    // These create statements match the schema.sql when version 3 was the latest.
    tx.execute_batch(r#"
        alter table recording_playback rename to old_recording_playback;
        create table recording_playback (
          composite_id integer primary key references recording (composite_id),
          video_index blob not null check (length(video_index) > 0)
        );
        insert into recording_playback
        select
          composite_id,
          video_index
        from
          old_recording_playback;
        drop table old_recording_playback;
        drop table old_recording;
        drop table old_camera;
        drop table old_video_sample_entry;
    "#)?;
    Ok(())
}
|
||||
400
server/db/upgrade/v3.sql
Normal file
400
server/db/upgrade/v3.sql
Normal file
@@ -0,0 +1,400 @@
|
||||
-- This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
-- Copyright (C) 2016 The Moonfire NVR Authors
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU General Public License as published by
|
||||
-- the Free Software Foundation, either version 3 of the License, or
|
||||
-- (at your option) any later version.
|
||||
--
|
||||
-- In addition, as a special exception, the copyright holders give
|
||||
-- permission to link the code of portions of this program with the
|
||||
-- OpenSSL library under certain conditions as described in each
|
||||
-- individual source file, and distribute linked combinations including
|
||||
-- the two.
|
||||
--
|
||||
-- You must obey the GNU General Public License in all respects for all
|
||||
-- of the code used other than OpenSSL. If you modify file(s) with this
|
||||
-- exception, you may extend this exception to your version of the
|
||||
-- file(s), but you are not obligated to do so. If you do not wish to do
|
||||
-- so, delete this exception statement from your version. If you delete
|
||||
-- this exception statement from all source files in the program, then
|
||||
-- also delete it here.
|
||||
--
|
||||
-- This program is distributed in the hope that it will be useful,
|
||||
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
-- GNU General Public License for more details.
|
||||
--
|
||||
-- You should have received a copy of the GNU General Public License
|
||||
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
--
|
||||
-- schema.sql: SQLite3 database schema for Moonfire NVR.
|
||||
-- See also design/schema.md.
|
||||
|
||||
-- Database metadata. There should be exactly one row in this table.
|
||||
-- Database metadata. Exactly one row.
create table meta (
  uuid blob not null check (length(uuid) = 16)
);

-- Schema version history: one row for the initial database creation (inserted
-- below) plus one row per upgrade procedure, if any.
create table version (
  id integer primary key,

  -- Unix time as of the creation/upgrade: cast(strftime('%s', 'now') as int).
  unix_time integer not null,

  -- Optional free-form notes, e.g. the binary version.
  notes text
);

-- One row per read/write open of the database. Used to keep sample file
-- directories in sync with the database (see schema.proto:DirMeta), to
-- disambiguate uncommitted recordings, and potentially to understand time
-- problems.
create table open (
  id integer primary key,
  uuid blob unique not null check (length(uuid) = 16),

  -- Timing of the open. All of these may be null, e.g. in the open
  -- representing everything written prior to schema version 3.

  -- System time when the database was opened, in 90 kHz units since
  -- 1970-01-01 00:00:00Z excluding leap seconds.
  start_time_90k integer,

  -- System time when the database was closed or (on crash) last flushed.
  end_time_90k integer,

  -- end_time_90k - start_time_90k, absent time steps / leap seconds.
  duration_90k integer
);

create table sample_file_dir (
  id integer primary key,
  path text unique not null,
  uuid blob unique not null check (length(uuid) = 16),

  -- The last (read/write) open of this directory which fully completed.
  -- See schema.proto:DirMeta for a more complete description.
  last_complete_open_id integer references open (id)
);

create table camera (
  id integer primary key,
  uuid blob unique not null check (length(uuid) = 16),

  -- A short name of the camera, used in log messages.
  short_name text not null,

  -- A short description of the camera.
  description text,

  -- The host (or IP address) to use in rtsp:// URLs for this camera.
  host text,

  -- Credentials for the camera. If username is empty, neither username nor
  -- password will be supplied.
  username text,
  password text
);

create table stream (
  id integer primary key,
  camera_id integer not null references camera (id),
  sample_file_dir_id integer references sample_file_dir (id),
  type text not null check (type in ('main', 'sub')),

  -- Whether to start recording when moonfire starts. When 0, no new
  -- recordings are made, but old recordings are not deleted.
  record integer not null check (record in (1, 0)),

  -- The path (starting with "/") to use in rtsp:// URLs for this stream.
  rtsp_path text not null,

  -- Bytes of video to retain, excluding the currently-recording file.
  -- Older files are deleted as needed to stay within this limit.
  retain_bytes integer not null check (retain_bytes >= 0),

  -- Flush the database when the first instant of completed recording is this
  -- many seconds old; 0 flushes after every completed recording. Higher
  -- values let flushes be combined (fewer SSD write cycles) at the cost of up
  -- to this much completed-but-unflushed video per stream (and up to that
  -- many unflushed recordings in the pathological instant-fail/retry case).
  flush_if_sec integer not null,

  -- Low 32 bits of the next recording id to assign for this stream.
  -- Typically max current recording + 1; never decreases on delete.
  next_recording_id integer not null check (next_recording_id >= 0),

  unique (camera_id, type)
);

-- One completed recorded segment of video: typically ~60 seconds, never more
-- than 5 minutes.
create table recording (
  -- High 32 bits: the stream id (improves locality). Low 32 bits: the
  -- stream's next_recording_id, post-incremented in the same transaction.
  -- A "without rowid" table with separate key fields would be simpler, but
  -- <https://www.sqlite.org/withoutrowid.html> advises against it when the
  -- average row exceeds 50 bytes; recording_cover rows (which match this id
  -- format) are typically 1--5 KiB.
  composite_id integer primary key,

  -- The open in which this recording was committed. For a given composite_id
  -- only one recording is ever committed, but in-memory state may reflect one
  -- that never is; this field disambiguates etags and such.
  open_id integer not null references open (id),

  -- Redundant with composite_id, but needed for the reference constraint and
  -- to structure the recording_cover index.
  stream_id integer not null references stream (id),

  -- Offset within a run: 0 means the first recording of an RTSP session.
  -- The run's first recording has id (id - run_offset).
  run_offset integer not null,

  -- Bitmask; 1 ("trailing zero") marks the last recording in a stream, whose
  -- final sample has duration 0 (a sample's duration is unknown until the
  -- next sample arrives).
  flags integer not null,

  sample_file_bytes integer not null check (sample_file_bytes > 0),

  -- Start time in 90 kHz units since 1970-01-01 00:00:00 UTC excluding leap
  -- seconds. Taken from local system time on initial connection; subsequent
  -- recordings exactly continue the previous recording's end time.
  start_time_90k integer not null check (start_time_90k > 0),

  -- Duration in 90 kHz units.
  duration_90k integer not null
      check (duration_90k >= 0 and duration_90k < 5*60*90000),

  video_samples integer not null check (video_samples > 0),
  video_sync_samples integer not null check (video_sync_samples > 0),
  video_sample_entry_id integer references video_sample_entry (id),

  check (composite_id >> 32 = stream_id)
);

create index recording_cover on recording (
  -- Typical queries use "where stream_id = ? order by start_time_90k".
  stream_id,
  start_time_90k,

  -- The rest are not used for ordering; they cover most queries so only
  -- database verification and actual playback must consult the base row.
  open_id,
  duration_90k,
  video_samples,
  video_sync_samples,
  video_sample_entry_id,
  sample_file_bytes,
  run_offset,
  flags
);

-- Fields needed only to check/correct database integrity problems (such as
-- incorrect timestamps).
create table recording_integrity (
  -- See description on recording table.
  composite_id integer primary key references recording (composite_id),

  -- 90 kHz units the local monotonic clock advanced beyond the stated
  -- duration of the run since the first recording ended (negative: local
  -- system time behind the recording). Null on a run's first recording
  -- (run_offset=0), where error is assumed to be initial buffering rather
  -- than frequency mismatch. Stays near 0 even on long runs because each
  -- recording's delta corrects the next durations (up to 500 ppm error).
  local_time_delta_90k integer,

  -- 90 kHz units the local monotonic clock had advanced since the database
  -- was opened, as of recording start. TODO: fill this in!
  local_time_since_open_90k integer,

  -- start_time_90k + duration_90k minus a wall clock timestamp captured at
  -- the end of this recording; meaningful for every recording in a run, even
  -- run_offset=0, because start_time_90k derives from wall time at start,
  -- not end. TODO: fill this in!
  wall_time_delta_90k integer,

  -- sha1 of the sample file contents.
  sample_file_sha1 blob check (length(sample_file_sha1) <= 20)
);

-- Large per-recording fields needed only for playback. When serving a byte
-- range of a .mp4, only the recordings in that range need this row; the
-- recording rows suffice for everything else in the .mp4.
create table recording_playback (
  -- See description on recording table.
  composite_id integer primary key references recording (composite_id),

  -- See design/schema.md#video_index.
  video_index blob not null check (length(video_index) > 0)

  -- audio_index could be added here in the future.
);

-- Files to delete (which may or may not still exist). Additionally, on
-- startup any recordings >= a stream's next_recording_id should be discarded.
create table garbage (
  -- Mostly redundant with composite_id (which embeds the stream id and thus
  -- the directory), but listing it explicitly keeps the garbage-to-directory
  -- association even after a stream is deleted.
  sample_file_dir_id integer not null references sample_file_dir (id),

  -- See description on recording table.
  composite_id integer not null,

  -- Directory first: that's how this table is queried.
  primary key (sample_file_dir_id, composite_id)
) without rowid;

-- A concrete box derived from a ISO/IEC 14496-12 section 8.5.2
-- VisualSampleEntry box: codec, width, height, etc.
create table video_sample_entry (
  id integer primary key,

  -- SHA-1 hash of the serialized box below.
  sha1 blob unique not null check (length(sha1) = 20),

  -- Pixel dimensions; must match the values within the serialized box.
  width integer not null check (width > 0),
  height integer not null check (height > 0),

  -- The codec in RFC-6381 format, such as "avc1.4d001f".
  rfc6381_codec text not null,

  -- The serialized box, including the leading length and box type (avcC in
  -- the case of H.264).
  data blob not null check (length(data) > 86)
);

create table user (
  id integer primary key,
  username unique not null,

  -- Bitmask; 1 = disabled (no authentication method for this user succeeds).
  flags integer not null,

  -- Hash for password authentication, as generated by
  -- `libpasta::hash_password`, if set.
  password_hash text,

  -- Incremented on every password reset or clear.
  password_id integer not null default 0,

  -- Updated lazily on database flush; reset when password_id increments.
  -- Could drive automatic password disabling on hitting a threshold.
  password_failure_count integer not null default 0,

  -- Unix UID accepted for authentication over HTTP on a Unix domain socket,
  -- if set. (The UID running Moonfire NVR can authenticate as anyone
  -- regardless.) An easy bootstrap once configuration moves to a web UI.
  unix_uid integer
);

-- A single session, whether for browser or robot use. Maps at the HTTP layer
-- to an "s" cookie (exact format described elsewhere) holding the session id
-- and an encrypted sequence number for replay protection.
create table user_session (
  -- The session id is a 48-byte blob; this is the unencoded, unsalted
  -- Blake2b-192 (24 bytes) of it. As with `password_hash`, hashing means a
  -- leaked database backup can't be trivially used to steal credentials.
  session_id_hash blob primary key not null,

  user_id integer references user (id) not null,

  -- 32 random bytes used to derive replay-protection and CSRF token keys.
  seed blob not null,

  -- Bitmask of HTTP cookie properties:
  -- 1: HttpOnly
  -- 2: Secure
  -- 4: SameSite=Lax
  -- 8: SameSite=Strict (4 must also be set)
  flags integer not null,

  -- Cookie domain. The outbound `Set-Cookie` never specifies a scope, so this
  -- matches the `Host:` of the inbound request (minus any :port).
  domain text,

  -- Editable description of the device/program using this session, such as
  -- "Chromebook", "iPhone", or "motion detection worker".
  description text,

  creation_password_id integer, -- the id it was created from, if via password
  creation_time_sec integer not null, -- sec since epoch
  creation_user_agent text, -- User-Agent header from inbound HTTP request.
  creation_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket.

  revocation_time_sec integer, -- sec since epoch
  revocation_user_agent text, -- User-Agent header from inbound HTTP request.
  revocation_peer_addr blob, -- IPv4/IPv6 address; null for Unix socket/no peer.

  -- Reason for revocation, with optional detail. Enumeration values:
  -- 0: logout link clicked (i.e. from within the session itself)
  -- Might later be extended with: user revoked, password change invalidated,
  -- expired, evicted, suspicious activity.
  revocation_reason integer,
  revocation_reason_detail text,

  -- Usage information, updated lazily on database flush.
  last_use_time_sec integer, -- sec since epoch
  last_use_user_agent text, -- User-Agent header from inbound HTTP request.
  last_use_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket.
  use_count not null default 0
) without rowid;

create index user_session_uid on user_session (user_id);

insert into version (id, unix_time, notes)
    values (3, cast(strftime('%s', 'now') as int), 'db creation');
|
||||
196
server/db/upgrade/v3_to_v4.rs
Normal file
196
server/db/upgrade/v3_to_v4.rs
Normal file
@@ -0,0 +1,196 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2019 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Upgrades a version 3 schema to a version 4 schema.
|
||||
|
||||
use failure::Error;
|
||||
|
||||
/// Upgrades a version 3 schema to a version 4 schema: adds signals, adds
/// per-user/per-session permissions, and replaces the camera `host` /
/// stream `rtsp_path` pair with a single stream `rtsp_url`.
pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
    // These create statements match the schema.sql when version 4 was the latest.
    tx.execute_batch(r#"
      alter table meta add column max_signal_changes integer check (max_signal_changes >= 0);

      create table signal (
        id integer primary key,
        source_uuid blob not null check (length(source_uuid) = 16),
        type_uuid blob not null check (length(type_uuid) = 16),
        short_name not null,
        unique (source_uuid, type_uuid)
      );

      create table signal_type_enum (
        type_uuid blob not null check (length(type_uuid) = 16),
        value integer not null check (value > 0 and value < 16),
        name text not null,
        motion int not null check (motion in (0, 1)) default 0,
        color text
      );

      create table signal_camera (
        signal_id integer references signal (id),
        camera_id integer references camera (id),
        type integer not null,
        primary key (signal_id, camera_id)
      ) without rowid;

      create table signal_change (
        time_90k integer primary key,
        changes blob not null
      );

      alter table user add column permissions blob not null default X'';
      alter table user_session add column permissions blob not null default X'';

      -- Set permissions to "view_video" on existing users and sessions to preserve their
      -- behavior. Newly created users won't have prepopulated permissions like this.
      update user set permissions = X'0801';
      update user_session set permissions = X'0801';

      alter table camera rename to old_camera;
      create table camera (
        id integer primary key,
        uuid blob unique not null check (length(uuid) = 16),
        short_name text not null,
        description text,
        onvif_host text,
        username text,
        password text
      );
      insert into camera
      select
        id,
        uuid,
        short_name,
        description,
        host,
        username,
        password
      from
        old_camera;

      alter table stream rename to old_stream;
      create table stream (
        id integer primary key,
        camera_id integer not null references camera (id),
        sample_file_dir_id integer references sample_file_dir (id),
        type text not null check (type in ('main', 'sub')),
        record integer not null check (record in (1, 0)),
        rtsp_url text not null,
        retain_bytes integer not null check (retain_bytes >= 0),
        flush_if_sec integer not null,
        next_recording_id integer not null check (next_recording_id >= 0),
        unique (camera_id, type)
      );
      insert into stream
      select
        s.id,
        s.camera_id,
        s.sample_file_dir_id,
        s.type,
        s.record,
        'rtsp://' || c.onvif_host || s.rtsp_path as rtsp_url,
        retain_bytes,
        flush_if_sec,
        next_recording_id
      from
        old_stream s join camera c on (s.camera_id = c.id);

      alter table recording rename to old_recording;
      create table recording (
        composite_id integer primary key,
        open_id integer not null,
        stream_id integer not null references stream (id),
        run_offset integer not null,
        flags integer not null,
        sample_file_bytes integer not null check (sample_file_bytes > 0),
        start_time_90k integer not null check (start_time_90k > 0),
        duration_90k integer not null
            check (duration_90k >= 0 and duration_90k < 5*60*90000),
        video_samples integer not null check (video_samples > 0),
        video_sync_samples integer not null check (video_sync_samples > 0),
        video_sample_entry_id integer references video_sample_entry (id),
        check (composite_id >> 32 = stream_id)
      );
      insert into recording select
        composite_id,
        open_id,
        stream_id,
        run_offset,
        flags,
        sample_file_bytes,
        start_time_90k,
        duration_90k,
        video_samples,
        video_sync_samples,
        video_sample_entry_id
      from old_recording;
      drop index recording_cover;
      create index recording_cover on recording (
        stream_id,
        start_time_90k,
        open_id,
        duration_90k,
        video_samples,
        video_sync_samples,
        video_sample_entry_id,
        sample_file_bytes,
        run_offset,
        flags
      );

      alter table recording_integrity rename to old_recording_integrity;
      create table recording_integrity (
        composite_id integer primary key references recording (composite_id),
        local_time_delta_90k integer,
        local_time_since_open_90k integer,
        wall_time_delta_90k integer,
        sample_file_sha1 blob check (length(sample_file_sha1) <= 20)
      );
      insert into recording_integrity select * from old_recording_integrity;

      alter table recording_playback rename to old_recording_playback;
      create table recording_playback (
        composite_id integer primary key references recording (composite_id),
        video_index blob not null check (length(video_index) > 0)
      );
      insert into recording_playback select * from old_recording_playback;

      drop table old_recording_playback;
      drop table old_recording_integrity;
      drop table old_recording;
      drop table old_stream;
      drop table old_camera;

      -- This was supposed to be present in version 2, but the upgrade procedure used to miss it.
      -- Catch up so we know a version 4 database is right.
      create index if not exists user_session_uid on user_session (user_id);
    "#)?;
    Ok(())
}
|
||||
162
server/db/upgrade/v4_to_v5.rs
Normal file
162
server/db/upgrade/v4_to_v5.rs
Normal file
@@ -0,0 +1,162 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2019 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Upgrades a version 4 schema to a version 5 schema.
|
||||
///
|
||||
/// This just handles the directory meta files. If they're already in the new format, great.
|
||||
/// Otherwise, verify they are consistent with the database then upgrade them.
|
||||
|
||||
use crate::db::FromSqlUuid;
|
||||
use crate::{dir, schema};
|
||||
use cstr::cstr;
|
||||
use failure::{Error, Fail, bail};
|
||||
use log::info;
|
||||
use nix::fcntl::{FlockArg, OFlag};
|
||||
use nix::sys::stat::Mode;
|
||||
use protobuf::Message;
|
||||
use rusqlite::params;
|
||||
use std::io::{Read, Write};
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use uuid::Uuid;
|
||||
|
||||
const FIXED_DIR_META_LEN: usize = 512;
|
||||
|
||||
/// Maybe upgrades the `meta` file, returning if an upgrade happened (and thus a sync is needed).
|
||||
fn maybe_upgrade_meta(dir: &dir::Fd, db_meta: &schema::DirMeta) -> Result<bool, Error> {
|
||||
let tmp_path = cstr!("meta.tmp");
|
||||
let meta_path = cstr!("meta");
|
||||
let mut f = crate::fs::openat(dir.as_raw_fd(), meta_path, OFlag::O_RDONLY, Mode::empty())?;
|
||||
let mut data = Vec::new();
|
||||
f.read_to_end(&mut data)?;
|
||||
if data.len() == FIXED_DIR_META_LEN {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let mut s = protobuf::CodedInputStream::from_bytes(&data);
|
||||
let mut dir_meta = schema::DirMeta::new();
|
||||
dir_meta.merge_from(&mut s)
|
||||
.map_err(|e| e.context("Unable to parse metadata proto: {}"))?;
|
||||
if !dir::SampleFileDir::consistent(&db_meta, &dir_meta) {
|
||||
bail!("Inconsistent db_meta={:?} dir_meta={:?}", &db_meta, &dir_meta);
|
||||
}
|
||||
let mut f = crate::fs::openat(dir.as_raw_fd(), tmp_path,
|
||||
OFlag::O_CREAT | OFlag::O_TRUNC | OFlag::O_WRONLY,
|
||||
Mode::S_IRUSR | Mode::S_IWUSR)?;
|
||||
let mut data =
|
||||
dir_meta.write_length_delimited_to_bytes().expect("proto3->vec is infallible");
|
||||
if data.len() > FIXED_DIR_META_LEN {
|
||||
bail!("Length-delimited DirMeta message requires {} bytes, over limit of {}",
|
||||
data.len(), FIXED_DIR_META_LEN);
|
||||
}
|
||||
data.resize(FIXED_DIR_META_LEN, 0); // pad to required length.
|
||||
f.write_all(&data)?;
|
||||
f.sync_all()?;
|
||||
|
||||
nix::fcntl::renameat(Some(dir.as_raw_fd()), tmp_path, Some(dir.as_raw_fd()), meta_path)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Looks for uuid-based filenames and deletes them.
|
||||
///
|
||||
/// The v1->v3 migration failed to remove garbage files prior to 433be217. Let's have a clean slate
|
||||
/// at v5.
|
||||
///
|
||||
/// Returns true if something was done (and thus a sync is needed).
|
||||
fn maybe_cleanup_garbage_uuids(dir: &dir::Fd) -> Result<bool, Error> {
|
||||
let mut need_sync = false;
|
||||
let mut dir2 = nix::dir::Dir::openat(dir.as_raw_fd(), ".",
|
||||
OFlag::O_DIRECTORY | OFlag::O_RDONLY, Mode::empty())?;
|
||||
for e in dir2.iter() {
|
||||
let e = e?;
|
||||
let f = e.file_name();
|
||||
info!("file: {}", f.to_str().unwrap());
|
||||
let f_str = match f.to_str() {
|
||||
Ok(f) => f,
|
||||
Err(_) => continue,
|
||||
};
|
||||
if Uuid::parse_str(f_str).is_ok() {
|
||||
info!("removing leftover garbage file {}", f_str);
|
||||
nix::unistd::unlinkat(Some(dir.as_raw_fd()), f,
|
||||
nix::unistd::UnlinkatFlags::NoRemoveDir)?;
|
||||
need_sync = true;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(need_sync)
|
||||
}
|
||||
|
||||
/// Brings each sample file directory in sync with the v5 database schema.
///
/// For every `sample_file_dir` row, this builds the expected `DirMeta` proto
/// (db uuid, dir uuid, and the last complete open, if any), takes an exclusive
/// lock on the directory, rewrites its on-disk metadata if stale (see
/// `maybe_upgrade_meta`), deletes leftover uuid-named garbage files, and
/// fsyncs the directory iff anything changed.
pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
    // The database's own uuid; written into each directory's DirMeta so the
    // directory can be matched back to this database.
    let db_uuid: FromSqlUuid =
        tx.query_row_and_then(r"select uuid from meta", params![], |row| row.get(0))?;
    let mut stmt = tx.prepare(r#"
        select
          d.path,
          d.uuid,
          d.last_complete_open_id,
          o.uuid
        from
          sample_file_dir d
          left join open o on (d.last_complete_open_id = o.id);
    "#)?;
    let mut rows = stmt.query(params![])?;
    while let Some(row) = rows.next()? {
        let path = row.get_raw_checked(0)?.as_str()?;
        info!("path: {}", path);
        let dir_uuid: FromSqlUuid = row.get(1)?;
        let open_id: Option<u32> = row.get(2)?;
        let open_uuid: Option<FromSqlUuid> = row.get(3)?;
        let mut db_meta = schema::DirMeta::new();
        db_meta.db_uuid.extend_from_slice(&db_uuid.0.as_bytes()[..]);
        db_meta.dir_uuid.extend_from_slice(&dir_uuid.0.as_bytes()[..]);

        // The open's id and uuid come from a left join, so they must be both
        // present (a completed open exists) or both absent; anything else
        // indicates a corrupt open table.
        match (open_id, open_uuid) {
            (Some(id), Some(uuid)) => {
                let mut o = db_meta.last_complete_open.set_default();
                o.id = id;
                o.uuid.extend_from_slice(&uuid.0.as_bytes()[..]);
            },
            (None, None) => {},
            _ => bail!("open table missing id"),
        }

        // Lock the directory exclusively (non-blocking) so no concurrently
        // running server can mutate it mid-upgrade.
        let dir = dir::Fd::open(path, false)?;
        dir.lock(FlockArg::LockExclusiveNonblock)?;

        let mut need_sync = maybe_upgrade_meta(&dir, &db_meta)?;
        if maybe_cleanup_garbage_uuids(&dir)? {
            need_sync = true;
        }

        // fsync the directory only if a step above actually changed it.
        if need_sync {
            dir.sync()?;
        }
        info!("done with path: {}", path);
    }
    Ok(())
}
|
||||
497
server/db/upgrade/v5.sql
Normal file
497
server/db/upgrade/v5.sql
Normal file
@@ -0,0 +1,497 @@
|
||||
-- This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
-- Copyright (C) 2016 The Moonfire NVR Authors
|
||||
--
|
||||
-- This program is free software: you can redistribute it and/or modify
|
||||
-- it under the terms of the GNU General Public License as published by
|
||||
-- the Free Software Foundation, either version 3 of the License, or
|
||||
-- (at your option) any later version.
|
||||
--
|
||||
-- In addition, as a special exception, the copyright holders give
|
||||
-- permission to link the code of portions of this program with the
|
||||
-- OpenSSL library under certain conditions as described in each
|
||||
-- individual source file, and distribute linked combinations including
|
||||
-- the two.
|
||||
--
|
||||
-- You must obey the GNU General Public License in all respects for all
|
||||
-- of the code used other than OpenSSL. If you modify file(s) with this
|
||||
-- exception, you may extend this exception to your version of the
|
||||
-- file(s), but you are not obligated to do so. If you do not wish to do
|
||||
-- so, delete this exception statement from your version. If you delete
|
||||
-- this exception statement from all source files in the program, then
|
||||
-- also delete it here.
|
||||
--
|
||||
-- This program is distributed in the hope that it will be useful,
|
||||
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
-- GNU General Public License for more details.
|
||||
--
|
||||
-- You should have received a copy of the GNU General Public License
|
||||
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
--
|
||||
-- schema.sql: SQLite3 database schema for Moonfire NVR.
|
||||
-- See also design/schema.md.
|
||||
|
||||
-- Database metadata. There should be exactly one row in this table.
|
||||
create table meta (
|
||||
uuid blob not null check (length(uuid) = 16),
|
||||
|
||||
-- The maximum number of entries in the signal_state table. If an update
|
||||
-- causes this to be exceeded, older times will be garbage collected to stay
|
||||
-- within the limit.
|
||||
max_signal_changes integer check (max_signal_changes >= 0)
|
||||
);
|
||||
|
||||
-- This table tracks the schema version.
|
||||
-- There is one row for the initial database creation (inserted below, after the
|
||||
-- create statements) and one for each upgrade procedure (if any).
|
||||
create table version (
|
||||
id integer primary key,
|
||||
|
||||
-- The unix time as of the creation/upgrade, as determined by
|
||||
-- cast(strftime('%s', 'now') as int).
|
||||
unix_time integer not null,
|
||||
|
||||
-- Optional notes on the creation/upgrade; could include the binary version.
|
||||
notes text
|
||||
);
|
||||
|
||||
-- Tracks every time the database has been opened in read/write mode.
|
||||
-- This is used to ensure directories are in sync with the database (see
|
||||
-- schema.proto:DirMeta), to disambiguate uncommitted recordings, and
|
||||
-- potentially to understand time problems.
|
||||
create table open (
|
||||
id integer primary key,
|
||||
uuid blob unique not null check (length(uuid) = 16),
|
||||
|
||||
-- Information about when / how long the database was open. These may be all
|
||||
-- null, for example in the open that represents all information written
|
||||
-- prior to database version 3.
|
||||
|
||||
-- System time when the database was opened, in 90 kHz units since
|
||||
-- 1970-01-01 00:00:00Z excluding leap seconds.
|
||||
start_time_90k integer,
|
||||
|
||||
-- System time when the database was closed or (on crash) last flushed.
|
||||
end_time_90k integer,
|
||||
|
||||
-- How long the database was open. This is end_time_90k - start_time_90k if
|
||||
-- there were no time steps or leap seconds during this time.
|
||||
duration_90k integer
|
||||
);
|
||||
|
||||
create table sample_file_dir (
|
||||
id integer primary key,
|
||||
path text unique not null,
|
||||
uuid blob unique not null check (length(uuid) = 16),
|
||||
|
||||
-- The last (read/write) open of this directory which fully completed.
|
||||
-- See schema.proto:DirMeta for a more complete description.
|
||||
last_complete_open_id integer references open (id)
|
||||
);
|
||||
|
||||
create table camera (
  id integer primary key,
  uuid blob unique not null check (length(uuid) = 16),

  -- A short name of the camera, used in log messages.
  short_name text not null,

  -- A short description of the camera.
  description text,

  -- The host part of the http:// URL when accessing ONVIF, optionally
  -- including ":<port>". Eg with ONVIF host "192.168.1.110:85", the full URL
  -- of the device management service will be
  -- "http://192.168.1.110:85/device_service".
  onvif_host text,

  -- The username to use when accessing the camera.
  -- If empty, no username or password will be supplied.
  username text,

  -- The password to use when accessing the camera.
  password text
);
|
||||
|
||||
create table stream (
|
||||
id integer primary key,
|
||||
camera_id integer not null references camera (id),
|
||||
sample_file_dir_id integer references sample_file_dir (id),
|
||||
type text not null check (type in ('main', 'sub')),
|
||||
|
||||
-- If record is true, the stream should start recording when moonfire
|
||||
-- starts. If false, no new recordings will be made, but old recordings
|
||||
-- will not be deleted.
|
||||
record integer not null check (record in (1, 0)),
|
||||
|
||||
-- The rtsp:// URL to use for this stream, excluding username and password.
|
||||
-- (Those are taken from the camera row's respective fields.)
|
||||
rtsp_url text not null,
|
||||
|
||||
-- The number of bytes of video to retain, excluding the currently-recording
|
||||
-- file. Older files will be deleted as necessary to stay within this limit.
|
||||
retain_bytes integer not null check (retain_bytes >= 0),
|
||||
|
||||
-- Flush the database when the first instant of completed recording is this
|
||||
-- many seconds old. A value of 0 means that every completed recording will
|
||||
-- cause an immediate flush. Higher values may allow flushes to be combined,
|
||||
-- reducing SSD write cycles. For example, if all streams have a flush_if_sec
|
||||
-- >= x sec, there will be:
|
||||
--
|
||||
-- * at most one flush per x sec in total
|
||||
-- * at most x sec of completed but unflushed recordings per stream.
|
||||
-- * at most x completed but unflushed recordings per stream, in the worst
|
||||
-- case where a recording instantly fails, waits the 1-second retry delay,
|
||||
-- then fails again, forever.
|
||||
flush_if_sec integer not null,
|
||||
|
||||
-- The low 32 bits of the next recording id to assign for this stream.
|
||||
-- Typically this is the maximum current recording + 1, but it does
|
||||
-- not decrease if that recording is deleted.
|
||||
next_recording_id integer not null check (next_recording_id >= 0),
|
||||
|
||||
unique (camera_id, type)
|
||||
);
|
||||
|
||||
-- Each row represents a single completed recorded segment of video.
|
||||
-- Recordings are typically ~60 seconds; never more than 5 minutes.
|
||||
create table recording (
|
||||
-- The high 32 bits of composite_id are taken from the stream's id, which
|
||||
-- improves locality. The low 32 bits are taken from the stream's
|
||||
-- next_recording_id (which should be post-incremented in the same
|
||||
-- transaction). It'd be simpler to use a "without rowid" table and separate
|
||||
-- fields to make up the primary key, but
|
||||
-- <https://www.sqlite.org/withoutrowid.html> points out that "without rowid"
|
||||
-- is not appropriate when the average row size is in excess of 50 bytes.
|
||||
-- recording_cover rows (which match this id format) are typically 1--5 KiB.
|
||||
composite_id integer primary key,
|
||||
|
||||
-- The open in which this was committed to the database. For a given
|
||||
-- composite_id, only one recording will ever be committed to the database,
|
||||
-- but in-memory state may reflect a recording which never gets committed.
|
||||
-- This field allows disambiguation in etags and such.
|
||||
open_id integer not null references open (id),
|
||||
|
||||
-- This field is redundant with id above, but used to enforce the reference
|
||||
-- constraint and to structure the recording_start_time index.
|
||||
stream_id integer not null references stream (id),
|
||||
|
||||
-- The offset of this recording within a run. 0 means this was the first
|
||||
-- recording made from a RTSP session. The start of the run has id
|
||||
-- (id-run_offset).
|
||||
run_offset integer not null,
|
||||
|
||||
-- flags is a bitmask:
|
||||
--
|
||||
-- * 1, or "trailing zero", indicates that this recording is the last in a
|
||||
-- stream. As the duration of a sample is not known until the next sample
|
||||
-- is received, the final sample in this recording will have duration 0.
|
||||
flags integer not null,
|
||||
|
||||
sample_file_bytes integer not null check (sample_file_bytes > 0),
|
||||
|
||||
-- The starting time of the recording, in 90 kHz units since
|
||||
-- 1970-01-01 00:00:00 UTC excluding leap seconds. Currently on initial
|
||||
-- connection, this is taken from the local system time; on subsequent
|
||||
-- recordings, it exactly matches the previous recording's end time.
|
||||
start_time_90k integer not null check (start_time_90k > 0),
|
||||
|
||||
-- The duration of the recording, in 90 kHz units.
|
||||
duration_90k integer not null
|
||||
check (duration_90k >= 0 and duration_90k < 5*60*90000),
|
||||
|
||||
video_samples integer not null check (video_samples > 0),
|
||||
video_sync_samples integer not null check (video_sync_samples > 0),
|
||||
video_sample_entry_id integer references video_sample_entry (id),
|
||||
|
||||
check (composite_id >> 32 = stream_id)
|
||||
);
|
||||
|
||||
create index recording_cover on recording (
|
||||
-- Typical queries use "where stream_id = ? order by start_time_90k".
|
||||
stream_id,
|
||||
start_time_90k,
|
||||
|
||||
-- These fields are not used for ordering; they cover most queries so
|
||||
-- that only database verification and actual viewing of recordings need
|
||||
-- to consult the underlying row.
|
||||
open_id,
|
||||
duration_90k,
|
||||
video_samples,
|
||||
video_sync_samples,
|
||||
video_sample_entry_id,
|
||||
sample_file_bytes,
|
||||
run_offset,
|
||||
flags
|
||||
);
|
||||
|
||||
-- Fields which are only needed to check/correct database integrity problems
|
||||
-- (such as incorrect timestamps).
|
||||
create table recording_integrity (
|
||||
-- See description on recording table.
|
||||
composite_id integer primary key references recording (composite_id),
|
||||
|
||||
-- The number of 90 kHz units the local system's monotonic clock has
|
||||
-- advanced more than the stated duration of recordings in a run since the
|
||||
-- first recording ended. Negative numbers indicate the local system time is
|
||||
-- behind the recording.
|
||||
--
|
||||
-- The first recording of a run (that is, one with run_offset=0) has null
|
||||
-- local_time_delta_90k because errors are assumed to
|
||||
-- be the result of initial buffering rather than frequency mismatch.
|
||||
--
|
||||
-- This value should be near 0 even on long runs in which the camera's clock
|
||||
-- and local system's clock frequency differ because each recording's delta
|
||||
-- is used to correct the durations of the next (up to 500 ppm error).
|
||||
local_time_delta_90k integer,
|
||||
|
||||
-- The number of 90 kHz units the local system's monotonic clock had
|
||||
-- advanced since the database was opened, as of the start of recording.
|
||||
-- TODO: fill this in!
|
||||
local_time_since_open_90k integer,
|
||||
|
||||
-- The difference between start_time_90k+duration_90k and a wall clock
|
||||
-- timestamp captured at end of this recording. This is meaningful for all
|
||||
-- recordings in a run, even the initial one (run_offset=0), because
|
||||
-- start_time_90k is derived from the wall time as of when recording
|
||||
-- starts, not when it ends.
|
||||
-- TODO: fill this in!
|
||||
wall_time_delta_90k integer,
|
||||
|
||||
-- The sha1 hash of the contents of the sample file.
|
||||
sample_file_sha1 blob check (length(sample_file_sha1) <= 20)
|
||||
);
|
||||
|
||||
-- Large fields for a recording which are needed only for playback.
-- In particular, when serving a byte range within a .mp4 file, the
-- recording_playback row is needed for the recording(s) corresponding to that
-- particular byte range, but the recording rows suffice for all other
-- recordings in the .mp4.
create table recording_playback (
  -- See description on recording table.
  composite_id integer primary key references recording (composite_id),

  -- See design/schema.md#video_index for a description of this field.
  video_index blob not null check (length(video_index) > 0)

  -- audio_index could be added here in the future.
);
|
||||
|
||||
-- Files which are to be deleted (may or may not still exist).
|
||||
-- Note that besides these files, for each stream, any recordings >= its
|
||||
-- next_recording_id should be discarded on startup.
|
||||
create table garbage (
|
||||
-- This is _mostly_ redundant with composite_id, which contains the stream
|
||||
-- id and thus a linkage to the sample file directory. Listing it here
|
||||
-- explicitly means that streams can be deleted without losing the
|
||||
-- association of garbage to directory.
|
||||
sample_file_dir_id integer not null references sample_file_dir (id),
|
||||
|
||||
-- See description on recording table.
|
||||
composite_id integer not null,
|
||||
|
||||
-- Organize the table first by directory, as that's how it will be queried.
|
||||
primary key (sample_file_dir_id, composite_id)
|
||||
) without rowid;
|
||||
|
||||
-- A concrete box derived from a ISO/IEC 14496-12 section 8.5.2
|
||||
-- VisualSampleEntry box. Describes the codec, width, height, etc.
|
||||
create table video_sample_entry (
|
||||
id integer primary key,
|
||||
|
||||
-- A SHA-1 hash of |bytes|.
|
||||
sha1 blob unique not null check (length(sha1) = 20),
|
||||
|
||||
-- The width and height in pixels; must match values within
|
||||
-- |sample_entry_bytes|.
|
||||
width integer not null check (width > 0),
|
||||
height integer not null check (height > 0),
|
||||
|
||||
-- The codec in RFC-6381 format, such as "avc1.4d001f".
|
||||
rfc6381_codec text not null,
|
||||
|
||||
-- The serialized box, including the leading length and box type (avcC in
|
||||
-- the case of H.264).
|
||||
data blob not null check (length(data) > 86)
|
||||
);
|
||||
|
||||
create table user (
|
||||
id integer primary key,
|
||||
username unique not null,
|
||||
|
||||
-- Bitwise mask of flags:
|
||||
-- 1: disabled. If set, no method of authentication for this user will succeed.
|
||||
flags integer not null,
|
||||
|
||||
-- If set, a hash for password authentication, as generated by `libpasta::hash_password`.
|
||||
password_hash text,
|
||||
|
||||
-- A counter which increments with every password reset or clear.
|
||||
password_id integer not null default 0,
|
||||
|
||||
-- Updated lazily on database flush; reset when password_id is incremented.
|
||||
-- This could be used to automatically disable the password on hitting a threshold.
|
||||
password_failure_count integer not null default 0,
|
||||
|
||||
-- If set, a Unix UID that is accepted for authentication when using HTTP over
|
||||
-- a Unix domain socket. (Additionally, the UID running Moonfire NVR can authenticate
|
||||
-- as anyone; there's no point in trying to do otherwise.) This might be an easy
|
||||
-- bootstrap method once configuration happens through a web UI rather than text UI.
|
||||
unix_uid integer,
|
||||
|
||||
-- Permissions available for newly created tokens or when authenticating via
|
||||
-- unix_uid above. A serialized "Permissions" protobuf.
|
||||
permissions blob not null default X''
|
||||
);
|
||||
|
||||
-- A single session, whether for browser or robot use.
|
||||
-- These map at the HTTP layer to an "s" cookie (exact format described
|
||||
-- elsewhere), which holds the session id and an encrypted sequence number for
|
||||
-- replay protection.
|
||||
create table user_session (
|
||||
-- The session id is a 48-byte blob. This is the unsalted Blake2b-192
|
||||
-- (24 bytes) of the unencoded session id. Much like `password_hash`, a
|
||||
-- hash is used here so that a leaked database backup can't be trivially used
|
||||
-- to steal credentials.
|
||||
session_id_hash blob primary key not null,
|
||||
|
||||
user_id integer references user (id) not null,
|
||||
|
||||
-- A 32-byte random number. Used to derive keys for the replay protection
|
||||
-- and CSRF tokens.
|
||||
seed blob not null,
|
||||
|
||||
-- A bitwise mask of flags, currently all properties of the HTTP cookie
|
||||
-- used to hold the session:
|
||||
-- 1: HttpOnly
|
||||
-- 2: Secure
|
||||
-- 4: SameSite=Lax
|
||||
-- 8: SameSite=Strict - 4 must also be set.
|
||||
flags integer not null,
|
||||
|
||||
-- The domain of the HTTP cookie used to store this session. The outbound
|
||||
-- `Set-Cookie` header never specifies a scope, so this matches the `Host:` of
|
||||
-- the inbound HTTP request (minus the :port, if any was specified).
|
||||
domain text,
|
||||
|
||||
-- An editable description which might describe the device/program which uses
|
||||
-- this session, such as "Chromebook", "iPhone", or "motion detection worker".
|
||||
description text,
|
||||
|
||||
creation_password_id integer, -- the id it was created from, if created via password
|
||||
creation_time_sec integer not null, -- sec since epoch
|
||||
creation_user_agent text, -- User-Agent header from inbound HTTP request.
|
||||
creation_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket.
|
||||
|
||||
revocation_time_sec integer, -- sec since epoch
|
||||
revocation_user_agent text, -- User-Agent header from inbound HTTP request.
|
||||
revocation_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket/no peer.
|
||||
|
||||
-- A value indicating the reason for revocation, with optional additional
|
||||
-- text detail. Enumeration values:
|
||||
-- 0: logout link clicked (i.e. from within the session itself)
|
||||
--
|
||||
-- This might be extended for a variety of other reasons:
|
||||
-- x: user revoked (while authenticated in another way)
|
||||
-- x: password change invalidated all sessions created with that password
|
||||
-- x: expired (due to fixed total time or time inactive)
|
||||
-- x: evicted (due to too many sessions)
|
||||
-- x: suspicious activity
|
||||
revocation_reason integer,
|
||||
revocation_reason_detail text,
|
||||
|
||||
-- Information about requests which used this session, updated lazily on database flush.
|
||||
last_use_time_sec integer, -- sec since epoch
|
||||
last_use_user_agent text, -- User-Agent header from inbound HTTP request.
|
||||
last_use_peer_addr blob, -- IPv4 or IPv6 address, or null for Unix socket.
|
||||
use_count not null default 0,
|
||||
|
||||
-- Permissions associated with this token; a serialized "Permissions" protobuf.
|
||||
permissions blob not null default X''
|
||||
) without rowid;
|
||||
|
||||
create index user_session_uid on user_session (user_id);
|
||||
|
||||
create table signal (
|
||||
id integer primary key,
|
||||
|
||||
-- a uuid describing the originating object, such as the uuid of the camera
|
||||
-- for built-in motion detection. There will be a JSON interface for adding
|
||||
-- events; it will require this UUID to be supplied. An external uuid might
|
||||
-- indicate "my house security system's zone 23".
|
||||
source_uuid blob not null check (length(source_uuid) = 16),
|
||||
|
||||
-- a uuid describing the type of event. A registry (TBD) will list built-in
|
||||
-- supported types, such as "Hikvision on-camera motion detection", or
|
||||
-- "ONVIF on-camera motion detection". External programs can use their own
|
||||
-- uuids, such as "Elk security system watcher".
|
||||
type_uuid blob not null check (length(type_uuid) = 16),
|
||||
|
||||
-- a short human-readable description of the event to use in mouseovers or event
|
||||
-- lists, such as "driveway motion" or "front door open".
|
||||
short_name not null,
|
||||
|
||||
unique (source_uuid, type_uuid)
|
||||
);
|
||||
|
||||
-- e.g. "moving/still", "disarmed/away/stay", etc.
|
||||
-- TODO: just do a protobuf for each type? might be simpler, more flexible.
|
||||
create table signal_type_enum (
|
||||
type_uuid blob not null check (length(type_uuid) = 16),
|
||||
value integer not null check (value > 0 and value < 16),
|
||||
name text not null,
|
||||
|
||||
-- true/1 iff this signal value should be considered "motion" for directly associated cameras.
|
||||
motion int not null check (motion in (0, 1)) default 0,
|
||||
|
||||
color text
|
||||
);
|
||||
|
||||
-- Associations between event sources and cameras.
|
||||
-- For example, if two cameras have overlapping fields of view, they might be
|
||||
-- configured such that each camera is associated with both its own motion and
|
||||
-- the other camera's motion.
|
||||
create table signal_camera (
|
||||
signal_id integer references signal (id),
|
||||
camera_id integer references camera (id),
|
||||
|
||||
-- type:
|
||||
--
|
||||
-- 0 means direct association, as if the event source if the camera's own
|
||||
-- motion detection. Here are a couple ways this could be used:
|
||||
--
|
||||
-- * when viewing the camera, hotkeys to go to the start of the next or
|
||||
-- previous event should respect this event.
|
||||
-- * a list of events might include the recordings associated with the
|
||||
-- camera in the same timespan.
|
||||
--
|
||||
-- 1 means indirect association. A screen associated with the camera should
|
||||
-- given some indication of this event, but there should be no assumption
|
||||
-- that the camera will have a direct view of the event. For example, all
|
||||
-- cameras might be indirectly associated with a doorknob press. Cameras at
|
||||
-- the back of the house shouldn't be expected to have a direct view of this
|
||||
-- event, but motion events shortly afterward might warrant extra scrutiny.
|
||||
type integer not null,
|
||||
|
||||
primary key (signal_id, camera_id)
|
||||
) without rowid;
|
||||
|
||||
-- Changes to signals as of a given timestamp.
|
||||
create table signal_change (
|
||||
-- Event time, in 90 kHz units since 1970-01-01 00:00:00Z excluding leap seconds.
|
||||
time_90k integer primary key,
|
||||
|
||||
-- Changes at this timestamp.
|
||||
--
|
||||
-- A blob of varints representing a list of
|
||||
-- (signal number - next allowed, state) pairs, where signal number is
|
||||
-- non-decreasing. For example,
|
||||
-- input signals: 1 3 200 (must be sorted)
|
||||
-- delta: 1 1 196 (must be non-negative)
|
||||
-- states: 1 1 2
|
||||
-- varint: \x01 \x01 \x01 \x01 \xc4 \x01 \x02
|
||||
changes blob not null
|
||||
);
|
||||
|
||||
insert into version (id, unix_time, notes)
|
||||
values (5, cast(strftime('%s', 'now') as int), 'db creation');
|
||||
333
server/db/upgrade/v5_to_v6.rs
Normal file
333
server/db/upgrade/v5_to_v6.rs
Normal file
@@ -0,0 +1,333 @@
|
||||
// This file is part of Moonfire NVR, a security camera network video recorder.
|
||||
// Copyright (C) 2020 The Moonfire NVR Authors
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// In addition, as a special exception, the copyright holders give
|
||||
// permission to link the code of portions of this program with the
|
||||
// OpenSSL library under certain conditions as described in each
|
||||
// individual source file, and distribute linked combinations including
|
||||
// the two.
|
||||
//
|
||||
// You must obey the GNU General Public License in all respects for all
|
||||
// of the code used other than OpenSSL. If you modify file(s) with this
|
||||
// exception, you may extend this exception to your version of the
|
||||
// file(s), but you are not obligated to do so. If you do not wish to do
|
||||
// so, delete this exception statement from your version. If you delete
|
||||
// this exception statement from all source files in the program, then
|
||||
// also delete it here.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/// Upgrades a version 5 schema to a version 6 schema.
|
||||
|
||||
use byteorder::{BigEndian, ByteOrder, WriteBytesExt};
|
||||
use failure::{Error, ResultExt, bail, format_err};
|
||||
use h264_reader::avcc::AvcDecoderConfigurationRecord;
|
||||
use rusqlite::{named_params, params};
|
||||
use std::convert::{TryFrom, TryInto};
|
||||
|
||||
// Copied from src/h264.rs. h264 stuff really doesn't belong in the db crate, but we do what we
// must for schema upgrades.
const PIXEL_ASPECT_RATIOS: [((u16, u16), (u16, u16)); 4] = [
    ((320, 240), ( 4, 3)),
    ((352, 240), (40, 33)),
    ((640, 480), ( 4, 3)),
    ((704, 480), (40, 33)),
];

/// Returns the default pixel aspect ratio `(h_spacing, v_spacing)` for the
/// given frame dimensions, falling back to square pixels `(1, 1)` when the
/// dimensions aren't one of the well-known SD resolutions in
/// `PIXEL_ASPECT_RATIOS`.
fn default_pixel_aspect_ratio(width: u16, height: u16) -> (u16, u16) {
    let wanted = (width, height);
    PIXEL_ASPECT_RATIOS
        .iter()
        .find(|&&(dims, _pasp)| dims == wanted)
        .map(|&(_dims, pasp)| pasp)
        .unwrap_or((1, 1))
}
||||
|
||||
/// Parses the `data` blob of a `video_sample_entry` row (a serialized
/// ISO/IEC 14496-12 VisualSampleEntry) and returns the embedded
/// `AvcDecoderConfigurationRecord`.
///
/// Layout assumed here: bytes 4..8 hold the box type (`avc1`), bytes 86..90
/// the big-endian length of the `avcC` sub-box (including its own 8-byte
/// length+type header), bytes 90..94 its type, and bytes 94.. the
/// configuration record itself. The `avcC` box must be the final sub-box of
/// the sample entry; anything else is rejected.
fn parse(data: &[u8]) -> Result<AvcDecoderConfigurationRecord, Error> {
    if data.len() < 94 || &data[4..8] != b"avc1" || &data[90..94] != b"avcC" {
        bail!("data of len {} doesn't have an avcC", data.len());
    }
    let avcc_len = BigEndian::read_u32(&data[86..90]);
    if avcc_len < 8 { // length and type.
        bail!("invalid avcc len {}", avcc_len);
    }
    // End of the avcC box, measured from its length field at offset 86.
    let end_pos = 86 + usize::try_from(avcc_len)?;
    if end_pos != data.len() {
        bail!("expected avcC to be end of extradata; there are {} more bytes.",
              data.len() - end_pos);
    }
    // Skip the 8-byte avcC header (86..94); the record occupies the rest.
    AvcDecoderConfigurationRecord::try_from(&data[94..end_pos])
        .map_err(|e| format_err!("Bad AvcDecoderConfigurationRecord: {:?}", e))
}
|
||||
|
||||
pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
|
||||
// These create statements match the schema.sql when version 5 was the latest.
|
||||
tx.execute_batch(r#"
|
||||
alter table video_sample_entry rename to old_video_sample_entry;
|
||||
|
||||
create table video_sample_entry (
|
||||
id integer primary key,
|
||||
width integer not null check (width > 0),
|
||||
height integer not null check (height > 0),
|
||||
rfc6381_codec text not null,
|
||||
data blob not null check (length(data) > 86),
|
||||
pasp_h_spacing integer not null default 1 check (pasp_h_spacing > 0),
|
||||
pasp_v_spacing integer not null default 1 check (pasp_v_spacing > 0)
|
||||
);
|
||||
"#)?;
|
||||
|
||||
let mut insert = tx.prepare(r#"
|
||||
insert into video_sample_entry (id, width, height, rfc6381_codec, data,
|
||||
pasp_h_spacing, pasp_v_spacing)
|
||||
values (:id, :width, :height, :rfc6381_codec, :data,
|
||||
:pasp_h_spacing, :pasp_v_spacing)
|
||||
"#)?;
|
||||
|
||||
// Only insert still-referenced video sample entries. I've had problems with
|
||||
// no-longer-referenced ones (perhaps from some ancient, buggy version of Moonfire NVR) for
|
||||
// which avcc.create_context(()) fails.
|
||||
let mut stmt = tx.prepare(r#"
|
||||
select
|
||||
id,
|
||||
width,
|
||||
height,
|
||||
rfc6381_codec,
|
||||
data
|
||||
from
|
||||
old_video_sample_entry v
|
||||
where
|
||||
exists (
|
||||
select
|
||||
'x'
|
||||
from
|
||||
recording r
|
||||
where
|
||||
r.video_sample_entry_id = v.id)
|
||||
"#)?;
|
||||
let mut rows = stmt.query(params![])?;
|
||||
while let Some(row) = rows.next()? {
|
||||
let id: i32 = row.get(0)?;
|
||||
let width: u16 = row.get::<_, i32>(1)?.try_into()?;
|
||||
let height: u16 = row.get::<_, i32>(2)?.try_into()?;
|
||||
let rfc6381_codec: &str = row.get_raw_checked(3)?.as_str()?;
|
||||
let mut data: Vec<u8> = row.get(4)?;
|
||||
let avcc = parse(&data)?;
|
||||
if avcc.num_of_sequence_parameter_sets() != 1 {
|
||||
bail!("Multiple SPSs!");
|
||||
}
|
||||
let ctx = avcc.create_context(())
|
||||
.map_err(|e| format_err!("Can't load SPS+PPS for video_sample_entry_id {}: {:?}",
|
||||
id, e))?;
|
||||
let sps = ctx.sps_by_id(h264_reader::nal::pps::ParamSetId::from_u32(0).unwrap())
|
||||
.ok_or_else(|| format_err!("No SPS 0 for video_sample_entry_id {}", id))?;
|
||||
let pasp = sps.vui_parameters.as_ref()
|
||||
.and_then(|v| v.aspect_ratio_info.as_ref())
|
||||
.and_then(|a| a.clone().get())
|
||||
.unwrap_or_else(|| default_pixel_aspect_ratio(width, height));
|
||||
if pasp != (1, 1) {
|
||||
data.extend_from_slice(b"\x00\x00\x00\x10pasp"); // length + box name
|
||||
data.write_u32::<BigEndian>(pasp.0.into())?;
|
||||
data.write_u32::<BigEndian>(pasp.1.into())?;
|
||||
let len = data.len();
|
||||
BigEndian::write_u32(&mut data[0..4], u32::try_from(len)?);
|
||||
}
|
||||
|
||||
insert.execute_named(named_params!{
|
||||
":id": id,
|
||||
":width": width,
|
||||
":height": height,
|
||||
":rfc6381_codec": rfc6381_codec,
|
||||
":data": &data,
|
||||
":pasp_h_spacing": pasp.0,
|
||||
":pasp_v_spacing": pasp.1,
|
||||
})?;
|
||||
}
|
||||
tx.execute_batch(r#"
|
||||
alter table stream rename to old_stream;
|
||||
create table stream (
|
||||
id integer primary key,
|
||||
camera_id integer not null references camera (id),
|
||||
sample_file_dir_id integer references sample_file_dir (id),
|
||||
type text not null check (type in ('main', 'sub')),
|
||||
record integer not null check (record in (1, 0)),
|
||||
rtsp_url text not null,
|
||||
retain_bytes integer not null check (retain_bytes >= 0),
|
||||
flush_if_sec integer not null,
|
||||
cum_recordings integer not null check (cum_recordings >= 0),
|
||||
cum_media_duration_90k integer not null check (cum_media_duration_90k >= 0),
|
||||
cum_runs integer not null check (cum_runs >= 0),
|
||||
unique (camera_id, type)
|
||||
);
|
||||
insert into stream
|
||||
select
|
||||
s.id,
|
||||
s.camera_id,
|
||||
s.sample_file_dir_id,
|
||||
s.type,
|
||||
s.record,
|
||||
s.rtsp_url,
|
||||
s.retain_bytes,
|
||||
s.flush_if_sec,
|
||||
s.next_recording_id as cum_recordings,
|
||||
coalesce(sum(r.duration_90k), 0) as cum_duration_90k,
|
||||
coalesce(sum(case when r.run_offset = 0 then 1 else 0 end), 0) as cum_runs
|
||||
from
|
||||
old_stream s left join recording r on (s.id = r.stream_id)
|
||||
group by 1;
|
||||
|
||||
alter table recording rename to old_recording;
|
||||
create table recording (
|
||||
composite_id integer primary key,
|
||||
open_id integer not null,
|
||||
stream_id integer not null references stream (id),
|
||||
run_offset integer not null,
|
||||
flags integer not null,
|
||||
sample_file_bytes integer not null check (sample_file_bytes > 0),
|
||||
start_time_90k integer not null check (start_time_90k > 0),
|
||||
prev_media_duration_90k integer not null check (prev_media_duration_90k >= 0),
|
||||
prev_runs integer not null check (prev_runs >= 0),
|
||||
wall_duration_90k integer not null
|
||||
check (wall_duration_90k >= 0 and wall_duration_90k < 5*60*90000),
|
||||
media_duration_delta_90k integer not null,
|
||||
video_samples integer not null check (video_samples > 0),
|
||||
video_sync_samples integer not null check (video_sync_samples > 0),
|
||||
video_sample_entry_id integer references video_sample_entry (id),
|
||||
check (composite_id >> 32 = stream_id)
|
||||
);
|
||||
"#)?;
|
||||
|
||||
// SQLite added window functions in 3.25.0. macOS still ships SQLite 3.24.0 (no support).
|
||||
// Compute cumulative columns by hand.
|
||||
let mut cur_stream_id = None;
|
||||
let mut cum_duration_90k = 0;
|
||||
let mut cum_runs = 0;
|
||||
let mut stmt = tx.prepare(r#"
|
||||
select
|
||||
composite_id,
|
||||
open_id,
|
||||
stream_id,
|
||||
run_offset,
|
||||
flags,
|
||||
sample_file_bytes,
|
||||
start_time_90k,
|
||||
duration_90k,
|
||||
video_samples,
|
||||
video_sync_samples,
|
||||
video_sample_entry_id
|
||||
from
|
||||
old_recording
|
||||
order by composite_id
|
||||
"#)?;
|
||||
let mut insert = tx.prepare(r#"
|
||||
insert into recording (composite_id, open_id, stream_id, run_offset, flags,
|
||||
sample_file_bytes, start_time_90k, prev_media_duration_90k,
|
||||
prev_runs, wall_duration_90k, media_duration_delta_90k,
|
||||
video_samples, video_sync_samples, video_sample_entry_id)
|
||||
values (:composite_id, :open_id, :stream_id, :run_offset, :flags,
|
||||
:sample_file_bytes, :start_time_90k, :prev_media_duration_90k,
|
||||
:prev_runs, :wall_duration_90k, 0, :video_samples,
|
||||
:video_sync_samples, :video_sample_entry_id)
|
||||
"#)?;
|
||||
let mut rows = stmt.query(params![])?;
|
||||
while let Some(row) = rows.next()? {
|
||||
let composite_id: i64 = row.get(0)?;
|
||||
let open_id: i32 = row.get(1)?;
|
||||
let stream_id: i32 = row.get(2)?;
|
||||
let run_offset: i32 = row.get(3)?;
|
||||
let flags: i32 = row.get(4)?;
|
||||
let sample_file_bytes: i32 = row.get(5)?;
|
||||
let start_time_90k: i64 = row.get(6)?;
|
||||
let wall_duration_90k: i32 = row.get(7)?;
|
||||
let video_samples: i32 = row.get(8)?;
|
||||
let video_sync_samples: i32 = row.get(9)?;
|
||||
let video_sample_entry_id: i32 = row.get(10)?;
|
||||
if cur_stream_id != Some(stream_id) {
|
||||
cum_duration_90k = 0;
|
||||
cum_runs = 0;
|
||||
cur_stream_id = Some(stream_id);
|
||||
}
|
||||
insert.execute_named(named_params!{
|
||||
":composite_id": composite_id,
|
||||
":open_id": open_id,
|
||||
":stream_id": stream_id,
|
||||
":run_offset": run_offset,
|
||||
":flags": flags,
|
||||
":sample_file_bytes": sample_file_bytes,
|
||||
":start_time_90k": start_time_90k,
|
||||
":prev_media_duration_90k": cum_duration_90k,
|
||||
":prev_runs": cum_runs,
|
||||
":wall_duration_90k": wall_duration_90k,
|
||||
":video_samples": video_samples,
|
||||
":video_sync_samples": video_sync_samples,
|
||||
":video_sample_entry_id": video_sample_entry_id,
|
||||
}).with_context(|_| format!("Unable to insert composite_id {}", composite_id))?;
|
||||
cum_duration_90k += i64::from(wall_duration_90k);
|
||||
cum_runs += if run_offset == 0 { 1 } else { 0 };
|
||||
}
|
||||
tx.execute_batch(r#"
|
||||
drop index recording_cover;
|
||||
create index recording_cover on recording (
|
||||
stream_id,
|
||||
start_time_90k,
|
||||
open_id,
|
||||
wall_duration_90k,
|
||||
media_duration_delta_90k,
|
||||
video_samples,
|
||||
video_sync_samples,
|
||||
video_sample_entry_id,
|
||||
sample_file_bytes,
|
||||
run_offset,
|
||||
flags
|
||||
);
|
||||
|
||||
alter table recording_integrity rename to old_recording_integrity;
|
||||
create table recording_integrity (
|
||||
composite_id integer primary key references recording (composite_id),
|
||||
local_time_delta_90k integer,
|
||||
local_time_since_open_90k integer,
|
||||
wall_time_delta_90k integer,
|
||||
sample_file_blake3 blob check (length(sample_file_blake3) <= 32)
|
||||
);
|
||||
insert into recording_integrity
|
||||
select
|
||||
composite_id,
|
||||
local_time_delta_90k,
|
||||
local_time_since_open_90k,
|
||||
wall_time_delta_90k,
|
||||
null
|
||||
from
|
||||
old_recording_integrity;
|
||||
|
||||
alter table recording_playback rename to old_recording_playback;
|
||||
create table recording_playback (
|
||||
composite_id integer primary key references recording (composite_id),
|
||||
video_index blob not null check (length(video_index) > 0)
|
||||
);
|
||||
insert into recording_playback select * from old_recording_playback;
|
||||
|
||||
drop table old_recording_playback;
|
||||
drop table old_recording_integrity;
|
||||
drop table old_recording;
|
||||
drop table old_stream;
|
||||
drop table old_video_sample_entry;
|
||||
|
||||
update user_session
|
||||
set
|
||||
revocation_reason = 1,
|
||||
revocation_reason_detail = 'Blake2b->Blake3 upgrade'
|
||||
where
|
||||
revocation_reason is null;
|
||||
"#)?;
|
||||
Ok(())
|
||||
}
|
||||
1275
server/db/writer.rs
Normal file
1275
server/db/writer.rs
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user