// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2016 The Moonfire NVR Authors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

use crate::coding::{append_varint32, decode_varint32, unzigzag32, zigzag32};
use crate::db;
use failure::{Error, bail, format_err};
use lazy_static::lazy_static;
use log::trace;
use regex::Regex;
use std::ops;
use std::fmt;
use std::ops::Range;
use std::str::FromStr;
use time;

pub const TIME_UNITS_PER_SEC: i64 = 90000;
pub const DESIRED_RECORDING_DURATION: i64 = 60 * TIME_UNITS_PER_SEC;
pub const MAX_RECORDING_DURATION: i64 = 5 * 60 * TIME_UNITS_PER_SEC;

/// A time specified as 90,000ths of a second since 1970-01-01 00:00:00 UTC.
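///
/// For illustration, a small sketch of the unit (illustrative only; not compiled as a doctest):
///
/// ```ignore
/// // One second after the epoch is 90,000 90 kHz units.
/// assert_eq!(Time::new(time::Timespec{sec: 1, nsec: 0}), Time(90_000));
/// ```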
#[derive(Clone, Copy, Default, Eq, Ord, PartialEq, PartialOrd)]
pub struct Time(pub i64);

impl Time {
    pub fn new(tm: time::Timespec) -> Self {
        Time(tm.sec * TIME_UNITS_PER_SEC + tm.nsec as i64 * TIME_UNITS_PER_SEC / 1_000_000_000)
    }

    pub const fn min_value() -> Self { Time(i64::min_value()) }
    pub const fn max_value() -> Self { Time(i64::max_value()) }

    /// Parses a time as either 90,000ths of a second since epoch or an RFC 3339-like string.
    ///
    /// The former is 90,000ths of a second since 1970-01-01T00:00:00 UTC, excluding leap seconds.
    ///
    /// The latter is a string such as `2006-01-02T15:04:05`, followed by an optional 90,000ths of
    /// a second such as `:00001`, followed by an optional time zone offset such as `Z` or
    /// `-07:00`. A missing fraction is assumed to be 0. A missing time zone offset implies the
    /// local time zone.
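    ///
    /// # Example
    ///
    /// A brief sketch of both accepted forms, using values from the tests below (not compiled
    /// as a doctest):
    ///
    /// ```ignore
    /// // Raw 90,000ths of a second since epoch.
    /// assert_eq!(Time::parse("102261550050000").unwrap(), Time(102261550050000));
    /// // The same instant as an RFC 3339-like string.
    /// assert_eq!(Time::parse("2006-01-02T15:04:05-07:00").unwrap(), Time(102261550050000));
    /// ```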
    pub fn parse(s: &str) -> Result<Self, Error> {
        lazy_static! {
            static ref RE: Regex = Regex::new(r#"(?x)
                ^
                ([0-9]{4})-([0-9]{2})-([0-9]{2})T([0-9]{2}):([0-9]{2}):([0-9]{2})
                (?::([0-9]{5}))?
                (Z|[+-]([0-9]{2}):([0-9]{2}))?
                $"#).unwrap();
        }

        // First try parsing as 90,000ths of a second since epoch.
        match i64::from_str(s) {
            Ok(i) => return Ok(Time(i)),
            Err(_) => {},
        }

        // If that failed, parse as a time string or bust.
        let c = RE.captures(s).ok_or_else(|| format_err!("unparseable time {:?}", s))?;
        let mut tm = time::Tm{
            tm_sec: i32::from_str(c.get(6).unwrap().as_str()).unwrap(),
            tm_min: i32::from_str(c.get(5).unwrap().as_str()).unwrap(),
            tm_hour: i32::from_str(c.get(4).unwrap().as_str()).unwrap(),
            tm_mday: i32::from_str(c.get(3).unwrap().as_str()).unwrap(),
            tm_mon: i32::from_str(c.get(2).unwrap().as_str()).unwrap(),
            tm_year: i32::from_str(c.get(1).unwrap().as_str()).unwrap(),
            tm_wday: 0,
            tm_yday: 0,
            tm_isdst: -1,
            tm_utcoff: 0,
            tm_nsec: 0,
        };
        if tm.tm_mon == 0 {
            bail!("time {:?} has month 0", s);
        }
        tm.tm_mon -= 1;
        if tm.tm_year < 1900 {
            bail!("time {:?} has year before 1900", s);
        }
        tm.tm_year -= 1900;

        // The time crate doesn't use tm_utcoff properly; it just calls timegm() if tm_utcoff == 0,
        // mktime() otherwise. If a zone is specified, use the timegm path and a manual offset.
        // If no zone is specified, use the tm_utcoff path. This is pretty lame, but follow the
        // chrono crate's lead and just use 0 or 1 to choose between these functions.
        let sec = if let Some(zone) = c.get(8) {
            tm.to_timespec().sec + if zone.as_str() == "Z" {
                0
            } else {
                let off = i64::from_str(c.get(9).unwrap().as_str()).unwrap() * 3600 +
                          i64::from_str(c.get(10).unwrap().as_str()).unwrap() * 60;
                if zone.as_str().as_bytes()[0] == b'-' { off } else { -off }
            }
        } else {
            tm.tm_utcoff = 1;
            tm.to_timespec().sec
        };
        let fraction = if let Some(f) = c.get(7) { i64::from_str(f.as_str()).unwrap() } else { 0 };
        Ok(Time(sec * TIME_UNITS_PER_SEC + fraction))
    }

    /// Converts to unix seconds by the floor method (rounding down).
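    ///
    /// A small sketch (illustrative only; not compiled as a doctest):
    ///
    /// ```ignore
    /// assert_eq!(Time(3 * TIME_UNITS_PER_SEC + 1).unix_seconds(), 3);
    /// ```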
    pub fn unix_seconds(&self) -> i64 { self.0 / TIME_UNITS_PER_SEC }
}

impl ops::Sub for Time {
    type Output = Duration;
    fn sub(self, rhs: Time) -> Duration { Duration(self.0 - rhs.0) }
}

impl ops::AddAssign<Duration> for Time {
    fn add_assign(&mut self, rhs: Duration) { self.0 += rhs.0 }
}

impl ops::Add<Duration> for Time {
    type Output = Time;
    fn add(self, rhs: Duration) -> Time { Time(self.0 + rhs.0) }
}

impl ops::Sub<Duration> for Time {
    type Output = Time;
    fn sub(self, rhs: Duration) -> Time { Time(self.0 - rhs.0) }
}

impl fmt::Debug for Time {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Write both the raw and display forms.
        write!(f, "{} /* {} */", self.0, self)
    }
}

impl fmt::Display for Time {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let tm = time::at(time::Timespec{sec: self.0 / TIME_UNITS_PER_SEC, nsec: 0});
        let zone_minutes = tm.tm_utcoff.abs() / 60;
        write!(f, "{}:{:05}{}{:02}:{:02}", tm.strftime("%FT%T").or_else(|_| Err(fmt::Error))?,
               self.0 % TIME_UNITS_PER_SEC,
               if tm.tm_utcoff > 0 { '+' } else { '-' }, zone_minutes / 60, zone_minutes % 60)
    }
}

/// A duration specified in 1/90,000ths of a second.
/// Durations are typically non-negative, but a `db::CameraDayValue::duration` may be negative.
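///
/// A hedged sketch of the human-readable `Display` form, using values from the tests below
/// (not compiled as a doctest):
///
/// ```ignore
/// assert_eq!(Duration(3600 * TIME_UNITS_PER_SEC).to_string(), "1 hour");
/// assert_eq!(Duration(61 * TIME_UNITS_PER_SEC).to_string(), "1 minute 1 second");
/// ```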
#[derive(Clone, Copy, Debug, Default, Eq, Ord, PartialEq, PartialOrd)]
pub struct Duration(pub i64);

impl Duration {
    pub fn to_tm_duration(&self) -> time::Duration {
        time::Duration::nanoseconds(self.0 * 100000 / 9)
    }
}

impl fmt::Display for Duration {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut seconds = self.0 / TIME_UNITS_PER_SEC;
        const MINUTE_IN_SECONDS: i64 = 60;
        const HOUR_IN_SECONDS: i64 = 60 * MINUTE_IN_SECONDS;
        const DAY_IN_SECONDS: i64 = 24 * HOUR_IN_SECONDS;
        let days = seconds / DAY_IN_SECONDS;
        seconds %= DAY_IN_SECONDS;
        let hours = seconds / HOUR_IN_SECONDS;
        seconds %= HOUR_IN_SECONDS;
        let minutes = seconds / MINUTE_IN_SECONDS;
        seconds %= MINUTE_IN_SECONDS;
        let mut have_written = if days > 0 {
            write!(f, "{} day{}", days, if days == 1 { "" } else { "s" })?;
            true
        } else {
            false
        };
        if hours > 0 {
            write!(f, "{}{} hour{}", if have_written { " " } else { "" },
                   hours, if hours == 1 { "" } else { "s" })?;
            have_written = true;
        }
        if minutes > 0 {
            write!(f, "{}{} minute{}", if have_written { " " } else { "" },
                   minutes, if minutes == 1 { "" } else { "s" })?;
            have_written = true;
        }
        if seconds > 0 || !have_written {
            write!(f, "{}{} second{}", if have_written { " " } else { "" },
                   seconds, if seconds == 1 { "" } else { "s" })?;
        }
        Ok(())
    }
}

impl ops::Add for Duration {
    type Output = Duration;
    fn add(self, rhs: Duration) -> Duration { Duration(self.0 + rhs.0) }
}

impl ops::AddAssign for Duration {
    fn add_assign(&mut self, rhs: Duration) { self.0 += rhs.0 }
}

impl ops::SubAssign for Duration {
    fn sub_assign(&mut self, rhs: Duration) { self.0 -= rhs.0 }
}

/// An iterator through a sample index.
/// Initially invalid; call `next()` before each read.
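///
/// A rough sketch of the intended calling pattern (`video_index` is a hypothetical encoded
/// byte slice; not compiled as a doctest):
///
/// ```ignore
/// let mut it = SampleIndexIterator::new();
/// while it.next(video_index)? {
///     // it.pos, it.start_90k, it.duration_90k, it.bytes, and it.is_key() now describe
///     // the current sample.
/// }
/// ```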
#[derive(Clone, Copy, Debug)]
pub struct SampleIndexIterator {
    /// The index byte position of the next sample to read (low 31 bits) and if the current
    /// sample is a key frame (high bit).
    i_and_is_key: u32,

    /// The starting data byte position of this sample within the segment.
    pub pos: i32,

    /// The starting time of this sample within the segment (in 90 kHz units).
    pub start_90k: i32,

    /// The duration of this sample (in 90 kHz units).
    pub duration_90k: i32,

    /// The byte length of this frame.
    pub bytes: i32,

    /// The byte length of the last frame of the "other" type: if this one is key, the last
    /// non-key; if this one is non-key, the last key.
    bytes_other: i32,
}

impl SampleIndexIterator {
    pub fn new() -> SampleIndexIterator {
        SampleIndexIterator{i_and_is_key: 0,
                            pos: 0,
                            start_90k: 0,
                            duration_90k: 0,
                            bytes: 0,
                            bytes_other: 0}
    }

    pub fn next(&mut self, data: &[u8]) -> Result<bool, Error> {
        self.pos += self.bytes;
        self.start_90k += self.duration_90k;
        let i = (self.i_and_is_key & 0x7FFF_FFFF) as usize;
        if i == data.len() {
            return Ok(false)
        }
        let (raw1, i1) = match decode_varint32(data, i) {
            Ok(tuple) => tuple,
            Err(()) => bail!("bad varint 1 at offset {}", i),
        };
        let (raw2, i2) = match decode_varint32(data, i1) {
            Ok(tuple) => tuple,
            Err(()) => bail!("bad varint 2 at offset {}", i1),
        };
        let duration_90k_delta = unzigzag32(raw1 >> 1);
        self.duration_90k += duration_90k_delta;
        if self.duration_90k < 0 {
            bail!("negative duration {} after applying delta {}",
                  self.duration_90k, duration_90k_delta);
        }
        if self.duration_90k == 0 && data.len() > i2 {
            bail!("zero duration only allowed at end; have {} bytes left", data.len() - i2);
        }
        let (prev_bytes_key, prev_bytes_nonkey) = match self.is_key() {
            true => (self.bytes, self.bytes_other),
            false => (self.bytes_other, self.bytes),
        };
        self.i_and_is_key = (i2 as u32) | (((raw1 & 1) as u32) << 31);
        let bytes_delta = unzigzag32(raw2);
        if self.is_key() {
            self.bytes = prev_bytes_key + bytes_delta;
            self.bytes_other = prev_bytes_nonkey;
        } else {
            self.bytes = prev_bytes_nonkey + bytes_delta;
            self.bytes_other = prev_bytes_key;
        }
        if self.bytes <= 0 {
            bail!("non-positive bytes {} after applying delta {} to key={} frame at ts {}",
                  self.bytes, bytes_delta, self.is_key(), self.start_90k);
        }
        Ok(true)
    }

    pub fn uninitialized(&self) -> bool { self.i_and_is_key == 0 }
    pub fn is_key(&self) -> bool { (self.i_and_is_key & 0x8000_0000) != 0 }
}
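
/// Encodes a sample index (the `design/schema.md` example is exercised by
/// `tests::test_encode_example` below), appending zigzag-encoded varint deltas to
/// `r.video_index` and updating the recording's running totals.
///
/// A rough usage sketch mirroring that test (not compiled as a doctest):
///
/// ```ignore
/// let mut r = db::RecordingToInsert::default();
/// let mut e = SampleIndexEncoder::new();
/// e.add_sample(10, 1000, true, &mut r)?;  // duration_90k, bytes, is_key
/// e.add_sample(9, 10, false, &mut r)?;
/// // r.video_index now holds the encoded index; r.duration_90k etc. reflect both samples.
/// ```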
#[derive(Debug)]
pub struct SampleIndexEncoder {
    prev_duration_90k: i32,
    prev_bytes_key: i32,
    prev_bytes_nonkey: i32,
}

impl SampleIndexEncoder {
    pub fn new() -> Self {
        SampleIndexEncoder {
            prev_duration_90k: 0,
            prev_bytes_key: 0,
            prev_bytes_nonkey: 0,
        }
    }

    pub fn add_sample(&mut self, duration_90k: i32, bytes: i32, is_key: bool,
                      r: &mut db::RecordingToInsert) -> Result<(), Error> {
        let duration_delta = duration_90k - self.prev_duration_90k;
        self.prev_duration_90k = duration_90k;
        let new_duration_90k = r.duration_90k + duration_90k;
        if new_duration_90k as i64 > MAX_RECORDING_DURATION {
            bail!("Duration {} exceeds maximum {}", new_duration_90k, MAX_RECORDING_DURATION);
        }
        r.duration_90k += duration_90k;
        r.sample_file_bytes += bytes;
        r.video_samples += 1;
        let bytes_delta = bytes - if is_key {
            let prev = self.prev_bytes_key;
            r.video_sync_samples += 1;
            self.prev_bytes_key = bytes;
            prev
        } else {
            let prev = self.prev_bytes_nonkey;
            self.prev_bytes_nonkey = bytes;
            prev
        };
        append_varint32((zigzag32(duration_delta) << 1) | (is_key as u32), &mut r.video_index);
        append_varint32(zigzag32(bytes_delta), &mut r.video_index);
        Ok(())
    }
}

/// A segment represents a view of some or all of a single recording, starting from a key frame.
/// Used by the `Mp4FileBuilder` class to splice together recordings into a single virtual .mp4.
#[derive(Debug)]
pub struct Segment {
    pub id: db::CompositeId,
    pub open_id: u32,
    pub start: Time,

    /// An iterator positioned at the beginning of the segment, or `None`. Most segments are
    /// positioned at the beginning of the recording, so this is an optional box to shrink a long
    /// list of segments. `None` is equivalent to `SampleIndexIterator::new()`.
    begin: Option<Box<SampleIndexIterator>>,
    pub file_end: i32,
    pub desired_range_90k: Range<i32>,
    pub frames: u16,
    pub key_frames: u16,
    video_sample_entry_id_and_trailing_zero: i32,
}

impl Segment {
    /// Creates a segment.
    ///
    /// `desired_range_90k` represents the desired range of the segment relative to the start of
    /// the recording. The actual range will start at the first key frame at or before the
    /// desired start time. (The caller is responsible for creating an edit list to skip the
    /// undesired portion.) It will end at the first frame after the desired range (unless the
    /// desired range extends beyond the recording). (Likewise, the caller is responsible for
    /// trimming the final frame's duration if desired.)
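    ///
    /// A hedged sketch of the calling pattern used in the tests below (hypothetical `db` and
    /// `row` values; not compiled as a doctest):
    ///
    /// ```ignore
    /// // Ask for frames covering the recording's first two seconds (in 90 kHz units).
    /// let segment = Segment::new(&db.lock(), &row, 0 .. 2 * TIME_UNITS_PER_SEC as i32)?;
    /// ```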
    pub fn new(db: &db::LockedDatabase,
               recording: &db::ListRecordingsRow,
               desired_range_90k: Range<i32>) -> Result<Segment, Error> {
        let mut self_ = Segment {
            id: recording.id,
            open_id: recording.open_id,
            start: recording.start,
            begin: None,
            file_end: recording.sample_file_bytes,
            desired_range_90k: desired_range_90k,
            frames: recording.video_samples as u16,
            key_frames: recording.video_sync_samples as u16,
            video_sample_entry_id_and_trailing_zero:
                recording.video_sample_entry_id |
                ((((recording.flags & db::RecordingFlags::TrailingZero as i32) != 0) as i32) << 31),
        };

        if self_.desired_range_90k.start > self_.desired_range_90k.end ||
           self_.desired_range_90k.end > recording.duration_90k {
            bail!("desired range [{}, {}) invalid for recording of length {}",
                  self_.desired_range_90k.start, self_.desired_range_90k.end,
                  recording.duration_90k);
        }

        if self_.desired_range_90k.start == 0 &&
           self_.desired_range_90k.end == recording.duration_90k {
            // Fast path. Existing entry is fine.
            trace!("recording::Segment::new fast path, recording={:#?}", recording);
            return Ok(self_)
        }

        // Slow path. Need to iterate through the index.
        trace!("recording::Segment::new slow path, desired_range_90k={:?}, recording={:#?}",
               self_.desired_range_90k, recording);
        db.with_recording_playback(self_.id, &mut |playback| {
            let mut begin = Box::new(SampleIndexIterator::new());
            let data = &(&playback).video_index;
            let mut it = SampleIndexIterator::new();
            if !it.next(data)? {
                bail!("no index");
            }
            if !it.is_key() {
                bail!("not key frame");
            }

            // Stop when hitting a frame with this start time.
            // Going until the end of the recording is special-cased because there can be a trailing
            // frame of zero duration. It's unclear exactly how this should be handled, but let's
            // include it for consistency with the fast path. It'd be bizarre to have it included or
            // not based on desired_range_90k.start.
            let end_90k = if self_.desired_range_90k.end == recording.duration_90k {
                i32::max_value()
            } else {
                self_.desired_range_90k.end
            };

            loop {
                if it.start_90k <= self_.desired_range_90k.start && it.is_key() {
                    // new start candidate.
                    *begin = it;
                    self_.frames = 0;
                    self_.key_frames = 0;
                }
                if it.start_90k >= end_90k && self_.frames > 0 {
                    break;
                }
                self_.frames += 1;
                self_.key_frames += it.is_key() as u16;
                if !it.next(data)? {
                    break;
                }
            }
            self_.begin = Some(begin);
            self_.file_end = it.pos;
            self_.video_sample_entry_id_and_trailing_zero =
                recording.video_sample_entry_id |
                (((it.duration_90k == 0) as i32) << 31);
            Ok(())
        })?;
        Ok(self_)
    }

    pub fn video_sample_entry_id(&self) -> i32 {
        self.video_sample_entry_id_and_trailing_zero & 0x7FFFFFFF
    }

    pub fn have_trailing_zero(&self) -> bool { self.video_sample_entry_id_and_trailing_zero < 0 }

    /// Returns the byte range within the sample file of data associated with this segment.
    pub fn sample_file_range(&self) -> Range<u64> {
        self.begin.as_ref().map(|b| b.pos as u64).unwrap_or(0) .. self.file_end as u64
    }

    /// Returns the actual start time as described in `new`.
    pub fn actual_start_90k(&self) -> i32 { self.begin.as_ref().map(|b| b.start_90k).unwrap_or(0) }

    /// Iterates through each frame in the segment.
    /// Must be called without the database lock held; retrieves video index from the cache.
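    ///
    /// A rough sketch of the calling pattern, mirroring the `tests::get_frames` helper below
    /// (hypothetical `db` and `segment` values; not compiled as a doctest):
    ///
    /// ```ignore
    /// db.lock().with_recording_playback(segment.id, &mut |playback| {
    ///     segment.foreach(playback, |it| {
    ///         // Use it.pos, it.bytes, it.duration_90k, ... here.
    ///         Ok(())
    ///     })
    /// })?;
    /// ```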
    pub fn foreach<F>(&self, playback: &db::RecordingPlayback, mut f: F) -> Result<(), Error>
    where F: FnMut(&SampleIndexIterator) -> Result<(), Error> {
        trace!("foreach on recording {}: {} frames, actual_start_90k: {}",
               self.id, self.frames, self.actual_start_90k());
        let data = &(&playback).video_index;
        let mut it = match self.begin {
            Some(ref b) => **b,
            None => SampleIndexIterator::new(),
        };
        if it.uninitialized() {
            if !it.next(data)? {
                bail!("recording {}: no frames", self.id);
            }
            if !it.is_key() {
                bail!("recording {}: doesn't start with key frame", self.id);
            }
        }
        let mut have_frame = true;
        let mut key_frame = 0;

        for i in 0 .. self.frames {
            if !have_frame {
                bail!("recording {}: expected {} frames, found only {}", self.id, self.frames, i+1);
            }
            if it.is_key() {
                key_frame += 1;
                if key_frame > self.key_frames {
                    bail!("recording {}: more than expected {} key frames",
                          self.id, self.key_frames);
                }
            }

            // Note: this inner loop avoids ? for performance. Don't change these lines without
            // reading https://github.com/rust-lang/rust/issues/37939 and running
            // mp4::bench::build_index.
            if let Err(e) = f(&it) {
                return Err(e);
            }
            have_frame = match it.next(data) {
                Err(e) => return Err(e),
                Ok(hf) => hf,
            };
        }
        if key_frame < self.key_frames {
            bail!("recording {}: expected {} key frames, found only {}",
                  self.id, self.key_frames, key_frame);
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use base::clock::RealClocks;
    use super::*;
    use crate::testutil::{self, TestDb};

    #[test]
    fn test_parse_time() {
        testutil::init();
        let tests = &[
            ("2006-01-02T15:04:05-07:00", 102261550050000),
            ("2006-01-02T15:04:05:00001-07:00", 102261550050001),
            ("2006-01-02T15:04:05-08:00", 102261874050000),
            ("2006-01-02T15:04:05", 102261874050000), // implied -08:00
            ("2006-01-02T15:04:05:00001", 102261874050001), // implied -08:00
            ("2006-01-02T15:04:05-00:00", 102259282050000),
            ("2006-01-02T15:04:05Z", 102259282050000),
            ("102261550050000", 102261550050000),
        ];
        for test in tests {
            assert_eq!(test.1, Time::parse(test.0).unwrap().0, "parsing {}", test.0);
        }
    }

    #[test]
    fn test_format_time() {
        testutil::init();
        assert_eq!("2006-01-02T15:04:05:00000-08:00", format!("{}", Time(102261874050000)));
    }

    #[test]
    fn test_display_duration() {
        testutil::init();
        let tests = &[
            // (output, seconds)
            ("0 seconds", 0),
            ("1 second", 1),
            ("1 minute", 60),
            ("1 minute 1 second", 61),
            ("2 minutes", 120),
            ("1 hour", 3600),
            ("1 hour 1 minute", 3660),
            ("2 hours", 7200),
            ("1 day", 86400),
            ("1 day 1 hour", 86400 + 3600),
            ("2 days", 2 * 86400),
        ];
        for test in tests {
            assert_eq!(test.0, format!("{}", Duration(test.1 * TIME_UNITS_PER_SEC)));
        }
    }

    /// Tests encoding the example from design/schema.md.
    #[test]
    fn test_encode_example() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut e = SampleIndexEncoder::new();
        e.add_sample(10, 1000, true, &mut r).unwrap();
        e.add_sample(9, 10, false, &mut r).unwrap();
        e.add_sample(11, 15, false, &mut r).unwrap();
        e.add_sample(10, 12, false, &mut r).unwrap();
        e.add_sample(10, 1050, true, &mut r).unwrap();
        assert_eq!(r.video_index, b"\x29\xd0\x0f\x02\x14\x08\x0a\x02\x05\x01\x64");
        assert_eq!(10 + 9 + 11 + 10 + 10, r.duration_90k);
        assert_eq!(5, r.video_samples);
        assert_eq!(2, r.video_sync_samples);
    }

    /// Tests a round trip from `SampleIndexEncoder` to `SampleIndexIterator`.
    #[test]
    fn test_round_trip() {
        testutil::init();
        #[derive(Debug, PartialEq, Eq)]
        struct Sample {
            duration_90k: i32,
            bytes: i32,
            is_key: bool,
        }
        let samples = [
            Sample{duration_90k: 10, bytes: 30000, is_key: true},
            Sample{duration_90k: 9, bytes: 1000, is_key: false},
            Sample{duration_90k: 11, bytes: 1100, is_key: false},
            Sample{duration_90k: 18, bytes: 31000, is_key: true},
            Sample{duration_90k: 0, bytes: 1000, is_key: false},
        ];
        let mut r = db::RecordingToInsert::default();
        let mut e = SampleIndexEncoder::new();
        for sample in &samples {
            e.add_sample(sample.duration_90k, sample.bytes, sample.is_key, &mut r).unwrap();
        }
        let mut it = SampleIndexIterator::new();
        for sample in &samples {
            assert!(it.next(&r.video_index).unwrap());
            assert_eq!(sample,
                       &Sample{duration_90k: it.duration_90k,
                               bytes: it.bytes,
                               is_key: it.is_key()});
        }
        assert!(!it.next(&r.video_index).unwrap());
    }

    /// Tests that `SampleIndexIterator` spots several classes of errors.
    /// TODO: test and fix overflow cases.
    #[test]
    fn test_iterator_errors() {
        testutil::init();
        struct Test {
            encoded: &'static [u8],
            err: &'static str,
        }
        let tests = [
            Test{encoded: b"\x80", err: "bad varint 1 at offset 0"},
            Test{encoded: b"\x00\x80", err: "bad varint 2 at offset 1"},
            Test{encoded: b"\x00\x02\x00\x00",
                 err: "zero duration only allowed at end; have 2 bytes left"},
            Test{encoded: b"\x02\x02",
                 err: "negative duration -1 after applying delta -1"},
            Test{encoded: b"\x04\x00",
                 err: "non-positive bytes 0 after applying delta 0 to key=false frame at ts 0"},
        ];
        for test in &tests {
            let mut it = SampleIndexIterator::new();
            assert_eq!(it.next(test.encoded).unwrap_err().to_string(), test.err);
        }
    }

    fn get_frames<F, T>(db: &db::Database, segment: &Segment, f: F) -> Vec<T>
    where F: Fn(&SampleIndexIterator) -> T {
        let mut v = Vec::new();
        db.lock().with_recording_playback(segment.id, &mut |playback| {
            segment.foreach(playback, |it| { v.push(f(it)); Ok(()) })
        }).unwrap();
        v
    }

    /// Tests that a `Segment` correctly can clip at the beginning and end.
    /// This is a simpler case; all sync samples means we can start on any frame.
    #[test]
    fn test_segment_clipping_with_all_sync() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        for i in 1..6 {
            let duration_90k = 2 * i;
            let bytes = 3 * i;
            encoder.add_sample(duration_90k, bytes, true, &mut r).unwrap();
        }
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        // Time range [2, 2 + 4 + 6 + 8) means the 2nd, 3rd, 4th samples should be
        // included.
        let segment = Segment::new(&db.db.lock(), &row, 2 .. 2+4+6+8).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.duration_90k), &[4, 6, 8]);
    }

    /// Half sync frames means starting from the last sync frame <= desired point.
    #[test]
    fn test_segment_clipping_with_half_sync() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        for i in 1..6 {
            let duration_90k = 2 * i;
            let bytes = 3 * i;
            encoder.add_sample(duration_90k, bytes, (i % 2) == 1, &mut r).unwrap();
        }
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        // Time range [2 + 4 + 6, 2 + 4 + 6 + 8) means the 4th sample should be included.
        // The 3rd also gets pulled in because it is a sync frame and the 4th is not.
        let segment = Segment::new(&db.db.lock(), &row, 2+4+6 .. 2+4+6+8).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.duration_90k), &[6, 8]);
    }

    #[test]
    fn test_segment_clipping_with_trailing_zero() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        encoder.add_sample(1, 1, true, &mut r).unwrap();
        encoder.add_sample(1, 2, true, &mut r).unwrap();
        encoder.add_sample(0, 3, true, &mut r).unwrap();
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        let segment = Segment::new(&db.db.lock(), &row, 1 .. 2).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.bytes), &[2, 3]);
    }

    /// Even if the desired duration is 0, there should still be a frame.
    #[test]
    fn test_segment_zero_desired_duration() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        encoder.add_sample(1, 1, true, &mut r).unwrap();
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        let segment = Segment::new(&db.db.lock(), &row, 0 .. 0).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.bytes), &[1]);
    }

    /// Test a `Segment` which uses the whole recording.
    /// This takes a fast path which skips scanning the index in `new()`.
    #[test]
    fn test_segment_fast_path() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        for i in 1..6 {
            let duration_90k = 2 * i;
            let bytes = 3 * i;
            encoder.add_sample(duration_90k, bytes, (i % 2) == 1, &mut r).unwrap();
        }
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        let segment = Segment::new(&db.db.lock(), &row, 0 .. 2+4+6+8+10).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.duration_90k), &[2, 4, 6, 8, 10]);
    }

    #[test]
    fn test_segment_fast_path_with_trailing_zero() {
        testutil::init();
        let mut r = db::RecordingToInsert::default();
        let mut encoder = SampleIndexEncoder::new();
        encoder.add_sample(1, 1, true, &mut r).unwrap();
        encoder.add_sample(1, 2, true, &mut r).unwrap();
        encoder.add_sample(0, 3, true, &mut r).unwrap();
        let db = TestDb::new(RealClocks {});
        let row = db.insert_recording_from_encoder(r);
        let segment = Segment::new(&db.db.lock(), &row, 0 .. 2).unwrap();
        assert_eq!(&get_frames(&db.db, &segment, |it| it.bytes), &[1, 2, 3]);
    }

    // TODO: test segment error cases involving mismatch between row frames/key_frames and index.
}

#[cfg(all(test, feature="nightly"))]
mod bench {
    extern crate test;

    use super::*;

    /// Benchmarks the decoder, which is performance-critical for .mp4 serving.
    #[bench]
    fn bench_decoder(b: &mut test::Bencher) {
        let data = include_bytes!("testdata/video_sample_index.bin");
        b.bytes = data.len() as u64;
        b.iter(|| {
            let mut it = SampleIndexIterator::new();
            while it.next(data).unwrap() {}
            assert_eq!(30104460, it.pos);
            assert_eq!(5399985, it.start_90k);
        });
    }
}