// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2020 The Moonfire NVR Authors; see AUTHORS and LICENSE.txt.
// SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception.

use crate::stream;
use base::clock::{Clocks, TimerGuard};
use db::{dir, recording, writer, Camera, Database, Stream};
use failure::{bail, format_err, Error};
use log::{debug, info, trace, warn};
use std::result::Result;
use std::str::FromStr;
use std::sync::Arc;
use url::Url;

pub static ROTATE_INTERVAL_SEC: i64 = 60;

/// Common state that can be used by multiple `Streamer` instances.
pub struct Environment<'a, 'tmp, C>
where
    C: Clocks + Clone,
{
    pub opener: &'a dyn stream::Opener,
    pub db: &'tmp Arc<Database<C>>,
    pub shutdown_rx: &'tmp base::shutdown::Receiver,
}

/// Connects to a given RTSP stream and writes recordings to the database via [`writer::Writer`].
/// `Streamer` is meant to be long-lived; it will sleep and retry after each failure.
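///
/// A rough usage sketch (illustrative only, not compiled; the names mirror the setup in
/// `tests::basic` rather than any particular caller):
///
/// ```text
/// let env = Environment { opener: &opener, db: &db, shutdown_rx: &shutdown_rx };
/// let mut streamer = Streamer::new(&env, dir, syncer_channel, stream_id, &camera, &stream,
///                                  session_group, rotate_offset_sec, rotate_interval_sec)?;
/// streamer.run(); // blocks until shutdown is signaled
/// ```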
pub struct Streamer<'a, C>
where
    C: Clocks + Clone,
{
    shutdown_rx: base::shutdown::Receiver,

    // State below is only used by the thread in `run()`.
    rotate_offset_sec: i64,
    rotate_interval_sec: i64,
    db: Arc<Database<C>>,
    dir: Arc<dir::SampleFileDir>,
    syncer_channel: writer::SyncerChannel<::std::fs::File>,
    opener: &'a dyn stream::Opener,
    transport: retina::client::Transport,
    stream_id: i32,
    session_group: Arc<retina::client::SessionGroup>,
    short_name: String,
    url: Url,
    username: String,
    password: String,
}

impl<'a, C> Streamer<'a, C>
where
    C: 'a + Clocks + Clone,
{
    pub fn new<'tmp>(
        env: &Environment<'a, 'tmp, C>,
        dir: Arc<dir::SampleFileDir>,
        syncer_channel: writer::SyncerChannel<::std::fs::File>,
        stream_id: i32,
        c: &Camera,
        s: &Stream,
        session_group: Arc<retina::client::SessionGroup>,
        rotate_offset_sec: i64,
        rotate_interval_sec: i64,
    ) -> Result<Self, Error> {
        let url = s
            .config
            .url
            .as_ref()
            .ok_or_else(|| format_err!("Stream has no RTSP URL"))?;
        if !url.username().is_empty() || url.password().is_some() {
            bail!("RTSP URL shouldn't include credentials");
        }
        let stream_transport = if s.config.rtsp_transport.is_empty() {
            None
        } else {
            match retina::client::Transport::from_str(&s.config.rtsp_transport) {
                Ok(t) => Some(t),
                Err(_) => {
                    log::warn!(
                        "Unable to parse configured transport {:?} for {}/{}; ignoring.",
                        &s.config.rtsp_transport,
                        &c.short_name,
                        s.type_
                    );
                    None
                }
            }
        };
        Ok(Streamer {
            shutdown_rx: env.shutdown_rx.clone(),
            rotate_offset_sec,
            rotate_interval_sec,
            db: env.db.clone(),
            dir,
            syncer_channel,
            opener: env.opener,
            transport: stream_transport.unwrap_or_default(),
            stream_id,
            session_group,
            short_name: format!("{}-{}", c.short_name, s.type_.as_str()),
            url: url.clone(),
            username: c.config.username.clone(),
            password: c.config.password.clone(),
        })
    }

    pub fn short_name(&self) -> &str {
        &self.short_name
    }

    /// Runs the streamer; blocks.
    pub fn run(&mut self) {
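        // This thread owns a small single-threaded tokio runtime: async work (e.g.
        // waiting for stale RTSP sessions to expire in `run_once`) is driven via
        // `rt.block_on`, and the runtime handle is passed through to the opener.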
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_io()
            .enable_time()
            .build()
            .unwrap();
        let _guard = rt.enter();
        while self.shutdown_rx.check().is_ok() {
            if let Err(e) = self.run_once(&rt) {
                let sleep_time = time::Duration::seconds(1);
                warn!(
                    "{}: sleeping for {} after error: {}",
                    self.short_name,
                    sleep_time,
                    base::prettify_failure(&e)
                );
                self.db.clocks().sleep(sleep_time);
            }
        }
        info!("{}: shutting down", self.short_name);
    }

    fn run_once(&mut self, rt: &tokio::runtime::Runtime) -> Result<(), Error> {
        info!("{}: Opening input: {}", self.short_name, self.url.as_str());
        let clocks = self.db.clocks();

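        // Wait for any stale sessions in this session group (from a previous connection
        // attempt) to be torn down or expire before opening a new one. Some cameras limit
        // the number of simultaneous RTSP sessions, so reconnecting too early can fail.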
        let mut waited = false;
        loop {
            let status = self.session_group.stale_sessions();
            if let Some(max_expires) = status.max_expires {
                log::info!(
                    "{}: waiting up to {:?} for TEARDOWN or expiration of {} stale sessions",
                    &self.short_name,
                    max_expires.saturating_duration_since(tokio::time::Instant::now()),
                    status.num_sessions
                );
                rt.block_on(async {
                    tokio::select! {
                        _ = self.session_group.await_stale_sessions(&status) => Ok(()),
                        _ = self.shutdown_rx.as_future() => Err(base::shutdown::ShutdownError),
                    }
                })?;
                waited = true;
            } else {
                if waited {
                    log::info!("{}: done waiting; no more stale sessions", &self.short_name);
                }
                break;
            }
        }

        let (extra_data, mut stream) = {
            let _t = TimerGuard::new(&clocks, || format!("opening {}", self.url.as_str()));
            self.opener.open(
                rt,
                self.short_name.clone(),
                self.url.clone(),
                retina::client::SessionOptions::default()
                    .creds(if self.username.is_empty() {
                        None
                    } else {
                        Some(retina::client::Credentials {
                            username: self.username.clone(),
                            password: self.password.clone(),
                        })
                    })
                    .transport(self.transport)
                    .session_group(self.session_group.clone()),
            )?
        };
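        // Capture the offset between the wall clock and the monotonic clock once, so each
        // frame's wall-clock time can be derived from its (steadier) monotonic timestamp.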
        let realtime_offset = self.db.clocks().realtime() - clocks.monotonic();
        let video_sample_entry_id = {
            let _t = TimerGuard::new(&clocks, || "inserting video sample entry");
            self.db.lock().insert_video_sample_entry(extra_data)?
        };
        let mut seen_key_frame = false;

        // Seconds since epoch at which to next rotate.
        let mut rotate: Option<i64> = None;
        let mut w = writer::Writer::new(
            &self.dir,
            &self.db,
            &self.syncer_channel,
            self.stream_id,
            video_sample_entry_id,
        );
        while self.shutdown_rx.check().is_ok() {
            let pkt = {
                let _t = TimerGuard::new(&clocks, || "getting next packet");
                stream.next()
            };
            let pkt = match pkt {
                Ok(p) => p,
                Err(e) => {
                    let _ = w.close(None, Some(e.to_string()));
                    return Err(e);
                }
            };
            if !seen_key_frame && !pkt.is_key {
                continue;
            } else if !seen_key_frame {
                debug!("{}: have first key frame", self.short_name);
                seen_key_frame = true;
            }
            let frame_realtime = clocks.monotonic() + realtime_offset;
            let local_time = recording::Time::new(frame_realtime);
            rotate = if let Some(r) = rotate {
                if frame_realtime.sec > r && pkt.is_key {
                    trace!("{}: write on normal rotation", self.short_name);
                    let _t = TimerGuard::new(&clocks, || "closing writer");
                    w.close(Some(pkt.pts), None)?;
                    None
                } else {
                    Some(r)
                }
            } else {
                None
            };
            let r = match rotate {
                Some(r) => r,
                None => {
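                    // Align the next rotation to a multiple of rotate_interval_sec,
                    // shifted by rotate_offset_sec, choosing the boundary strictly after
                    // the current second.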
                    let sec = frame_realtime.sec;
                    let r = sec - (sec % self.rotate_interval_sec) + self.rotate_offset_sec;
                    let r = r + if r <= sec {
                        self.rotate_interval_sec
                    } else {
                        0
                    };

                    // On the first recording, set rotate time to not the next rotate offset, but
                    // the one after, so that it's longer than usual rather than shorter than
                    // usual. This ensures there's plenty of frame times to use when calculating
                    // the start time.
                    let r = r + if w.previously_opened()? {
                        0
                    } else {
                        self.rotate_interval_sec
                    };
                    let _t = TimerGuard::new(&clocks, || "creating writer");
                    r
                }
            };
            let _t = TimerGuard::new(&clocks, || format!("writing {} bytes", pkt.data.len()));
            w.write(
                &mut self.shutdown_rx,
                &pkt.data[..],
                local_time,
                pkt.pts,
                pkt.is_key,
            )?;
            rotate = Some(r);
        }
        if rotate.is_some() {
            let _t = TimerGuard::new(&clocks, || "closing writer");
            w.close(None, Some("NVR shutdown".to_owned()))?;
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use crate::stream::{self, Stream};
    use base::clock::{self, Clocks};
    use db::{recording, testutil, CompositeId};
    use failure::{bail, Error};
    use log::trace;
    use parking_lot::Mutex;
    use std::cmp;
    use std::convert::TryFrom;
    use std::sync::Arc;
    use time;

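    /// Wraps a real `Stream`, advancing the simulated clock to the end of each frame as
    /// frames are pulled (after an initial `buffered` amount that is delivered without
    /// sleeping) and optionally shifting timestamps via `ts_offset`.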
    struct ProxyingStream {
        clocks: clock::SimulatedClocks,
        inner: Box<dyn stream::Stream>,
        buffered: time::Duration,
        slept: time::Duration,
        ts_offset: i64,
        ts_offset_pkts_left: u32,
        pkts_left: u32,
    }

    impl ProxyingStream {
        fn new(
            clocks: clock::SimulatedClocks,
            buffered: time::Duration,
            inner: Box<dyn stream::Stream>,
        ) -> ProxyingStream {
            clocks.sleep(buffered);
            ProxyingStream {
                clocks,
                inner,
                buffered,
                slept: time::Duration::seconds(0),
                ts_offset: 0,
                ts_offset_pkts_left: 0,
                pkts_left: 0,
            }
        }
    }

    impl Stream for ProxyingStream {
        fn tool(&self) -> Option<&retina::client::Tool> {
            self.inner.tool()
        }

        fn next(&mut self) -> Result<stream::VideoFrame, Error> {
            if self.pkts_left == 0 {
                bail!("end of stream");
            }
            self.pkts_left -= 1;

            let mut frame = self.inner.next()?;

            // Emulate the behavior of real cameras that send some pre-buffered frames
            // immediately on connect. After that, advance the clock to the end of this frame.
            // Avoid accumulating conversion error by tracking the total amount to sleep and how
            // much we've already slept, rather than considering each frame in isolation.
            {
                let goal = frame.pts + i64::from(frame.duration);
                let goal = time::Duration::nanoseconds(
                    goal * 1_000_000_000 / recording::TIME_UNITS_PER_SEC,
                );
                let duration = goal - self.slept;
                let buf_part = cmp::min(self.buffered, duration);
                self.buffered = self.buffered - buf_part;
                self.clocks.sleep(duration - buf_part);
                self.slept = goal;
            }

            if self.ts_offset_pkts_left > 0 {
                self.ts_offset_pkts_left -= 1;
                frame.pts += self.ts_offset;

                // In a real RTSP stream, the duration of a packet is not known until the
                // next packet arrives. ffmpeg's duration is an unreliable estimate, so set
                // it to something ridiculous.
                frame.duration = i32::try_from(3600 * recording::TIME_UNITS_PER_SEC).unwrap();
            }

            Ok(frame)
        }
    }

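    /// A `stream::Opener` that hands out pre-canned streams and, once the list is
    /// exhausted, drops the shutdown sender so the `Streamer` under test exits.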
    struct MockOpener {
        expected_url: url::Url,
        streams: Mutex<Vec<(db::VideoSampleEntryToInsert, Box<dyn stream::Stream>)>>,
        shutdown_tx: Mutex<Option<base::shutdown::Sender>>,
    }

    impl stream::Opener for MockOpener {
        fn open(
            &self,
            _rt: &tokio::runtime::Runtime,
            _label: String,
            url: url::Url,
            _options: retina::client::SessionOptions,
        ) -> Result<(db::VideoSampleEntryToInsert, Box<dyn stream::Stream>), Error> {
            assert_eq!(&url, &self.expected_url);
            let mut l = self.streams.lock();
            match l.pop() {
                Some(stream) => {
                    trace!("MockOpener returning next stream");
                    Ok(stream)
                }
                None => {
                    trace!("MockOpener shutting down");
                    self.shutdown_tx.lock().take();
                    bail!("done")
                }
            }
        }
    }

    #[derive(Debug, Eq, PartialEq)]
    struct Frame {
        start_90k: i32,
        duration_90k: i32,
        is_key: bool,
    }

    fn get_frames(db: &db::LockedDatabase, id: CompositeId) -> Vec<Frame> {
        db.with_recording_playback(id, &mut |rec| {
            let mut it = recording::SampleIndexIterator::default();
            let mut frames = Vec::new();
            while it.next(&rec.video_index).unwrap() {
                frames.push(Frame {
                    start_90k: it.start_90k,
                    duration_90k: it.duration_90k,
                    is_key: it.is_key(),
                });
            }
            Ok(frames)
        })
        .unwrap()
    }

    #[test]
    fn basic() {
        testutil::init();
        // 2015-04-25 00:00:00 UTC
        let clocks = clock::SimulatedClocks::new(time::Timespec::new(1429920000, 0));
        clocks.sleep(time::Duration::seconds(86400)); // to 2015-04-26 00:00:00 UTC

        let (extra_data, stream) =
            stream::testutil::Mp4Stream::open("src/testdata/clip.mp4").unwrap();
        let mut stream =
            ProxyingStream::new(clocks.clone(), time::Duration::seconds(2), Box::new(stream));
        stream.ts_offset = 123456; // starting pts of the input should be irrelevant
        stream.ts_offset_pkts_left = u32::max_value();
        stream.pkts_left = u32::max_value();
        let (shutdown_tx, shutdown_rx) = base::shutdown::channel();
        let opener = MockOpener {
            expected_url: url::Url::parse("rtsp://test-camera/main").unwrap(),
            streams: Mutex::new(vec![(extra_data, Box::new(stream))]),
            shutdown_tx: Mutex::new(Some(shutdown_tx)),
        };
        let db = testutil::TestDb::new(clocks.clone());
        let env = super::Environment {
            opener: &opener,
            db: &db.db,
            shutdown_rx: &shutdown_rx,
        };
        let mut stream;
        {
            let l = db.db.lock();
            let camera = l.cameras_by_id().get(&testutil::TEST_CAMERA_ID).unwrap();
            let s = l.streams_by_id().get(&testutil::TEST_STREAM_ID).unwrap();
            let dir = db
                .dirs_by_stream_id
                .get(&testutil::TEST_STREAM_ID)
                .unwrap()
                .clone();
            stream = super::Streamer::new(
                &env,
                dir,
                db.syncer_channel.clone(),
                testutil::TEST_STREAM_ID,
                camera,
                s,
                Arc::new(retina::client::SessionGroup::default()),
                0,
                3,
            )
            .unwrap();
        }
        stream.run();
        assert!(opener.streams.lock().is_empty());
        db.syncer_channel.flush();
        let db = db.db.lock();

        // Compare frame-by-frame. Note below that while the rotation is scheduled to happen near
        // 3-second boundaries (such as 2015-04-26 00:00:03), rotation happens somewhat later:
        // * the first rotation is always skipped
        // * the second rotation is deferred until a key frame.
        #[rustfmt::skip]
        assert_eq!(get_frames(&db, CompositeId::new(testutil::TEST_STREAM_ID, 0)), &[
            Frame { start_90k: 0, duration_90k: 90379, is_key: true },
            Frame { start_90k: 90379, duration_90k: 89884, is_key: false },
            Frame { start_90k: 180263, duration_90k: 89749, is_key: false },
            Frame { start_90k: 270012, duration_90k: 89981, is_key: false }, // pts_time 3.0001...
            Frame { start_90k: 359993, duration_90k: 90055, is_key: true },
            Frame { start_90k: 450048, duration_90k: 89967, is_key: false },
            Frame { start_90k: 540015, duration_90k: 90021, is_key: false }, // pts_time 6.0001...
            Frame { start_90k: 630036, duration_90k: 89958, is_key: false },
        ]);
        #[rustfmt::skip]
        assert_eq!(get_frames(&db, CompositeId::new(testutil::TEST_STREAM_ID, 1)), &[
            Frame { start_90k: 0, duration_90k: 90011, is_key: true },
            Frame { start_90k: 90011, duration_90k: 0, is_key: false },
        ]);
        let mut recordings = Vec::new();
        db.list_recordings_by_id(testutil::TEST_STREAM_ID, 0..2, &mut |r| {
            recordings.push(r);
            Ok(())
        })
        .unwrap();
        assert_eq!(2, recordings.len());
        assert_eq!(0, recordings[0].id.recording());
        assert_eq!(recording::Time(128700575999999), recordings[0].start);
        assert_eq!(0, recordings[0].flags);
        assert_eq!(1, recordings[1].id.recording());
        assert_eq!(recording::Time(128700576719993), recordings[1].start);
        assert_eq!(db::RecordingFlags::TrailingZero as i32, recordings[1].flags);

        drop(env);
        drop(opener);
    }
}
|