track cumulative duration and runs
This is useful for a combo scrub bar-based UI (#32) + live view UI (#59) in a non-obvious way. When constructing an HTML Media Source Extensions API SourceBuffer, the caller can specify a "mode" of either "segments" or "sequence":

In "sequence" mode, playback assumes segments are added sequentially. This is good enough for a live view-only UI (#59) but not for a scrub bar UI in which you may want to seek backward to a segment you've never seen before. You will then need to insert a segment out-of-sequence. Imagine what happens when the user plays forward again to the end of the segment inserted immediately before it: they should see the chronologically next segment, or a pause for loading if it's unavailable. The best approximation of this is to track the mapping of timestamps to segments and insert a VTTCue with an enter/exit handler that seeks to the right position. But seeking isn't instantaneous; the user will likely briefly see the old position before the seek takes effect. That's janky. Additionally, the "canplaythrough" event will behave strangely.

In "segments" mode, playback respects the timestamps we set:

* The obvious choice is to use wall clock timestamps. This is fine if they're known to be fixed and correct; they're not. The currently-recording segment may be "unanchored", meaning its start timestamp is not yet fixed, and older timestamps may overlap if the system clock was stepped between runs. The latter isn't /too/ bad from a user perspective, though it's confusing as a developer; we probably will only end up showing the more recent recording for a given timestamp anyway. But the former is quite annoying: it means we have to either throw away the part of the SourceBuffer we may want to seek back to (causing UI pauses when that happens) or keep our own spare copy of it (memory bloat). I'd like to avoid the whole mess.

* Another approach is to use timestamps that are guaranteed to be in the correct order but that may have gaps; in particular, a timestamp of (recording_id * max_recording_duration) + time_within_recording. But again, seeking isn't instantaneous. In my experiments, there's a visible pause between segments that drives me nuts.

* Finally, the approach that led me to this schema change: use timestamps that place each segment immediately after the one before, possibly with an intentional gap between runs (to force a wait where we have an actual gap). This should make the browser's natural playback behavior work properly: it never goes to an incorrect place, and it only waits when/if we want it to. We have to maintain a mapping between its timestamps and segment ids, but that's doable (a sketch follows below).

This commit is only the schema change; the new data aren't exposed in the API yet, much less used by a UI.

Note that stream.next_recording_id became stream.cum_recordings. I made a slight definition change in the process: recording ids for new streams start at 0 rather than 1. Various tests changed accordingly.

The upgrade process makes a best effort to backfill these new fields, but of course it doesn't know the total duration or number of runs of previously deleted rows. That's good enough.
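To make the third approach concrete: a minimal sketch (not code from this commit) of how the new cumulative fields could drive the timestamp mapping. The helper name, the one-second gap value, and the use of seconds for the SourceBuffer timeline are illustrative assumptions:

    /// Sketch only: place a recording on the media timeline using the new
    /// cumulative columns. prev_duration_90k is the total duration of all
    /// earlier recordings in the stream; prev_runs counts earlier runs.
    /// GAP_90K is a hypothetical fixed gap inserted between runs so playback
    /// naturally pauses where the footage has a real gap.
    const GAP_90K: i64 = 90_000; // assumed: one second, in 90 kHz units

    fn timestamp_offset_secs(prev_duration_90k: i64, prev_runs: i64) -> f64 {
        (prev_duration_90k + prev_runs * GAP_90K) as f64 / 90_000.0
    }

    fn main() {
        // A recording preceded by two minutes of video and one completed run
        // starts at 121 s: 120 s of video plus one intentional 1 s gap.
        assert_eq!(timestamp_offset_secs(2 * 60 * 90_000, 1), 121.0);
    }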
@@ -141,6 +141,38 @@ pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error>
    })?;
    }
    tx.execute_batch(r#"
      alter table stream rename to old_stream;
      create table stream (
        id integer primary key,
        camera_id integer not null references camera (id),
        sample_file_dir_id integer references sample_file_dir (id),
        type text not null check (type in ('main', 'sub')),
        record integer not null check (record in (1, 0)),
        rtsp_url text not null,
        retain_bytes integer not null check (retain_bytes >= 0),
        flush_if_sec integer not null,
        cum_recordings integer not null check (cum_recordings >= 0),
        cum_duration_90k integer not null check (cum_duration_90k >= 0),
        cum_runs integer not null check (cum_runs >= 0),
        unique (camera_id, type)
      );
      insert into stream
      select
        s.id,
        s.camera_id,
        s.sample_file_dir_id,
        s.type,
        s.record,
        s.rtsp_url,
        s.retain_bytes,
        s.flush_if_sec,
        s.next_recording_id as cum_recordings,
        coalesce(sum(r.duration_90k), 0) as cum_duration_90k,
        coalesce(sum(case when r.run_offset = 0 then 1 else 0 end), 0) as cum_runs
      from
        old_stream s left join recording r on (s.id = r.stream_id)
      group by 1;

      alter table recording rename to old_recording;
      create table recording (
        composite_id integer primary key,
@@ -150,6 +182,8 @@ pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error>
        flags integer not null,
        sample_file_bytes integer not null check (sample_file_bytes > 0),
        start_time_90k integer not null check (start_time_90k > 0),
        prev_duration_90k integer not null check (prev_duration_90k >= 0),
        prev_runs integer not null check (prev_runs >= 0),
        duration_90k integer not null
            check (duration_90k >= 0 and duration_90k < 5*60*90000),
        video_samples integer not null check (video_samples > 0),
@@ -157,7 +191,77 @@ pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error>
        video_sample_entry_id integer references video_sample_entry (id),
        check (composite_id >> 32 = stream_id)
      );
      insert into recording select * from old_recording;
    "#)?;

    // SQLite added window functions in 3.25.0. macOS still ships SQLite 3.24.0 (no support).
    // Compute cumulative columns by hand.
    let mut cur_stream_id = None;
    let mut cum_duration_90k: i64 = 0; // 90 kHz units; i64 to avoid overflow on long-lived streams
    let mut cum_runs = 0;
    let mut stmt = tx.prepare(r#"
      select
        composite_id,
        open_id,
        stream_id,
        run_offset,
        flags,
        sample_file_bytes,
        start_time_90k,
        duration_90k,
        video_samples,
        video_sync_samples,
        video_sample_entry_id
      from
        old_recording
      order by composite_id
    "#)?;
    let mut insert = tx.prepare(r#"
      insert into recording (composite_id, open_id, stream_id, run_offset, flags,
                             sample_file_bytes, start_time_90k, prev_duration_90k, prev_runs,
                             duration_90k, video_samples, video_sync_samples,
                             video_sample_entry_id)
                     values (:composite_id, :open_id, :stream_id, :run_offset, :flags,
                             :sample_file_bytes, :start_time_90k, :prev_duration_90k, :prev_runs,
                             :duration_90k, :video_samples, :video_sync_samples,
                             :video_sample_entry_id)
    "#)?;
    let mut rows = stmt.query(params![])?;
    while let Some(row) = rows.next()? {
        let composite_id: i64 = row.get(0)?;
        let open_id: i32 = row.get(1)?;
        let stream_id: i32 = row.get(2)?;
        let run_offset: i32 = row.get(3)?;
        let flags: i32 = row.get(4)?;
        let sample_file_bytes: i32 = row.get(5)?;
        let start_time_90k: i64 = row.get(6)?;
        let duration_90k: i32 = row.get(7)?;
        let video_samples: i32 = row.get(8)?;
        let video_sync_samples: i32 = row.get(9)?;
        let video_sample_entry_id: i32 = row.get(10)?;
        if cur_stream_id != Some(stream_id) {
            // Rows are ordered by composite_id, so all of a stream's rows are
            // adjacent; reset the running totals at each stream boundary.
            cum_duration_90k = 0;
            cum_runs = 0;
            cur_stream_id = Some(stream_id);
        }
        insert.execute_named(named_params!{
            ":composite_id": composite_id,
            ":open_id": open_id,
            ":stream_id": stream_id,
            ":run_offset": run_offset,
            ":flags": flags,
            ":sample_file_bytes": sample_file_bytes,
            ":start_time_90k": start_time_90k,
            ":prev_duration_90k": cum_duration_90k,
            ":prev_runs": cum_runs,
            ":duration_90k": duration_90k,
            ":video_samples": video_samples,
            ":video_sync_samples": video_sync_samples,
            ":video_sample_entry_id": video_sample_entry_id,
        })?;
        cum_duration_90k += i64::from(duration_90k);
        cum_runs += if run_offset == 0 { 1 } else { 0 };
    }
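Once SQLite 3.25+ can be assumed everywhere, the loop above could collapse into a single window-function query. A sketch in the same embedded-SQL style, reusing the surrounding function's tx; this is not part of the commit:

    // Sketch: window-function equivalent of the backfill loop (SQLite >= 3.25).
    // The frame ends at `1 preceding`, so each row sums only strictly earlier
    // rows, matching the prev_duration_90k / prev_runs semantics; coalesce
    // covers each stream's first row, whose frame is empty.
    let mut stmt = tx.prepare(r#"
      select
        composite_id,
        coalesce(sum(duration_90k) over w, 0) as prev_duration_90k,
        coalesce(sum(case when run_offset = 0 then 1 else 0 end) over w, 0) as prev_runs
      from old_recording
      window w as (partition by stream_id order by composite_id
                   rows between unbounded preceding and 1 preceding)
    "#)?;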
    tx.execute_batch(r#"
      drop index recording_cover;
      create index recording_cover on recording (
        stream_id,
@@ -172,7 +276,6 @@ pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error>
        flags
      );

      alter table recording_integrity rename to old_recording_integrity;
      create table recording_integrity (
        composite_id integer primary key references recording (composite_id),
@@ -201,6 +304,7 @@ pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error>
      drop table old_recording_playback;
      drop table old_recording_integrity;
      drop table old_recording;
      drop table old_stream;
      drop table old_video_sample_entry;

      update user_session