// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2018-2020 The Moonfire NVR Authors
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

//! Raw database access: SQLite statements which do not touch any cached state.

use crate::db::{self, CompositeId, FromSqlUuid};
use crate::recording;
use failure::{Error, ResultExt, bail};
use fnv::FnvHashSet;
use rusqlite::{named_params, params};
use std::ops::Range;
use uuid::Uuid;

// Note: the magic number "27000000" below is recording::MAX_RECORDING_WALL_DURATION.
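// That value works out to 5 minutes: 5 * 60 seconds * 90,000 90 kHz units/second = 27,000,000.
// Bounding `recording.start_time_90k` below (rather than comparing end times directly) keeps
// the scan finite: any recording that overlaps the requested range must have started no more
// than one maximum recording duration before the range's start.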
const LIST_RECORDINGS_BY_TIME_SQL: &'static str = r#"
    select
        recording.composite_id,
        recording.run_offset,
        recording.flags,
        recording.start_time_90k,
        recording.wall_duration_90k,
        recording.media_duration_delta_90k,
        recording.sample_file_bytes,
        recording.video_samples,
        recording.video_sync_samples,
        recording.video_sample_entry_id,
        recording.open_id
    from
        recording
    where
        stream_id = :stream_id and
        recording.start_time_90k > :start_time_90k - 27000000 and
        recording.start_time_90k < :end_time_90k and
        recording.start_time_90k + recording.wall_duration_90k > :start_time_90k
    order by
        recording.start_time_90k
"#;

const LIST_RECORDINGS_BY_ID_SQL: &'static str = r#"
    select
        recording.composite_id,
        recording.run_offset,
        recording.flags,
        recording.start_time_90k,
        recording.wall_duration_90k,
        recording.media_duration_delta_90k,
        recording.sample_file_bytes,
        recording.video_samples,
        recording.video_sync_samples,
        recording.video_sample_entry_id,
        recording.open_id,
        recording.prev_media_duration_90k,
        recording.prev_runs
    from
        recording
    where
        :start <= composite_id and
        composite_id < :end
    order by
        recording.composite_id
"#;

const STREAM_MIN_START_SQL: &'static str = r#"
    select
        start_time_90k
    from
        recording
    where
        stream_id = :stream_id
    order by start_time_90k limit 1
"#;

const STREAM_MAX_START_SQL: &'static str = r#"
    select
        start_time_90k,
        wall_duration_90k
    from
        recording
    where
        stream_id = :stream_id
    order by start_time_90k desc;
"#;

const LIST_OLDEST_RECORDINGS_SQL: &'static str = r#"
    select
        composite_id,
        start_time_90k,
        wall_duration_90k,
        sample_file_bytes
    from
        recording
    where
        :start <= composite_id and
        composite_id < :end
    order by
        composite_id
"#;

/// Lists the specified recordings in ascending order by start time, passing them to a supplied
/// function. Given that the function is called with the database lock held, it should be quick.
pub(crate) fn list_recordings_by_time(
    conn: &rusqlite::Connection, stream_id: i32, desired_time: Range<recording::Time>,
    f: &mut dyn FnMut(db::ListRecordingsRow) -> Result<(), Error>) -> Result<(), Error> {
    let mut stmt = conn.prepare_cached(LIST_RECORDINGS_BY_TIME_SQL)?;
    let rows = stmt.query_named(named_params!{
        ":stream_id": stream_id,
        ":start_time_90k": desired_time.start.0,
        ":end_time_90k": desired_time.end.0,
    })?;
    list_recordings_inner(rows, false, f)
}
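
// A minimal usage sketch (hypothetical stream id and time range, for illustration only):
//
//     let mut ids = Vec::new();
//     list_recordings_by_time(&conn, 1, recording::Time(0) .. recording::Time(i64::max_value()),
//                             &mut |row| { ids.push(row.id); Ok(()) })?;
//
// As the doc comment above notes, the callback runs with the database lock held, so it should
// only collect data rather than do slow work.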

/// Lists the specified recordings in ascending order by id.
pub(crate) fn list_recordings_by_id(
    conn: &rusqlite::Connection, stream_id: i32, desired_ids: Range<i32>,
    f: &mut dyn FnMut(db::ListRecordingsRow) -> Result<(), Error>) -> Result<(), Error> {
    let mut stmt = conn.prepare_cached(LIST_RECORDINGS_BY_ID_SQL)?;
    let rows = stmt.query_named(named_params!{
        ":start": CompositeId::new(stream_id, desired_ids.start).0,
        ":end": CompositeId::new(stream_id, desired_ids.end).0,
    })?;
    list_recordings_inner(rows, true, f)
}
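
// The :start/:end bounds above rely on CompositeId's packing: it appears (from
// `CompositeId::new(stream_id, recording_id)` and the `.stream()` accessor used elsewhere in
// this file) to place the stream id in the high half of an i64 and the recording id in the low
// half, so a half-open id range within one stream is just an i64 comparison.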

fn list_recordings_inner(mut rows: rusqlite::Rows, include_prev: bool,
                         f: &mut dyn FnMut(db::ListRecordingsRow) -> Result<(), Error>)
                         -> Result<(), Error> {
    while let Some(row) = rows.next()? {
        let wall_duration_90k = row.get(4)?;
        let media_duration_delta_90k: i32 = row.get(5)?;
        f(db::ListRecordingsRow {
            id: CompositeId(row.get(0)?),
            run_offset: row.get(1)?,
            flags: row.get(2)?,
            start: recording::Time(row.get(3)?),
            wall_duration_90k,
            media_duration_90k: wall_duration_90k + media_duration_delta_90k,
            sample_file_bytes: row.get(6)?,
            video_samples: row.get(7)?,
            video_sync_samples: row.get(8)?,
            video_sample_entry_id: row.get(9)?,
            open_id: row.get(10)?,
            prev_media_duration_and_runs: match include_prev {
                false => None,
                true => Some((recording::Duration(row.get(11)?), row.get(12)?)),
            },
        })?;
    }
    Ok(())
}
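
// Note the duration arithmetic in `list_recordings_inner`: only the wall duration and a delta
// are stored, and the media duration is reconstructed as wall + delta. For example (made-up
// numbers), wall_duration_90k = 27000000 with media_duration_delta_90k = -90 yields a media
// duration of 26999910, i.e. the media clock ran one millisecond (90 units) short of wall time.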

pub(crate) fn get_db_uuid(conn: &rusqlite::Connection) -> Result<Uuid, Error> {
    Ok(conn.query_row("select uuid from meta", params![], |row| -> rusqlite::Result<Uuid> {
        let uuid: FromSqlUuid = row.get(0)?;
        Ok(uuid.0)
    })?)
}

/// Inserts the specified recording (for use from `try_flush` only).
pub(crate) fn insert_recording(tx: &rusqlite::Transaction, o: &db::Open, id: CompositeId,
                               r: &db::RecordingToInsert) -> Result<(), Error> {
    let mut stmt = tx.prepare_cached(r#"
        insert into recording (composite_id, stream_id, open_id, run_offset, flags,
                               sample_file_bytes, start_time_90k, prev_media_duration_90k,
                               prev_runs, wall_duration_90k, media_duration_delta_90k,
                               video_samples, video_sync_samples, video_sample_entry_id)
                       values (:composite_id, :stream_id, :open_id, :run_offset, :flags,
                               :sample_file_bytes, :start_time_90k, :prev_media_duration_90k,
                               :prev_runs, :wall_duration_90k, :media_duration_delta_90k,
                               :video_samples, :video_sync_samples, :video_sample_entry_id)
    "#).with_context(|e| format!("can't prepare recording insert: {}", e))?;
    stmt.execute_named(named_params!{
        ":composite_id": id.0,
        ":stream_id": i64::from(id.stream()),
        ":open_id": o.id,
        ":run_offset": r.run_offset,
        ":flags": r.flags,
        ":sample_file_bytes": r.sample_file_bytes,
        ":start_time_90k": r.start.0,
        ":wall_duration_90k": r.wall_duration_90k,
        ":media_duration_delta_90k": r.media_duration_90k - r.wall_duration_90k,
        ":prev_media_duration_90k": r.prev_media_duration.0,
        ":prev_runs": r.prev_runs,
        ":video_samples": r.video_samples,
        ":video_sync_samples": r.video_sync_samples,
        ":video_sample_entry_id": r.video_sample_entry_id,
    }).with_context(|e| format!("unable to insert recording {} {:#?}: {}", id, r, e))?;

    let mut stmt = tx.prepare_cached(r#"
        insert into recording_integrity (composite_id, local_time_delta_90k, sample_file_blake3)
                                 values (:composite_id, :local_time_delta_90k, :sample_file_blake3)
    "#).with_context(|e| format!("can't prepare recording_integrity insert: {}", e))?;
    let blake3 = r.sample_file_blake3.as_ref().map(|b| &b[..]);
    let delta = match r.run_offset {
        0 => None,
        _ => Some(r.local_time_delta.0),
    };
    stmt.execute_named(named_params!{
        ":composite_id": id.0,
        ":local_time_delta_90k": delta,
        ":sample_file_blake3": blake3,
    }).with_context(|e| format!("unable to insert recording_integrity for {:#?}: {}", r, e))?;

    let mut stmt = tx.prepare_cached(r#"
        insert into recording_playback (composite_id, video_index)
                                values (:composite_id, :video_index)
    "#).with_context(|e| format!("can't prepare recording_playback insert: {}", e))?;
    stmt.execute_named(named_params!{
        ":composite_id": id.0,
        ":video_index": &r.video_index,
    }).with_context(|e| format!("unable to insert recording_playback for {:#?}: {}", r, e))?;

    Ok(())
}
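
// Judging from the column sets above, the insert is split across three tables: `recording`
// keeps the small columns that the listing queries read, `recording_integrity` keeps optional
// diagnostics (local time delta, blake3 checksum), and `recording_playback` keeps the
// `video_index` blob needed only when serving the recording. This description is inferred from
// this file, not restated from the schema.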

/// Transfers the given recording range from the `recording` and associated tables to the
/// `garbage` table. `sample_file_dir_id` is assumed to be correct.
///
/// Returns the number of recordings which were deleted.
pub(crate) fn delete_recordings(tx: &rusqlite::Transaction, sample_file_dir_id: i32,
                                ids: Range<CompositeId>)
                                -> Result<usize, Error> {
    let mut insert = tx.prepare_cached(r#"
        insert into garbage (sample_file_dir_id, composite_id)
        select
            :sample_file_dir_id,
            composite_id
        from
            recording
        where
            :start <= composite_id and
            composite_id < :end
    "#)?;
    let mut del_playback = tx.prepare_cached(r#"
        delete from recording_playback
        where
            :start <= composite_id and
            composite_id < :end
    "#)?;
    let mut del_integrity = tx.prepare_cached(r#"
        delete from recording_integrity
        where
            :start <= composite_id and
            composite_id < :end
    "#)?;
    let mut del_main = tx.prepare_cached(r#"
        delete from recording
        where
            :start <= composite_id and
            composite_id < :end
    "#)?;
    let n = insert.execute_named(named_params!{
        ":sample_file_dir_id": sample_file_dir_id,
        ":start": ids.start.0,
        ":end": ids.end.0,
    })?;
    let p = named_params!{
        ":start": ids.start.0,
        ":end": ids.end.0,
    };
    let n_playback = del_playback.execute_named(p)?;
    if n_playback != n {
        bail!("inserted {} garbage rows but deleted {} recording_playback rows!", n, n_playback);
    }
    let n_integrity = del_integrity.execute_named(p)?;
    if n_integrity > n {  // fewer is okay; recording_integrity is optional.
        bail!("inserted {} garbage rows but deleted {} recording_integrity rows!", n, n_integrity);
    }
    let n_main = del_main.execute_named(p)?;
    if n_main != n {
        bail!("inserted {} garbage rows but deleted {} recording rows!", n, n_main);
    }
    Ok(n)
}

/// Marks the given sample files as deleted. This shouldn't be called until the files have
/// been `unlink()`ed and the parent directory `fsync()`ed.
pub(crate) fn mark_sample_files_deleted(tx: &rusqlite::Transaction, ids: &[CompositeId])
                                        -> Result<(), Error> {
    if ids.is_empty() { return Ok(()); }
    let mut stmt = tx.prepare_cached("delete from garbage where composite_id = ?")?;
    for &id in ids {
        let changes = stmt.execute(params![id.0])?;
        if changes != 1 {
            // Panic rather than return an error. Errors get retried indefinitely, and there's
            // no recovery from this condition.
            //
            // It's tempting to just log the error and move on, but this represents a logic
            // flaw, so complain loudly. The freshly deleted file might still be referenced in
            // the recording table.
            panic!("no garbage row for {}", id);
        }
    }
    Ok(())
}
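
// Taken together with `delete_recordings` above, the intended ordering is roughly (a sketch;
// the real callers live elsewhere in the db layer):
//
//     1. delete_recordings(&tx, dir_id, range)?;   // rows move to `garbage`; commit tx
//     2. unlink() each sample file and fsync() the parent directory
//     3. mark_sample_files_deleted(&tx2, &ids)?;   // drop the garbage rows; commit tx2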

/// Gets the time range of recordings for the given stream.
pub(crate) fn get_range(conn: &rusqlite::Connection, stream_id: i32)
                        -> Result<Option<Range<recording::Time>>, Error> {
    // The minimum is straightforward, taking advantage of the start_time_90k index.
    let mut stmt = conn.prepare_cached(STREAM_MIN_START_SQL)?;
    let mut rows = stmt.query_named(named_params!{":stream_id": stream_id})?;
    let min_start = match rows.next()? {
        Some(row) => recording::Time(row.get(0)?),
        None => return Ok(None),
    };

    // There was a minimum, so there should be a maximum too. Calculating it is less
    // straightforward because recordings could overlap. All recordings starting in the
    // last MAX_RECORDING_WALL_DURATION must be examined in order to take advantage of the
    // start_time_90k index.
    let mut stmt = conn.prepare_cached(STREAM_MAX_START_SQL)?;
    let mut rows = stmt.query_named(named_params!{":stream_id": stream_id})?;
    let mut maxes_opt = None;
    while let Some(row) = rows.next()? {
        let row_start = recording::Time(row.get(0)?);
        let row_duration: i64 = row.get(1)?;
        let row_end = recording::Time(row_start.0 + row_duration);
        let maxes = match maxes_opt {
            None => row_start .. row_end,
            Some(Range{start: s, end: e}) => s .. ::std::cmp::max(e, row_end),
        };
        if row_start.0 <= maxes.start.0 - recording::MAX_RECORDING_WALL_DURATION {
            break;
        }
        maxes_opt = Some(maxes);
    }
    let max_end = match maxes_opt {
        Some(Range{start: _, end: e}) => e,
        None => bail!("missing max for stream {} which had min {}", stream_id, min_start),
    };
    Ok(Some(min_start .. max_end))
}
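
// Example of why the backward scan is needed (hypothetical values): a recording starting at
// t=100 with wall duration 50 ends at 150, while a later one starting at t=120 with duration
// 10 ends at 130. The true maximum end (150) belongs to the earlier start, so the loop keeps
// looking until starts are more than one maximum recording duration older than the latest one.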

/// Lists all garbage ids for the given sample file directory.
pub(crate) fn list_garbage(conn: &rusqlite::Connection, dir_id: i32)
                           -> Result<FnvHashSet<CompositeId>, Error> {
    let mut garbage = FnvHashSet::default();
    let mut stmt = conn.prepare_cached(
        "select composite_id from garbage where sample_file_dir_id = ?")?;
    let mut rows = stmt.query(&[&dir_id])?;
    while let Some(row) = rows.next()? {
        garbage.insert(CompositeId(row.get(0)?));
    }
    Ok(garbage)
}

/// Lists the oldest recordings for a stream, starting with the given id.
/// `f` should return true as long as further rows are desired.
pub(crate) fn list_oldest_recordings(conn: &rusqlite::Connection, start: CompositeId,
                                     f: &mut dyn FnMut(db::ListOldestRecordingsRow) -> bool)
                                     -> Result<(), Error> {
    let mut stmt = conn.prepare_cached(LIST_OLDEST_RECORDINGS_SQL)?;
    let mut rows = stmt.query_named(named_params!{
        ":start": start.0,
        ":end": CompositeId::new(start.stream() + 1, 0).0,
    })?;
    while let Some(row) = rows.next()? {
        let should_continue = f(db::ListOldestRecordingsRow {
            id: CompositeId(row.get(0)?),
            start: recording::Time(row.get(1)?),
            wall_duration_90k: row.get(2)?,
            sample_file_bytes: row.get(3)?,
        });
        if !should_continue {
            break;
        }
    }
    Ok(())
}