moonfire-nvr (mirror of https://github.com/scottlamb/moonfire-nvr.git)

commit 476bd86b12: Merge branch 'master' into new-schema
@@ -44,7 +44,7 @@ static MULTIPLIERS: [(char, u64); 4] = [
     ('K', 10),
 ];
 
-/// Encodes a size into human-readable form.
+/// Encodes a non-negative size into human-readable form.
 pub fn encode_size(mut raw: i64) -> String {
     let mut encoded = String::new();
     for &(c, n) in &MULTIPLIERS {

db/db.rs (82 changed lines)

@@ -111,6 +111,20 @@ const UPDATE_STREAM_COUNTERS_SQL: &'static str = r#"
     where id = :stream_id
 "#;
 
+/// The size of a filesystem block, to use in disk space accounting.
+/// This should really be obtained by a stat call on the sample file directory in question,
+/// but that requires some refactoring. See
+/// [#89](https://github.com/scottlamb/moonfire-nvr/issues/89). We might be able to get away with
+/// this hardcoded value for a while.
+const ASSUMED_BLOCK_SIZE_BYTES: i64 = 4096;
+
+/// Rounds a file size up to the next multiple of the block size.
+/// This is useful in representing the actual amount of filesystem space used.
+pub(crate) fn round_up(bytes: i64) -> i64 {
+    let blk = ASSUMED_BLOCK_SIZE_BYTES;
+    (bytes + blk - 1) / blk * blk
+}
+
 pub struct FromSqlUuid(pub Uuid);
 
 impl rusqlite::types::FromSql for FromSqlUuid {
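
The new `round_up` uses the standard integer idiom for rounding up to a multiple: adding `blk - 1` before the truncating division bumps any partial block to a whole one. A minimal standalone sketch of the same behavior (the constant and function are copied here purely for illustration; they are not additional API):

    // Illustration only: mirrors the diff's round_up with its assumed
    // 4096-byte block size (ASSUMED_BLOCK_SIZE_BYTES).
    fn round_up(bytes: i64) -> i64 {
        let blk = 4096;
        (bytes + blk - 1) / blk * blk
    }

    fn main() {
        assert_eq!(round_up(0), 0);          // an empty file uses no blocks
        assert_eq!(round_up(1), 4096);       // 1 byte still occupies a whole block
        assert_eq!(round_up(8_192), 8_192);  // exact multiples are unchanged
        assert_eq!(round_up(8_193), 12_288); // 1 byte over claims the next block
    }
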
@@ -194,8 +208,7 @@ pub struct ListAggregatedRecordingsRow {
     pub growing: bool,
 }
 
-impl ListAggregatedRecordingsRow {
-    fn from(row: ListRecordingsRow) -> Self {
+impl ListAggregatedRecordingsRow { fn from(row: ListRecordingsRow) -> Self {
         let recording_id = row.id.recording();
         let uncommitted = (row.flags & RecordingFlags::Uncommitted as i32) != 0;
         let growing = (row.flags & RecordingFlags::Growing as i32) != 0;
@@ -440,8 +453,15 @@ pub struct Stream {
     /// The time range of recorded data associated with this stream (minimum start time and maximum
     /// end time). `None` iff there are no recordings for this camera.
     pub range: Option<Range<recording::Time>>,
+
+    /// The total bytes of flushed sample files. This doesn't include disk space wasted in the
+    /// last filesystem block allocated to each file ("internal fragmentation").
     pub sample_file_bytes: i64,
 
+    /// The total bytes on the filesystem used by this stream. This is slightly more than
+    /// `sample_file_bytes` because it includes the wasted space in the last filesystem block.
+    pub fs_bytes: i64,
+
     /// On flush, delete the following recordings (move them to the `garbage` table, to be
     /// collected later). Note they must be the oldest recordings. The later collection involves
     /// the syncer unlinking the files on disk and syncing the directory then enqueueing for
@ -450,10 +470,12 @@ pub struct Stream {
|
|||||||
|
|
||||||
/// The total bytes to delete with the next flush.
|
/// The total bytes to delete with the next flush.
|
||||||
pub bytes_to_delete: i64,
|
pub bytes_to_delete: i64,
|
||||||
|
pub fs_bytes_to_delete: i64,
|
||||||
|
|
||||||
/// The total bytes to add with the next flush. (`mark_synced` has already been called on these
|
/// The total bytes to add with the next flush. (`mark_synced` has already been called on these
|
||||||
/// recordings.)
|
/// recordings.)
|
||||||
pub bytes_to_add: i64,
|
pub bytes_to_add: i64,
|
||||||
|
pub fs_bytes_to_add: i64,
|
||||||
|
|
||||||
/// The total duration of undeleted recorded data. This may not be `range.end - range.start`
|
/// The total duration of undeleted recorded data. This may not be `range.end - range.start`
|
||||||
/// due to gaps and overlap.
|
/// due to gaps and overlap.
|
||||||
@@ -605,6 +627,7 @@ impl Stream {
         });
         self.duration += r.end - r.start;
         self.sample_file_bytes += sample_file_bytes as i64;
+        self.fs_bytes += round_up(i64::from(sample_file_bytes));
         adjust_days(r, 1, &mut self.days);
     }
 }
@@ -801,9 +824,12 @@ impl StreamStateChanger {
                 flush_if_sec: sc.flush_if_sec,
                 range: None,
                 sample_file_bytes: 0,
+                fs_bytes: 0,
                 to_delete: Vec::new(),
                 bytes_to_delete: 0,
+                fs_bytes_to_delete: 0,
                 bytes_to_add: 0,
+                fs_bytes_to_add: 0,
                 duration: recording::Duration(0),
                 days: BTreeMap::new(),
                 record: sc.record,
@@ -898,7 +924,9 @@ impl LockedDatabase {
             bail!("can't sync un-added recording {}", id);
         }
         let l = stream.uncommitted[stream.synced_recordings].lock();
-        stream.bytes_to_add += l.sample_file_bytes as i64;
+        let bytes = i64::from(l.sample_file_bytes);
+        stream.bytes_to_add += bytes;
+        stream.fs_bytes_to_add += round_up(bytes);
         stream.synced_recordings += 1;
         Ok(())
     }
@@ -1055,17 +1083,19 @@ impl LockedDatabase {
         for (stream_id, new_range) in new_ranges.drain() {
             let s = self.streams_by_id.get_mut(&stream_id).unwrap();
             let dir_id = s.sample_file_dir_id.unwrap();
-            let d = self.sample_file_dirs_by_id.get_mut(&dir_id).unwrap();
+            let dir = self.sample_file_dirs_by_id.get_mut(&dir_id).unwrap();
             let log = dir_logs.entry(dir_id).or_default();
 
             // Process delete_oldest_recordings.
             s.sample_file_bytes -= s.bytes_to_delete;
+            s.fs_bytes -= s.fs_bytes_to_delete;
             log.deleted_bytes += s.bytes_to_delete;
             s.bytes_to_delete = 0;
+            s.fs_bytes_to_delete = 0;
             log.deleted.reserve(s.to_delete.len());
             for row in s.to_delete.drain(..) {
                 log.deleted.push(row.id);
-                d.garbage_needs_unlink.insert(row.id);
+                dir.garbage_needs_unlink.insert(row.id);
                 let d = recording::Duration(row.duration as i64);
                 s.duration -= d;
                 adjust_days(row.start .. row.start + d, -1, &mut s.days);
@@ -1074,6 +1104,7 @@ impl LockedDatabase {
             // Process add_recordings.
             log.added_bytes += s.bytes_to_add;
             s.bytes_to_add = 0;
+            s.fs_bytes_to_add = 0;
             log.added.reserve(s.synced_recordings);
             for _ in 0..s.synced_recordings {
                 let u = s.uncommitted.pop_front().unwrap();
@@ -1382,7 +1413,7 @@ impl LockedDatabase {
         Err(format_err!("no such recording {}", id))
     }
 
-    /// Deletes the oldest recordings that aren't already queued for deletion.
+    /// Queues for deletion the oldest recordings that aren't already queued.
     /// `f` should return true for each row that should be deleted.
     pub(crate) fn delete_oldest_recordings(
         &mut self, stream_id: i32, f: &mut dyn FnMut(&ListOldestRecordingsRow) -> bool)
@@ -1398,7 +1429,9 @@ impl LockedDatabase {
         raw::list_oldest_recordings(&self.conn, CompositeId::new(stream_id, end), &mut |r| {
             if f(&r) {
                 s.to_delete.push(r);
-                s.bytes_to_delete += r.sample_file_bytes as i64;
+                let bytes = i64::from(r.sample_file_bytes);
+                s.bytes_to_delete += bytes;
+                s.fs_bytes_to_delete += round_up(bytes);
                 return true;
             }
             false
@@ -1558,9 +1591,12 @@ impl LockedDatabase {
                 flush_if_sec,
                 range: None,
                 sample_file_bytes: 0,
+                fs_bytes: 0,
                 to_delete: Vec::new(),
                 bytes_to_delete: 0,
+                fs_bytes_to_delete: 0,
                 bytes_to_add: 0,
+                fs_bytes_to_add: 0,
                 duration: recording::Duration(0),
                 days: BTreeMap::new(),
                 cum_recordings: row.get(7)?,
@@ -1910,13 +1946,25 @@ impl LockedDatabase {
     }
 }
 
+/// Sets pragmas for full database integrity.
+pub(crate) fn set_integrity_pragmas(conn: &mut rusqlite::Connection) -> Result<(), Error> {
+    // Enforce foreign keys. This is on by default with --features=bundled (as rusqlite
+    // compiles the SQLite3 amalgamation with -DSQLITE_DEFAULT_FOREIGN_KEYS=1). Ensure it's
+    // always on. Note that our foreign keys are immediate rather than deferred, so we have to
+    // be careful about the order of operations during the upgrade.
+    conn.execute("pragma foreign_keys = on", params![])?;
+
+    // Make the database actually durable.
+    conn.execute("pragma fullfsync = on", params![])?;
+    conn.execute("pragma synchronous = 3", params![])?;
+    Ok(())
+}
+
 /// Initializes a database.
 /// Note this doesn't set journal options, so that it can be used on in-memory databases for
 /// test code.
 pub fn init(conn: &mut rusqlite::Connection) -> Result<(), Error> {
-    conn.execute("pragma foreign_keys = on", params![])?;
-    conn.execute("pragma fullfsync = on", params![])?;
-    conn.execute("pragma synchronous = 2", params![])?;
+    set_integrity_pragmas(conn)?;
     let tx = conn.transaction()?;
     tx.execute_batch(include_str!("schema.sql"))?;
     {
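
The extracted helper also raises `synchronous` from 2 (FULL) to 3 (EXTRA), which together with `fullfsync` trades some write throughput for stronger durability. As a minimal sketch (not part of this commit), the settings can be read back through the same rusqlite connection, since querying a pragma without `=` returns its current value:

    // Sketch: confirm the pragmas took effect on a connection that has
    // already been passed through set_integrity_pragmas().
    fn check_pragmas(conn: &rusqlite::Connection) -> Result<(), rusqlite::Error> {
        let fk: i64 = conn.query_row("pragma foreign_keys", rusqlite::params![],
                                     |row| row.get(0))?;
        let sync: i64 = conn.query_row("pragma synchronous", rusqlite::params![],
                                       |row| row.get(0))?;
        assert_eq!(fk, 1);   // foreign keys enforced
        assert_eq!(sync, 3); // 3 = EXTRA
        Ok(())
    }
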
@@ -1975,11 +2023,9 @@ fn operation() -> &'static str { "database operation" }
 
 impl<C: Clocks + Clone> Database<C> {
     /// Creates the database from a caller-supplied SQLite connection.
-    pub fn new(clocks: C, conn: rusqlite::Connection,
+    pub fn new(clocks: C, mut conn: rusqlite::Connection,
                read_write: bool) -> Result<Database<C>, Error> {
-        conn.execute("pragma foreign_keys = on", params![])?;
-        conn.execute("pragma fullfsync = on", params![])?;
-        conn.execute("pragma synchronous = 2", params![])?;
+        set_integrity_pragmas(&mut conn)?;
         {
             let ver = get_schema_version(&conn)?.ok_or_else(|| format_err!(
                 "no such table: version. \
@@ -2466,4 +2512,12 @@ mod tests {
             .collect();
         assert_eq!(&g, &[]);
     }
+
+    #[test]
+    fn round_up() {
+        assert_eq!(super::round_up(0), 0);
+        assert_eq!(super::round_up(8_191), 8_192);
+        assert_eq!(super::round_up(8_192), 8_192);
+        assert_eq!(super::round_up(8_193), 12_288);
+    }
 }
@@ -105,16 +105,7 @@ fn upgrade(args: &Args, target_ver: i32, conn: &mut rusqlite::Connection) -> Res
 }
 
 pub fn run(args: &Args, conn: &mut rusqlite::Connection) -> Result<(), Error> {
-    // Enforce foreign keys. This is on by default with --features=bundled (as rusqlite
-    // compiles the SQLite3 amalgamation with -DSQLITE_DEFAULT_FOREIGN_KEYS=1). Ensure it's
-    // always on. Note that our foreign keys are immediate rather than deferred, so we have to
-    // be careful about the order of operations during the upgrade.
-    conn.execute("pragma foreign_keys = on", params![])?;
-
-    // Make the database actually durable.
-    conn.execute("pragma fullfsync = on", params![])?;
-    conn.execute("pragma synchronous = 2", params![])?;
-
+    db::set_integrity_pragmas(conn)?;
     upgrade(args, db::EXPECTED_VERSION, conn)?;
 
     // WAL is the preferred journal mode for normal operation; it reduces the number of syncs

db/writer.rs (27 changed lines)

@@ -193,15 +193,15 @@ pub fn lower_retention(db: Arc<db::Database>, dir_id: i32, limits: &[NewLimit])
     let (mut syncer, _) = Syncer::new(&db.lock(), db2, dir_id)?;
     syncer.do_rotation(|db| {
         for l in limits {
-            let (bytes_before, extra);
+            let (fs_bytes_before, extra);
             {
                 let stream = db.streams_by_id().get(&l.stream_id)
                     .ok_or_else(|| format_err!("no such stream {}", l.stream_id))?;
-                bytes_before = stream.sample_file_bytes + stream.bytes_to_add -
-                               stream.bytes_to_delete;
+                fs_bytes_before = stream.fs_bytes + stream.fs_bytes_to_add -
+                                  stream.fs_bytes_to_delete;
                 extra = stream.retain_bytes - l.limit;
             }
-            if l.limit >= bytes_before { continue }
+            if l.limit >= fs_bytes_before { continue }
             delete_recordings(db, l.stream_id, extra)?;
         }
         Ok(())
@@ -211,23 +211,24 @@ pub fn lower_retention(db: Arc<db::Database>, dir_id: i32, limits: &[NewLimit])
 /// Deletes recordings to bring a stream's disk usage within bounds.
 fn delete_recordings(db: &mut db::LockedDatabase, stream_id: i32,
                      extra_bytes_needed: i64) -> Result<(), Error> {
-    let bytes_needed = {
+    let fs_bytes_needed = {
         let stream = match db.streams_by_id().get(&stream_id) {
             None => bail!("no stream {}", stream_id),
             Some(s) => s,
         };
-        stream.sample_file_bytes + stream.bytes_to_add - stream.bytes_to_delete + extra_bytes_needed
+        stream.fs_bytes + stream.fs_bytes_to_add - stream.fs_bytes_to_delete + extra_bytes_needed
             - stream.retain_bytes
     };
-    let mut bytes_to_delete = 0;
-    if bytes_needed <= 0 {
-        debug!("{}: have remaining quota of {}", stream_id, -bytes_needed);
+    let mut fs_bytes_to_delete = 0;
+    if fs_bytes_needed <= 0 {
+        debug!("{}: have remaining quota of {}", stream_id,
+               base::strutil::encode_size(-fs_bytes_needed));
         return Ok(());
     }
     let mut n = 0;
     db.delete_oldest_recordings(stream_id, &mut |row| {
-        if bytes_needed >= bytes_to_delete {
-            bytes_to_delete += i64::from(row.sample_file_bytes);
+        if fs_bytes_needed >= fs_bytes_to_delete {
+            fs_bytes_to_delete += db::round_up(i64::from(row.sample_file_bytes));
             n += 1;
             return true;
         }
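
With this change the retention budget is computed in filesystem blocks rather than raw sample bytes, so per-file block slack can no longer let a stream creep past its quota. A toy restatement of the arithmetic (names mirror the diff; the figures are made up):

    // Projected filesystem usage minus the retention limit; a positive
    // result is how many bytes of whole blocks must be freed.
    fn fs_bytes_needed(fs_bytes: i64, fs_bytes_to_add: i64, fs_bytes_to_delete: i64,
                       extra_bytes_needed: i64, retain_bytes: i64) -> i64 {
        fs_bytes + fs_bytes_to_add - fs_bytes_to_delete + extra_bytes_needed
            - retain_bytes
    }

    fn main() {
        // 1 GiB on disk, 8 MiB queued to add, nothing queued to delete,
        // no extra headroom requested, and a 1 GiB retention limit:
        assert_eq!(fs_bytes_needed(1 << 30, 8 << 20, 0, 0, 1 << 30), 8 << 20);
    }
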
@@ -993,7 +994,7 @@ mod tests {
         h.db.lock().update_retention(&[db::RetentionChange {
             stream_id: testutil::TEST_STREAM_ID,
             new_record: true,
-            new_limit: 3,
+            new_limit: 0,
         }]).unwrap();
 
         // Setup: add a 3-byte recording.
@@ -1152,7 +1153,7 @@ mod tests {
         h.db.lock().update_retention(&[db::RetentionChange {
             stream_id: testutil::TEST_STREAM_ID,
             new_record: true,
-            new_limit: 3,
+            new_limit: 0,
         }]).unwrap();
 
         // Setup: add a 3-byte recording.
@@ -91,6 +91,10 @@ The `application/json` response will have a dict as follows:
       be lesser if there are gaps in the recorded data.
     * `totalSampleFileBytes`: the total number of bytes of sample data
       (the `mdat` portion of a `.mp4` file).
+    * `fsBytes`: the total number of bytes on the filesystem used by
+      this stream. This is slightly more than `totalSampleFileBytes`
+      because it also includes the wasted portion of the final
+      filesystem block allocated to each file.
     * `days`: (only included if request parameter `days` is true)
       dictionary representing calendar days (in the server's time zone)
       with non-zero total duration of recordings for that day. The keys
@@ -326,8 +326,8 @@ fn edit_camera_dialog(db: &Arc<db::Database>, siv: &mut Cursive, item: &Option<i
         let u = if s.retain_bytes == 0 {
             "0 / 0 (0.0%)".to_owned()
         } else {
-            format!("{} / {} ({:.1}%)", s.sample_file_bytes, s.retain_bytes,
-                    100. * s.sample_file_bytes as f32 / s.retain_bytes as f32)
+            format!("{} / {} ({:.1}%)", s.fs_bytes, s.retain_bytes,
+                    100. * s.fs_bytes as f32 / s.retain_bytes as f32)
         };
         dialog.call_on_name(&format!("{}_rtsp_url", t.as_str()),
                             |v: &mut views::EditView| v.set_content(s.rtsp_url.to_owned()));
@@ -290,11 +290,11 @@ fn edit_dir_dialog(db: &Arc<db::Database>, siv: &mut Cursive, dir_id: i32) {
         }
         streams.insert(id, Stream {
             label: format!("{}: {}: {}", id, c.short_name, s.type_.as_str()),
-            used: s.sample_file_bytes,
+            used: s.fs_bytes,
             record: s.record,
             retain: Some(s.retain_bytes),
         });
-        total_used += s.sample_file_bytes;
+        total_used += s.fs_bytes;
         total_retain += s.retain_bytes;
     }
     if streams.is_empty() {
@@ -106,6 +106,7 @@ pub struct Stream<'a> {
     pub max_end_time_90k: Option<i64>,
     pub total_duration_90k: i64,
     pub total_sample_file_bytes: i64,
+    pub fs_bytes: i64,
 
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(serialize_with = "Stream::serialize_days")]
@@ -245,6 +246,7 @@ impl<'a> Stream<'a> {
             max_end_time_90k: s.range.as_ref().map(|r| r.end.0),
             total_duration_90k: s.duration.0,
             total_sample_file_bytes: s.sample_file_bytes,
+            fs_bytes: s.fs_bytes,
             days: if include_days { Some(&s.days) } else { None },
             config: match include_config {
                 false => None,
@@ -84,11 +84,11 @@ export default class NVRSettingsView {
    */
   wireControls_() {
     const videoLengthEl = $(`#${this.ids_.videoLenId}`);
-    this.videoLength_ = this.findSelectedOrFirst_(videoLengthEl);
+    const normalize = (v) => v == 'infinite' ? Infinity : Number(v);
+    this.videoLength_ = normalize(this.findSelectedOrFirst_(videoLengthEl));
     videoLengthEl.change((e) => {
-      const newValueStr = e.currentTarget.value;
-      this.videoLength_ =
-          newValueStr == 'infinite' ? Infinity : Number(newValueStr);
+      this.videoLength_ = normalize(e.currentTarget.value);
       if (this.videoLengthHandler_) {
         this.videoLengthHandler_(this.videoLength_);
       }
@@ -7339,9 +7339,9 @@ websocket-driver@>=0.5.1:
     websocket-extensions ">=0.1.1"
 
 websocket-extensions@>=0.1.1:
-  version "0.1.3"
-  resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29"
-  integrity sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg==
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.4.tgz#7f8473bc839dfd87608adb95d7eb075211578a42"
+  integrity sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==
 
 which-module@^2.0.0:
   version "2.0.0"