knob to reduce db commits (SSD write cycles)

This improves the practicality of having many streams (including the doubling
of streams by having main + sub streams for each camera). With these tuned
properly, extra streams don't cause any extra write cycles in normal or error
cases. Consider the worst case in which each RTSP session immediately sends a
single frame and then fails. Moonfire retries every second, so this would
formerly cause one commit per second per stream. (flush_if_sec=0 preserves
this behavior.) Now the commits can be arbitrarily infrequent by setting
higher values of flush_if_sec.

WARNING: this isn't production-ready! I hacked up dir.rs to make tests pass
and "moonfire-nvr run" work in the best-case scenario, but it doesn't handle
errors gracefully. I've been debating what to do when writing a recording
fails. I considered "abandoning" the recording, then either reusing or skipping
its id (in the latter case, marking the file as garbage if it can't be
unlinked immediately). I now think there's no point in abandoning a recording.
If I can't write to that file, there's no reason to believe another will work
better. It's better to retry that recording forever, and perhaps put the whole
directory into an error state that stops recording until those writes go
through. I'm planning to redesign dir.rs to make this happen.
This commit is contained in:
Scott Lamb
2018-02-22 16:35:34 -08:00
parent 31adbc1e9f
commit b037c9bdd7
15 changed files with 822 additions and 694 deletions

View File

@@ -31,7 +31,7 @@
//! Clock interface and implementations for testability.
use libc;
#[cfg(test)] use std::sync::Mutex;
#[cfg(test)] use parking_lot::Mutex;
use std::mem;
use std::thread;
use time::{Duration, Timespec};
@@ -123,12 +123,12 @@ impl SimulatedClocks {
#[cfg(test)]
impl Clocks for SimulatedClocks {
fn realtime(&self) -> Timespec { self.boot + *self.uptime.lock().unwrap() }
fn monotonic(&self) -> Timespec { Timespec::new(0, 0) + *self.uptime.lock().unwrap() }
fn realtime(&self) -> Timespec { self.boot + *self.uptime.lock() }
fn monotonic(&self) -> Timespec { Timespec::new(0, 0) + *self.uptime.lock() }
/// Advances the clock by the specified amount without actually sleeping.
fn sleep(&self, how_long: Duration) {
let mut l = self.uptime.lock().unwrap();
let mut l = self.uptime.lock();
*l = *l + how_long;
}
}

View File

@@ -36,6 +36,7 @@ use self::cursive::views;
use db::{self, dir};
use failure::Error;
use std::collections::BTreeMap;
use std::str::FromStr;
use std::sync::Arc;
use stream::{self, Opener, Stream};
use super::{decode_size, encode_size};
@@ -62,6 +63,9 @@ fn get_change(siv: &mut Cursive) -> db::CameraChange {
.unwrap().get_content().as_str().into();
let r = siv.find_id::<views::Checkbox>(&format!("{}_record", t.as_str()))
.unwrap().is_checked();
let f = i64::from_str(siv.find_id::<views::EditView>(
&format!("{}_flush_if_sec", t.as_str())).unwrap().get_content().as_str())
.unwrap_or(0);
let d = *siv.find_id::<views::SelectView<Option<i32>>>(
&format!("{}_sample_file_dir", t.as_str()))
.unwrap().selection();
@@ -69,6 +73,7 @@ fn get_change(siv: &mut Cursive) -> db::CameraChange {
rtsp_path: p,
sample_file_dir_id: d,
record: r,
flush_if_sec: f,
};
}
c
@@ -270,9 +275,11 @@ fn edit_camera_dialog(db: &Arc<db::Database>, siv: &mut Cursive, item: &Option<i
.popup()
.with_id(format!("{}_sample_file_dir", type_.as_str())))
.child("record", views::Checkbox::new().with_id(format!("{}_record", type_.as_str())))
.child("flush_if_sec", views::EditView::new()
.with_id(format!("{}_flush_if_sec", type_.as_str())))
.child("usage/capacity",
views::TextView::new("").with_id(format!("{}_usage_cap", type_.as_str())))
.min_height(4);
.min_height(5);
layout.add_child(views::DummyView);
layout.add_child(views::TextView::new(format!("{} stream", type_.as_str())));
layout.add_child(list);
@@ -313,6 +320,8 @@ fn edit_camera_dialog(db: &Arc<db::Database>, siv: &mut Cursive, item: &Option<i
|v: &mut views::TextView| v.set_content(u));
dialog.find_id(&format!("{}_record", t.as_str()),
|v: &mut views::Checkbox| v.set_checked(s.record));
dialog.find_id(&format!("{}_flush_if_sec", t.as_str()),
|v: &mut views::EditView| v.set_content(s.flush_if_sec.to_string()));
}
dialog.find_id(&format!("{}_sample_file_dir", t.as_str()),
|v: &mut views::SelectView<Option<i32>>| v.set_selection(selected_dir));

View File

@@ -60,12 +60,15 @@ struct Model {
/// Updates the limits in the database. Doesn't delete excess data (if any).
fn update_limits_inner(model: &Model) -> Result<(), Error> {
let mut db = model.db.lock();
let mut tx = db.tx()?;
for (&id, stream) in &model.streams {
tx.update_retention(id, stream.record, stream.retain.unwrap())?;
let mut changes = Vec::with_capacity(model.streams.len());
for (&stream_id, stream) in &model.streams {
changes.push(db::RetentionChange {
stream_id,
new_record: stream.record,
new_limit: stream.retain.unwrap(),
});
}
tx.commit()
model.db.lock().update_retention(&changes)
}
fn update_limits(model: &Model, siv: &mut Cursive) {

View File

@@ -199,6 +199,9 @@ pub fn run() -> Result<(), Error> {
}
if let Some(mut ss) = syncers {
// The syncers shut down when all channels to them have been dropped.
// The database maintains one; and `ss` holds one. Drop both.
db.lock().clear_on_flush();
for (_, s) in ss.drain() {
drop(s.channel);
s.join.join().unwrap();

View File

@@ -159,6 +159,7 @@ impl<'a> super::Upgrader for U<'a> {
record integer not null check (record in (1, 0)),
rtsp_path text not null,
retain_bytes integer not null check (retain_bytes >= 0),
flush_if_sec integer not null,
next_recording_id integer not null check (next_recording_id >= 0),
unique (camera_id, type)
);
@@ -227,6 +228,7 @@ impl<'a> super::Upgrader for U<'a> {
1,
old_camera.main_rtsp_path,
old_camera.retain_bytes,
0,
old_camera.next_recording_id
from
old_camera cross join sample_file_dir;
@@ -241,7 +243,8 @@ impl<'a> super::Upgrader for U<'a> {
0,
old_camera.sub_rtsp_path,
0,
0
60,
1
from
old_camera cross join sample_file_dir
where

View File

@@ -34,7 +34,7 @@ use serde::ser::{SerializeMap, SerializeSeq, Serializer};
use std::collections::BTreeMap;
use uuid::Uuid;
#[derive(Debug, Serialize)]
#[derive(Serialize)]
#[serde(rename_all="camelCase")]
pub struct TopLevel<'a> {
pub time_zone_name: &'a str,

View File

@@ -2105,6 +2105,7 @@ mod tests {
const EXPECTED_ETAG: &'static str = "c56ef7eb3b4a713ceafebc3dc7958bd9e62a2fae";
assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
drop(db.syncer_channel);
db.db.lock().clear_on_flush();
db.syncer_join.join().unwrap();
}
@@ -2125,6 +2126,7 @@ mod tests {
const EXPECTED_ETAG: &'static str = "3bdc2c8ce521df50155d0ca4d7497ada448fa7c3";
assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
drop(db.syncer_channel);
db.db.lock().clear_on_flush();
db.syncer_join.join().unwrap();
}
@@ -2145,6 +2147,7 @@ mod tests {
const EXPECTED_ETAG: &'static str = "3986d3bd9b866c3455fb7359fb134aa2d9107af7";
assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
drop(db.syncer_channel);
db.db.lock().clear_on_flush();
db.syncer_join.join().unwrap();
}
@@ -2165,6 +2168,7 @@ mod tests {
const EXPECTED_ETAG: &'static str = "9e789398c9a71ca834fec8fbc55b389f99d12dda";
assert_eq!(Some(header::EntityTag::strong(EXPECTED_ETAG.to_owned())), mp4.etag());
drop(db.syncer_channel);
db.db.lock().clear_on_flush();
db.syncer_join.join().unwrap();
}
}

View File

@@ -205,8 +205,9 @@ mod tests {
use failure::Error;
use h264;
use moonfire_ffmpeg;
use parking_lot::Mutex;
use std::cmp;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use stream::{self, Opener, Stream};
use time;
@@ -290,7 +291,7 @@ mod tests {
stream::Source::Rtsp(url) => assert_eq!(url, &self.expected_url),
stream::Source::File(_) => panic!("expected rtsp url"),
};
let mut l = self.streams.lock().unwrap();
let mut l = self.streams.lock();
match l.pop() {
Some(stream) => {
trace!("MockOpener returning next stream");
@@ -361,7 +362,7 @@ mod tests {
testutil::TEST_STREAM_ID, camera, s, 0, 3);
}
stream.run();
assert!(opener.streams.lock().unwrap().is_empty());
assert!(opener.streams.lock().is_empty());
db.syncer_channel.flush();
let db = db.db.lock();