read-only signals support (#28)

This is mostly untested and useless by itself, but it's a starting
point. In particular:

* there's no way to set up signals or add/remove/update events yet
  except by manual changes to the database (see the sketch below).
* if you associate a signal with a camera then remove the camera,
  hitting /api/ will error out.
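
Since there is no UI or API for this yet, here is a rough sketch (not part of the
commit) of what "manual changes to the database" could look like against the new
version-4 tables. The UUIDs, names, and timestamp are made-up examples; the two
UUIDs are borrowed from the `guide/api.md` example further down.

```rust
// Hypothetical example only: inserts one signal type, one signal, and one state
// change directly into SQLite, since this commit provides no other way to create
// them. Uses rusqlite, which the db crate already depends on.
use failure::Error;
use rusqlite::Connection;

fn add_example_signal(conn: &Connection) -> Result<(), Error> {
    conn.execute_batch(r#"
        -- A signal type with two states ("still" and "moving").
        insert into signal_type_enum (type_uuid, value, name, motion, color) values
            (x'ee66270fd9c648198b339720d4cbca6b', 1, 'still',  0, '#888888'),
            (x'ee66270fd9c648198b339720d4cbca6b', 2, 'moving', 1, '#ff8888');

        -- A signal sourced from a camera's built-in motion detection, using that type.
        insert into signal (id, source_uuid, type_uuid, short_name) values
            (1, x'fd20f7a29d694cb394edd51a20c3edfe',
                x'ee66270fd9c648198b339720d4cbca6b', 'driveway motion');

        -- One state change: signal 1 entered state 2 ("moving") at the given time.
        -- The blob holds varint-encoded (signal number delta, state) pairs: delta 1, state 2.
        insert into signal_state (time_sec, changes) values (1559861000, x'0102');
    "#)?;
    Ok(())
}
```
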
Scott Lamb 2019-06-06 16:18:13 -07:00
parent cb1bb5d810
commit 6f2c63ffac
9 changed files with 833 additions and 47 deletions


@@ -58,6 +58,7 @@ use crate::dir;
 use crate::raw;
 use crate::recording::{self, TIME_UNITS_PER_SEC};
 use crate::schema;
+use crate::signal;
 use failure::{Error, bail, format_err};
 use fnv::{FnvHashMap, FnvHashSet};
 use itertools::Itertools;
@@ -81,7 +82,7 @@ use time;
 use uuid::Uuid;

 /// Expected schema version. See `guide/schema.md` for more information.
-pub const EXPECTED_VERSION: i32 = 3;
+pub const EXPECTED_VERSION: i32 = 4;

 const GET_RECORDING_PLAYBACK_SQL: &'static str = r#"
     select
@@ -611,6 +612,7 @@ pub struct LockedDatabase {
     open_monotonic: recording::Time,
     auth: auth::State,
+    signal: signal::State,

     sample_file_dirs_by_id: BTreeMap<i32, SampleFileDir>,
     cameras_by_id: BTreeMap<i32, Camera>,
@@ -1784,6 +1786,18 @@ impl LockedDatabase {
                           req: auth::Request, hash: &auth::SessionHash) -> Result<(), Error> {
         self.auth.revoke_session(&self.conn, reason, detail, req, hash)
     }

+    // ---- signal ----
+    pub fn signals_by_id(&self) -> &BTreeMap<u32, signal::Signal> { self.signal.signals_by_id() }
+    pub fn signal_types_by_uuid(&self) -> &FnvHashMap<Uuid, signal::Type> {
+        self.signal.types_by_uuid()
+    }
+    pub fn list_changes_by_time(
+        &self, desired_time: Range<recording::Time>, f: &mut FnMut(&signal::ListStateChangesRow))
+        -> Result<(), Error> {
+        self.signal.list_changes_by_time(desired_time, f)
+    }
 }

 /// Initializes a database.
@@ -1888,6 +1902,7 @@ impl<C: Clocks + Clone> Database<C> {
             })
         } else { None };
         let auth = auth::State::init(&conn)?;
+        let signal = signal::State::init(&conn)?;
         let db = Database {
             db: Some(Mutex::new(LockedDatabase {
                 conn,
@@ -1896,6 +1911,7 @@ impl<C: Clocks + Clone> Database<C> {
                 open,
                 open_monotonic,
                 auth,
+                signal,
                 sample_file_dirs_by_id: BTreeMap::new(),
                 cameras_by_id: BTreeMap::new(),
                 cameras_by_uuid: BTreeMap::new(),
@@ -2154,20 +2170,20 @@ mod tests {
     fn test_version_too_old() {
         testutil::init();
         let c = setup_conn();
-        c.execute_batch("delete from version; insert into version values (2, 0, '');").unwrap();
+        c.execute_batch("delete from version; insert into version values (3, 0, '');").unwrap();
         let e = Database::new(clock::RealClocks {}, c, false).err().unwrap();
         assert!(e.to_string().starts_with(
-            "Database schema version 2 is too old (expected 3)"), "got: {:?}", e);
+            "Database schema version 3 is too old (expected 4)"), "got: {:?}", e);
     }

     #[test]
     fn test_version_too_new() {
         testutil::init();
         let c = setup_conn();
-        c.execute_batch("delete from version; insert into version values (4, 0, '');").unwrap();
+        c.execute_batch("delete from version; insert into version values (5, 0, '');").unwrap();
         let e = Database::new(clock::RealClocks {}, c, false).err().unwrap();
         assert!(e.to_string().starts_with(
-            "Database schema version 4 is too new (expected 3)"), "got: {:?}", e);
+            "Database schema version 5 is too new (expected 4)"), "got: {:?}", e);
     }

     /// Basic test of running some queries on a fresh database.
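
Not part of the commit, but a minimal sketch of how a caller holding the database
lock might use the new read-only accessors; the function name is made up, and it
assumes a crate that depends on the `db` crate:

```rust
// Sketch only: dumps every signal and all recorded state changes.
use db::{recording, signal};
use failure::Error;

fn dump_signals(db: &db::LockedDatabase) -> Result<(), Error> {
    for (&id, s) in db.signals_by_id() {
        println!("signal {}: {} (type {})", id, s.short_name, s.type_);
    }
    let all_time = recording::Time(i64::min_value())..recording::Time(i64::max_value());
    db.list_changes_by_time(all_time, &mut |row: &signal::ListStateChangesRow| {
        println!("{:?}: signal {} -> state {}", row.when, row.signal, row.state);
    })
}
```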


@@ -38,6 +38,7 @@ pub mod dir;
 mod raw;
 pub mod recording;
 mod schema;
+pub mod signal;
 pub mod upgrade;
 pub mod writer;
@@ -46,3 +47,4 @@ pub mod writer;
 pub mod testutil;

 pub use crate::db::*;
+pub use crate::signal::Signal;


@@ -396,5 +396,88 @@ create table user_session (
create index user_session_uid on user_session (user_id);

create table signal (
  id integer primary key,

  -- a uuid describing the originating object, such as the uuid of the camera
  -- for built-in motion detection. There will be a JSON interface for adding
  -- events; it will require this UUID to be supplied. An external uuid might
  -- indicate "my house security system's zone 23".
  source_uuid blob not null check (length(source_uuid) = 16),

  -- a uuid describing the type of event. A registry (TBD) will list built-in
  -- supported types, such as "Hikvision on-camera motion detection", or
  -- "ONVIF on-camera motion detection". External programs can use their own
  -- uuids, such as "Elk security system watcher".
  type_uuid blob not null check (length(type_uuid) = 16),

  -- a short human-readable description of the event to use in mouseovers or event
  -- lists, such as "driveway motion" or "front door open".
  short_name not null,

  unique (source_uuid, type_uuid)
);

-- e.g. "moving/still", "disarmed/away/stay", etc.
-- TODO: just do a protobuf for each type? might be simpler, more flexible.
create table signal_type_enum (
  type_uuid blob not null check (length(type_uuid) = 16),
  value integer not null check (value > 0 and value < 16),
  name text not null,

  -- true/1 iff this signal value should be considered "motion" for directly associated cameras.
  motion int not null check (motion in (0, 1)) default 0,

  color text
);

-- Associations between event sources and cameras.
-- For example, if two cameras have overlapping fields of view, they might be
-- configured such that each camera is associated with both its own motion and
-- the other camera's motion.
create table signal_camera (
  signal_id integer references signal (id),
  camera_id integer references camera (id),

  -- type:
  --
  -- 0 means direct association, as if the event source is the camera's own
  -- motion detection. Here are a couple ways this could be used:
  --
  -- * when viewing the camera, hotkeys to go to the start of the next or
  --   previous event should respect this event.
  -- * a list of events might include the recordings associated with the
  --   camera in the same timespan.
  --
  -- 1 means indirect association. A screen associated with the camera should
  -- give some indication of this event, but there should be no assumption
  -- that the camera will have a direct view of the event. For example, all
  -- cameras might be indirectly associated with a doorknob press. Cameras at
  -- the back of the house shouldn't be expected to have a direct view of this
  -- event, but motion events shortly afterward might warrant extra scrutiny.
  type integer not null,

  primary key (signal_id, camera_id)
) without rowid;

-- State of signals as of a given timestamp.
create table signal_state (
  -- seconds since 1970-01-01 00:00:00 UTC.
  time_sec integer primary key,

  -- Changes at this timestamp.
  --
  -- It's possible for a single signal to have multiple states; this means
  -- that the signal held a state only momentarily.
  --
  -- A blob of varints representing a list of (signal number delta, state)
  -- pairs. For example,
  --   input signals: 1 3 3 200 (must be sorted)
  --   deltas:        1 2 0 197 (must be non-negative)
  --   states:        1 1 0   2
  --   varint: \x01 \x01 \x02 \x01 \x00 \x00 \xc5 \x01 \x02
  changes blob
);

insert into version (id, unix_time, notes)
-    values (3, cast(strftime('%s', 'now') as int), 'db creation');
+    values (4, cast(strftime('%s', 'now') as int), 'db creation');
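
As an aside, here is a standalone sketch (not part of the commit) of the
`signal_state.changes` encoding described in the comments above. It assumes the
varints are ordinary unsigned base-128 varints (LEB128), which matches the worked
example in the comment; the real code uses the db crate's `coding` helpers instead.

```rust
// Encodes (signal number, state) pairs as the delta-coded varint blob described above.
fn append_varint32(mut v: u32, out: &mut Vec<u8>) {
    // Unsigned LEB128: 7 bits per byte, high bit set on all but the final byte.
    while v >= 0x80 {
        out.push((v as u8 & 0x7f) | 0x80);
        v >>= 7;
    }
    out.push(v as u8);
}

/// Encodes (signal, state) pairs; the pairs must be sorted by signal number.
fn encode_changes(changes: &[(u32, u16)]) -> Vec<u8> {
    let mut out = Vec::new();
    let mut last_signal = 0;
    for &(signal, state) in changes {
        append_varint32(signal - last_signal, &mut out);  // non-negative delta
        append_varint32(u32::from(state), &mut out);
        last_signal = signal;
    }
    out
}

fn main() {
    // The example from the comment: signals 1, 3, 3, 200 with states 1, 1, 0, 2.
    let blob = encode_changes(&[(1, 1), (3, 1), (3, 0), (200, 2)]);
    assert_eq!(blob, b"\x01\x01\x02\x01\x00\x00\xc5\x01\x02".to_vec());
}
```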

db/signal.rs (new file, 360 lines)

@@ -0,0 +1,360 @@
// This file is part of Moonfire NVR, a security camera network video recorder.
// Copyright (C) 2019 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use crate::coding;
use crate::db::FromSqlUuid;
use crate::recording;
use failure::{Error, bail, format_err};
use fnv::FnvHashMap;
use rusqlite::{Connection, types::ToSql};
use std::collections::BTreeMap;
use std::ops::Range;
use uuid::Uuid;
/// All state associated with signals. This is the entry point to this module.
pub(crate) struct State {
signals_by_id: BTreeMap<u32, Signal>,
types_by_uuid: FnvHashMap<Uuid, Type>,
points_by_time: BTreeMap<u32, Point>,
}
struct Point {
data: Vec<u8>,
changes_off: usize,
}
impl Point {
fn new(cur: &BTreeMap<u32, u16>, changes: &[u8]) -> Self {
let mut data = Vec::with_capacity(changes.len());
let mut last_signal = 0;
for (&signal, &state) in cur {
let delta = (signal - last_signal) as u32;
coding::append_varint32(delta, &mut data);
coding::append_varint32(state as u32, &mut data);
last_signal = signal;
}
let changes_off = data.len();
data.extend(changes);
Point {
data,
changes_off,
}
}
fn cur(&self) -> PointDataIterator {
PointDataIterator::new(&self.data[0..self.changes_off])
}
fn changes(&self) -> PointDataIterator {
PointDataIterator::new(&self.data[self.changes_off..])
}
}
struct PointDataIterator<'a> {
data: &'a [u8],
cur_pos: usize,
cur_signal: u32,
cur_state: u16,
}
impl<'a> PointDataIterator<'a> {
fn new(data: &'a [u8]) -> Self {
PointDataIterator {
data,
cur_pos: 0,
cur_signal: 0,
cur_state: 0,
}
}
fn next(&mut self) -> Result<Option<(u32, u16)>, Error> {
if self.cur_pos == self.data.len() {
return Ok(None);
}
let (signal_delta, p) = coding::decode_varint32(self.data, self.cur_pos)
.map_err(|()| format_err!("varint32 decode failure; data={:?} pos={}",
self.data, self.cur_pos))?;
let (state, p) = coding::decode_varint32(self.data, p)
.map_err(|()| format_err!("varint32 decode failure; data={:?} pos={}",
self.data, p))?;
let signal = self.cur_signal.checked_add(signal_delta)
.ok_or_else(|| format_err!("signal overflow: {} + {}",
self.cur_signal, signal_delta))?;
if state > u16::max_value() as u32 {
bail!("state overflow: {}", state);
}
self.cur_pos = p;
self.cur_signal = signal;
self.cur_state = state as u16;
Ok(Some((signal, self.cur_state)))
}
}
/// Representation of a `signal_camera` row.
/// `signal_id` is implied by the `Signal` which owns this struct.
#[derive(Debug)]
pub struct SignalCamera {
pub camera_id: i32,
pub type_: SignalCameraType,
}
/// Representation of the `type` field in a `signal_camera` row.
#[derive(Debug)]
pub enum SignalCameraType {
Direct = 0,
Indirect = 1,
}
#[derive(Debug)]
pub struct ListStateChangesRow {
pub when: recording::Time,
pub signal: u32,
pub state: u16,
}
impl State {
pub fn init(conn: &Connection) -> Result<Self, Error> {
let mut signals_by_id = State::init_signals(conn)?;
State::fill_signal_cameras(conn, &mut signals_by_id)?;
Ok(State {
signals_by_id,
types_by_uuid: State::init_types(conn)?,
points_by_time: State::init_points(conn)?,
})
}
pub fn list_changes_by_time(
&self, desired_time: Range<recording::Time>, f: &mut FnMut(&ListStateChangesRow))
-> Result<(), Error> {
// Convert the desired range to seconds, rounding the end up so any partial final second is included.
let start = desired_time.start.unix_seconds() as u32;
let mut end = desired_time.end.unix_seconds();
end += ((end * recording::TIME_UNITS_PER_SEC) < desired_time.end.0) as i64;
let end = end as u32;
// First find the state immediately before. If it exists, include it.
if let Some((&t, p)) = self.points_by_time.range(..start).next_back() {
let mut cur = BTreeMap::new();
let mut it = p.cur();
while let Some((signal, state)) = it.next()? {
cur.insert(signal, state);
}
let mut it = p.changes();
while let Some((signal, state)) = it.next()? {
if state == 0 {
cur.remove(&signal);
} else {
cur.insert(signal, state);
}
}
for (&signal, &state) in &cur {
f(&ListStateChangesRow {
when: recording::Time(t as i64 * recording::TIME_UNITS_PER_SEC),
signal,
state,
});
}
}
// Then include changes up to (but not including) the end time.
for (&t, p) in self.points_by_time.range(start..end) {
let mut it = p.changes();
while let Some((signal, state)) = it.next()? {
f(&ListStateChangesRow {
when: recording::Time(t as i64 * recording::TIME_UNITS_PER_SEC),
signal,
state,
});
}
}
Ok(())
}
fn init_signals(conn: &Connection) -> Result<BTreeMap<u32, Signal>, Error> {
let mut signals = BTreeMap::new();
let mut stmt = conn.prepare(r#"
select
id,
source_uuid,
type_uuid,
short_name
from
signal
"#)?;
let mut rows = stmt.query(&[] as &[&ToSql])?;
while let Some(row) = rows.next()? {
let id = row.get(0)?;
let source: FromSqlUuid = row.get(1)?;
let type_: FromSqlUuid = row.get(2)?;
signals.insert(id, Signal {
id,
source: source.0,
type_: type_.0,
short_name: row.get(3)?,
cameras: Vec::new(),
});
}
Ok(signals)
}
fn init_points(conn: &Connection) -> Result<BTreeMap<u32, Point>, Error> {
let mut stmt = conn.prepare(r#"
select
time_sec,
changes
from
signal_state
order by time_sec
"#)?;
let mut rows = stmt.query(&[] as &[&ToSql])?;
let mut points = BTreeMap::new();
let mut cur = BTreeMap::new(); // latest signal -> state, where state != 0
while let Some(row) = rows.next()? {
let time_sec = row.get(0)?;
let changes = row.get_raw_checked(1)?.as_blob()?;
let mut it = PointDataIterator::new(changes);
while let Some((signal, state)) = it.next()? {
if state == 0 {
cur.remove(&signal);
} else {
cur.insert(signal, state);
}
}
points.insert(time_sec, Point::new(&cur, changes));
}
Ok(points)
}
/// Fills the `cameras` field of the `Signal` structs within the supplied `signals`.
fn fill_signal_cameras(conn: &Connection, signals: &mut BTreeMap<u32, Signal>)
-> Result<(), Error> {
let mut stmt = conn.prepare(r#"
select
signal_id,
camera_id,
type
from
signal_camera
order by signal_id, camera_id
"#)?;
let mut rows = stmt.query(&[] as &[&ToSql])?;
while let Some(row) = rows.next()? {
let signal_id = row.get(0)?;
let s = signals.get_mut(&signal_id)
.ok_or_else(|| format_err!("signal_camera row for unknown signal id {}",
signal_id))?;
let type_ = row.get(2)?;
s.cameras.push(SignalCamera {
camera_id: row.get(1)?,
type_: match type_ {
0 => SignalCameraType::Direct,
1 => SignalCameraType::Indirect,
_ => bail!("unknown signal_camera type {}", type_),
},
});
}
Ok(())
}
fn init_types(conn: &Connection) -> Result<FnvHashMap<Uuid, Type>, Error> {
let mut types = FnvHashMap::default();
let mut stmt = conn.prepare(r#"
select
type_uuid,
value,
name,
motion,
color
from
signal_type_enum
order by type_uuid, value
"#)?;
let mut rows = stmt.query(&[] as &[&ToSql])?;
while let Some(row) = rows.next()? {
let type_: FromSqlUuid = row.get(0)?;
types.entry(type_.0).or_insert_with(Type::default).states.push(TypeState {
value: row.get(1)?,
name: row.get(2)?,
motion: row.get(3)?,
color: row.get(4)?,
});
}
Ok(types)
}
pub fn signals_by_id(&self) -> &BTreeMap<u32, Signal> { &self.signals_by_id }
pub fn types_by_uuid(&self) -> &FnvHashMap<Uuid, Type> { &self.types_by_uuid }
}
/// Representation of a `signal` row.
#[derive(Debug)]
pub struct Signal {
pub id: u32,
pub source: Uuid,
pub type_: Uuid,
pub short_name: String,
/// The cameras this signal is associated with. Sorted by camera id, which is unique.
pub cameras: Vec<SignalCamera>,
}
/// Representation of a `signal_type_enum` row.
/// `type_uuid` is implied by the `Type` which owns this struct.
#[derive(Debug)]
pub struct TypeState {
pub value: u16,
pub name: String,
pub motion: bool,
pub color: String,
}
/// Representation of a signal type; currently this just gathers together the TypeStates.
#[derive(Debug, Default)]
pub struct Type {
/// The possible states associated with this type. They are sorted by value, which is unique.
pub states: Vec<TypeState>,
}
#[cfg(test)]
mod tests {
#[test]
fn test_point_data_it() {
// Example taken from the .sql file.
let data = b"\x01\x01\x02\x01\x00\x00\xc5\x01\x02";
let mut it = super::PointDataIterator::new(data);
assert_eq!(it.next().unwrap(), Some((1, 1)));
assert_eq!(it.next().unwrap(), Some((3, 1)));
assert_eq!(it.next().unwrap(), Some((3, 0)));
assert_eq!(it.next().unwrap(), Some((200, 2)));
assert_eq!(it.next().unwrap(), None);
}
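// Additional test sketch (not part of the commit): round-trips a Point through
// its two iterators using only items defined in this module.
#[test]
fn test_point_round_trip() {
use std::collections::BTreeMap;
let mut cur = BTreeMap::new();
cur.insert(1u32, 1u16);  // signal 1 was in state 1 before this point.
cur.insert(3, 2);        // signal 3 was in state 2 before this point.
let changes = b"\x05\x01";  // at this point, signal 5 changes to state 1.
let p = super::Point::new(&cur, changes);
let mut it = p.cur();
assert_eq!(it.next().unwrap(), Some((1, 1)));
assert_eq!(it.next().unwrap(), Some((3, 2)));
assert_eq!(it.next().unwrap(), None);
let mut it = p.changes();
assert_eq!(it.next().unwrap(), Some((5, 1)));
assert_eq!(it.next().unwrap(), None);
}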
}


@@ -40,6 +40,7 @@ use rusqlite::types::ToSql;
 mod v0_to_v1;
 mod v1_to_v2;
 mod v2_to_v3;
+mod v3_to_v4;

 const UPGRADE_NOTES: &'static str =
     concat!("upgraded using moonfire-db ", env!("CARGO_PKG_VERSION"));
@@ -64,6 +65,7 @@ pub fn run(args: &Args, conn: &mut rusqlite::Connection) -> Result<(), Error> {
         v0_to_v1::run,
         v1_to_v2::run,
         v2_to_v3::run,
+        v3_to_v4::run,
     ];
     {

db/upgrade/v3_to_v4.rs (new file, 67 lines)

@@ -0,0 +1,67 @@
// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2018 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
/// Upgrades a version 3 schema to a version 4 schema.
use failure::Error;
pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error> {
// These create statements match the schema.sql when version 4 was the latest.
tx.execute_batch(r#"
create table signal (
id integer primary key,
source_uuid blob not null check (length(source_uuid) = 16),
type_uuid blob not null check (length(type_uuid) = 16),
short_name not null,
unique (source_uuid, type_uuid)
);
create table signal_type_enum (
type_uuid blob not null check (length(type_uuid) = 16),
value integer not null check (value > 0 and value < 16),
name text not null,
motion int not null check (motion in (0, 1)) default 0,
color text
);
create table signal_camera (
signal_id integer references signal (id),
camera_id integer references camera (id),
type integer not null,
primary key (signal_id, camera_id)
) without rowid;
create table signal_state (
time_sec integer primary key,
changes blob
);
"#)?;
Ok(())
}


@@ -13,6 +13,12 @@ In the future, this is likely to be expanded:
  (at least for bootstrapping web authentication)
* mobile interface

## Terminology

*signal:* a timeseries with an enum value. Signals might represent a camera's
motion detection or day/night status. They could also represent an external
input such as a burglar alarm system's zone status.

## Detailed design

All requests for JSON data should be sent with the header
@@ -88,6 +94,21 @@ The `application/json` response will have a dict as follows:
        time zone. It is usually 24 hours after the start time. It
        might be 23 hours or 25 hours during spring forward or fall
        back, respectively.
* `signals`: a map of all signals known to the server. Keys are ids. Values are
  dictionaries with the following properties:
    * `shortName`: a unique, human-readable description of the signal
    * `cameras`: a map of associated cameras' UUIDs to the type of association:
      `direct` or `indirect`. See `db/schema.sql` for more description.
    * `type`: a UUID, expected to match one of `signalTypes`.
    * `days`: as in `cameras.streams.days` above.
* `signalTypes`: a list of all known signal types.
    * `uuid`: in text format.
    * `states`: a map of all possible states of the enumeration to more
      information about them:
        * `color`: a recommended color to use in UIs to represent this state,
          as in the [HTML specification](https://html.spec.whatwg.org/#colours).
        * `motion`: if present and true, directly associated cameras will be
          considered to have motion when this signal is in this state.
* `session`: if logged in, a dict with the following properties:
    * `username`
    * `csrf`: a cross-site request forgery token for use in `POST` requests.
@@ -126,9 +147,45 @@ Example response:
    },
    ...
  ],
  "signals": {
    "1": {
      "shortName": "driveway motion",
      "cameras": {
        "fd20f7a2-9d69-4cb3-94ed-d51a20c3edfe": "direct"
      },
      "type": "ee66270f-d9c6-4819-8b33-9720d4cbca6b",
      "days": {
        "2016-05-01": {
          "endTime90k": 131595516000000,
          "startTime90k": 131587740000000,
          "totalDuration90k": 5400000
        }
      }
    }
  },
  "signalTypes": [
    {
      "uuid": "ee66270f-d9c6-4819-8b33-9720d4cbca6b",
      "states": {
        "0": {
          "name": "unknown",
          "color": "#000000"
        },
        "1": {
          "name": "off",
          "color": "#888888"
        },
        "2": {
          "name": "on",
          "color": "#ff8888",
          "motion": true
        }
      }
    }
  ],
  "session": {
    "username": "slamb",
    "csrf": "2DivvlnKUQ9JD4ao6YACBJm8XK4bFmOc"
  }
}
```
@@ -182,9 +239,6 @@ Valid request parameters:
      server should return a `continue` key which is expected to be returned on
      following requests.)

-TODO(slamb): once we support annotations, should they be included in the same
-URI or as a separate `/annotations`?

In the property `recordings`, returns a list of recordings in arbitrary order.
Each recording object has the following properties:
@@ -420,6 +474,54 @@ a `codecs` parameter as specified in [RFC 6381][rfc-6381].
A GET returns a `text/plain` debugging string for the `.mp4` generated by the
same URL minus the `.txt` suffix.

### `/api/signals`

A GET returns an `application/json` response with the state of every signal
for the requested timespan.

Valid request parameters:

* `startTime90k` and `endTime90k` limit the data returned to only
  events relevant to the given half-open interval. Either or both
  may be absent; they default to the beginning and end of time, respectively.

This will return the current state as of the latest change (to any signal)
before the start time (if any), then all changes in the interval. This
allows the caller to determine the state at every moment during the
selected timespan, as well as observe all events (even instantaneous
ones).

The response contains several parallel arrays, one entry per observation:

* `times90k`: the time of each event. Events are given in ascending order.
* `signalIds`: the id of the relevant signal; expected to match one in the
  `signals` field of the `/api/` response.
* `states`: the new state.

Example request URI (with added whitespace between parameters):

```
/api/signals
?startTime90k=130888729442361
&endTime90k=130985466591817
```

Example response:

```json
{
  "signalIds": [1, 1, 1],
  "states": [1, 2, 1],
  "times90k": [130888729440000, 130985418600000, 130985424000000]
}
```

This represents the following observations:

1. time 130888729440000 was the last change before the requested start;
   signal 1 (`driveway motion`) was in state 1 (`off`).
2. signal 1 entered state 2 (`on`) at time 130985418600000.
3. signal 1 entered state 1 (`off`) at time 130985424000000.
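
A small client-side sketch (not part of the commit) of how these parallel arrays
might be folded into a per-signal timeline; the struct mirrors the JSON fields
above but is otherwise hypothetical:

```rust
use std::collections::BTreeMap;

/// Deserialized form of the `/api/signals` response shown above.
struct SignalsResponse {
    times_90k: Vec<i64>,   // "times90k"
    signal_ids: Vec<u32>,  // "signalIds"
    states: Vec<u16>,      // "states"
}

/// Groups observations by signal id as (time90k, state) pairs in ascending time
/// order. Each state holds from its time until the signal's next observation.
fn by_signal(r: &SignalsResponse) -> BTreeMap<u32, Vec<(i64, u16)>> {
    let mut out: BTreeMap<u32, Vec<(i64, u16)>> = BTreeMap::new();
    for i in 0..r.times_90k.len() {
        out.entry(r.signal_ids[i]).or_default().push((r.times_90k[i], r.states[i]));
    }
    out
}
```
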
[media-segment]: https://w3c.github.io/media-source/isobmff-byte-stream-format.html#iso-media-segments
[init-segment]: https://w3c.github.io/media-source/isobmff-byte-stream-format.html#iso-init-segments
[rfc-6381]: https://tools.ietf.org/html/rfc6381


@@ -31,7 +31,7 @@
 use db::auth::SessionHash;
 use failure::{Error, format_err};
 use serde::Serialize;
-use serde::ser::{SerializeMap, SerializeSeq, Serializer};
+use serde::ser::{Error as _, SerializeMap, SerializeSeq, Serializer};
 use std::collections::BTreeMap;
 use std::ops::Not;
 use uuid::Uuid;
@@ -46,7 +46,14 @@ pub struct TopLevel<'a> {
     #[serde(serialize_with = "TopLevel::serialize_cameras")]
     pub cameras: (&'a db::LockedDatabase, bool),

+    #[serde(skip_serializing_if = "Option::is_none")]
     pub session: Option<Session>,

+    #[serde(serialize_with = "TopLevel::serialize_signals")]
+    pub signals: (&'a db::LockedDatabase, bool),

+    #[serde(serialize_with = "TopLevel::serialize_signal_types")]
+    pub signal_types: &'a db::LockedDatabase,
 }

 #[derive(Debug, Serialize)]
@@ -94,6 +101,44 @@ pub struct Stream<'a> {
     pub days: Option<&'a BTreeMap<db::StreamDayKey, db::StreamDayValue>>,
 }

+#[derive(Serialize)]
+#[serde(rename_all="camelCase")]
+pub struct Signal<'a> {
+    #[serde(serialize_with = "Signal::serialize_cameras")]
+    pub cameras: (&'a db::Signal, &'a db::LockedDatabase),
+    pub source: Uuid,
+    pub type_: Uuid,
+    pub short_name: &'a str,
+}

+#[derive(Default, Serialize)]
+#[serde(rename_all="camelCase")]
+pub struct Signals {
+    pub times_90k: Vec<i64>,
+    pub signal_ids: Vec<u32>,
+    pub states: Vec<u16>,
+}

+#[derive(Debug, Serialize)]
+#[serde(rename_all="camelCase")]
+pub struct SignalType<'a> {
+    pub uuid: Uuid,
+    #[serde(serialize_with = "SignalType::serialize_states")]
+    pub states: &'a db::signal::Type,
+}

+#[derive(Debug, Serialize)]
+#[serde(rename_all="camelCase")]
+pub struct SignalTypeState<'a> {
+    value: u16,
+    name: &'a str,
+    #[serde(skip_serializing_if = "Not::not")]
+    motion: bool,
+    color: &'a str,
+}

 impl<'a> Camera<'a> {
     pub fn wrap(c: &'a db::Camera, db: &'a db::LockedDatabase, include_days: bool) -> Result<Self, Error> {
         Ok(Camera {
@@ -158,6 +203,66 @@ impl<'a> Stream<'a> {
     }
 }

+impl<'a> Signal<'a> {
+    pub fn wrap(s: &'a db::Signal, db: &'a db::LockedDatabase, _include_days: bool) -> Self {
+        Signal {
+            cameras: (s, db),
+            source: s.source,
+            type_: s.type_,
+            short_name: &s.short_name,
+        }
+    }

+    fn serialize_cameras<S>(cameras: &(&db::Signal, &db::LockedDatabase),
+                            serializer: S) -> Result<S::Ok, S::Error>
+    where S: Serializer {
+        let (s, db) = cameras;
+        let mut map = serializer.serialize_map(Some(s.cameras.len()))?;
+        for sc in &s.cameras {
+            let c = db.cameras_by_id()
+                      .get(&sc.camera_id)
+                      .ok_or_else(|| S::Error::custom(format!("signal has missing camera id {}",
+                                                              sc.camera_id)))?;
+            map.serialize_key(&c.uuid)?;
+            map.serialize_value(match sc.type_ {
+                db::signal::SignalCameraType::Direct => "direct",
+                db::signal::SignalCameraType::Indirect => "indirect",
+            })?;
+        }
+        map.end()
+    }
+}

+impl<'a> SignalType<'a> {
+    pub fn wrap(uuid: Uuid, type_: &'a db::signal::Type) -> Self {
+        SignalType {
+            uuid,
+            states: type_,
+        }
+    }

+    fn serialize_states<S>(type_: &db::signal::Type,
+                           serializer: S) -> Result<S::Ok, S::Error>
+    where S: Serializer {
+        let mut seq = serializer.serialize_seq(Some(type_.states.len()))?;
+        for s in &type_.states {
+            seq.serialize_element(&SignalTypeState::wrap(s))?;
+        }
+        seq.end()
+    }
+}

+impl<'a> SignalTypeState<'a> {
+    pub fn wrap(s: &'a db::signal::TypeState) -> Self {
+        SignalTypeState {
+            value: s.value,
+            name: &s.name,
+            motion: s.motion,
+            color: &s.color,
+        }
+    }
+}

 #[derive(Debug, Serialize)]
 #[serde(rename_all="camelCase")]
 struct StreamDayValue {
@@ -175,7 +280,33 @@ impl<'a> TopLevel<'a> {
         let cs = db.cameras_by_id();
         let mut seq = serializer.serialize_seq(Some(cs.len()))?;
         for (_, c) in cs {
-            seq.serialize_element(&Camera::wrap(c, db, include_days).unwrap())?; // TODO: no unwrap.
+            seq.serialize_element(
+                &Camera::wrap(c, db, include_days).map_err(|e| S::Error::custom(e))?)?;
         }
         seq.end()
     }

+    /// Serializes signals as a list (rather than a map), optionally including the `days` field.
+    fn serialize_signals<S>(signals: &(&db::LockedDatabase, bool),
+                            serializer: S) -> Result<S::Ok, S::Error>
+    where S: Serializer {
+        let (db, include_days) = *signals;
+        let ss = db.signals_by_id();
+        let mut seq = serializer.serialize_seq(Some(ss.len()))?;
+        for (_, s) in ss {
+            seq.serialize_element(&Signal::wrap(s, db, include_days))?;
+        }
+        seq.end()
+    }

+    /// Serializes signal types as a list.
+    fn serialize_signal_types<S>(db: &db::LockedDatabase,
+                                 serializer: S) -> Result<S::Ok, S::Error>
+    where S: Serializer {
+        let ss = db.signal_types_by_uuid();
+        let mut seq = serializer.serialize_seq(Some(ss.len()))?;
+        for (u, t) in ss {
+            seq.serialize_element(&SignalType::wrap(*u, t))?;
         }
         seq.end()
     }


@@ -74,6 +74,7 @@ enum Path {
     Request,                                    // "/api/request"
     InitSegment([u8; 20], bool),                // "/api/init/<sha1>.mp4{.txt}"
     Camera(Uuid),                               // "/api/cameras/<uuid>/"
+    Signals,                                    // "/api/signals"
     StreamRecordings(Uuid, db::StreamType),     // "/api/cameras/<uuid>/<type>/recordings"
     StreamViewMp4(Uuid, db::StreamType, bool),  // "/api/cameras/<uuid>/<type>/view.mp4{.txt}"
     StreamViewMp4Segment(Uuid, db::StreamType, bool),  // "/api/cameras/<uuid>/<type>/view.m4s{.txt}"
@@ -94,9 +95,10 @@ impl Path {
             return Path::TopLevel;
         }
         match path {
-            "/request" => return Path::Request,
             "/login" => return Path::Login,
             "/logout" => return Path::Logout,
+            "/request" => return Path::Request,
+            "/signals" => return Path::Signals,
             _ => {},
         };
         if path.starts_with("/init/") {
@@ -251,6 +253,16 @@ struct ServiceInner {

 type ResponseResult = Result<Response<Body>, Response<Body>>;

+fn serve_json<T: serde::ser::Serialize>(req: &Request<hyper::Body>, out: &T) -> ResponseResult {
+    let (mut resp, writer) = http_serve::streaming_body(&req).build();
+    resp.headers_mut().insert(header::CONTENT_TYPE,
+                              HeaderValue::from_static("application/json"));
+    if let Some(mut w) = writer {
+        serde_json::to_writer(&mut w, out).map_err(internal_server_err)?;
+    }
+    Ok(resp)
+}

 impl ServiceInner {
     fn top_level(&self, req: &Request<::hyper::Body>, session: Option<json::Session>)
                  -> ResponseResult {
@@ -265,34 +277,21 @@ impl ServiceInner {
             }
         }
-        let (mut resp, writer) = http_serve::streaming_body(&req).build();
-        resp.headers_mut().insert(header::CONTENT_TYPE,
-                                  HeaderValue::from_static("application/json"));
-        if let Some(mut w) = writer {
         let db = self.db.lock();
-            serde_json::to_writer(&mut w, &json::TopLevel {
+        serve_json(req, &json::TopLevel {
             time_zone_name: &self.time_zone_name,
             cameras: (&db, days),
             session,
-            }).map_err(internal_server_err)?;
-        }
-        Ok(resp)
+            signals: (&db, days),
+            signal_types: &db,
+        })
     }

     fn camera(&self, req: &Request<::hyper::Body>, uuid: Uuid) -> ResponseResult {
-        let (mut resp, writer) = http_serve::streaming_body(&req).build();
-        resp.headers_mut().insert(header::CONTENT_TYPE,
-                                  HeaderValue::from_static("application/json"));
-        if let Some(mut w) = writer {
         let db = self.db.lock();
         let camera = db.get_camera(uuid)
             .ok_or_else(|| not_found(format!("no such camera {}", uuid)))?;
-            serde_json::to_writer(
-                &mut w,
-                &json::Camera::wrap(camera, &db, true).map_err(internal_server_err)?
-            ).map_err(internal_server_err)?
-        };
-        Ok(resp)
+        serve_json(req, &json::Camera::wrap(camera, &db, true).map_err(internal_server_err)?)
     }

     fn stream_recordings(&self, req: &Request<::hyper::Body>, uuid: Uuid, type_: db::StreamType)
@@ -351,13 +350,7 @@
             Ok(())
         }).map_err(internal_server_err)?;
         }
-        let (mut resp, writer) = http_serve::streaming_body(&req).build();
-        resp.headers_mut().insert(header::CONTENT_TYPE,
-                                  HeaderValue::from_static("application/json"));
-        if let Some(mut w) = writer {
-            serde_json::to_writer(&mut w, &out).map_err(internal_server_err)?
-        };
-        Ok(resp)
+        serve_json(req, &out)
     }

     fn init_segment(&self, sha1: [u8; 20], debug: bool, req: &Request<::hyper::Body>)
@@ -636,6 +629,34 @@ impl ServiceInner {
         Ok(res)
     }

+    fn signals(&self, req: &Request<hyper::Body>) -> ResponseResult {
+        let mut time = recording::Time(i64::min_value()) .. recording::Time(i64::max_value());
+        if let Some(q) = req.uri().query() {
+            for (key, value) in form_urlencoded::parse(q.as_bytes()) {
+                let (key, value) = (key.borrow(), value.borrow());
+                match key {
+                    "startTime90k" => {
+                        time.start = recording::Time::parse(value)
+                            .map_err(|_| bad_req("unparseable startTime90k"))?
+                    },
+                    "endTime90k" => {
+                        time.end = recording::Time::parse(value)
+                            .map_err(|_| bad_req("unparseable endTime90k"))?
+                    },
+                    _ => {},
+                }
+            }
+        }
+        let mut signals = json::Signals::default();
+        self.db.lock().list_changes_by_time(time, &mut |c: &db::signal::ListStateChangesRow| {
+            signals.times_90k.push(c.when.0);
+            signals.signal_ids.push(c.signal);
+            signals.states.push(c.state);
+        }).map_err(internal_server_err)?;
+        serve_json(req, &signals)
+    }

     fn authenticated(&self, req: &Request<hyper::Body>) -> Result<Option<json::Session>, Error> {
         if let Some(sid) = extract_sid(req) {
             let authreq = self.authreq(req);
@@ -950,6 +971,7 @@ impl ::hyper::service::Service for Service {
                 let s = self.clone();
                 move |(req, b)| { s.0.logout(&req, b) }
             })),
+            Path::Signals => wrap_r(true, self.0.signals(&req)),
             Path::Static => wrap_r(false, self.0.static_file(&req, req.uri().path())),
         }
     }
@@ -1098,6 +1120,7 @@ mod tests {
                    Path::NotFound);
         assert_eq!(Path::decode("/api/login"), Path::Login);
         assert_eq!(Path::decode("/api/logout"), Path::Logout);
+        assert_eq!(Path::decode("/api/signals"), Path::Signals);
         assert_eq!(Path::decode("/api/junk"), Path::NotFound);
     }