From d083797e42101963687cd5004361875d334ab19c Mon Sep 17 00:00:00 2001 From: Scott Lamb Date: Tue, 10 May 2016 17:37:53 -0700 Subject: [PATCH] Coalesce adjacent recordings for efficiency --- design/api.md | 14 ++++++----- src/web.cc | 67 ++++++++++++++++++++++++++++++--------------------- src/web.h | 6 +++++ 3 files changed, 53 insertions(+), 34 deletions(-) diff --git a/design/api.md b/design/api.md index a67f649..dc62cff 100644 --- a/design/api.md +++ b/design/api.md @@ -141,13 +141,15 @@ object has the following properties: * `video_sample_entry_sha1` * `video_sample_entry_width` * `video_sample_entry_height` +* `video_samples`: the number of samples (aka frames) of video in this + recording. +* TODO: recording id(s)? interior split points for coalesced recordings? -TODO(slamb): consider ways to reduce the data size; this is in theory quite -compressible but I'm not sure how effective gzip will be without some tweaks. -One simple approach would be to just combine some adjacent list entries if -one's start matches the other's end exactly and the `video_sample_entry_*` -parameters are the same. So you might get one entry that represents 2 hours of -video instead of 120 entries representing a minute each. +Recordings may be coalesced if they are adjacent and have the same +`video_sample_entry_*` data. That is, if recording A spans times [t, u) and +recording B spans times [u, v), they may be returned as a single recording +AB spanning times [t, v). Arbitrarily many recordings may be coalesced in this +fashion. 
Example request URI (with added whitespace between parameters): diff --git a/src/web.cc b/src/web.cc index da12741..4d4f843 100644 --- a/src/web.cc +++ b/src/web.cc @@ -193,6 +193,37 @@ void WebInterface::HandleJsonCameraList(evhttp_request *req) { ReplyWithJson(req, cameras); } +bool WebInterface::ListAggregatedCameraRecordings( + Uuid camera_uuid, int64_t start_time_90k, int64_t end_time_90k, + int64_t forced_split_duration_90k, + const std::function<void(const ListCameraRecordingsRow &)> &fn, + std::string *error_message) { + ListCameraRecordingsRow aggregated; + auto handle_sql_row = [&](const ListCameraRecordingsRow &row) { + auto new_duration_90k = aggregated.end_time_90k - row.start_time_90k; + if (row.video_sample_entry_sha1 == aggregated.video_sample_entry_sha1 && + row.end_time_90k == aggregated.start_time_90k && + new_duration_90k < forced_split_duration_90k) { + // Append to current .mp4. + aggregated.start_time_90k = row.start_time_90k; + aggregated.video_samples += row.video_samples; + aggregated.sample_file_bytes += row.sample_file_bytes; + } else { + // Start a new .mp4. + if (aggregated.start_time_90k != -1) { fn(aggregated); } + aggregated = row; + } + return IterationControl::kContinue; + }; + if (!env_->mdb->ListCameraRecordings(camera_uuid, start_time_90k, + end_time_90k, handle_sql_row, + error_message)) { + return false; + } + if (aggregated.start_time_90k != -1) { fn(aggregated); } + return true; +} + void WebInterface::HandleHtmlCameraDetail(evhttp_request *req, Uuid camera_uuid) { GetCameraRow camera_row; @@ -234,11 +265,7 @@ void WebInterface::HandleHtmlCameraDetail(evhttp_request *req, // aggregated .mp4 files of up to kForceSplitDuration90k each, provided // there is no gap or change in video parameters between recordings. static const int64_t kForceSplitDuration90k = 60 * 60 * kTimeUnitsPerSecond; - ListCameraRecordingsRow aggregated; - auto maybe_finish_html_row = [&]() { - if (aggregated.start_time_90k == -1) { - return; // there is no row to finish.
- } + auto finish_html_row = [&](const ListCameraRecordingsRow &aggregated) { auto seconds = static_cast<double>(aggregated.end_time_90k - aggregated.start_time_90k) / kTimeUnitsPerSecond; @@ -258,31 +285,14 @@ void WebInterface::HandleHtmlCameraDetail(evhttp_request *req, "bps") .c_str()); }; - auto handle_sql_row = [&](const ListCameraRecordingsRow &row) { - auto new_duration_90k = aggregated.end_time_90k - row.start_time_90k; - if (row.video_sample_entry_sha1 == aggregated.video_sample_entry_sha1 && - row.end_time_90k == aggregated.start_time_90k && - new_duration_90k < kForceSplitDuration90k) { - // Append to current .mp4. - aggregated.start_time_90k = row.start_time_90k; - aggregated.video_samples += row.video_samples; - aggregated.sample_file_bytes += row.sample_file_bytes; - } else { - // Start a new .mp4. - maybe_finish_html_row(); - aggregated = row; - } - return IterationControl::kContinue; - }; std::string error_message; - if (!env_->mdb->ListCameraRecordings(camera_uuid, start_time_90k, - end_time_90k, handle_sql_row, - &error_message)) { + if (!ListAggregatedCameraRecordings(camera_uuid, start_time_90k, + end_time_90k, kForceSplitDuration90k, + finish_html_row, &error_message)) { return evhttp_send_error( req, HTTP_INTERNAL, StrCat("sqlite query failed: ", EscapeHtml(error_message)).c_str()); } - maybe_finish_html_row(); buf.Add( "</table>\n" "</html>\n"); @@ -366,9 +376,10 @@ void WebInterface::HandleJsonCameraRecordings(evhttp_request *req, return IterationControl::kContinue; }; std::string error_message; - if (!env_->mdb->ListCameraRecordings(camera_uuid, start_time_90k, - end_time_90k, handle_row, - &error_message)) { + const auto kForceSplitDuration90k = std::numeric_limits<int64_t>::max(); + if (!ListAggregatedCameraRecordings(camera_uuid, start_time_90k, + end_time_90k, kForceSplitDuration90k, + handle_row, &error_message)) { return evhttp_send_error( req, HTTP_INTERNAL, StrCat("sqlite query failed: ", EscapeHtml(error_message)).c_str()); diff --git a/src/web.h b/src/web.h
index b5a11de..b4a500c 100644 --- a/src/web.h +++ b/src/web.h @@ -72,6 +72,12 @@ class WebInterface { void HandleJsonCameraRecordings(evhttp_request *req, Uuid camera_uuid); void HandleMp4View(evhttp_request *req, Uuid camera_uuid); + bool ListAggregatedCameraRecordings( + Uuid camera_uuid, int64_t start_time_90k, int64_t end_time_90k, + int64_t forced_split_duration_90k, + const std::function<void(const ListCameraRecordingsRow &)> &fn, + std::string *error_message); + // TODO: more nuanced error code for HTTP. std::shared_ptr<VirtualFile> BuildMp4(Uuid camera_uuid, int64_t start_time_90k,