Coalesce adjacent recordings for efficiency

Scott Lamb 2016-05-10 17:37:53 -07:00
parent b27df92cac
commit d083797e42
3 changed files with 53 additions and 34 deletions


@@ -141,13 +141,15 @@ object has the following properties:
* `video_sample_entry_sha1`
* `video_sample_entry_width`
* `video_sample_entry_height`
* `video_samples`: the number of samples (aka frames) of video in this
recording.
* TODO: recording id(s)? interior split points for coalesced recordings?
TODO(slamb): consider ways to reduce the data size; this is in theory quite
compressible but I'm not sure how effective gzip will be without some tweaks.
One simple approach would be to just combine some adjacent list entries if
one's start matches the other's end exactly and the `video_sample_entry_*`
parameters are the same. So you might get one entry that represents 2 hours of
video instead of 120 entries representing a minute each.
Recordings may be coalesced if they are adjacent and have the same
`video_sample_entry_*` data. That is, if recording A spans times [t, u) and
recording B spans times [u, v), they may be returned as a single recording
AB spanning times [t, v). Arbitrarily many recordings may be coalesced in this
fashion.
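As a sketch of this rule (the `Entry` struct and `CanCoalesce` helper below are
illustrative only, not part of the API or of this commit; only the fields the
rule inspects are shown):

    #include <cstdint>
    #include <string>

    // A pared-down recording entry: just the fields the coalescing rule reads.
    struct Entry {
      int64_t start_time_90k;  // start of the span, in the API's 90k time units
      int64_t end_time_90k;    // end of the span (exclusive)
      std::string video_sample_entry_sha1;
    };

    // A and B may be coalesced when A ends exactly where B begins and they
    // share a video sample entry, identified here by SHA-1 as in the handler
    // code below.
    bool CanCoalesce(const Entry &a, const Entry &b) {
      return a.end_time_90k == b.start_time_90k &&
             a.video_sample_entry_sha1 == b.video_sample_entry_sha1;
    }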
Example request URI (with added whitespace between parameters):


@@ -193,6 +193,37 @@ void WebInterface::HandleJsonCameraList(evhttp_request *req) {
ReplyWithJson(req, cameras);
}
bool WebInterface::ListAggregatedCameraRecordings(
Uuid camera_uuid, int64_t start_time_90k, int64_t end_time_90k,
int64_t forced_split_duration_90k,
const std::function<void(const ListCameraRecordingsRow &)> &fn,
std::string *error_message) {
ListCameraRecordingsRow aggregated;
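// Note (inferred from the logic below): rows are merged backward in time; each
// incoming row must end exactly where the current aggregate starts, so this
// relies on ListCameraRecordings yielding rows in descending time order.
// A start_time_90k of -1 (checked below) means no aggregate is in progress.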
auto handle_sql_row = [&](const ListCameraRecordingsRow &row) {
auto new_duration_90k = aggregated.end_time_90k - row.start_time_90k;
if (row.video_sample_entry_sha1 == aggregated.video_sample_entry_sha1 &&
row.end_time_90k == aggregated.start_time_90k &&
new_duration_90k < forced_split_duration_90k) {
// Append to current .mp4.
aggregated.start_time_90k = row.start_time_90k;
aggregated.video_samples += row.video_samples;
aggregated.sample_file_bytes += row.sample_file_bytes;
} else {
// Start a new .mp4.
if (aggregated.start_time_90k != -1) { fn(aggregated); }
aggregated = row;
}
return IterationControl::kContinue;
};
if (!env_->mdb->ListCameraRecordings(camera_uuid, start_time_90k,
end_time_90k, handle_sql_row,
error_message)) {
return false;
}
if (aggregated.start_time_90k != -1) { fn(aggregated); }
return true;
}
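// Usage sketch (illustrative only, not part of this commit): a caller can
// collect the aggregated rows directly, here with no forced split:
//
//   std::vector<ListCameraRecordingsRow> rows;
//   std::string error_message;
//   if (!ListAggregatedCameraRecordings(
//           camera_uuid, start_time_90k, end_time_90k,
//           std::numeric_limits<int64_t>::max(),
//           [&](const ListCameraRecordingsRow &row) { rows.push_back(row); },
//           &error_message)) {
//     // ... report |error_message| to the client ...
//   }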
void WebInterface::HandleHtmlCameraDetail(evhttp_request *req,
Uuid camera_uuid) {
GetCameraRow camera_row;
@@ -234,11 +265,7 @@ void WebInterface::HandleHtmlCameraDetail(evhttp_request *req,
// aggregated .mp4 files of up to kForceSplitDuration90k each, provided
// there is no gap or change in video parameters between recordings.
static const int64_t kForceSplitDuration90k = 60 * 60 * kTimeUnitsPerSecond;
ListCameraRecordingsRow aggregated;
auto maybe_finish_html_row = [&]() {
if (aggregated.start_time_90k == -1) {
return; // there is no row to finish.
}
auto finish_html_row = [&](const ListCameraRecordingsRow &aggregated) {
auto seconds = static_cast<float>(aggregated.end_time_90k -
aggregated.start_time_90k) /
kTimeUnitsPerSecond;
@@ -258,31 +285,14 @@ void WebInterface::HandleHtmlCameraDetail(evhttp_request *req,
"bps")
.c_str());
};
auto handle_sql_row = [&](const ListCameraRecordingsRow &row) {
auto new_duration_90k = aggregated.end_time_90k - row.start_time_90k;
if (row.video_sample_entry_sha1 == aggregated.video_sample_entry_sha1 &&
row.end_time_90k == aggregated.start_time_90k &&
new_duration_90k < kForceSplitDuration90k) {
// Append to current .mp4.
aggregated.start_time_90k = row.start_time_90k;
aggregated.video_samples += row.video_samples;
aggregated.sample_file_bytes += row.sample_file_bytes;
} else {
// Start a new .mp4.
maybe_finish_html_row();
aggregated = row;
}
return IterationControl::kContinue;
};
std::string error_message;
if (!env_->mdb->ListCameraRecordings(camera_uuid, start_time_90k,
end_time_90k, handle_sql_row,
&error_message)) {
if (!ListAggregatedCameraRecordings(camera_uuid, start_time_90k,
end_time_90k, kForceSplitDuration90k,
finish_html_row, &error_message)) {
return evhttp_send_error(
req, HTTP_INTERNAL,
StrCat("sqlite query failed: ", EscapeHtml(error_message)).c_str());
}
maybe_finish_html_row();
buf.Add(
"</table>\n"
"</html>\n");
@@ -366,9 +376,10 @@ void WebInterface::HandleJsonCameraRecordings(evhttp_request *req,
return IterationControl::kContinue;
};
std::string error_message;
if (!env_->mdb->ListCameraRecordings(camera_uuid, start_time_90k,
end_time_90k, handle_row,
&error_message)) {
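// No forced split for the JSON list: with the maximum duration, the split
// check below always passes, so adjacent recordings are coalesced until a gap
// or a video parameter change.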
const auto kForceSplitDuration90k = std::numeric_limits<int64_t>::max();
if (!ListAggregatedCameraRecordings(camera_uuid, start_time_90k,
end_time_90k, kForceSplitDuration90k,
handle_row, &error_message)) {
return evhttp_send_error(
req, HTTP_INTERNAL,
StrCat("sqlite query failed: ", EscapeHtml(error_message)).c_str());


@@ -72,6 +72,12 @@ class WebInterface {
void HandleJsonCameraRecordings(evhttp_request *req, Uuid camera_uuid);
void HandleMp4View(evhttp_request *req, Uuid camera_uuid);
bool ListAggregatedCameraRecordings(
Uuid camera_uuid, int64_t start_time_90k, int64_t end_time_90k,
int64_t forced_split_duration_90k,
const std::function<void (const ListCameraRecordingsRow &)> &fn,
std::string *error_message);
// TODO: more nuanced error code for HTTP.
std::shared_ptr<VirtualFile> BuildMp4(Uuid camera_uuid,
int64_t start_time_90k,