// This file is part of Moonfire NVR, a security camera digital video recorder.
// Copyright (C) 2016 Scott Lamb <slamb@slamb.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations including
// the two.
//
// You must obey the GNU General Public License in all respects for all
// of the code used other than OpenSSL. If you modify file(s) with this
// exception, you may extend this exception to your version of the
// file(s), but you are not obligated to do so. If you do not wish to do
// so, delete this exception statement from your version. If you delete
// this exception statement from all source files in the program, then
// also delete it here.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// http.h: classes for HTTP serving. In particular, there are helpers for
// serving HTTP byte range requests with libevent.

#ifndef MOONFIRE_NVR_HTTP_H
#define MOONFIRE_NVR_HTTP_H

#include <dirent.h>
#include <stdarg.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include <event2/buffer.h>
#include <event2/keyvalq_struct.h>
#include <event2/http.h>
#include <glog/logging.h>
#include <re2/stringpiece.h>

#include "string.h"

namespace moonfire_nvr {

// Single-use object to represent a set of HTTP query parameters.
class QueryParameters {
 public:
  // Parse parameters from the given URI.
  // Caller should check ok() afterward.
  QueryParameters(const char *uri) {
    TAILQ_INIT(&me_);
    ok_ = evhttp_parse_query_str(uri, &me_) == 0;
  }
  QueryParameters(const QueryParameters &) = delete;
  void operator=(const QueryParameters &) = delete;

  ~QueryParameters() { evhttp_clear_headers(&me_); }

  bool ok() const { return ok_; }
  const char *Get(const char *param) const {
    return evhttp_find_header(&me_, param);
  }

 private:
  struct evkeyvalq me_;
  bool ok_ = false;
};
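
// Example (illustrative sketch, not part of the original header): typical use
// inside an evhttp request handler. The parameter name "camera_id" is made up
// for illustration.
//
//   const char *query =
//       evhttp_uri_get_query(evhttp_request_get_evhttp_uri(req));
//   QueryParameters params(query != nullptr ? query : "");
//   if (!params.ok()) {
//     HttpSendError(req, HTTP_BADREQUEST, "bad query string: ", EINVAL);
//     return;
//   }
//   const char *camera_id = params.Get("camera_id");  // nullptr if absent.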

// Wrapped version of libevent's "struct evbuffer" which uses RAII and simply
// aborts the process if allocations fail. (Moonfire NVR is intended to run on
// Linux systems with the default vm.overcommit_memory=0, so there's probably
// no point in trying to gracefully recover from a condition that's unlikely
// to ever happen.)
class EvBuffer {
 public:
  EvBuffer() { buf_ = CHECK_NOTNULL(evbuffer_new()); }
  EvBuffer(const EvBuffer &) = delete;
  EvBuffer &operator=(const EvBuffer &) = delete;
  ~EvBuffer() { evbuffer_free(buf_); }

  struct evbuffer *get() {
    return buf_;
  }

  void Add(const re2::StringPiece &s) {
    CHECK_EQ(0, evbuffer_add(buf_, s.data(), s.size()));
  }

  void AddPrintf(const char *fmt, ...) __attribute__((format(printf, 2, 3))) {
    va_list argp;
    va_start(argp, fmt);
    CHECK_LE(0, evbuffer_add_vprintf(buf_, fmt, argp));
    va_end(argp);
  }

  // Delegates to evbuffer_add_file.
  // On success, |fd| will be closed by libevent. On failure, it remains open.
  bool AddFile(int fd, ev_off_t offset, ev_off_t length,
               std::string *error_message);

  void AddReference(const void *data, size_t datlen,
                    evbuffer_ref_cleanup_cb cleanupfn, void *cleanupfn_arg) {
    CHECK_EQ(
        0, evbuffer_add_reference(buf_, data, datlen, cleanupfn, cleanupfn_arg))
        << strerror(errno);
  }

 private:
  struct evbuffer *buf_;
};
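
// Example (illustrative sketch, not part of the original header): building a
// response body. |fd| and |file_size| are hypothetical; AddFile closes |fd|
// only on success, so the caller must close it on failure.
//
//   EvBuffer buf;
//   buf.Add("hello ");
//   buf.AddPrintf("world #%d", 42);
//   std::string error_message;
//   if (!buf.AddFile(fd, 0, file_size, &error_message)) {
//     close(fd);  // |fd| is still open here; report |error_message|.
//   }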

struct ByteRange {
  ByteRange() {}
  ByteRange(int64_t begin, int64_t end) : begin(begin), end(end) {}
  int64_t begin = 0;
  int64_t end = 0;  // exclusive.
  int64_t size() const { return end - begin; }
  bool operator==(const ByteRange &o) const {
    return begin == o.begin && end == o.end;
  }
  std::string DebugString() const { return StrCat("[", begin, ", ", end, ")"); }
};
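
// Note (sketch): ranges are half-open, [begin, end). For example,
//   ByteRange r(0, 4);
//   r.size();         // 4
//   r.DebugString();  // "[0, 4)"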

inline std::ostream &operator<<(std::ostream &out, const ByteRange &range) {
  return out << range.DebugString();
}

// Helper for sending HTTP errors based on POSIX error returns.
void HttpSendError(evhttp_request *req, int http_err, const std::string &prefix,
                   int posix_errno);

class FileSlice {
 public:
  virtual ~FileSlice() {}

  virtual int64_t size() const = 0;

  // Add some or all of the given non-empty |range| to |buf|.
  // Returns the number of bytes added, or < 0 on error.
  // On error, |error_message| should be populated. (|error_message| may also
  // be populated if 0 <= return value < range.size(), such as if one of a
  // FileSlices object's slices failed. However, it's safe to simply retry
  // such partial failures later.)
  virtual int64_t AddRange(ByteRange range, EvBuffer *buf,
                           std::string *error_message) const = 0;
};
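
// Example (illustrative sketch, not part of the original header): how a caller
// might consume AddRange while tolerating the partial success described above.
// |slice|, |buf|, and |wanted| are made-up names.
//
//   ByteRange left = wanted;
//   std::string error_message;
//   int64_t added = slice->AddRange(left, &buf, &error_message);
//   if (added < 0) {
//     // Hard failure; report |error_message|.
//   } else if (added < left.size()) {
//     left.begin += added;  // Retry the remainder of |left| later.
//   }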

class VirtualFile : public FileSlice {
 public:
  virtual ~VirtualFile() {}

  // Return the given property of the file.
  virtual time_t last_modified() const = 0;
  virtual std::string etag() const = 0;
  virtual std::string mime_type() const = 0;
};

class RealFileSlice : public FileSlice {
 public:
  void Init(re2::StringPiece filename, ByteRange range);

  int64_t size() const final { return range_.size(); }

  int64_t AddRange(ByteRange range, EvBuffer *buf,
                   std::string *error_message) const final;

 private:
  std::string filename_;
  ByteRange range_;
};

// A FileSlice of a pre-defined length which calls a function which fills the
// slice on demand. The FillerFileSlice is responsible for subsetting.
class FillerFileSlice : public FileSlice {
 public:
  using FillFunction =
      std::function<bool(std::string *slice, std::string *error_message)>;

  void Init(size_t size, FillFunction fn) {
    fn_ = fn;
    size_ = size;
  }

  int64_t size() const final { return size_; }

  int64_t AddRange(ByteRange range, EvBuffer *buf,
                   std::string *error_message) const final;

 private:
  FillFunction fn_;
  size_t size_;
};
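
// Example (illustrative sketch, not part of the original header): a 16-byte
// slice generated on demand. The fill function presumably must produce exactly
// the number of bytes passed to Init.
//
//   FillerFileSlice header;
//   header.Init(16, [](std::string *slice, std::string *error_message) {
//     slice->assign(16, '\0');  // e.g. serialize a fixed-size header here.
//     return true;
//   });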

// A FileSlice backed by in-memory data which lives forever (static data).
class StaticStringPieceSlice : public FileSlice {
 public:
  explicit StaticStringPieceSlice(re2::StringPiece piece) : piece_(piece) {}

  int64_t size() const final { return piece_.size(); }
  int64_t AddRange(ByteRange range, EvBuffer *buf,
                   std::string *error_message) const final;

 private:
  re2::StringPiece piece_;
};

// A FileSlice backed by in-memory data which should be copied.
class CopyingStringPieceSlice : public FileSlice {
 public:
  explicit CopyingStringPieceSlice(re2::StringPiece piece) : piece_(piece) {}

  int64_t size() const final { return piece_.size(); }
  int64_t AddRange(ByteRange range, EvBuffer *buf,
                   std::string *error_message) const final;

 private:
  re2::StringPiece piece_;
};
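
// Usage note (sketch): choose between the two StringPiece-backed slices by the
// lifetime of the referenced bytes. StaticStringPieceSlice presumably adds the
// bytes by reference (so they must stay valid indefinitely), while
// CopyingStringPieceSlice presumably copies them into the buffer when AddRange
// runs.
//
//   static const char kBoxType[] = "moov";        // static storage duration.
//   StaticStringPieceSlice box_type(kBoxType);
//
//   std::string generated = "...";                // transient data.
//   CopyingStringPieceSlice generated_slice(generated);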

// A slice composed of other slices.
class FileSlices : public FileSlice {
 public:
  FileSlices() {}
  FileSlices(const FileSlices &) = delete;
  FileSlices &operator=(const FileSlices &) = delete;

  // |slice| must outlive the FileSlices.
  // |slice->size()| should not change after this call.
  // |flags| should be a bitmask of Flags values below.
  void Append(const FileSlice *slice, int flags = 0) {
    int64_t new_size = size_ + slice->size();
    slices_.emplace_back(ByteRange(size_, new_size), slice, flags);
    size_ = new_size;
  }

  int64_t size() const final { return size_; }
  int64_t AddRange(ByteRange range, EvBuffer *buf,
                   std::string *error_message) const final;

  enum Flags {
    // kLazy, as an argument to Append, instructs the FileSlices to append
    // this slice in AddRange only if it is the first slice in the requested
    // range. Otherwise it returns early, expecting HttpServe to call AddRange
    // again after the earlier ranges have been sent. This is useful if it is
    // expensive to have the given slice pending. In particular, it is useful
    // when serving many file slices on 32-bit machines to avoid exhausting
    // the address space with too many memory mappings.
    kLazy = 1
  };

 private:
  struct SliceInfo {
    SliceInfo(ByteRange range, const FileSlice *slice, int flags)
        : range(range), slice(slice), flags(flags) {}
    ByteRange range;
    const FileSlice *slice = nullptr;
    int flags;
  };

  int64_t size_ = 0;
  std::vector<SliceInfo> slices_;
};
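
// Example (illustrative sketch, not part of the original header): composing a
// virtual .mp4 from an in-memory header slice followed by a large on-disk
// slice, deferring the on-disk mapping with kLazy. The slice names are made
// up for illustration.
//
//   FileSlices mp4;
//   mp4.Append(&generated_header);                // e.g. a FillerFileSlice.
//   mp4.Append(&sample_data, FileSlices::kLazy);  // e.g. a RealFileSlice.
//   // AddRange now draws bytes from whichever slices overlap the requested
//   // range, stopping early at the lazy slice unless it comes first.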

// Serve an HTTP request |req| from |file|, handling byte range and
// conditional serving. (Similar to golang's http.ServeContent.)
//
// |file| only needs to live through the call to HttpServe itself.
// This contract may change in the future; currently all the ranges are added
// at the beginning of the request, so if large memory-backed buffers (as
// opposed to file-backed buffers) are used, the program's memory usage will
// spike, even if the HTTP client aborts early in the request. If this becomes
// problematic, this interface may change to take advantage of
// evbuffer_add_cb, adding buffers incrementally, and some mechanism will be
// added to guarantee VirtualFile objects outlive the HTTP requests they serve.
void HttpServe(const std::shared_ptr<VirtualFile> &file, evhttp_request *req);
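
// Example (illustrative sketch, not part of the original header): serving a
// VirtualFile from an evhttp callback. MyMp4File is a hypothetical subclass.
//
//   void OnMp4Request(evhttp_request *req, void *arg) {
//     auto file = std::make_shared<MyMp4File>(/* ... */);
//     HttpServe(file, req);
//   }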

// Serve a file over HTTP. Expects the caller to supply a sanitized |filename|
// (rather than taking it straight from the path specified in |req|).
void HttpServeFile(evhttp_request *req, const std::string &mime_type,
                   const std::string &filename, const struct stat &statbuf);

namespace internal {

// Value to represent result of parsing HTTP 1.1 "Range:" header.
enum class RangeHeaderType {
  // Ignore the header, serving all bytes in the file.
  kAbsentOrInvalid,

  // The server SHOULD return a response with status 416 (Requested range not
  // satisfiable).
  kNotSatisfiable,

  // The server SHOULD return a response with status 206 (Partial Content).
  kSatisfiable
};

// Parse an HTTP 1.1 "Range:" header value, following RFC 2616 section 14.35.
// This function is for use by HttpServe; it is exposed for testing only.
//
// |value| on entry should be the header value (after the ": "), or nullptr.
// |size| on entry should be the number of bytes available to serve.
// On kSatisfiable return, |ranges| will be filled with the satisfiable ranges.
// Otherwise, its contents are undefined.
RangeHeaderType ParseRangeHeader(const char *value, int64_t size,
                                 std::vector<ByteRange> *ranges);
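
// Example (sketch) of the expected results for a 1000-byte resource:
//
//   std::vector<ByteRange> ranges;
//   ParseRangeHeader("bytes=0-499,900-", 1000, &ranges);
//   // -> kSatisfiable; ranges = {[0, 500), [900, 1000)}.
//   ParseRangeHeader("bytes=2000-", 1000, &ranges);
//   // -> kNotSatisfiable.
//   ParseRangeHeader(nullptr, 1000, &ranges);
//   // -> kAbsentOrInvalid (serve the whole file).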

}  // namespace internal

}  // namespace moonfire_nvr

#endif  // MOONFIRE_NVR_HTTP_H