Mirror of https://github.com/scottlamb/moonfire-nvr.git, synced 2025-10-28 23:35:02 -04:00.
Compare commits
51 Commits
| SHA1 |
|---|
| ff383147e4 |
| 5cc93349fc |
| dc1909d073 |
| bc93712314 |
| 104ffdc2dc |
| 7968700aae |
| 239256e2ba |
| 1817931c6f |
| 5147a50771 |
| 39b6f6c49c |
| 0ccc6d0769 |
| 2903b680df |
| 2985214d87 |
| 3cc9603ff3 |
| e204fe0864 |
| f894f889be |
| 284a3dd5a9 |
| 7f7b95c56c |
| c75292e43b |
| 0bfa09b1f1 |
| d780b28cc2 |
| 1da6a44298 |
| d75d8d0b96 |
| 738ad9a558 |
| 24bbb024ee |
| b4836f3abb |
| dd30d5bcf8 |
| cbb2c30b56 |
| c46832369a |
| 865328f02d |
| b42bb53503 |
| 904842cc2d |
| 9256561362 |
| 3739e634ab |
| 3efff2cfd6 |
| 1473e79e96 |
| 8b951200c9 |
| 06f942582c |
| f9e3fb56b3 |
| 6c587c1b2e |
| 04b49f06db |
| 140f625a80 |
| dad664c244 |
| 89f230004e |
| d43e09d959 |
| 0422593ec6 |
| adf73a2da1 |
| c20c644747 |
| 6c227ec0f5 |
| e6c7b800fe |
| 1ae61b4c64 |
.github/workflows/ci.yml (vendored, 10 lines changed)

````diff
@@ -15,7 +15,7 @@ jobs:
     name: Rust ${{ matrix.rust }}
     strategy:
       matrix:
-        rust: [ "stable", "1.70", "nightly" ]
+        rust: ["stable", "1.88", "nightly"]
         include:
           - rust: nightly
             extra_args: "--features nightly --benches"
@@ -29,9 +29,9 @@ jobs:
           # `git describe` output gets baked into the binary for `moonfire-nvr --version`.
           # Fetch all revs so it can see tag history.
           fetch-depth: 0
-          filter: 'tree:0'
+          filter: "tree:0"
       - name: Cache
-        uses: actions/cache@v2
+        uses: actions/cache@v4
         with:
           path: |
             ~/.cargo/registry
@@ -63,7 +63,7 @@ jobs:
     name: Node ${{ matrix.node }}
     strategy:
       matrix:
-        node: [ "18", "20", "21" ]
+        node: ["18", "20", "21"]
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
@@ -79,7 +79,7 @@ jobs:
       - run: cd ui && pnpm run check-format
   license:
     name: Check copyright/license headers
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
        uses: actions/checkout@v4
````
.github/workflows/release.yml (vendored, 35 lines changed)

````diff
@@ -33,12 +33,12 @@ jobs:
       - run: cd ui && pnpm run build
       - run: cd ui && pnpm run test
       # Upload the UI and changelog as *job* artifacts (not *release* artifacts), used below.
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
        with:
          name: moonfire-nvr-ui-${{ github.ref_name }}
          path: ui/dist
          if-no-files-found: error
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
        with:
          name: CHANGELOG-${{ github.ref_name }}
          path: CHANGELOG-${{ github.ref_name }}.md
@@ -52,22 +52,23 @@ jobs:
     strategy:
       matrix:
         include:
           # Note: keep these arches in sync with `Upload Docker Manifest` list.
           - arch: x86_64 # as in `uname -m` on Linux.
             rust_target: x86_64-unknown-linux-musl # as in <https://doc.rust-lang.org/rustc/platform-support.html>
             docker_platform: linux/amd64 # as in <https://docs.docker.com/build/building/multi-platform/>
           - arch: aarch64
             rust_target: aarch64-unknown-linux-musl
             docker_platform: linux/arm64
           - arch: armv7l
             rust_target: armv7-unknown-linux-musleabihf
             docker_platform: linux/arm/v7
+      fail-fast: false
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
       - name: Download UI
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
        with:
          name: moonfire-nvr-ui-${{ github.ref_name }}
          path: ui/dist
@@ -89,7 +90,7 @@ jobs:
           working-directory: server
           target: ${{ matrix.rust_target }}
           command: build
-          args: --release --features bundled
+          args: --release --features bundled,mimalloc
       - name: Upload Docker Artifact
         run: |
           tag="${DOCKER_TAG}-${{ matrix.arch }}"
@@ -107,20 +108,20 @@ jobs:
           EOF
       # Upload as a *job* artifact (not *release* artifact), used below.
       - name: Upload Job Artifact
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
        with:
          name: moonfire-nvr-${{ github.ref_name }}-${{ matrix.arch }}
          path: output/moonfire-nvr
          if-no-files-found: error

   release:
-    needs: [ base, cross ]
+    needs: [base, cross]
     runs-on: ubuntu-latest
     permissions:
       contents: write
       packages: write
     steps:
-      - uses: actions/download-artifact@v3
+      - uses: actions/download-artifact@v4
        with:
          path: artifacts
      - name: ls before rearranging
````
CHANGELOG.md (52 lines changed)

````diff
@@ -8,6 +8,58 @@ upgrades, e.g. `v0.6.x` -> `v0.7.x`. The config file format and
 [API](ref/api.md) currently have no stability guarantees, so they may change
 even on minor releases, e.g. `v0.7.5` -> `v0.7.6`.
 
+## v0.7.23 (2025-10-03)
+
+* update Retina to [v0.4.14](https://github.com/scottlamb/retina/blob/main/CHANGELOG.md#v0414-2025-10-03),
+  improving camera compatibility. Fixes [#344](https://github.com/scottlamb/moonfire-nvr/issues/344).
+* bump minimum Rust version to 1.88.
+
+## v0.7.22 (2025-10-03)
+
+(This version was tagged but never released due to an error.)
+
+* switch from per-layer to global `tracing` filter to avoid missing log lines
+  ([tokio-rs/tracing#2519](https://github.com/tokio-rs/tracing/issues/2519)).
+
+## v0.7.21 (2025-04-04)
+
+* Release with `mimalloc` allocator, which is significantly faster than the memory
+  allocator built into `musl`.
+* Eliminate some memory allocations.
+
+## v0.7.20 (2025-01-31)
+
+* H.265 fixes.
+
+## v0.7.19 (2025-01-28)
+
+* support recording H.265 ([#33](https://github.com/scottlamb/moonfire-nvr/issues/33)).
+  Browser support may vary.
+* bump minimum Rust version to 1.82.
+* improve error message on timeout opening stream.
+* use `jiff` for time manipulations.
+
+## v0.7.18 (2025-01-28)
+
+This release was skipped due to build problems on `armv7-unknown-linux-musleabihf`.
+
+## v0.7.17 (2024-09-03)
+
+* bump minimum Rust version to 1.79.
+* in UI's list view, add a tooltip on the end time which shows why the
+  recording ended.
+* fix [#121](https://github.com/scottlamb/moonfire-nvr/issues/121):
+  iPhone live view.
+* update to hyper and http version 1.0. In the process, no longer wait for
+  pending HTTP requests on shutdown. This just extended the time Moonfire was
+  running without streaming.
+* upgrade to Retina 0.4.10, adding support for recording MJPEG video. Note
+  major browsers do not support playback of MJPEG videos, however.
+
+## v0.7.16 (2024-05-30)
+
+* further changes to improve Reolink camera compatibility.
+
+## v0.7.15 (2024-05-26)
+
+* update Retina to 0.4.8, improving compatibility with some Reolink cameras.
````
````diff
@@ -68,7 +68,7 @@ following command:
 $ brew install node
 ```
 
-Next, you need Rust 1.65+ and Cargo. The easiest way to install them is by
+Next, you need Rust 1.88+ and Cargo. The easiest way to install them is by
 following the instructions at [rustup.rs](https://www.rustup.rs/). Avoid
 your Linux distribution's Rust packages, which tend to be too old.
 (At least on Debian-based systems; Arch and Gentoo might be okay.)
````
````diff
@@ -26,10 +26,10 @@ left, and pick the [latest tagged version](https://github.com/scottlamb/moonfire
 
 Download the binary for your platform from the matching GitHub release.
 Install it as `/usr/local/bin/moonfire-nvr` and ensure it is executable, e.g.
-for version `v0.7.14`:
+for version `v0.7.23`:
 
 ```console
-$ VERSION=v0.7.14
+$ VERSION=v0.7.23
 $ ARCH=$(uname -m)
 $ curl -OL "https://github.com/scottlamb/moonfire-nvr/releases/download/$VERSION/moonfire-nvr-$VERSION-$ARCH"
 $ sudo install -m 755 "moonfire-nvr-$VERSION-$ARCH" /usr/local/bin/moonfire-nvr
@@ -65,7 +65,7 @@ services:
   moonfire-nvr:
     # The `vX.Y.Z` images will work on any architecture (x86-64, arm, or
     # aarch64); just pick the correct version.
-    image: ghcr.io/scottlamb/moonfire-nvr:v0.7.11
+    image: ghcr.io/scottlamb/moonfire-nvr:v0.7.23
     command: run
 
     volumes:
````
````diff
@@ -105,8 +105,8 @@ services:
     # - seccomp:unconfined
 
     environment:
-      # Edit zone below to taste. The `:` is functional.
-      TZ: ":America/Los_Angeles"
+      # Edit zone below to taste.
+      TZ: "America/Los_Angeles"
       RUST_BACKTRACE: 1
 
     # docker's default log driver won't rotate logs properly, and will throw
@@ -323,7 +323,6 @@ After=network-online.target
 
 [Service]
 ExecStart=/usr/local/bin/moonfire-nvr run
-Environment=TZ=:/etc/localtime
 Environment=MOONFIRE_FORMAT=systemd
 Environment=MOONFIRE_LOG=info
 Environment=RUST_BACKTRACE=1
````
````diff
@@ -13,9 +13,10 @@ need more help.
 * [Docker setup](#docker-setup)
   * [`"/etc/moonfire-nvr.toml" is a directory`](#etcmoonfire-nvrtoml-is-a-directory)
   * [`Error response from daemon: unable to find user UID: no matching entries in passwd file`](#error-response-from-daemon-unable-to-find-user-uid-no-matching-entries-in-passwd-file)
-  * [`clock_gettime failed: EPERM: Operation not permitted`](#clock_gettime-failed-eperm-operation-not-permitted)
+  * [`clock_gettime(CLOCK_MONOTONIC) failed: EPERM: Operation not permitted`](#clock_gettimeclock_monotonic-failed-eperm-operation-not-permitted)
   * [`VFS is unable to determine a suitable directory for temporary files`](#vfs-is-unable-to-determine-a-suitable-directory-for-temporary-files)
 * [Server errors](#server-errors)
+  * [`unable to get IANA time zone name; check your $TZ and /etc/localtime`](#unable-to-get-iana-time-zone-name-check-your-tz-and-etclocaltime)
   * [`Error: pts not monotonically increasing; got 26615520 then 26539470`](#error-pts-not-monotonically-increasing-got-26615520-then-26539470)
 * [Out of disk space](#out-of-disk-space)
 * [Database or filesystem corruption errors](#database-or-filesystem-corruption-errors)
@@ -217,7 +218,7 @@ If Docker produces this error, look at this section of the docker compose setup:
     user: UID:GID
 ```
 
-#### `clock_gettime failed: EPERM: Operation not permitted`
+#### `clock_gettime(CLOCK_MONOTONIC) failed: EPERM: Operation not permitted`
 
 If commands fail with an error like the following, you're likely running
 Docker with an overly restrictive `seccomp` setup. [This stackoverflow
@@ -227,7 +228,7 @@ the `- seccomp: unconfined` line in your Docker compose file.
 
 ```console
 $ sudo docker compose run --rm moonfire-nvr --version
-clock_gettime failed: EPERM: Operation not permitted
+clock_gettime(CLOCK_MONOTONIC) failed: EPERM: Operation not permitted
 
 This indicates a broken environment. See the troubleshooting guide.
 ```
@@ -250,6 +251,12 @@ container in your Docker compose file.
 
 ### Server errors
 
+#### `unable to get IANA time zone name; check your $TZ and /etc/localtime`
+
+Moonfire NVR loads the system time zone via the logic described at
+[`jiff::tz::TimeZone::system`](https://docs.rs/jiff/0.1.8/jiff/tz/struct.TimeZone.html#method.system)
+and expects to be able to get the IANA zone name.
+
 #### `Error: pts not monotonically increasing; got 26615520 then 26539470`
 
 If your streams cut out and you see error messages like this one in Moonfire
````
````diff
@@ -375,6 +375,9 @@ arbitrary order. Each recording object has the following properties:
   and Moonfire NVR fills in a duration of 0. When using `/view.mp4`, it's
   not possible to append additional segments after such frames, as noted
   below.
+* `endReason`: the reason the recording ended. Absent if the recording did
+  not end (`growing` is true or this was split via `split90k`) or if the
+  reason was unknown (recording predates schema version 7).
 
 Under the property `videoSampleEntries`, an object mapping ids to objects with
 the following properties:
@@ -651,6 +654,9 @@ However, there are two important differences:
 * The `/view.m4s` endpoint always returns a time range that starts with a key frame;
   `/live.m4s` messages may not include a key frame.
 
+If the caller falls too many frames behind, the connection will drop with a
+text message error.
+
 Note: an earlier version of this API used a `multipart/mixed` segment instead,
 compatible with the [multipart-stream-js][multipart-stream-js] library. The
 problem with this approach is that browsers have low limits on the number of
````
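To make the new behavior concrete, here is a minimal client sketch for the `/live.m4s` endpoint described above. It is an illustration only: the URL path, the lack of authentication, and the assumption that media segments arrive as binary WebSocket messages while errors arrive as text messages are inferred from this changelog, not taken from the documented API.

```rust
use futures::StreamExt as _;
use tokio_tungstenite::{connect_async, tungstenite::Message};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical URL; a real client discovers camera UUIDs via the JSON API
    // and attaches a session cookie.
    let url = "ws://nvr.example.com:8080/api/cameras/<camera-uuid>/main/live.m4s";
    let (mut ws, _response) = connect_async(url).await?;
    while let Some(msg) = ws.next().await {
        match msg? {
            // Assumed framing: each binary message carries one media segment,
            // which (unlike `/view.m4s`) may not start with a key frame.
            Message::Binary(segment) => println!("segment: {} bytes", segment.len()),
            // Assumed framing: a text message signals an error, e.g. the
            // caller fell too many frames behind.
            Message::Text(err) => {
                eprintln!("stream ended: {err}");
                break;
            }
            _ => {}
        }
    }
    Ok(())
}
```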
````diff
@@ -96,7 +96,7 @@ Service=moonfire-nvr.service
 
 ## Reference
 
-At the top level, before any `[[bind]]` lines, the following
+At the top level, before any `[[binds]]` lines, the following
 keys are understood:
 
 * `dbDir`: path to the SQLite database directory. Defaults to `/var/lib/moonfire-nvr/db`.
@@ -118,7 +118,7 @@ should start with a `[[binds]]` line and specify one of the following:
 * `ipv4`: an IPv4 socket address. `0.0.0.0:8080` would allow connections from outside the machine;
   `127.0.0.1:8080` would allow connections only from the local host.
 * `ipv6`: an IPv6 socket address. `[::0]:8080` would allow connections from outside the machine;
-  `[[::1]:8080` would allow connections from only the local host.
+  `[::1]:8080` would allow connections from only the local host.
 * `unix`: a path in the local filesystem where a UNIX-domain socket can be created. Permissions on the
   enclosing directories control which users are allowed to connect to it. Web browsers typically don't
   support directly connecting to UNIX domain sockets, but other tools do, e.g.:
````
server/Cargo.lock (generated, 1934 lines changed)

File diff suppressed because it is too large.
server/Cargo.toml

````diff
@@ -5,7 +5,7 @@ authors = ["Scott Lamb <slamb@slamb.org>"]
 edition = "2021"
 resolver = "2"
 license-file = "../LICENSE.txt"
-rust-version = "1.70"
+rust-version = "1.88"
 publish = false
 
 [features]
@@ -18,66 +18,91 @@ nightly = ["db/nightly"]
 bundled = ["rusqlite/bundled", "bundled-ui"]
 
 bundled-ui = []
+mimalloc = ["base/mimalloc"]
 
 [workspace]
 members = ["base", "db"]
 
 [workspace.dependencies]
-base64 = "0.21.0"
-h264-reader = "0.7.0"
-itertools = "0.12.0"
+base64 = "0.22.0"
+h264-reader = "0.8.0"
+itertools = "0.14.0"
+jiff = "0.2.1"
 nix = "0.27.0"
-tracing = { version = "0.1", features = ["log"] }
-tracing-log = "0.2"
+pretty-hex = "0.4.0"
 ring = "0.17.0"
-rusqlite = "0.30.0"
+rusqlite = "0.37.0"
+tracing = { version = "0.1" }
+tracing-core = "0.1.30"
+tracing-futures = { version = "0.2.5", features = ["futures-03", "std-future"] }
+tracing-log = "0.2"
+tracing-subscriber = { version = "0.3.16" }
+uuid = { version = "1.1.2", features = ["serde", "std", "v7", "fast-rng"] }
+
+# This is 3.7.2 + dependency updates.
+protobuf = { git = "https://github.com/scottlamb/rust-protobuf", rev = "593aa7f26bb5fc736c2e61d410afa34efb914ecb" }
+protobuf-codegen = { git = "https://github.com/scottlamb/rust-protobuf", rev = "593aa7f26bb5fc736c2e61d410afa34efb914ecb" }
 
 [dependencies]
 base = { package = "moonfire-base", path = "base" }
 base64 = { workspace = true }
 blake3 = "1.0.0"
-bpaf = { version = "0.9.1", features = ["autocomplete", "bright-color", "derive"]}
+bpaf = { version = "0.9.15", features = [
+    "autocomplete",
+    "bright-color",
+    "derive",
+] }
 bytes = "1"
 byteorder = "1.0"
 chrono = "0.4.23"
-cursive = { version = "0.20.0", default-features = false, features = ["termion-backend"] }
+cursive = { version = "0.21.1", default-features = false, features = [
+    "termion-backend",
+] }
+data-encoding = "2.7.0"
 db = { package = "moonfire-db", path = "db" }
 futures = "0.3"
 h264-reader = { workspace = true }
-http = "0.2.3"
-http-serve = { version = "0.3.1", features = ["dir"] }
-hyper = { version = "0.14.2", features = ["http1", "server", "stream", "tcp"] }
+http = "1.1.0"
+http-serve = { version = "0.4.0-rc.1", features = ["dir"] }
+hyper = { version = "1.4.1", features = ["http1", "server"] }
 itertools = { workspace = true }
+jiff = { workspace = true, features = ["tz-system"] }
 libc = "0.2"
 log = { version = "0.4" }
 memchr = "2.0.2"
 nix = { workspace = true, features = ["time", "user"] }
 nom = "7.0.0"
 password-hash = "0.5.0"
-protobuf = "3.0"
+pretty-hex = { workspace = true }
+protobuf = { workspace = true }
 reffers = "0.7.0"
-retina = "0.4.0"
+retina = "0.4.14"
 ring = { workspace = true }
 rusqlite = { workspace = true }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 smallvec = { version = "1.7", features = ["union"] }
 sync_wrapper = "0.1.0"
-time = "0.1"
-tokio = { version = "1.24", features = ["macros", "rt-multi-thread", "signal", "sync", "time"] }
-tokio-stream = "0.1.5"
-tokio-tungstenite = "0.20.0"
-toml = "0.8"
-tracing = { workspace = true }
+tokio = { version = "1.24", features = [
+    "macros",
+    "rt-multi-thread",
+    "signal",
+    "sync",
+    "time",
+] }
+tokio-tungstenite = "0.26.1"
+toml = "0.9"
+tracing = { workspace = true, features = ["log"] }
 tracing-subscriber = { version = "0.3.16", features = ["env-filter", "json"] }
 tracing-core = "0.1.30"
 tracing-futures = { version = "0.2.5", features = ["futures-03", "std-future"] }
 tracing-log = { workspace = true }
 ulid = "1.0.0"
 url = "2.1.1"
-uuid = { version = "1.1.2", features = ["serde", "std", "v4"] }
+uuid = { workspace = true }
 flate2 = "1.0.26"
 git-version = "0.3.5"
 hyper-util = { version = "0.1.7", features = ["server-graceful", "tokio"] }
+http-body = "1.0.1"
+http-body-util = "0.1.2"
+pin-project = "1.1.10"
+subtle = "2.6.1"
 
 [target.'cfg(target_os = "linux")'.dependencies]
 libsystemd = "0.7.0"
@@ -89,8 +114,10 @@ walkdir = "2.3.3"
 
 [dev-dependencies]
 mp4 = { git = "https://github.com/scottlamb/mp4-rust", branch = "moonfire" }
-num-rational = { version = "0.4.0", default-features = false, features = ["std"] }
-reqwest = { version = "0.11.0", default-features = false, features = ["json"] }
+num-rational = { version = "0.4.0", default-features = false, features = [
+    "std",
+] }
+reqwest = { version = "0.12.0", default-features = false, features = ["json"] }
 tempfile = "3.2.0"
 tracing-test = "0.2.4"
 
@@ -111,9 +138,12 @@ lto = true
 debug = 1
 
 [patch.crates-io]
-# update to indexmap 2
-protobuf-codegen = { git = "https://github.com/scottlamb/rust-protobuf.git", rev = "a61e09785c957eb9a183d129b426710146bfde38" }
-protobuf-parse = { git = "https://github.com/scottlamb/rust-protobuf.git", rev = "a61e09785c957eb9a183d129b426710146bfde38" }
-
-# This version uses fallible-iterator v0.3 (same one rusqlite 0.30 uses) and hasn't been released yet.
-sdp-types = { git = "https://github.com/sdroege/sdp-types", rev = "e8d0a2c4b8b1fc1ddf1c60a01dc717a2f4e2d514" }
+# Override the `tracing` crate versions with a branch that updates the
+# `matchers` dependency to avoid duplicate `regex-automata` crate versions.
+# This branch is based on tracing's `0.1.x` branch with changes similar to
+# <https://github.com/tokio-rs/tracing/pull/3033> applied.
+tracing = { git = "https://github.com/scottlamb/tracing", rev = "861b443d7b2da400ca7b09111957f33c80135908" }
+tracing-core = { git = "https://github.com/scottlamb/tracing", rev = "861b443d7b2da400ca7b09111957f33c80135908" }
+tracing-log = { git = "https://github.com/scottlamb/tracing", rev = "861b443d7b2da400ca7b09111957f33c80135908" }
+tracing-subscriber = { git = "https://github.com/scottlamb/tracing", rev = "861b443d7b2da400ca7b09111957f33c80135908" }
````
server/base/Cargo.toml

````diff
@@ -6,8 +6,10 @@ readme = "../README.md"
 edition = "2021"
 license-file = "../../LICENSE.txt"
 publish = false
+rust-version = "1.88"
 
 [features]
+mimalloc = ["dep:libmimalloc-sys"]
 nightly = []
 
 [lib]
@@ -15,18 +17,21 @@ path = "lib.rs"
 
 [dependencies]
 ahash = "0.8"
-chrono = "0.4.23"
-coded = { git = "https://github.com/scottlamb/coded", rev = "2c97994974a73243d5dd12134831814f42cdb0e8"}
+coded = { git = "https://github.com/scottlamb/coded", rev = "2c97994974a73243d5dd12134831814f42cdb0e8" }
 futures = "0.3"
+jiff = { workspace = true }
 libc = "0.2"
-nix = { workspace = true }
+libmimalloc-sys = { version = "0.1.44", features = [
+    "override",
+    "extended",
+], optional = true }
+nix = { workspace = true, features = ["time"] }
 nom = "7.0.0"
 rusqlite = { workspace = true }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 slab = "0.4"
-time = "0.1"
 tracing = { workspace = true }
-tracing-core = "0.1.30"
+tracing-core = { workspace = true }
 tracing-log = { workspace = true }
-tracing-subscriber = { version = "0.3.16", features = ["env-filter", "json"] }
+tracing-subscriber = { workspace = true, features = ["env-filter", "json"] }
````
server/base/clock.rs

````diff
@@ -3,28 +3,94 @@
 // SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception.
 
 //! Clock interface and implementations for testability.
+//!
+//! Note these types are in a more standard nanosecond-based format, where
+//! [`crate::time`] uses Moonfire's 90 kHz time base.
 
-use std::mem;
-use std::sync::Mutex;
+use crate::Mutex;
+use nix::sys::time::{TimeSpec, TimeValLike as _};
 use std::sync::{mpsc, Arc};
 use std::thread;
-use std::time::Duration as StdDuration;
-use time::{Duration, Timespec};
+pub use std::time::Duration;
 use tracing::warn;
 
 use crate::error::Error;
 use crate::shutdown::ShutdownError;
 
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub struct SystemTime(pub TimeSpec);
+
+impl SystemTime {
+    pub fn new(sec: u64, nsec: i64) -> Self {
+        // `TimeSpec::new`'s arguments vary by platform.
+        // * currently uses 32-bit time_t on musl <https://github.com/rust-lang/libc/issues/1848>
+        // * nsec likewise can vary.
+        SystemTime(TimeSpec::new(sec as _, nsec as _))
+    }
+
+    pub fn as_secs(&self) -> i64 {
+        self.0.num_seconds()
+    }
+}
+
+impl std::ops::Add<Duration> for SystemTime {
+    type Output = SystemTime;
+
+    fn add(self, rhs: Duration) -> SystemTime {
+        SystemTime(self.0 + TimeSpec::from(rhs))
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Instant(pub TimeSpec);
+
+impl Instant {
+    pub fn from_secs(secs: i64) -> Self {
+        Instant(TimeSpec::seconds(secs))
+    }
+
+    pub fn saturating_sub(&self, o: &Instant) -> Duration {
+        if o > self {
+            Duration::default()
+        } else {
+            Duration::from(self.0 - o.0)
+        }
+    }
+}
+
+impl std::fmt::Debug for Instant {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+// TODO: should use saturating always?
+impl std::ops::Sub<Instant> for Instant {
+    type Output = Duration;
+
+    fn sub(self, rhs: Instant) -> Duration {
+        Duration::from(self.0 - rhs.0)
+    }
+}
+
+impl std::ops::Add<Duration> for Instant {
+    type Output = Instant;
+
+    fn add(self, rhs: Duration) -> Instant {
+        Instant(self.0 + TimeSpec::from(rhs))
+    }
+}
+
 /// Abstract interface to the system clocks. This is for testability.
 pub trait Clocks: Send + Sync + 'static {
     /// Gets the current time from `CLOCK_REALTIME`.
-    fn realtime(&self) -> Timespec;
+    fn realtime(&self) -> SystemTime;
 
     /// Gets the current time from a monotonic clock.
     ///
     /// On Linux, this uses `CLOCK_BOOTTIME`, which includes suspended time.
     /// On other systems, it uses `CLOCK_MONOTONIC`.
-    fn monotonic(&self) -> Timespec;
+    fn monotonic(&self) -> Instant;
 
     /// Causes the current thread to sleep for the specified time.
     fn sleep(&self, how_long: Duration);
@@ -33,7 +99,7 @@ pub trait Clocks: Send + Sync + 'static {
     fn recv_timeout<T>(
         &self,
         rcv: &mpsc::Receiver<T>,
-        timeout: StdDuration,
+        timeout: Duration,
     ) -> Result<T, mpsc::RecvTimeoutError>;
 }
 
@@ -52,7 +118,7 @@ where
             Err(e) => e.into(),
         };
         shutdown_rx.check()?;
-        let sleep_time = Duration::seconds(1);
+        let sleep_time = Duration::from_secs(1);
         warn!(
             exception = %e.chain(),
             "sleeping for 1 s after error"
@@ -64,49 +130,38 @@ where
 #[derive(Copy, Clone)]
 pub struct RealClocks {}
 
-impl RealClocks {
-    fn get(&self, clock: libc::clockid_t) -> Timespec {
-        unsafe {
-            let mut ts = mem::MaybeUninit::uninit();
-            assert_eq!(0, libc::clock_gettime(clock, ts.as_mut_ptr()));
-            let ts = ts.assume_init();
-            Timespec::new(
-                // On 32-bit arm builds, `tv_sec` is an `i32` and requires conversion.
-                // On other platforms, the `.into()` is a no-op.
-                #[allow(clippy::useless_conversion)]
-                ts.tv_sec.into(),
-                ts.tv_nsec as i32,
-            )
-        }
-    }
-}
-
 impl Clocks for RealClocks {
-    fn realtime(&self) -> Timespec {
-        self.get(libc::CLOCK_REALTIME)
+    fn realtime(&self) -> SystemTime {
+        SystemTime(
+            nix::time::clock_gettime(nix::time::ClockId::CLOCK_REALTIME)
+                .expect("clock_gettime(REALTIME) should succeed"),
+        )
     }
 
     #[cfg(target_os = "linux")]
-    fn monotonic(&self) -> Timespec {
-        self.get(libc::CLOCK_BOOTTIME)
+    fn monotonic(&self) -> Instant {
+        Instant(
+            nix::time::clock_gettime(nix::time::ClockId::CLOCK_BOOTTIME)
+                .expect("clock_gettime(BOOTTIME) should succeed"),
+        )
     }
 
     #[cfg(not(target_os = "linux"))]
-    fn monotonic(&self) -> Timespec {
-        self.get(libc::CLOCK_MONOTONIC)
+    fn monotonic(&self) -> Instant {
+        Instant(
+            nix::time::clock_gettime(nix::time::ClockId::CLOCK_MONOTONIC)
+                .expect("clock_gettime(MONOTONIC) should succeed"),
+        )
     }
 
     fn sleep(&self, how_long: Duration) {
-        match how_long.to_std() {
-            Ok(d) => thread::sleep(d),
-            Err(err) => warn!(%err, "invalid duration {:?}", how_long),
-        };
+        thread::sleep(how_long)
     }
 
     fn recv_timeout<T>(
         &self,
         rcv: &mpsc::Receiver<T>,
-        timeout: StdDuration,
+        timeout: Duration,
     ) -> Result<T, mpsc::RecvTimeoutError> {
         rcv.recv_timeout(timeout)
     }
@@ -117,7 +172,7 @@ impl Clocks for RealClocks {
 pub struct TimerGuard<'a, C: Clocks + ?Sized, S: AsRef<str>, F: FnOnce() -> S + 'a> {
     clocks: &'a C,
     label_f: Option<F>,
-    start: Timespec,
+    start: Instant,
 }
 
 impl<'a, C: Clocks + ?Sized, S: AsRef<str>, F: FnOnce() -> S + 'a> TimerGuard<'a, C, S, F> {
@@ -138,9 +193,9 @@ where
 {
     fn drop(&mut self) {
         let elapsed = self.clocks.monotonic() - self.start;
-        if elapsed.num_seconds() >= 1 {
+        if elapsed.as_secs() >= 1 {
             let label_f = self.label_f.take().unwrap();
-            warn!("{} took {}!", label_f().as_ref(), elapsed);
+            warn!("{} took {:?}!", label_f().as_ref(), elapsed);
         }
     }
 }
@@ -150,42 +205,42 @@ where
 pub struct SimulatedClocks(Arc<SimulatedClocksInner>);
 
 struct SimulatedClocksInner {
-    boot: Timespec,
+    boot: SystemTime,
     uptime: Mutex<Duration>,
 }
 
 impl SimulatedClocks {
-    pub fn new(boot: Timespec) -> Self {
+    pub fn new(boot: SystemTime) -> Self {
         SimulatedClocks(Arc::new(SimulatedClocksInner {
             boot,
-            uptime: Mutex::new(Duration::seconds(0)),
+            uptime: Mutex::new(Duration::from_secs(0)),
         }))
     }
 }
 
 impl Clocks for SimulatedClocks {
-    fn realtime(&self) -> Timespec {
-        self.0.boot + *self.0.uptime.lock().unwrap()
+    fn realtime(&self) -> SystemTime {
+        self.0.boot + *self.0.uptime.lock()
     }
-    fn monotonic(&self) -> Timespec {
-        Timespec::new(0, 0) + *self.0.uptime.lock().unwrap()
+    fn monotonic(&self) -> Instant {
+        Instant(TimeSpec::from(*self.0.uptime.lock()))
     }
 
     /// Advances the clock by the specified amount without actually sleeping.
     fn sleep(&self, how_long: Duration) {
-        let mut l = self.0.uptime.lock().unwrap();
-        *l = *l + how_long;
+        let mut l = self.0.uptime.lock();
+        *l += how_long;
     }
 
     /// Advances the clock by the specified amount if data is not immediately available.
     fn recv_timeout<T>(
        &self,
        rcv: &mpsc::Receiver<T>,
-        timeout: StdDuration,
+        timeout: Duration,
     ) -> Result<T, mpsc::RecvTimeoutError> {
-        let r = rcv.recv_timeout(StdDuration::new(0, 0));
+        let r = rcv.recv_timeout(Duration::new(0, 0));
         if r.is_err() {
-            self.sleep(Duration::from_std(timeout).unwrap());
+            self.sleep(timeout);
         }
         r
     }
````
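Given the types above, a test can drive time explicitly instead of really sleeping. A minimal sketch, relying only on the behavior shown in this diff (`sleep` on `SimulatedClocks` advances the virtual uptime); the `moonfire_base` import path is a guess, since in-tree code uses the `base` crate alias:

```rust
use moonfire_base::clock::{Clocks, Duration, SimulatedClocks, SystemTime};

#[test]
fn timeouts_without_real_sleeps() {
    // Boot the simulated clock at an arbitrary wall time.
    let clocks = SimulatedClocks::new(SystemTime::new(1_700_000_000, 0));
    let start = clocks.monotonic();

    // `sleep` advances the virtual uptime instantly; no real time passes.
    clocks.sleep(Duration::from_secs(5));

    assert_eq!((clocks.monotonic() - start).as_secs(), 5);
    assert_eq!(clocks.realtime().as_secs(), 1_700_000_000 + 5);
}
```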
server/base/error.rs

````diff
@@ -101,7 +101,7 @@ impl ToErrKind for rusqlite::types::FromSqlError {
 impl ToErrKind for nix::Error {
     fn err_kind(&self) -> ErrorKind {
         use nix::Error;
-        match self {
+        match *self {
             Error::EACCES | Error::EPERM => ErrorKind::PermissionDenied,
             Error::EDQUOT => ErrorKind::ResourceExhausted,
             Error::EBUSY
````
server/base/lib.rs

````diff
@@ -14,3 +14,77 @@ pub use crate::error::{Error, ErrorBuilder, ErrorKind, ResultExt};
 pub use ahash::RandomState;
 pub type FastHashMap<K, V> = std::collections::HashMap<K, V, ahash::RandomState>;
 pub type FastHashSet<K> = std::collections::HashSet<K, ahash::RandomState>;
+
+const NOT_POISONED: &str =
+    "not poisoned; this is a consequence of an earlier panic while holding this mutex; see logs.";
+
+/// [`std::sync::Mutex`] wrapper which always panics on encountering poison.
+#[derive(Default)]
+pub struct Mutex<T>(std::sync::Mutex<T>);
+
+impl<T> Mutex<T> {
+    #[inline]
+    pub const fn new(value: T) -> Self {
+        Mutex(std::sync::Mutex::new(value))
+    }
+
+    #[track_caller]
+    #[inline]
+    pub fn lock(&self) -> std::sync::MutexGuard<'_, T> {
+        self.0.lock().expect(NOT_POISONED)
+    }
+
+    #[track_caller]
+    #[inline]
+    pub fn into_inner(self) -> T {
+        self.0.into_inner().expect(NOT_POISONED)
+    }
+}
+
+/// [`std::sync::Condvar`] wrapper which always panics on encountering poison.
+#[derive(Default)]
+pub struct Condvar(std::sync::Condvar);
+
+impl Condvar {
+    #[inline]
+    pub const fn new() -> Self {
+        Self(std::sync::Condvar::new())
+    }
+
+    #[track_caller]
+    #[inline]
+    pub fn wait_timeout_while<'a, T, F>(
+        &self,
+        guard: std::sync::MutexGuard<'a, T>,
+        dur: std::time::Duration,
+        condition: F,
+    ) -> (std::sync::MutexGuard<'a, T>, std::sync::WaitTimeoutResult)
+    where
+        F: FnMut(&mut T) -> bool,
+    {
+        self.0
+            .wait_timeout_while(guard, dur, condition)
+            .expect(NOT_POISONED)
+    }
+}
+
+impl std::ops::Deref for Condvar {
+    type Target = std::sync::Condvar;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+pub fn ensure_malloc_used() {
+    #[cfg(feature = "mimalloc")]
+    {
+        // This is a load-bearing debug line.
+        // Building `libmimalloc-sys` with the `override` feature will override `malloc` and
+        // `free` as used through the Rust global allocator, SQLite, and `libc`. But...`cargo`
+        // doesn't seem to build `libmimalloc-sys` at all if it's not referenced from Rust code.
+        tracing::debug!("mimalloc version {}", unsafe {
+            libmimalloc_sys::mi_version()
+        })
+    }
+}
````
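A minimal sketch of what the wrapper buys call sites; the `counter` example is hypothetical, not code from the repository:

```rust
use moonfire_base::Mutex; // import path is a guess; in-tree code uses the `base` alias

fn main() {
    let counter = Mutex::new(0u64);
    // No `.unwrap()` at the call site: a poisoned mutex panics inside `lock()`
    // with a message pointing at the earlier panic, and `#[track_caller]`
    // attributes the panic to this line rather than to the wrapper internals.
    *counter.lock() += 1;
    assert_eq!(counter.into_inner(), 1);
}
```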
server/base/shutdown.rs

````diff
@@ -15,9 +15,10 @@ use std::pin::Pin;
 use std::sync::Arc;
 use std::task::{Context, Poll, Waker};
 
+use crate::Condvar;
+use crate::Mutex;
 use futures::Future;
 use slab::Slab;
-use std::sync::{Condvar, Mutex};
 
 #[derive(Debug)]
 pub struct ShutdownError;
@@ -47,7 +48,6 @@ impl Drop for Sender {
             .0
             .wakers
             .lock()
-            .unwrap()
             .take()
             .expect("only the single Sender takes the slab");
         for w in wakers.drain() {
@@ -78,14 +78,14 @@ const NO_WAKER: usize = usize::MAX;
 
 impl Receiver {
     pub fn check(&self) -> Result<(), ShutdownError> {
-        if self.0.wakers.lock().unwrap().is_none() {
+        if self.0.wakers.lock().is_none() {
             Err(ShutdownError)
         } else {
             Ok(())
         }
     }
 
-    pub fn as_future(&self) -> ReceiverRefFuture {
+    pub fn as_future(&self) -> ReceiverRefFuture<'_> {
         ReceiverRefFuture {
             receiver: self,
             waker_i: NO_WAKER,
@@ -107,12 +107,11 @@ impl Receiver {
     }
 
     pub fn wait_for(&self, timeout: std::time::Duration) -> Result<(), ShutdownError> {
-        let l = self.0.wakers.lock().unwrap();
+        let l = self.0.wakers.lock();
         let result = self
             .0
             .condvar
-            .wait_timeout_while(l, timeout, |wakers| wakers.is_some())
-            .unwrap();
+            .wait_timeout_while(l, timeout, |wakers| wakers.is_some());
         if result.1.timed_out() {
             Ok(())
         } else {
@@ -122,7 +121,7 @@ impl Receiver {
 }
 
 fn poll_impl(inner: &Inner, waker_i: &mut usize, cx: &mut Context<'_>) -> Poll<()> {
-    let mut l = inner.wakers.lock().unwrap();
+    let mut l = inner.wakers.lock();
     let wakers = match &mut *l {
         None => return Poll::Ready(()),
         Some(w) => w,
@@ -133,13 +132,13 @@ fn poll_impl(inner: &Inner, waker_i: &mut usize, cx: &mut Context<'_>) -> Poll<(
     } else {
         let existing_waker = &mut wakers[*waker_i];
         if !new_waker.will_wake(existing_waker) {
-            *existing_waker = new_waker.clone();
+            existing_waker.clone_from(new_waker);
         }
     }
     Poll::Pending
 }
 
-impl<'receiver> Future for ReceiverRefFuture<'receiver> {
+impl Future for ReceiverRefFuture<'_> {
     type Output = ();
 
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
@@ -147,6 +146,18 @@ impl<'receiver> Future for ReceiverRefFuture<'receiver> {
     }
 }
 
+impl Drop for ReceiverRefFuture<'_> {
+    fn drop(&mut self) {
+        if self.waker_i == NO_WAKER {
+            return;
+        }
+        let mut l = self.receiver.0.wakers.lock();
+        if let Some(wakers) = &mut *l {
+            wakers.remove(self.waker_i);
+        }
+    }
+}
+
 impl Future for ReceiverFuture {
     type Output = ();
 
@@ -156,6 +167,18 @@ impl Future for ReceiverFuture {
     }
 }
 
+impl Drop for ReceiverFuture {
+    fn drop(&mut self) {
+        if self.waker_i == NO_WAKER {
+            return;
+        }
+        let mut l = self.receiver.wakers.lock();
+        if let Some(wakers) = &mut *l {
+            wakers.remove(self.waker_i);
+        }
+    }
+}
+
 /// Returns a sender and receiver for graceful shutdown.
 ///
 /// Dropping the sender will request shutdown.
````
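A usage sketch for the shutdown channel. The `channel()` constructor name is an assumption based on the doc comment above ("Returns a sender and receiver for graceful shutdown"); only `wait_for`'s semantics (Ok on timeout, Err once the sender is dropped) come from the diff itself.

```rust
use std::time::Duration;

// Hypothetical import path; in-tree code uses the `base` crate alias.
use moonfire_base::shutdown;

fn worker(shutdown_rx: shutdown::Receiver) {
    loop {
        // ... do one unit of work ...

        // Per the diff above: `wait_for` returns Ok(()) when the timeout
        // elapses and Err(ShutdownError) once the single Sender is dropped.
        if shutdown_rx.wait_for(Duration::from_secs(1)).is_err() {
            break; // shutdown requested
        }
    }
}

fn main() {
    // Assumed constructor returning (Sender, Receiver); dropping the Sender
    // requests shutdown.
    let (shutdown_tx, shutdown_rx) = shutdown::channel();
    let handle = std::thread::spawn(move || worker(shutdown_rx));
    drop(shutdown_tx); // request shutdown
    handle.join().unwrap();
}
```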
server/base/time.rs

````diff
@@ -14,24 +14,48 @@ use std::fmt;
 use std::ops;
 use std::str::FromStr;
 
+use super::clock::SystemTime;
+
 type IResult<'a, I, O> = nom::IResult<I, O, nom::error::VerboseError<&'a str>>;
 
 pub const TIME_UNITS_PER_SEC: i64 = 90_000;
 
+/// The zone to use for all time handling.
+///
+/// In normal operation this is assigned from `jiff::tz::TimeZone::system()` at
+/// startup, but tests set it to a known political time zone instead.
+///
+/// Note that while fresh calls to `jiff::tz::TimeZone::system()` might return
+/// new values, this time zone is fixed for the entire run. This is important
+/// for `moonfire_db::days::Map`, where it's expected that adding values and
+/// then later subtracting them will cancel out.
+static GLOBAL_ZONE: std::sync::OnceLock<jiff::tz::TimeZone> = std::sync::OnceLock::new();
+
+pub fn init_zone<F: FnOnce() -> jiff::tz::TimeZone>(f: F) {
+    GLOBAL_ZONE.get_or_init(f);
+}
+
+pub fn global_zone() -> jiff::tz::TimeZone {
+    GLOBAL_ZONE
+        .get()
+        .expect("global zone should be initialized")
+        .clone()
+}
+
 /// A time specified as 90,000ths of a second since 1970-01-01 00:00:00 UTC.
 #[derive(Clone, Copy, Default, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
 pub struct Time(pub i64);
 
-/// Returns a parser for a `len`-digit non-negative number which fits into an i32.
-fn fixed_len_num<'a>(len: usize) -> impl FnMut(&'a str) -> IResult<'a, &'a str, i32> {
+/// Returns a parser for a `len`-digit non-negative number which fits into `T`.
+fn fixed_len_num<'a, T: FromStr>(len: usize) -> impl FnMut(&'a str) -> IResult<'a, &'a str, T> {
     map_res(
         take_while_m_n(len, len, |c: char| c.is_ascii_digit()),
-        |input: &str| input.parse::<i32>(),
+        |input: &str| input.parse(),
     )
 }
 
 /// Parses `YYYY-mm-dd` into pieces.
-fn parse_datepart(input: &str) -> IResult<&str, (i32, i32, i32)> {
+fn parse_datepart(input: &str) -> IResult<'_, &str, (i16, i8, i8)> {
     tuple((
         fixed_len_num(4),
         preceded(tag("-"), fixed_len_num(2)),
@@ -40,7 +64,7 @@ fn parse_datepart(input: &str) -> IResult<&str, (i32, i32, i32)> {
 }
 
 /// Parses `HH:MM[:SS[:FFFFF]]` into pieces.
-fn parse_timepart(input: &str) -> IResult<&str, (i32, i32, i32, i32)> {
+fn parse_timepart(input: &str) -> IResult<'_, &str, (i8, i8, i8, i32)> {
     let (input, (hr, _, min)) = tuple((fixed_len_num(2), tag(":"), fixed_len_num(2)))(input)?;
     let (input, stuff) = opt(tuple((
         preceded(tag(":"), fixed_len_num(2)),
@@ -51,22 +75,22 @@ fn parse_timepart(input: &str) -> IResult<&str, (i32, i32, i32, i32)> {
 }
 
 /// Parses `Z` (UTC) or `{+,-,}HH:MM` into a time zone offset in seconds.
-fn parse_zone(input: &str) -> IResult<&str, i32> {
+fn parse_zone(input: &str) -> IResult<'_, &str, i32> {
     alt((
         nom::combinator::value(0, tag("Z")),
         map(
             tuple((
                 opt(nom::character::complete::one_of(&b"+-"[..])),
-                fixed_len_num(2),
+                fixed_len_num::<i32>(2),
                 tag(":"),
-                fixed_len_num(2),
+                fixed_len_num::<i32>(2),
             )),
             |(sign, hr, _, min)| {
                 let off = hr * 3600 + min * 60;
                 if sign == Some('-') {
-                    off
-                } else {
                     -off
+                } else {
+                    off
                 }
             },
         ),
@@ -74,16 +98,8 @@ fn parse_zone(input: &str) -> IResult<&str, i32> {
 }
 
 impl Time {
-    pub fn new(tm: time::Timespec) -> Self {
-        Time(tm.sec * TIME_UNITS_PER_SEC + tm.nsec as i64 * TIME_UNITS_PER_SEC / 1_000_000_000)
-    }
-
-    pub const fn min_value() -> Self {
-        Time(i64::min_value())
-    }
-    pub const fn max_value() -> Self {
-        Time(i64::max_value())
-    }
+    pub const MIN: Self = Time(i64::MIN);
+    pub const MAX: Self = Time(i64::MAX);
 
     /// Parses a time as either 90,000ths of a second since epoch or a RFC 3339-like string.
     ///
@@ -118,38 +134,22 @@ impl Time {
             );
         }
         let (tm_hour, tm_min, tm_sec, subsec) = opt_time.unwrap_or((0, 0, 0, 0));
-        let mut tm = time::Tm {
-            tm_sec,
-            tm_min,
-            tm_hour,
-            tm_mday,
-            tm_mon,
-            tm_year,
-            tm_wday: 0,
-            tm_yday: 0,
-            tm_isdst: -1,
-            tm_utcoff: 0,
-            tm_nsec: 0,
-        };
-        if tm.tm_mon == 0 {
-            bail!(InvalidArgument, msg("time {input:?} has month 0"));
-        }
-        tm.tm_mon -= 1;
-        if tm.tm_year < 1900 {
-            bail!(InvalidArgument, msg("time {input:?} has year before 1900"));
-        }
-        tm.tm_year -= 1900;
-
-        // The time crate doesn't use tm_utcoff properly; it just calls timegm() if tm_utcoff == 0,
-        // mktime() otherwise. If a zone is specified, use the timegm path and a manual offset.
-        // If no zone is specified, use the tm_utcoff path. This is pretty lame, but follow the
-        // chrono crate's lead and just use 0 or 1 to choose between these functions.
-        let sec = if let Some(off) = opt_zone {
-            tm.to_timespec().sec + i64::from(off)
-        } else {
-            tm.tm_utcoff = 1;
-            tm.to_timespec().sec
-        };
+        let dt = jiff::civil::DateTime::new(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, 0)
+            .map_err(|e| err!(InvalidArgument, source(e)))?;
+        let tz =
+            if let Some(off) = opt_zone {
+                jiff::tz::TimeZone::fixed(jiff::tz::Offset::from_seconds(off).map_err(|e| {
+                    err!(InvalidArgument, msg("invalid time zone offset"), source(e))
+                })?)
+            } else {
+                global_zone()
+            };
+        let sec = tz
+            .into_ambiguous_zoned(dt)
+            .compatible()
+            .map_err(|e| err!(InvalidArgument, source(e)))?
+            .timestamp()
+            .as_second();
         Ok(Time(sec * TIME_UNITS_PER_SEC + i64::from(subsec)))
     }
 
@@ -159,6 +159,19 @@ impl Time {
     }
 }
 
+impl From<SystemTime> for Time {
+    fn from(tm: SystemTime) -> Self {
+        #[allow(clippy::unnecessary_cast)]
+        Time((tm.0.tv_sec() as i64) * TIME_UNITS_PER_SEC + (tm.0.tv_nsec() as i64) * 9 / 100_000)
+    }
+}
+
+impl From<jiff::Timestamp> for Time {
+    fn from(tm: jiff::Timestamp) -> Self {
+        Time((tm.as_nanosecond() * 9 / 100_000) as i64)
+    }
+}
+
 impl std::str::FromStr for Time {
     type Err = Error;
 
@@ -203,32 +216,39 @@ impl fmt::Debug for Time {
 
 impl fmt::Display for Time {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let tm = time::at(time::Timespec {
-            sec: self.0 / TIME_UNITS_PER_SEC,
-            nsec: 0,
-        });
-        let zone_minutes = tm.tm_utcoff.abs() / 60;
+        let tm = jiff::Zoned::new(
+            jiff::Timestamp::from_second(self.0 / TIME_UNITS_PER_SEC).map_err(|_| fmt::Error)?,
+            global_zone(),
+        );
         write!(
             f,
-            "{}:{:05}{}{:02}:{:02}",
-            tm.strftime("%FT%T").map_err(|_| fmt::Error)?,
+            "{}:{:05}{}",
+            tm.strftime("%FT%T"),
             self.0 % TIME_UNITS_PER_SEC,
-            if tm.tm_utcoff > 0 { '+' } else { '-' },
-            zone_minutes / 60,
-            zone_minutes % 60
+            tm.strftime("%:z"),
         )
     }
 }
 
 /// A duration specified in 1/90,000ths of a second.
-/// Durations are typically non-negative, but a `moonfire_db::db::CameraDayValue::duration` may be
-/// negative.
+/// Durations are typically non-negative, but a `moonfire_db::db::StreamDayValue::duration` may be
+/// negative when used as a `<StreamDayValue as Value>::Change`.
 #[derive(Clone, Copy, Default, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
 pub struct Duration(pub i64);
 
-impl Duration {
-    pub fn to_tm_duration(&self) -> time::Duration {
-        time::Duration::nanoseconds(self.0 * 100000 / 9)
+impl From<Duration> for jiff::SignedDuration {
+    fn from(d: Duration) -> Self {
+        jiff::SignedDuration::from_nanos(d.0 * 100_000 / 9)
     }
 }
 
+impl TryFrom<Duration> for std::time::Duration {
+    type Error = std::num::TryFromIntError;
+
+    fn try_from(value: Duration) -> Result<Self, Self::Error> {
+        Ok(std::time::Duration::from_nanos(
+            u64::try_from(value.0)? * 100_000 / 9,
+        ))
+    }
+}
+
@@ -331,6 +351,15 @@ impl ops::SubAssign for Duration {
     }
 }
 
+pub mod testutil {
+    pub fn init_zone() {
+        super::init_zone(|| {
+            jiff::tz::TimeZone::get("America/Los_Angeles")
+                .expect("America/Los_Angeles should exist")
+        })
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::{Duration, Time, TIME_UNITS_PER_SEC};
@@ -338,8 +367,7 @@ mod tests {
 
     #[test]
     fn test_parse_time() {
-        std::env::set_var("TZ", "America/Los_Angeles");
-        time::tzset();
+        super::testutil::init_zone();
         #[rustfmt::skip]
         let tests = &[
             ("2006-01-02T15:04:05-07:00", 102261550050000),
@@ -362,8 +390,7 @@ mod tests {
 
     #[test]
     fn test_format_time() {
-        std::env::set_var("TZ", "America/Los_Angeles");
-        time::tzset();
+        super::testutil::init_zone();
         assert_eq!(
             "2006-01-02T15:04:05:00000-08:00",
             format!("{}", Time(102261874050000))
````
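The `From<SystemTime>` and `From<jiff::Timestamp>` impls above both scale nanoseconds by `* 9 / 100_000`, which is `* 90_000 / 1_000_000_000` reduced to lowest terms. A small standalone check of that arithmetic:

```rust
const TIME_UNITS_PER_SEC: i64 = 90_000;

// Convert (seconds, nanoseconds) since the epoch to 90 kHz units, matching
// the conversion factor used in the diff above.
fn to_90k(sec: i64, nsec: i64) -> i64 {
    sec * TIME_UNITS_PER_SEC + nsec * 9 / 100_000
}

fn main() {
    // Half a second is 45,000 90 kHz units.
    assert_eq!(to_90k(0, 500_000_000), 45_000);
    // A full second of nanoseconds matches one second of 90 kHz units.
    assert_eq!(to_90k(0, 1_000_000_000), TIME_UNITS_PER_SEC);
    assert_eq!(to_90k(3, 0), 270_000);
}
```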
server/base/tracing_setup.rs

````diff
@@ -17,12 +17,18 @@ use tracing_subscriber::{
 
 struct FormatSystemd;
 
-struct ChronoTimer;
+struct JiffTimer;
 
-impl FormatTime for ChronoTimer {
+impl FormatTime for JiffTimer {
     fn format_time(&self, w: &mut Writer<'_>) -> std::fmt::Result {
         const TIME_FORMAT: &str = "%Y-%m-%dT%H:%M:%S%.6f";
-        write!(w, "{}", chrono::Local::now().format(TIME_FORMAT))
+
+        // Always use the system time zone here, not `base::time::GLOBAL_ZONE`,
+        // to resolve a chicken-and-egg problem. `jiff::tz::TimeZone::system()`
+        // may log an error that is worth seeing. Therefore, we install the
+        // tracing subscriber before initializing `GLOBAL_ZONE`. The latter
+        // only exists to override the zone for tests anyway.
+        write!(w, "{}", jiff::Zoned::now().strftime(TIME_FORMAT))
     }
 }
 
@@ -90,7 +96,7 @@ where
 /// This means it includes a timestamp, follows [OpenTelemetry Semantic
 /// Conventions for Exceptions](https://opentelemetry.io/docs/reference/specification/logs/semantic_conventions/exceptions/),
 /// etc.
-fn panic_hook(p: &std::panic::PanicInfo) {
+fn panic_hook(p: &std::panic::PanicHookInfo) {
     let payload: Option<&str> = if let Some(s) = p.payload().downcast_ref::<&str>() {
         Some(*s)
     } else if let Some(s) = p.payload().downcast_ref::<String>() {
@@ -116,33 +122,36 @@ pub fn install() {
 
     match std::env::var("MOONFIRE_FORMAT") {
         Ok(s) if s == "systemd" => {
-            let sub = tracing_subscriber::registry().with(
-                tracing_subscriber::fmt::Layer::new()
-                    .with_writer(std::io::stderr)
-                    .with_ansi(false)
-                    .event_format(FormatSystemd)
-                    .with_filter(filter),
-            );
+            let sub = tracing_subscriber::registry()
+                .with(
+                    tracing_subscriber::fmt::Layer::new()
+                        .with_writer(std::io::stderr)
+                        .with_ansi(false)
+                        .event_format(FormatSystemd),
+                )
+                .with(filter);
             tracing::subscriber::set_global_default(sub).unwrap();
         }
         Ok(s) if s == "json" => {
-            let sub = tracing_subscriber::registry().with(
-                tracing_subscriber::fmt::Layer::new()
-                    .with_writer(std::io::stderr)
-                    .with_thread_names(true)
-                    .json()
-                    .with_filter(filter),
-            );
+            let sub = tracing_subscriber::registry()
+                .with(
+                    tracing_subscriber::fmt::Layer::new()
+                        .with_writer(std::io::stderr)
+                        .with_thread_names(true)
+                        .json(),
+                )
+                .with(filter);
             tracing::subscriber::set_global_default(sub).unwrap();
         }
         _ => {
-            let sub = tracing_subscriber::registry().with(
-                tracing_subscriber::fmt::Layer::new()
-                    .with_writer(std::io::stderr)
-                    .with_timer(ChronoTimer)
-                    .with_thread_names(true)
-                    .with_filter(filter),
-            );
+            let sub = tracing_subscriber::registry()
+                .with(
+                    tracing_subscriber::fmt::Layer::new()
+                        .with_writer(std::io::stderr)
+                        .with_timer(JiffTimer)
+                        .with_thread_names(true),
+                )
+                .with(filter);
             tracing::subscriber::set_global_default(sub).unwrap();
         }
     }
@@ -164,7 +173,7 @@ pub fn install_for_tests() {
     let sub = tracing_subscriber::registry().with(
         tracing_subscriber::fmt::Layer::new()
             .with_test_writer()
-            .with_timer(ChronoTimer)
+            .with_timer(JiffTimer)
             .with_thread_names(true)
             .with_filter(filter),
     );
````
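A minimal sketch of the per-layer versus global filter distinction behind the restructuring above (see tokio-rs/tracing#2519): attaching the filter with `.with_filter(...)` scopes it to one layer, while adding `EnvFilter` as its own layer filters the whole subscriber. This uses only standard `tracing-subscriber` APIs (the `env-filter` feature) and is illustrative rather than the project's actual setup code.

```rust
use tracing_subscriber::{layer::SubscriberExt as _, EnvFilter};

fn main() {
    let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
    // Global: the filter is a layer of its own, so every other layer sees the
    // same filtered stream of events, and none silently misses log lines.
    let sub = tracing_subscriber::registry()
        .with(tracing_subscriber::fmt::layer().with_writer(std::io::stderr))
        .with(filter);
    tracing::subscriber::set_global_default(sub).unwrap();
    tracing::info!("visible at info level");
}
```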
server/build.rs

````diff
@@ -113,7 +113,7 @@ fn handle_bundled_ui() -> Result<(), BoxError> {
             None => {
                 bare_path = path;
                 encoding = FileEncoding::Uncompressed;
-                if files.get(bare_path).is_some() {
+                if files.contains_key(bare_path) {
                     continue; // don't replace with suboptimal encoding.
                 }
             }
@@ -152,6 +152,34 @@ fn handle_bundled_ui() -> Result<(), BoxError> {
     Ok(())
 }
 
+/// Returns one-line `stdout` from a `git` command; `args` are simply space-separated (no escapes).
+fn git_oneline_output(args: &str) -> Result<String, BoxError> {
+    static HELP_TEXT: &str =
+        "If you are building from a release archive or without the `git` CLI available, \n\
+        try again with the `VERSION` environment variable set";
+
+    // `output()` returns `Err` e.g. if `git` was not found.
+    let mut output = Command::new("git")
+        .args(args.split(' '))
+        .output()
+        .map_err(|e| format!("`git {args}` failed: {e}\n\n{HELP_TEXT}"))?;
+
+    // `status` is non-success if `git` launched and then failed.
+    if !output.status.success() {
+        let status = output.status;
+        let stderr = output.stderr.escape_ascii();
+        return Err(format!("`git {args}` failed with {status}: {stderr}\n\n{HELP_TEXT}").into());
+    }
+    if output.stdout.pop() != Some(b'\n') {
+        return Err(format!("`git {args}` stdout should end with newline").into());
+    }
+    if output.stdout.contains(&b'\n') {
+        return Err(format!("`git {args}` stdout should be single line").into());
+    }
+    Ok(String::from_utf8(output.stdout)
+        .map_err(|_| format!("`git {args}` stdout should be valid UTF-8"))?)
+}
+
 fn handle_version() -> Result<(), BoxError> {
     println!("cargo:rerun-if-env-changed=VERSION");
     if std::env::var("VERSION").is_ok() {
@@ -164,25 +192,12 @@ fn handle_version() -> Result<(), BoxError> {
 
     // Avoid reruns when the output doesn't meaningfully change. I don't think this is quite right:
     // it won't recognize toggling between `-dirty` and not. But it'll do.
-    let dir = Command::new("git")
-        .arg("rev-parse")
-        .arg("--git-dir")
-        .output()?
-        .stdout;
-    let dir = String::from_utf8(dir).unwrap();
-    let dir = dir.strip_suffix('\n').unwrap();
+    let dir = git_oneline_output("rev-parse --git-dir")?;
     println!("cargo:rerun-if-changed={dir}/logs/HEAD");
     println!("cargo:rerun-if-changed={dir}/index");
 
     // Plumb the version through.
-    let version = Command::new("git")
-        .arg("describe")
-        .arg("--always")
-        .arg("--dirty")
-        .output()?
-        .stdout;
-    let version = String::from_utf8(version).unwrap();
-    let version = version.strip_suffix('\n').unwrap();
+    let version = git_oneline_output("describe --always --dirty")?;
     println!("cargo:rustc-env=VERSION={version}");
 
     Ok(())
````
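For context, a sketch of how the value that `handle_version` plumbs through gets consumed: the `cargo:rustc-env=VERSION=...` directive makes the string available to the compiled crate via `env!`. The `--version` flag wiring here is illustrative, not the project's actual CLI code.

```rust
fn main() {
    // Baked in at build time from `git describe --always --dirty` (or from the
    // `VERSION` environment variable when building without git).
    const VERSION: &str = env!("VERSION");
    if std::env::args().any(|a| a == "--version") {
        println!("moonfire-nvr {VERSION}");
    }
}
```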
server/db/Cargo.toml

````diff
@@ -5,7 +5,7 @@ authors = ["Scott Lamb <slamb@slamb.org>"]
 readme = "../README.md"
 edition = "2021"
 license-file = "../../LICENSE.txt"
-rust-version = "1.70"
+rust-version = "1.88"
 publish = false
 
 [features]
@@ -19,18 +19,19 @@ base = { package = "moonfire-base", path = "../base" }
 base64 = { workspace = true }
 blake3 = "1.0.0"
 byteorder = "1.0"
-cstr = "0.2.5"
 diff = "0.1.12"
 futures = "0.3"
 h264-reader = { workspace = true }
-hashlink = "0.8.1"
+hashlink = "0.10.0"
 itertools = { workspace = true }
+jiff = { workspace = true }
 libc = "0.2"
 nix = { workspace = true, features = ["dir", "feature", "fs", "mman"] }
-num-rational = { version = "0.4.0", default-features = false, features = ["std"] }
 odds = { version = "0.4.0", features = ["std-vec"] }
-pretty-hex = "0.4.0"
-protobuf = "3.0"
+num-rational = { version = "0.4.0", default-features = false, features = [
+    "std",
+] }
+pretty-hex = { workspace = true }
+protobuf = { workspace = true }
 ring = { workspace = true }
 rusqlite = { workspace = true }
 scrypt = "0.11.0"
@@ -38,12 +39,10 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 smallvec = "1.0"
 tempfile = "3.2.0"
-time = "0.1"
 tokio = { version = "1.24", features = ["macros", "rt-multi-thread", "sync"] }
-tracing = "0.1.37"
-ulid = "1.0.0"
+tracing = { workspace = true }
 url = { version = "2.1.1", features = ["serde"] }
-uuid = { version = "1.1.2", features = ["serde", "std", "v4"] }
+uuid = { workspace = true }
 
 [build-dependencies]
-protobuf-codegen = "3.0"
+protobuf-codegen = { workspace = true }
````
server/db/auth.rs

````diff
@@ -1073,8 +1073,8 @@ mod tests {
         .unwrap();
         assert_eq!(s.use_count, 1);
 
-        let mut tx = conn.transaction().unwrap();
-        state.flush(&mut tx).unwrap();
+        let tx = conn.transaction().unwrap();
+        state.flush(&tx).unwrap();
         tx.commit().unwrap();
         state.post_flush();
 
@@ -1224,8 +1224,8 @@ mod tests {
         c.username = "foo".to_owned();
         state.apply(&conn, c).unwrap();
 
-        assert!(state.users_by_name.get("slamb").is_none());
-        assert!(state.users_by_name.get("foo").is_some());
+        assert!(!state.users_by_name.contains_key("slamb"));
+        assert!(state.users_by_name.contains_key("foo"));
     }
 
     #[test]
````
@@ -309,7 +309,7 @@ fn compare_stream(
     ctx: &mut Context,
 ) -> Result<bool, Error> {
     let start = CompositeId::new(stream_id, 0);
-    let end = CompositeId::new(stream_id, i32::max_value());
+    let end = CompositeId::new(stream_id, i32::MAX);
     let mut printed_error = false;
     let cum_recordings = stream
         .cum_recordings
@@ -5,7 +5,7 @@
 //! In-memory indexes by calendar day.

 use base::time::{Duration, Time, TIME_UNITS_PER_SEC};
-use base::{err, Error};
+use base::Error;
 use smallvec::SmallVec;
 use std::cmp;
 use std::collections::BTreeMap;
@@ -20,28 +20,22 @@ use tracing::{error, trace};
 pub struct Key(pub(crate) [u8; 10]);

 impl Key {
-    fn new(tm: time::Tm) -> Result<Self, Error> {
+    fn new(tm: &jiff::Zoned) -> Result<Self, Error> {
         let mut s = Key([0u8; 10]);
-        write!(
-            &mut s.0[..],
-            "{}",
-            tm.strftime("%Y-%m-%d")
-                .map_err(|e| err!(Internal, source(e)))?
-        )?;
+        write!(&mut s.0[..], "{}", tm.strftime("%Y-%m-%d"))?;
         Ok(s)
     }

     pub fn bounds(&self) -> Range<Time> {
-        let mut my_tm = time::strptime(self.as_ref(), "%Y-%m-%d").expect("days must be parseable");
-        my_tm.tm_utcoff = 1; // to the time crate, values != 0 mean local time.
-        my_tm.tm_isdst = -1;
-        let start = Time(my_tm.to_timespec().sec * TIME_UNITS_PER_SEC);
-        my_tm.tm_hour = 0;
-        my_tm.tm_min = 0;
-        my_tm.tm_sec = 0;
-        my_tm.tm_mday += 1;
-        let end = Time(my_tm.to_timespec().sec * TIME_UNITS_PER_SEC);
-        start..end
+        let date: jiff::civil::Date = self.as_ref().parse().expect("Key should be valid date");
+        let start = date
+            .to_zoned(base::time::global_zone())
+            .expect("Key should be valid date");
+        let end = start.tomorrow().expect("Key should have valid tomorrow");
+
+        // Note day boundaries are expected to always be whole numbers of seconds.
+        Time(start.timestamp().as_second() * TIME_UNITS_PER_SEC)
+            ..Time(end.timestamp().as_second() * TIME_UNITS_PER_SEC)
     }
 }
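The jiff-based `bounds` derives the day's end with `tomorrow()` instead of hand-editing broken-down time fields, so daylight-saving transitions come from the zone rules rather than `tm_isdst` guesswork. A standalone sketch of the same pattern, with a hard-coded zone standing in for the project's `base::time::global_zone()`:

    // Sketch of the jiff pattern above; the zone is hard-coded for illustration.
    fn day_bounds_seconds(day: &str) -> (i64, i64) {
        let date: jiff::civil::Date = day.parse().expect("valid %Y-%m-%d date");
        let tz = jiff::tz::TimeZone::get("America/Los_Angeles").expect("zone exists");
        let start = date.to_zoned(tz).expect("date resolves in zone");
        let end = start.tomorrow().expect("tomorrow exists");
        (start.timestamp().as_second(), end.timestamp().as_second())
    }

    // On a spring-forward day the span is 23 hours, not 24:
    // let (s, e) = day_bounds_seconds("2024-03-10");
    // assert_eq!(e - s, 23 * 3600);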
@@ -60,13 +54,14 @@ impl std::fmt::Debug for Key {
 pub trait Value: std::fmt::Debug + Default {
     type Change: std::fmt::Debug;

-    /// Applies the given change to this value.
+    /// Applies the given change to this value; `c` may be positive or negative.
     fn apply(&mut self, c: &Self::Change);

     fn is_empty(&self) -> bool;
 }

-/// In-memory state about a particular stream on a particular day.
+/// In-memory state about a particular stream on a particular day, or a change
+/// to make via `<StreamValue as Value::apply>`.
 #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
 pub struct StreamValue {
     /// The number of recordings that overlap with this day.
@@ -81,6 +76,7 @@ pub struct StreamValue {
 impl Value for StreamValue {
     type Change = Self;

+    /// Applies the given change, which may have positive or negative recordings and duration.
     fn apply(&mut self, c: &StreamValue) {
         self.recordings += c.recordings;
         self.duration += c.duration;
@@ -198,42 +194,34 @@ impl<'a, V: Value> IntoIterator for &'a Map<V> {

 impl Map<StreamValue> {
     /// Adjusts `self` to reflect the range of the given recording.
     ///
     /// Note that the specified range may span two days. It will never span more because the maximum
     /// length of a recording entry is less than a day (even a 23-hour "spring forward" day).
     ///
-    /// This function swallows/logs date formatting errors because they shouldn't happen and there's
-    /// not much that can be done about them. (The database operation has already gone through.)
+    /// See [`crate::recording::MAX_RECORDING_WALL_DURATION`].
     pub(crate) fn adjust(&mut self, r: Range<Time>, sign: i64) {
         // Find first day key.
         let sec = r.start.unix_seconds();
-        let mut my_tm = time::at(time::Timespec { sec, nsec: 0 });
-        let day = match Key::new(my_tm) {
-            Ok(d) => d,
-            Err(ref e) => {
-                error!(
-                    "Unable to fill first day key from {:?}->{:?}: {}; will ignore.",
-                    r, my_tm, e
-                );
-                return;
-            }
-        };
+        let start = jiff::Zoned::new(
+            jiff::Timestamp::from_second(sec).expect("valid timestamp"),
+            base::time::global_zone(),
+        );
+        let start_day = Key::new(&start).expect("valid key");

         // Determine the start of the next day.
-        // Use mytm to hold a non-normalized representation of the boundary.
-        my_tm.tm_isdst = -1;
-        my_tm.tm_hour = 0;
-        my_tm.tm_min = 0;
-        my_tm.tm_sec = 0;
-        my_tm.tm_mday += 1;
-        let boundary = my_tm.to_timespec();
-        let boundary_90k = boundary.sec * TIME_UNITS_PER_SEC;
+        let boundary = start
+            .date()
+            .tomorrow()
+            .expect("valid tomorrow")
+            .to_zoned(start.time_zone().clone())
+            .expect("valid tomorrow");
+        let boundary_90k = boundary.timestamp().as_second() * TIME_UNITS_PER_SEC;

         // Adjust the first day.
         let first_day_delta = StreamValue {
             recordings: sign,
             duration: Duration(sign * (cmp::min(r.end.0, boundary_90k) - r.start.0)),
         };
-        self.adjust_day(day, first_day_delta);
+        self.adjust_day(start_day, first_day_delta);

         if r.end.0 <= boundary_90k {
             return;
@@ -242,13 +230,12 @@ impl Map<StreamValue> {
         // Fill day with the second day. This requires a normalized representation so recalculate.
-        // (The C mktime(3) already normalized for us once, but .to_timespec() discarded that
-        // result.)
-        let my_tm = time::at(boundary);
-        let day = match Key::new(my_tm) {
+        let day = match Key::new(&boundary) {
             Ok(d) => d,
             Err(ref e) => {
                 error!(
                     "Unable to fill second day key from {:?}: {}; will ignore.",
-                    my_tm, e
+                    boundary, e
                 );
                 return;
             }
@@ -263,35 +250,29 @@ impl Map<SignalValue> {

 impl Map<SignalValue> {
     /// Adjusts `self` to reflect the range of the given recording.
-    /// Note that the specified range may span several days (unlike StreamValue).
     ///
-    /// This function swallows/logs date formatting errors because they shouldn't happen and there's
-    /// not much that can be done about them. (The database operation has already gone through.)
+    /// Note that the specified range may span several days (unlike `StreamValue`).
     pub(crate) fn adjust(&mut self, mut r: Range<Time>, old_state: u16, new_state: u16) {
         // Find first day key.
         let sec = r.start.unix_seconds();
-        let mut my_tm = time::at(time::Timespec { sec, nsec: 0 });
-        let mut day = match Key::new(my_tm) {
-            Ok(d) => d,
-            Err(ref e) => {
-                error!(
-                    "Unable to fill first day key from {:?}->{:?}: {}; will ignore.",
-                    r, my_tm, e
-                );
-                return;
-            }
-        };
+        let mut tm = jiff::Zoned::new(
+            jiff::Timestamp::from_second(sec).expect("valid timestamp"),
+            base::time::global_zone(),
+        );
+        let mut day = Key::new(&tm).expect("valid date");

-        // Determine the start of the next day.
-        // Use mytm to hold a non-normalized representation of the boundary.
-        my_tm.tm_isdst = -1;
-        my_tm.tm_hour = 0;
-        my_tm.tm_min = 0;
-        my_tm.tm_sec = 0;
+        // Determine the starts of subsequent days.
+        tm = tm
+            .with()
+            .hour(0)
+            .minute(0)
+            .second(0)
+            .build()
+            .expect("midnight is valid");

         loop {
-            my_tm.tm_mday += 1;
-            let boundary_90k = my_tm.to_timespec().sec * TIME_UNITS_PER_SEC;
+            tm = tm.tomorrow().expect("valid tomorrow");
+            let boundary_90k = tm.timestamp().as_second() * TIME_UNITS_PER_SEC;

             // Adjust this day.
             let duration = Duration(cmp::min(r.end.0, boundary_90k) - r.start.0);
@@ -308,23 +289,8 @@ impl Map<SignalValue> {
                 return;
             }

-            // Fill day with the next day. This requires a normalized representation so
-            // recalculate. (The C mktime(3) already normalized for us once, but .to_timespec()
-            // discarded that result.)
-            let my_tm = time::at(time::Timespec {
-                sec: Time(boundary_90k).unix_seconds(),
-                nsec: 0,
-            });
-            day = match Key::new(my_tm) {
-                Ok(d) => d,
-                Err(ref e) => {
-                    error!(
-                        "Unable to fill day key from {:?}: {}; will ignore.",
-                        my_tm, e
-                    );
-                    return;
-                }
-            };
+            // Fill day with the next day.
+            day = Key::new(&tm).expect("valid date");
             r.start.0 = boundary_90k;
         }
     }
127 server/db/db.rs
@@ -36,6 +36,7 @@ use crate::schema;
 use crate::signal;
 use base::clock::{self, Clocks};
 use base::strutil::encode_size;
+use base::Mutex;
 use base::{bail, err, Error};
 use base::{FastHashMap, FastHashSet};
 use hashlink::LinkedHashMap;
@@ -52,7 +53,7 @@ use std::path::PathBuf;
 use std::str;
 use std::string::String;
 use std::sync::Arc;
-use std::sync::{Mutex, MutexGuard};
+use std::sync::MutexGuard;
 use std::vec::Vec;
 use tracing::warn;
 use tracing::{error, info, trace};
@@ -66,6 +67,13 @@ pub const EXPECTED_SCHEMA_VERSION: i32 = 7;
 /// Make it one less than a power of two so that the data structure's size is efficient.
 const VIDEO_INDEX_CACHE_LEN: usize = 1023;

+/// Maximum number of live segments references to keep.
+///
+/// These should only be 16 bytes each, so they're fairly cheap, but we should
+/// have some bound in case subscribers are slow, and anyway it's generally
+/// not a good experience for subscribers to fall too far behind.
+const LIVE_SEGMENTS_BUF_LEN: usize = 128;
+
 const GET_RECORDING_PLAYBACK_SQL: &str = r#"
     select
         video_index
@@ -179,7 +187,7 @@ impl std::fmt::Debug for VideoSampleEntryToInsert {
 }

 /// A row used in `list_recordings_by_time` and `list_recordings_by_id`.
-#[derive(Copy, Clone, Debug)]
+#[derive(Clone, Debug)]
 pub struct ListRecordingsRow {
     pub start: recording::Time,
     pub video_sample_entry_id: i32,
@@ -200,6 +208,7 @@ pub struct ListRecordingsRow {
     /// (It's not included in the `recording_cover` index, so adding it to
     /// `list_recordings_by_time` would be inefficient.)
     pub prev_media_duration_and_runs: Option<(recording::Duration, i32)>,
+    pub end_reason: Option<String>,
 }

 /// A row used in `list_aggregated_recordings`.
@@ -217,6 +226,7 @@ pub struct ListAggregatedRecordingsRow {
     pub first_uncommitted: Option<i32>,
     pub growing: bool,
     pub has_trailing_zero: bool,
+    pub end_reason: Option<String>,
 }

 impl ListAggregatedRecordingsRow {
@@ -241,6 +251,7 @@ impl ListAggregatedRecordingsRow {
             },
             growing,
             has_trailing_zero: (row.flags & RecordingFlags::TrailingZero as i32) != 0,
+            end_reason: row.end_reason,
         }
     }
 }
@@ -301,6 +312,7 @@ impl RecordingToInsert {
             open_id,
             flags: self.flags | RecordingFlags::Uncommitted as i32,
             prev_media_duration_and_runs: Some((self.prev_media_duration, self.prev_runs)),
+            end_reason: self.end_reason.clone(),
         }
     }
 }
@@ -496,21 +508,22 @@ pub struct Stream {
     /// The number of recordings in `uncommitted` which are synced and ready to commit.
     synced_recordings: usize,

-    on_live_segment: Vec<Box<dyn FnMut(LiveSegment) -> bool + Send>>,
+    live_segments: tokio::sync::broadcast::Sender<LiveFrame>,
 }

-/// Bounds of a live view segment. Currently this is a single frame of video.
+/// Bounds of a live view frame.
 ///
 /// This is used for live stream recordings. The stream id should already be known to the
 /// subscriber. Note this doesn't actually contain the video, just a reference that can be
 /// looked up within the database.
 #[derive(Clone, Debug)]
-pub struct LiveSegment {
+pub struct LiveFrame {
     pub recording: i32,

     /// If the segment's one frame is a key frame.
     pub is_key: bool,

-    /// The pts, relative to the start of the recording, of the start and end of this live segment,
+    /// The pts, relative to the start of the recording, of the start and end of this frame,
     /// in 90kHz units.
     pub media_off_90k: Range<i32>,
 }
@@ -550,7 +563,7 @@ impl Stream {
     pub fn days(&self) -> days::Map<days::StreamValue> {
         let mut days = self.committed_days.clone();
         for u in &self.uncommitted {
-            let l = u.lock().unwrap();
+            let l = u.lock();
             days.adjust(
                 l.start..l.start + recording::Duration(i64::from(l.wall_duration_90k)),
                 1,
@@ -609,7 +622,7 @@ pub struct LockedDatabase {

     /// The monotonic time when the database was opened (whether in read-write mode or read-only
     /// mode).
-    open_monotonic: recording::Time,
+    open_monotonic: base::clock::Instant,

     auth: auth::State,
     signal: signal::State,
@@ -638,7 +651,7 @@ pub struct CompositeId(pub i64);

 impl CompositeId {
     pub fn new(stream_id: i32, recording_id: i32) -> Self {
-        CompositeId((stream_id as i64) << 32 | recording_id as i64)
+        CompositeId(((stream_id as i64) << 32) | recording_id as i64)
     }

     pub fn stream(self) -> i32 {
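The added parentheses are clarity only: Rust's `<<` already binds tighter than `|`, so behavior is unchanged. The id packs the stream id into the high 32 bits and the recording id into the low 32:

    // Illustration of the packing; mirrors CompositeId::new()/stream()/recording().
    fn main() {
        let (stream_id, recording_id): (i32, i32) = (3, 7);
        let composite: i64 = ((stream_id as i64) << 32) | recording_id as i64;
        assert_eq!((composite >> 32) as i32, stream_id); // high 32 bits: stream
        assert_eq!(composite as i32, recording_id); // low 32 bits: recording
    }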
@@ -819,7 +832,7 @@ impl StreamStateChanger {
                     cum_runs: 0,
                     uncommitted: VecDeque::new(),
                     synced_recordings: 0,
-                    on_live_segment: Vec::new(),
+                    live_segments: tokio::sync::broadcast::channel(LIVE_SEGMENTS_BUF_LEN).0,
                 });
             }
             (Entry::Vacant(_), None) => {}
@@ -884,7 +897,7 @@ impl LockedDatabase {
         );
         match stream.uncommitted.back() {
             Some(s) => {
-                let l = s.lock().unwrap();
+                let l = s.lock();
                 r.prev_media_duration =
                     l.prev_media_duration + recording::Duration(l.media_duration_90k.into());
                 r.prev_runs = l.prev_runs + if l.run_offset == 0 { 1 } else { 0 };
@@ -924,7 +937,7 @@ impl LockedDatabase {
                 msg("can't sync un-added recording {id}")
             );
         }
-        let l = stream.uncommitted[stream.synced_recordings].lock().unwrap();
+        let l = stream.uncommitted[stream.synced_recordings].lock();
         let bytes = i64::from(l.sample_file_bytes);
         stream.bytes_to_add += bytes;
         stream.fs_bytes_to_add += round_up(bytes);
@@ -958,42 +971,27 @@ impl LockedDatabase {
         Ok(())
     }

-    /// Registers a callback to run on every live segment immediately after it's recorded.
-    /// The callback is run with the database lock held, so it must not call back into the database
-    /// or block. The callback should return false to unregister.
+    /// Returns a watcher for live segments of the given stream.
     pub fn watch_live(
         &mut self,
         stream_id: i32,
-        cb: Box<dyn FnMut(LiveSegment) -> bool + Send>,
-    ) -> Result<(), Error> {
+    ) -> Result<tokio::sync::broadcast::Receiver<LiveFrame>, Error> {
         let s = match self.streams_by_id.get_mut(&stream_id) {
             None => bail!(NotFound, msg("no such stream {stream_id}")),
             Some(s) => s,
         };
-        s.on_live_segment.push(cb);
-        Ok(())
+        Ok(s.live_segments.subscribe())
     }

-    /// Clears all watches on all streams.
-    /// Normally watches are self-cleaning: when a segment is sent, the callback returns false if
-    /// it is no longer interested (typically because hyper has just noticed the client is no
-    /// longer connected). This doesn't work when the system is shutting down and nothing more is
-    /// sent, though.
-    pub fn clear_watches(&mut self) {
-        for s in self.streams_by_id.values_mut() {
-            s.on_live_segment.clear();
-        }
-    }
-
-    pub(crate) fn send_live_segment(&mut self, stream: i32, l: LiveSegment) -> Result<(), Error> {
+    pub(crate) fn send_live_segment(&mut self, stream: i32, l: LiveFrame) -> Result<(), Error> {
         let s = match self.streams_by_id.get_mut(&stream) {
             None => bail!(Internal, msg("no such stream {stream}")),
             Some(s) => s,
         };

-        // TODO: use std's retain_mut after it's available in our minimum supported Rust version.
-        // <https://github.com/rust-lang/rust/issues/48919>
-        odds::vec::VecExt::retain_mut(&mut s.on_live_segment, |cb| cb(l.clone()));
+        // Note that `send` will fail if there are no active receivers.
+        // That's fine, so ignore this error.
+        let _ = s.live_segments.send(l);
        Ok(())
     }
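The callback registry becomes a `tokio::sync::broadcast` channel: `send_live_segment` publishes, each subscriber owns its own `Receiver`, and dropping the receiver (for example, when hyper notices the client disconnected) unsubscribes automatically, which is why `clear_watches` disappears. A hedged sketch of the subscriber side; the surrounding server code and exact generics are not shown in this diff:

    // Sketch of a subscriber loop under the new API; error handling elided.
    use tokio::sync::broadcast::error::RecvError;

    async fn stream_live(db: &db::Database, stream_id: i32) -> Result<(), base::Error> {
        // Take the database lock only long enough to subscribe.
        let mut rx = db.lock().watch_live(stream_id)?;
        loop {
            match rx.recv().await {
                Ok(frame) => {
                    // `frame` is a db::LiveFrame: a (recording, media_off_90k)
                    // reference to look up in the database, not the video itself.
                    let _ = (frame.recording, frame.is_key, frame.media_off_90k);
                }
                // The bounded buffer (LIVE_SEGMENTS_BUF_LEN) dropped old frames
                // because this subscriber fell behind; resynchronize or bail.
                Err(RecvError::Lagged(skipped)) => tracing::warn!(skipped, "lagged"),
                Err(RecvError::Closed) => return Ok(()), // sender gone; shutdown.
            }
        }
    }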
@@ -1017,7 +1015,7 @@ impl LockedDatabase {
         let mut new_duration = 0;
         let mut new_runs = 0;
         for i in 0..s.synced_recordings {
-            let l = s.uncommitted[i].lock().unwrap();
+            let l = s.uncommitted[i].lock();
             raw::insert_recording(
                 &tx,
                 o,
@@ -1079,8 +1077,10 @@ impl LockedDatabase {
             r"update open set duration_90k = ?, end_time_90k = ? where id = ?",
         )?;
         let rows = stmt.execute(params![
-            (recording::Time::new(clocks.monotonic()) - self.open_monotonic).0,
-            recording::Time::new(clocks.realtime()).0,
+            recording::Duration::try_from(clocks.monotonic() - self.open_monotonic)
+                .expect("valid duration")
+                .0,
+            recording::Time::from(clocks.realtime()).0,
             o.id,
         ])?;
         if rows != 1 {
@@ -1142,7 +1142,7 @@ impl LockedDatabase {
             let u = s.uncommitted.pop_front().unwrap();
             log.added
                 .push(CompositeId::new(stream_id, s.cum_recordings));
-            let l = u.lock().unwrap();
+            let l = u.lock();
             s.cum_recordings += 1;
             let wall_dur = recording::Duration(l.wall_duration_90k.into());
             let media_dur = recording::Duration(l.media_duration_90k.into());
@@ -1311,7 +1311,7 @@ impl LockedDatabase {
         raw::list_recordings_by_time(&self.conn, stream_id, desired_time.clone(), f)?;
         for (i, u) in s.uncommitted.iter().enumerate() {
             let row = {
-                let l = u.lock().unwrap();
+                let l = u.lock();
                 if l.video_samples > 0 {
                     let end = l.start + recording::Duration(l.wall_duration_90k as i64);
                     if l.start > desired_time.end || end < desired_time.start {
@@ -1352,7 +1352,7 @@ impl LockedDatabase {
         );
         for i in start..end {
             let row = {
-                let l = s.uncommitted[i].lock().unwrap();
+                let l = s.uncommitted[i].lock();
                 if l.video_samples > 0 {
                     l.to_list_row(
                         CompositeId::new(stream_id, s.cum_recordings + i as i32),
@@ -1376,7 +1376,7 @@ impl LockedDatabase {
         stream_id: i32,
         desired_time: Range<recording::Time>,
         forced_split: recording::Duration,
-        f: &mut dyn FnMut(&ListAggregatedRecordingsRow) -> Result<(), base::Error>,
+        f: &mut dyn FnMut(ListAggregatedRecordingsRow) -> Result<(), base::Error>,
     ) -> Result<(), base::Error> {
         // Iterate, maintaining a map from a recording_id to the aggregated row for the latest
         // batch of recordings from the run starting at that id. Runs can be split into multiple
@@ -1410,8 +1410,7 @@ impl LockedDatabase {
                         || new_dur >= forced_split;
                     if needs_flush {
                         // flush then start a new entry.
-                        f(a)?;
-                        *a = ListAggregatedRecordingsRow::from(row);
+                        f(std::mem::replace(a, ListAggregatedRecordingsRow::from(row)))?;
                     } else {
                         // append.
                         if a.time.end != row.start {
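Folding the flush-and-reset into `std::mem::replace` avoids a clone and fits the callback's new by-value signature: the finished aggregate is moved out to `f` at the same moment the fresh row takes its place. The idiom in isolation (a sketch, not the project's types):

    // std::mem::replace moves the old value out to the consumer while
    // installing its replacement, with no clone and no temporary Option.
    fn flush_and_restart(
        a: &mut Vec<i32>,
        next: Vec<i32>,
        f: &mut dyn FnMut(Vec<i32>) -> Result<(), ()>,
    ) -> Result<(), ()> {
        f(std::mem::replace(a, next))
    }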
@@ -1450,6 +1449,7 @@ impl LockedDatabase {
                         }
                         a.growing = growing;
                         a.has_trailing_zero = has_trailing_zero;
+                        a.end_reason = row.end_reason;
                     }
                 }
                 Entry::Vacant(e) => {
@@ -1458,7 +1458,7 @@ impl LockedDatabase {
             }
             Ok(())
         })?;
-        for a in aggs.values() {
+        for a in aggs.into_values() {
             f(a)?;
         }
         Ok(())
@@ -1490,7 +1490,7 @@ impl LockedDatabase {
                 ),
             );
         }
-        let l = s.uncommitted[i as usize].lock().unwrap();
+        let l = s.uncommitted[i as usize].lock();
         return f(&RecordingPlayback {
             video_index: &l.video_index,
         });
@@ -1734,7 +1734,7 @@ impl LockedDatabase {
                     cum_runs: row.get(7)?,
                     uncommitted: VecDeque::new(),
                     synced_recordings: 0,
-                    on_live_segment: Vec::new(),
+                    live_segments: tokio::sync::broadcast::channel(LIVE_SEGMENTS_BUF_LEN).0,
                 },
             );
             c.streams[type_.index()] = Some(id);
@@ -1798,7 +1798,7 @@ impl LockedDatabase {

     pub fn add_sample_file_dir(&mut self, path: PathBuf) -> Result<i32, Error> {
         let mut meta = schema::DirMeta::default();
-        let uuid = Uuid::new_v4();
+        let uuid = Uuid::now_v7();
         let uuid_bytes = &uuid.as_bytes()[..];
         let o = self
             .open
@@ -1907,7 +1907,7 @@ impl LockedDatabase {

     /// Adds a camera.
     pub fn add_camera(&mut self, mut camera: CameraChange) -> Result<i32, Error> {
-        let uuid = Uuid::new_v4();
+        let uuid = Uuid::now_v7();
         let uuid_bytes = &uuid.as_bytes()[..];
         let tx = self.conn.transaction()?;
         let streams;
@@ -2228,7 +2228,7 @@ pub fn init(conn: &mut rusqlite::Connection) -> Result<(), Error> {
     tx.execute_batch(include_str!("schema.sql"))
         .map_err(|e| err!(e, msg("unable to create database schema")))?;
     {
-        let uuid = ::uuid::Uuid::new_v4();
+        let uuid = ::uuid::Uuid::now_v7();
         let uuid_bytes = &uuid.as_bytes()[..];
         tx.execute("insert into meta (uuid) values (?)", params![uuid_bytes])?;
     }
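Every `Uuid::new_v4()` in the schema setup becomes `Uuid::now_v7()`. Version 7 keeps the randomness but prefixes a Unix-millisecond timestamp, so ids minted later sort later, which tends to be friendlier to B-tree storage such as SQLite's indexes. For example (requires the uuid crate's `v7` feature):

    // Version-7 UUIDs embed a millisecond timestamp in their high bits,
    // so later-created ids compare greater, unlike random v4 ids.
    use uuid::Uuid;

    fn main() {
        let a = Uuid::now_v7();
        std::thread::sleep(std::time::Duration::from_millis(2));
        let b = Uuid::now_v7();
        assert!(a < b); // time-ordered
        assert_eq!(a.get_version_num(), 7);
    }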
@@ -2320,7 +2320,7 @@ impl<C: Clocks + Clone> Drop for Database<C> {
             return; // don't flush while panicking.
         }
         if let Some(m) = self.db.take() {
-            if let Err(e) = m.into_inner().unwrap().flush(&self.clocks, "drop") {
+            if let Err(e) = m.into_inner().flush(&self.clocks, "drop") {
                 error!(err = %e.chain(), "final database flush failed");
             }
         }
@@ -2349,12 +2349,12 @@ impl<C: Clocks + Clone> Database<C> {
         // Note: the meta check comes after the version check to improve the error message when
         // trying to open a version 0 or version 1 database (which lacked the meta table).
         let (db_uuid, config) = raw::read_meta(&conn)?;
-        let open_monotonic = recording::Time::new(clocks.monotonic());
+        let open_monotonic = clocks.monotonic();
         let open = if read_write {
-            let real = recording::Time::new(clocks.realtime());
+            let real = recording::Time::from(clocks.realtime());
             let mut stmt = conn
                 .prepare(" insert into open (uuid, start_time_90k, boot_uuid) values (?, ?, ?)")?;
-            let open_uuid = SqlUuid(Uuid::new_v4());
+            let open_uuid = SqlUuid(Uuid::now_v7());
             let boot_uuid = match get_boot_uuid() {
                 Err(e) => {
                     warn!(err = %e.chain(), "unable to get boot uuid");
@@ -2417,9 +2417,9 @@ impl<C: Clocks + Clone> Database<C> {

     /// Locks the database; the returned reference is the only way to perform (read or write)
     /// operations.
-    pub fn lock(&self) -> DatabaseGuard<C> {
+    pub fn lock(&self) -> DatabaseGuard<'_, C> {
         let timer = clock::TimerGuard::new(&self.clocks, acquisition);
-        let db = self.db.as_ref().unwrap().lock().unwrap();
+        let db = self.db.as_ref().unwrap().lock();
         drop(timer);
         let _timer = clock::TimerGuard::<C, &'static str, fn() -> &'static str>::new(
             &self.clocks,
@@ -2436,7 +2436,7 @@ impl<C: Clocks + Clone> Database<C> {
     /// This allows verification that a newly opened database is in an acceptable state.
     #[cfg(test)]
     fn close(mut self) -> rusqlite::Connection {
-        self.db.take().unwrap().into_inner().unwrap().conn
+        self.db.take().unwrap().into_inner().conn
     }
 }

@@ -2447,7 +2447,7 @@ pub struct DatabaseGuard<'db, C: Clocks> {
     _timer: clock::TimerGuard<'db, C, &'static str, fn() -> &'static str>,
 }

-impl<'db, C: Clocks + Clone> DatabaseGuard<'db, C> {
+impl<C: Clocks + Clone> DatabaseGuard<'_, C> {
     /// Tries to flush unwritten changes from the stream directories.
     ///
     /// * commits any recordings added with `add_recording` that have since been marked as
@@ -2462,14 +2462,14 @@ impl<'db, C: Clocks + Clone> DatabaseGuard<'db, C> {
     }
 }

-impl<'db, C: Clocks + Clone> ::std::ops::Deref for DatabaseGuard<'db, C> {
+impl<C: Clocks + Clone> ::std::ops::Deref for DatabaseGuard<'_, C> {
     type Target = LockedDatabase;
     fn deref(&self) -> &LockedDatabase {
         &self.db
     }
 }

-impl<'db, C: Clocks + Clone> ::std::ops::DerefMut for DatabaseGuard<'db, C> {
+impl<C: Clocks + Clone> ::std::ops::DerefMut for DatabaseGuard<'_, C> {
     fn deref_mut(&mut self) -> &mut LockedDatabase {
         &mut self.db
     }
 }
@@ -2520,7 +2520,7 @@ mod tests {
         rows = 0;
         {
             let db = db.lock();
-            let all_time = recording::Time(i64::min_value())..recording::Time(i64::max_value());
+            let all_time = recording::Time(i64::MIN)..recording::Time(i64::MAX);
             db.list_recordings_by_time(stream_id, all_time, &mut |_row| {
                 rows += 1;
                 Ok(())
@@ -2547,7 +2547,7 @@ mod tests {
         let mut recording_id = None;
         {
             let db = db.lock();
-            let all_time = recording::Time(i64::min_value())..recording::Time(i64::max_value());
+            let all_time = recording::Time(i64::MIN)..recording::Time(i64::MAX);
             db.list_recordings_by_time(stream_id, all_time, &mut |row| {
                 rows += 1;
                 recording_id = Some(row.id);
@@ -2869,9 +2869,8 @@ mod tests {
             .get(&sample_file_dir_id)
             .unwrap()
             .garbage_unlinked
-            .iter()
-            .copied()
-            .collect();
+            .to_vec();

         assert_eq!(&g, &[]);
     }
@@ -7,13 +7,12 @@
 //! This mostly includes opening a directory and looking for recordings within it.
 //! Updates to the directory happen through [crate::writer].

-mod reader;
+pub mod reader;

 use crate::coding;
 use crate::db::CompositeId;
 use crate::schema;
 use base::{bail, err, Error};
-use cstr::cstr;
 use nix::sys::statvfs::Statvfs;
 use nix::{
     fcntl::{FlockArg, OFlag},
@@ -134,7 +133,7 @@ impl Fd {
 /// Reads `dir`'s metadata. If none is found, returns an empty proto.
 pub(crate) fn read_meta(dir: &Fd) -> Result<schema::DirMeta, Error> {
     let mut meta = schema::DirMeta::default();
-    let mut f = match crate::fs::openat(dir.0, cstr!("meta"), OFlag::O_RDONLY, Mode::empty()) {
+    let mut f = match crate::fs::openat(dir.0, c"meta", OFlag::O_RDONLY, Mode::empty()) {
         Err(e) => {
             if e == nix::Error::ENOENT {
                 return Ok(meta);
@@ -184,7 +183,7 @@ pub(crate) fn write_meta(dirfd: RawFd, meta: &schema::DirMeta) -> Result<(), Err
     data.resize(FIXED_DIR_META_LEN, 0); // pad to required length.
     let mut f = crate::fs::openat(
         dirfd,
-        cstr!("meta"),
+        c"meta",
         OFlag::O_CREAT | OFlag::O_WRONLY,
         Mode::S_IRUSR | Mode::S_IWUSR,
     )
@@ -422,12 +421,12 @@ mod tests {
     meta.dir_uuid.extend_from_slice(fake_uuid);
     {
         let o = meta.last_complete_open.mut_or_insert_default();
-        o.id = u32::max_value();
+        o.id = u32::MAX;
         o.uuid.extend_from_slice(fake_uuid);
     }
     {
         let o = meta.in_progress_open.mut_or_insert_default();
-        o.id = u32::max_value();
+        o.id = u32::MAX;
         o.uuid.extend_from_slice(fake_uuid);
     }
     let data = meta
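`c"meta"` is a C-string literal, stabilized in Rust 1.77. It yields a `&'static CStr` directly, which is what lets this diff drop the `cstr` crate (see the Cargo.toml hunk above) and pairs with the `rust-version = "1.88"` bump. For example:

    // C-string literals (Rust 1.77+) produce &'static CStr values directly,
    // replacing the cstr crate's cstr!() proc macro.
    use std::ffi::CStr;

    const META: &CStr = c"meta";

    fn main() {
        assert_eq!(META.to_bytes(), b"meta");
        assert_eq!(META.to_bytes_with_nul(), b"meta\0");
    }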
@@ -26,7 +26,8 @@ const LIST_RECORDINGS_BY_TIME_SQL: &str = r#"
         recording.video_samples,
         recording.video_sync_samples,
         recording.video_sample_entry_id,
-        recording.open_id
+        recording.open_id,
+        recording.end_reason
     from
         recording
     where
@@ -51,6 +52,7 @@ const LIST_RECORDINGS_BY_ID_SQL: &str = r#"
         recording.video_sync_samples,
         recording.video_sample_entry_id,
         recording.open_id,
+        recording.end_reason,
         recording.prev_media_duration_90k,
         recording.prev_runs
     from
@@ -158,11 +160,12 @@ fn list_recordings_inner(
             video_sync_samples: row.get(8).err_kind(ErrorKind::Internal)?,
             video_sample_entry_id: row.get(9).err_kind(ErrorKind::Internal)?,
             open_id: row.get(10).err_kind(ErrorKind::Internal)?,
+            end_reason: row.get(11).err_kind(ErrorKind::Internal)?,
             prev_media_duration_and_runs: match include_prev {
                 false => None,
                 true => Some((
-                    recording::Duration(row.get(11).err_kind(ErrorKind::Internal)?),
-                    row.get(12).err_kind(ErrorKind::Internal)?,
+                    recording::Duration(row.get(12).err_kind(ErrorKind::Internal)?),
+                    row.get(13).err_kind(ErrorKind::Internal)?,
                 )),
             },
         })?;
@@ -281,7 +281,7 @@ impl Segment {
         // include it for consistency with the fast path. It'd be bizarre to have it included or
         // not based on desired_media_range_90k.start.
         let end_90k = if desired_media_range_90k.end == recording.media_duration_90k {
-            i32::max_value()
+            i32::MAX
         } else {
             desired_media_range_90k.end
         };
@@ -395,10 +395,7 @@ impl Segment {
             if let Err(e) = f(&it) {
                 return Err(e);
             }
-            have_frame = match it.next(data) {
-                Err(e) => return Err(e),
-                Ok(hf) => hf,
-            };
+            have_frame = it.next(data)?;
         }
         if key_frame < self.key_frames {
             bail!(
@@ -659,13 +656,16 @@ mod bench {
     /// Benchmarks the decoder, which is performance-critical for .mp4 serving.
     #[bench]
     fn bench_decoder(b: &mut test::Bencher) {
+        crate::testutil::init();
         let data = include_bytes!("testdata/video_sample_index.bin");
         b.bytes = data.len() as u64;
         b.iter(|| {
-            let mut it = SampleIndexIterator::default();
-            while it.next(data).unwrap() {}
-            assert_eq!(30104460, it.pos);
-            assert_eq!(5399985, it.start_90k);
+            for _i in 0..100 {
+                let mut it = SampleIndexIterator::default();
+                while it.next(data).unwrap() {}
+                assert_eq!(30104460, it.pos);
+                assert_eq!(5399985, it.start_90k);
+            }
         });
     }
 }
@@ -81,12 +81,12 @@ impl Point {
     }

     /// Returns an iterator over state as of immediately before this point.
-    fn prev(&self) -> PointDataIterator {
+    fn prev(&self) -> PointDataIterator<'_> {
         PointDataIterator::new(&self.data[0..self.changes_off])
     }

     /// Returns an iterator over changes in this point.
-    fn changes(&self) -> PointDataIterator {
+    fn changes(&self) -> PointDataIterator<'_> {
         PointDataIterator::new(&self.data[self.changes_off..])
     }

@@ -169,7 +169,7 @@ impl<'a> PointDataIterator<'a> {
                 msg("signal overflow: {} + {}", self.cur_signal, signal_delta)
             )
         })?;
-        if state > u16::max_value() as u32 {
+        if state > u32::from(u16::MAX) {
             bail!(OutOfRange, msg("state overflow: {state}"));
         }
         self.cur_pos = p;
@@ -858,10 +858,9 @@ mod tests {
         let mut conn = Connection::open_in_memory().unwrap();
         db::init(&mut conn).unwrap();
         let s = State::init(&conn, &GlobalConfig::default()).unwrap();
-        s.list_changes_by_time(
-            recording::Time::min_value()..recording::Time::max_value(),
-            &mut |_r| panic!("no changes expected"),
-        );
+        s.list_changes_by_time(recording::Time::MIN..recording::Time::MAX, &mut |_r| {
+            panic!("no changes expected")
+        });
     }

     #[test]
@@ -912,10 +911,9 @@ mod tests {
             ..Default::default()
         };
         let mut s = State::init(&conn, &config).unwrap();
-        s.list_changes_by_time(
-            recording::Time::min_value()..recording::Time::max_value(),
-            &mut |_r| panic!("no changes expected"),
-        );
+        s.list_changes_by_time(recording::Time::MIN..recording::Time::MAX, &mut |_r| {
+            panic!("no changes expected")
+        });
         const START: recording::Time = recording::Time(140067462600000); // 2019-04-26T11:59:00
         const NOW: recording::Time = recording::Time(140067468000000); // 2019-04-26T12:00:00
         s.update_signals(START..NOW, &[1, 2], &[2, 1]).unwrap();
@@ -944,14 +942,12 @@ mod tests {
             },
         ];

-        s.list_changes_by_time(
-            recording::Time::min_value()..recording::Time::max_value(),
-            &mut |r| rows.push(*r),
-        );
-        s.list_changes_by_time(
-            recording::Time::max_value()..recording::Time::min_value(),
-            &mut |_r| panic!("no changes expected"),
-        );
+        s.list_changes_by_time(recording::Time::MIN..recording::Time::MAX, &mut |r| {
+            rows.push(*r)
+        });
+        s.list_changes_by_time(recording::Time::MAX..recording::Time::MIN, &mut |_r| {
+            panic!("no changes expected")
+        });
         assert_eq!(&rows[..], EXPECTED);
         let mut expected_days = days::Map::default();
         expected_days.0.insert(
@@ -979,10 +975,9 @@ mod tests {
         drop(s);
         let mut s = State::init(&conn, &config).unwrap();
         rows.clear();
-        s.list_changes_by_time(
-            recording::Time::min_value()..recording::Time::max_value(),
-            &mut |r| rows.push(*r),
-        );
+        s.list_changes_by_time(recording::Time::MIN..recording::Time::MAX, &mut |r| {
+            rows.push(*r)
+        });
         assert_eq!(&rows[..], EXPECTED);

         // Go through it again. This time, hit the max number of signals, forcing START to be
@@ -1012,10 +1007,9 @@ mod tests {
                 state: 0,
             },
         ];
-        s.list_changes_by_time(
-            recording::Time::min_value()..recording::Time::max_value(),
-            &mut |r| rows.push(*r),
-        );
+        s.list_changes_by_time(recording::Time::MIN..recording::Time::MAX, &mut |r| {
+            rows.push(*r)
+        });
         assert_eq!(&rows[..], EXPECTED2);

         {
@@ -1026,10 +1020,9 @@ mod tests {
         drop(s);
         let s = State::init(&conn, &config).unwrap();
         rows.clear();
-        s.list_changes_by_time(
-            recording::Time::min_value()..recording::Time::max_value(),
-            &mut |r| rows.push(*r),
-        );
+        s.list_changes_by_time(recording::Time::MIN..recording::Time::MAX, &mut |r| {
+            rows.push(*r)
+        });
         assert_eq!(&rows[..], EXPECTED2);
     }
 }
@@ -10,7 +10,6 @@ use crate::dir;
 use crate::writer;
 use base::clock::Clocks;
 use base::FastHashMap;
-use std::env;
 use std::sync::Arc;
 use std::thread;
 use tempfile::TempDir;
@@ -33,14 +32,14 @@ pub const TEST_VIDEO_SAMPLE_ENTRY_DATA: &[u8] =
 /// Performs global initialization for tests.
 /// * set up logging. (Note the output can be confusing unless `RUST_TEST_THREADS=1` is set in
 ///   the program's environment prior to running.)
-/// * set `TZ=America/Los_Angeles` so that tests that care about calendar time get the expected
-///   results regardless of machine setup.)
+/// * set time zone `America/Los_Angeles` so that tests that care about
+///   calendar time get the expected results regardless of machine setup.)
 /// * use a fast but insecure password hashing format.
 pub fn init() {
     INIT.call_once(|| {
         base::ensure_malloc_used();
         base::tracing_setup::install_for_tests();
-        env::set_var("TZ", "America/Los_Angeles");
-        time::tzset();
+        base::time::testutil::init_zone();
         crate::auth::set_test_config();
     });
 }
@@ -200,7 +199,7 @@ pub fn add_dummy_recordings_to_db(db: &db::Database, num: usize) {
         wall_duration_90k: 5399985,
         video_samples: 1800,
         video_sync_samples: 60,
-        video_sample_entry_id: video_sample_entry_id,
+        video_sample_entry_id,
         video_index: data,
         run_offset: 0,
         ..Default::default()
||||
@ -80,17 +80,17 @@ pub fn run(args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error>
|
||||
create index user_session_uid on user_session (user_id);
|
||||
"#,
|
||||
)?;
|
||||
let db_uuid = ::uuid::Uuid::new_v4();
|
||||
let db_uuid = ::uuid::Uuid::now_v7();
|
||||
let db_uuid_bytes = &db_uuid.as_bytes()[..];
|
||||
tx.execute("insert into meta (uuid) values (?)", params![db_uuid_bytes])?;
|
||||
let open_uuid = ::uuid::Uuid::new_v4();
|
||||
let open_uuid = ::uuid::Uuid::now_v7();
|
||||
let open_uuid_bytes = &open_uuid.as_bytes()[..];
|
||||
tx.execute(
|
||||
"insert into open (uuid) values (?)",
|
||||
params![open_uuid_bytes],
|
||||
)?;
|
||||
let open_id = tx.last_insert_rowid() as u32;
|
||||
let dir_uuid = ::uuid::Uuid::new_v4();
|
||||
let dir_uuid = ::uuid::Uuid::now_v7();
|
||||
let dir_uuid_bytes = &dir_uuid.as_bytes()[..];
|
||||
|
||||
// Write matching metadata to the directory.
|
||||
|
||||
@@ -9,7 +9,6 @@
 use crate::db::SqlUuid;
 use crate::{dir, schema};
 use base::{bail, err, Error};
-use cstr::cstr;
 use nix::fcntl::{FlockArg, OFlag};
 use nix::sys::stat::Mode;
 use protobuf::Message;
@@ -24,8 +23,8 @@ const FIXED_DIR_META_LEN: usize = 512;

 /// Maybe upgrades the `meta` file, returning if an upgrade happened (and thus a sync is needed).
 fn maybe_upgrade_meta(dir: &dir::Fd, db_meta: &schema::DirMeta) -> Result<bool, Error> {
-    let tmp_path = cstr!("meta.tmp");
-    let meta_path = cstr!("meta");
+    let tmp_path = c"meta.tmp";
+    let meta_path = c"meta";
     let mut f = crate::fs::openat(
         dir.as_fd().as_raw_fd(),
         meta_path,
@@ -27,7 +27,7 @@ fn default_pixel_aspect_ratio(width: u16, height: u16) -> (u16, u16) {
     (1, 1)
 }

-fn parse(data: &[u8]) -> Result<AvcDecoderConfigurationRecord, Error> {
+fn parse(data: &[u8]) -> Result<AvcDecoderConfigurationRecord<'_>, Error> {
     if data.len() < 94 || &data[4..8] != b"avc1" || &data[90..94] != b"avcC" {
         bail!(
             DataLoss,
@@ -133,7 +133,7 @@ pub fn run(_args: &super::Args, tx: &rusqlite::Transaction) -> Result<(), Error>
         )
     })?;
     let sps = ctx
-        .sps_by_id(h264_reader::nal::pps::ParamSetId::from_u32(0).unwrap())
+        .sps_by_id(h264_reader::nal::sps::SeqParamSetId::from_u32(0).unwrap())
         .ok_or_else(|| {
             err!(
                 Unimplemented,
@@ -10,17 +10,15 @@ use crate::recording::{self, MAX_RECORDING_WALL_DURATION};
 use base::clock::{self, Clocks};
 use base::shutdown::ShutdownError;
 use base::FastHashMap;
+use base::Mutex;
 use base::{bail, err, Error};
 use std::cmp::{self, Ordering};
 use std::convert::TryFrom;
 use std::io;
 use std::mem;
 use std::path::PathBuf;
-use std::sync::Mutex;
 use std::sync::{mpsc, Arc};
 use std::thread;
 use std::time::Duration as StdDuration;
-use time::{Duration, Timespec};
 use tracing::{debug, trace, warn};
@@ -103,7 +101,7 @@ struct Syncer<C: Clocks + Clone, D: DirWriter> {
 /// A plan to flush at a given instant due to a recently-saved recording's `flush_if_sec` parameter.
 struct PlannedFlush {
     /// Monotonic time at which this flush should happen.
-    when: Timespec,
+    when: base::clock::Instant,

     /// Recording which prompts this flush. If this recording is already flushed at the planned
     /// time, it can be skipped.
@@ -440,9 +438,7 @@ impl<C: Clocks + Clone, D: DirWriter> Syncer<C, D> {
         let now = self.db.clocks().monotonic();

         // Calculate the timeout to use, mapping negative durations to 0.
-        let timeout = (t - now)
-            .to_std()
-            .unwrap_or_else(|_| StdDuration::new(0, 0));
+        let timeout = t.saturating_sub(&now);
         match self.db.clocks().recv_timeout(cmds, timeout) {
             Err(mpsc::RecvTimeoutError::Disconnected) => return false, // cmd senders gone.
             Err(mpsc::RecvTimeoutError::Timeout) => {
@@ -534,8 +530,11 @@ impl<C: Clocks + Clone, D: DirWriter> Syncer<C, D> {
         let c = db.cameras_by_id().get(&s.camera_id).unwrap();

         // Schedule a flush.
-        let how_soon =
-            Duration::seconds(i64::from(s.config.flush_if_sec)) - wall_duration.to_tm_duration();
+        let how_soon = base::clock::Duration::from_secs(u64::from(s.config.flush_if_sec))
+            .saturating_sub(
+                base::clock::Duration::try_from(wall_duration)
+                    .expect("wall_duration is non-negative"),
+            );
         let now = self.db.clocks().monotonic();
         let when = now + how_soon;
         let reason = format!(
@@ -546,7 +545,7 @@ impl<C: Clocks + Clone, D: DirWriter> Syncer<C, D> {
             s.type_.as_str(),
             id
         );
-        trace!("scheduling flush in {} because {}", how_soon, &reason);
+        trace!("scheduling flush in {:?} because {}", how_soon, &reason);
         self.planned_flushes.push(PlannedFlush {
             when,
             reason,
@@ -600,15 +599,15 @@ impl<C: Clocks + Clone, D: DirWriter> Syncer<C, D> {
             return;
         }
         if let Err(e) = l.flush(&f.reason) {
-            let d = Duration::minutes(1);
+            let d = base::clock::Duration::from_secs(60);
             warn!(
-                "flush failure on save for reason {}; will retry after {}: {:?}",
+                "flush failure on save for reason {}; will retry after {:?}: {:?}",
                 f.reason, d, e
            );
            self.planned_flushes
                 .peek_mut()
                 .expect("planned_flushes is non-empty")
-                .when = self.db.clocks().monotonic() + Duration::minutes(1);
+                .when = self.db.clocks().monotonic() + base::clock::Duration::from_secs(60);
             return;
         }
@@ -730,9 +729,7 @@ impl<'a, C: Clocks + Clone, D: DirWriter> Writer<'a, C, D> {
             self.stream_id,
             db::RecordingToInsert {
                 run_offset: prev.map(|p| p.run_offset + 1).unwrap_or(0),
-                start: prev
-                    .map(|p| p.end)
-                    .unwrap_or(recording::Time(i64::max_value())),
+                start: prev.map(|p| p.end).unwrap_or(recording::Time::MAX),
                 video_sample_entry_id,
                 flags: db::RecordingFlags::Growing as i32,
                 ..Default::default()
@@ -749,7 +746,7 @@ impl<'a, C: Clocks + Clone, D: DirWriter> Writer<'a, C, D> {
             e: recording::SampleIndexEncoder::default(),
             id,
             hasher: blake3::Hasher::new(),
-            local_start: recording::Time(i64::max_value()),
+            local_start: recording::Time::MAX,
             unindexed_sample: None,
             video_sample_entry_id,
         });
@@ -883,7 +880,7 @@ impl<F: FileWriter> InnerWriter<F> {
         db: &db::Database<C>,
         stream_id: i32,
     ) -> Result<(), Error> {
-        let mut l = self.r.lock().unwrap();
+        let mut l = self.r.lock();

         // design/time.md explains these time manipulations in detail.
         let prev_media_duration_90k = l.media_duration_90k;
@@ -915,7 +912,7 @@ impl<F: FileWriter> InnerWriter<F> {
         db.lock()
             .send_live_segment(
                 stream_id,
-                db::LiveSegment {
+                db::LiveFrame {
                     recording: self.id.recording(),
                     is_key,
                     media_off_90k: prev_media_duration_90k..media_duration_90k,
@@ -972,7 +969,7 @@ impl<F: FileWriter> InnerWriter<F> {
         // This always ends a live segment.
         let wall_duration;
         {
-            let mut l = self.r.lock().unwrap();
+            let mut l = self.r.lock();
             l.flags = flags;
             l.local_time_delta = self.local_start - l.start;
             l.sample_file_blake3 = Some(*blake3.as_bytes());
@@ -987,7 +984,7 @@ impl<F: FileWriter> InnerWriter<F> {
     }
 }

-impl<'a, C: Clocks + Clone, D: DirWriter> Drop for Writer<'a, C, D> {
+impl<C: Clocks + Clone, D: DirWriter> Drop for Writer<'_, C, D> {
     fn drop(&mut self) {
         if ::std::thread::panicking() {
             // This will probably panic again. Don't do it.
@@ -1015,11 +1012,11 @@ mod tests {
     use crate::recording;
     use crate::testutil;
     use base::clock::{Clocks, SimulatedClocks};
+    use base::Mutex;
     use std::collections::VecDeque;
     use std::io;
     use std::sync::mpsc;
     use std::sync::Arc;
-    use std::sync::Mutex;
     use tracing::{trace, warn};

     #[derive(Clone)]
@@ -1042,10 +1039,10 @@ mod tests {
             MockDir(Arc::new(Mutex::new(VecDeque::new())))
         }
         fn expect(&self, action: MockDirAction) {
-            self.0.lock().unwrap().push_back(action);
+            self.0.lock().push_back(action);
         }
         fn ensure_done(&self) {
-            assert_eq!(self.0.lock().unwrap().len(), 0);
+            assert_eq!(self.0.lock().len(), 0);
         }
     }
@@ -1056,7 +1053,6 @@ mod tests {
             match self
                 .0
                 .lock()
-                .unwrap()
                 .pop_front()
                 .expect("got create_file with no expectation")
             {
@@ -1071,7 +1067,6 @@ mod tests {
             match self
                 .0
                 .lock()
-                .unwrap()
                 .pop_front()
                 .expect("got sync with no expectation")
             {
@@ -1083,7 +1078,6 @@ mod tests {
             match self
                 .0
                 .lock()
-                .unwrap()
                 .pop_front()
                 .expect("got unlink_file with no expectation")
             {
@@ -1099,7 +1093,7 @@ mod tests {
     impl Drop for MockDir {
         fn drop(&mut self) {
             if !::std::thread::panicking() {
-                assert_eq!(self.0.lock().unwrap().len(), 0);
+                assert_eq!(self.0.lock().len(), 0);
             }
         }
     }
@@ -1109,6 +1103,8 @@ mod tests {

     enum MockFileAction {
         SyncAll(Box<dyn Fn() -> Result<(), io::Error> + Send>),
+
+        #[allow(clippy::type_complexity)]
         Write(Box<dyn Fn(&[u8]) -> Result<usize, io::Error> + Send>),
     }
@@ -1117,10 +1113,10 @@ mod tests {
             MockFile(Arc::new(Mutex::new(VecDeque::new())))
         }
         fn expect(&self, action: MockFileAction) {
-            self.0.lock().unwrap().push_back(action);
+            self.0.lock().push_back(action);
         }
         fn ensure_done(&self) {
-            assert_eq!(self.0.lock().unwrap().len(), 0);
+            assert_eq!(self.0.lock().len(), 0);
         }
     }
@@ -1129,7 +1125,6 @@ mod tests {
             match self
                 .0
                 .lock()
-                .unwrap()
                 .pop_front()
                 .expect("got sync_all with no expectation")
             {
@@ -1141,7 +1136,6 @@ mod tests {
             match self
                 .0
                 .lock()
-                .unwrap()
                 .pop_front()
                 .expect("got write with no expectation")
             {
@@ -1164,7 +1158,7 @@ mod tests {
     }

     fn new_harness(flush_if_sec: u32) -> Harness {
-        let clocks = SimulatedClocks::new(::time::Timespec::new(0, 0));
+        let clocks = SimulatedClocks::new(base::clock::SystemTime::new(0, 0));
         let tdb = testutil::TestDb::new_with_flush_if_sec(clocks, flush_if_sec);
         let dir_id = *tdb
             .db
@@ -1218,7 +1212,7 @@ mod tests {
     }

     fn eio() -> io::Error {
-        io::Error::new(io::ErrorKind::Other, "got EIO")
+        io::Error::other("got EIO")
     }

     #[test]
@@ -1266,7 +1260,7 @@ mod tests {
             &mut h.shutdown_rx,
             b"2",
             recording::Time(2),
-            i32::max_value() as i64 + 1,
+            i64::from(i32::MAX) + 1,
             true,
             video_sample_entry_id,
         )
@@ -1655,7 +1649,7 @@ mod tests {
         let mut h = new_harness(60); // flush_if_sec=60

         // There's a database constraint forbidding a recording starting at t=0, so advance.
-        h.db.clocks().sleep(time::Duration::seconds(1));
+        h.db.clocks().sleep(base::clock::Duration::from_secs(1));

         // Setup: add a 3-byte recording.
         let video_sample_entry_id =
@@ -1702,7 +1696,7 @@ mod tests {
         h.db.lock().flush("forced").unwrap();
         assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
         assert_eq!(h.syncer.planned_flushes.len(), 1);
-        h.db.clocks().sleep(time::Duration::seconds(30));
+        h.db.clocks().sleep(base::clock::Duration::from_secs(30));

         // Then, a 1-byte recording.
         let mut w = Writer::new(&h.dir, &h.db, &h.channel, testutil::TEST_STREAM_ID);
@@ -1737,13 +1731,22 @@ mod tests {

         assert_eq!(h.syncer.planned_flushes.len(), 2);
         let db_flush_count_before = h.db.lock().flushes();
-        assert_eq!(h.db.clocks().monotonic(), time::Timespec::new(31, 0));
+        assert_eq!(
+            h.db.clocks().monotonic(),
+            base::clock::Instant::from_secs(31)
+        );
         assert!(h.syncer.iter(&h.syncer_rx)); // planned flush (no-op)
-        assert_eq!(h.db.clocks().monotonic(), time::Timespec::new(61, 0));
+        assert_eq!(
+            h.db.clocks().monotonic(),
+            base::clock::Instant::from_secs(61)
+        );
         assert_eq!(h.db.lock().flushes(), db_flush_count_before);
         assert_eq!(h.syncer.planned_flushes.len(), 1);
         assert!(h.syncer.iter(&h.syncer_rx)); // planned flush
-        assert_eq!(h.db.clocks().monotonic(), time::Timespec::new(91, 0));
+        assert_eq!(
+            h.db.clocks().monotonic(),
+            base::clock::Instant::from_secs(91)
+        );
         assert_eq!(h.db.lock().flushes(), db_flush_count_before + 1);
         assert_eq!(h.syncer.planned_flushes.len(), 0);
         assert!(h.syncer.iter(&h.syncer_rx)); // DatabaseFlushed
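Two of the hunks above replace signed time arithmetic that could go negative (`t - now` for the receive timeout, and the configured interval minus elapsed wall time for the flush delay) with saturating operations on unsigned duration types. The same shapes with std types, as a sketch:

    // A deadline in the past clamps to a zero timeout, and the flush delay is
    // flush_if_sec minus wall time already covered, floored at zero.
    use std::time::{Duration, Instant};

    fn timeout_until(deadline: Instant, now: Instant) -> Duration {
        deadline.saturating_duration_since(now) // Duration::ZERO if already past
    }

    fn how_soon(flush_if_sec: u64, wall_duration: Duration) -> Duration {
        Duration::from_secs(flush_if_sec).saturating_sub(wall_duration)
    }

    fn main() {
        let now = Instant::now();
        assert_eq!(timeout_until(now, now + Duration::from_secs(5)), Duration::ZERO);
        assert_eq!(how_soon(120, Duration::from_secs(90)), Duration::from_secs(30));
        assert_eq!(how_soon(120, Duration::from_secs(150)), Duration::ZERO);
    }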
@@ -11,20 +11,15 @@
 //! although this is a pretty small optimization.
 //!
 //! Some day I expect [bytes::Bytes] will expose its vtable (see link above),
-//! allowing us to minimize reference-counting while using the standard
-//! [hyper::Body].
+//! allowing us to minimize reference-counting without a custom chunk type.

 use base::Error;
-use futures::{stream, Stream};
 use reffers::ARefss;
 use std::error::Error as StdError;
-use std::pin::Pin;
-use sync_wrapper::SyncWrapper;

 pub struct Chunk(ARefss<'static, [u8]>);

 pub type BoxedError = Box<dyn StdError + Send + Sync>;
-pub type BodyStream = Box<dyn Stream<Item = Result<Chunk, BoxedError>> + Send>;

 pub fn wrap_error(e: Error) -> BoxedError {
     Box::new(e)
@@ -72,55 +67,4 @@ impl hyper::body::Buf for Chunk {
     }
 }

-// This SyncWrapper stuff is blindly copied from hyper's body type.
-// See <https://github.com/hyperium/hyper/pull/2187>, matched by
-// <https://github.com/scottlamb/http-serve/pull/18>.
-pub struct Body(SyncWrapper<Pin<BodyStream>>);
-
-impl hyper::body::HttpBody for Body {
-    type Data = Chunk;
-    type Error = BoxedError;
-
-    fn poll_data(
-        self: Pin<&mut Self>,
-        cx: &mut std::task::Context,
-    ) -> std::task::Poll<Option<Result<Self::Data, Self::Error>>> {
-        // This is safe because the pin is not structural.
-        // https://doc.rust-lang.org/std/pin/#pinning-is-not-structural-for-field
-        // (The field _holds_ a pin, but isn't itself pinned.)
-        unsafe { self.get_unchecked_mut() }
-            .0
-            .get_mut()
-            .as_mut()
-            .poll_next(cx)
-    }
-
-    fn poll_trailers(
-        self: Pin<&mut Self>,
-        _cx: &mut std::task::Context,
-    ) -> std::task::Poll<Result<Option<http::header::HeaderMap>, Self::Error>> {
-        std::task::Poll::Ready(Ok(None))
-    }
-}
-
-impl From<BodyStream> for Body {
-    fn from(b: BodyStream) -> Self {
-        Body(SyncWrapper::new(Pin::from(b)))
-    }
-}
-
-impl<C: Into<Chunk>> From<C> for Body {
-    fn from(c: C) -> Self {
-        Body(SyncWrapper::new(Box::pin(stream::once(
-            futures::future::ok(c.into()),
-        ))))
-    }
-}
-
-impl From<Error> for Body {
-    fn from(e: Error) -> Self {
-        Body(SyncWrapper::new(Box::pin(stream::once(
-            futures::future::err(wrap_error(e)),
-        ))))
-    }
-}
+pub type Body = http_serve::Body<Chunk>;
@@ -6,8 +6,8 @@

 use base::FastHashMap;
 use http::{header, HeaderMap, HeaderValue};
-use std::io::Read;
 use std::sync::OnceLock;
+use std::{io::Read, pin::Pin};

 use crate::body::{BoxedError, Chunk};

@@ -150,9 +150,9 @@ impl http_serve::Entity for Entity {
     fn get_range(
         &self,
         range: std::ops::Range<u64>,
-    ) -> Box<dyn futures::Stream<Item = Result<Self::Data, Self::Error>> + Send + Sync> {
+    ) -> Pin<Box<dyn futures::Stream<Item = Result<Self::Data, Self::Error>> + Send + Sync>> {
         let file = self.file;
-        Box::new(futures::stream::once(async move {
+        Box::pin(futures::stream::once(async move {
             let r = usize::try_from(range.start)?..usize::try_from(range.end)?;
             let Some(data) = file.data.get(r) else {
                 let len = file.data.len();
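`get_range` now returns `Pin<Box<dyn Stream ...>>`, pinning up front so callers can drive the stream, since `Stream::poll_next` takes `Pin<&mut Self>` and `Box::pin` does the allocation and pinning in one step. A self-contained sketch of the shape, with std/futures types standing in for the project's `Chunk` and `BoxedError`:

    use futures::stream::{Stream, StreamExt};
    use std::pin::Pin;

    fn byte_stream(
        data: Vec<u8>,
    ) -> Pin<Box<dyn Stream<Item = Result<Vec<u8>, std::io::Error>> + Send + Sync>> {
        Box::pin(futures::stream::once(async move {
            Ok::<_, std::io::Error>(data)
        }))
    }

    #[tokio::main]
    async fn main() {
        let mut s = byte_stream(b"hello".to_vec());
        while let Some(chunk) = s.next().await {
            println!("{} bytes", chunk.unwrap().len());
        }
    }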
@@ -181,14 +181,16 @@ fn press_edit(siv: &mut Cursive, db: &Arc<db::Database>, id: Option<i32>) {
             );
         }
         let stream_change = &mut change.streams[i];
-        stream_change.config.mode = (if stream.record {
+        (if stream.record {
             db::json::STREAM_MODE_RECORD
         } else {
             ""
         })
-        .to_owned();
+        .clone_into(&mut stream_change.config.mode);
         stream_change.config.url = parse_stream_url(type_, &stream.url)?;
-        stream_change.config.rtsp_transport = stream.rtsp_transport.to_owned();
+        stream
+            .rtsp_transport
+            .clone_into(&mut stream_change.config.rtsp_transport);
         stream_change.sample_file_dir_id = stream.sample_file_dir_id;
         stream_change.config.flush_if_sec = if stream.flush_if_sec.is_empty() {
             0
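`clone_into` (from `std::borrow::ToOwned`) writes into the existing `String`, reusing its allocation when capacity allows, where assigning `.to_owned()` always allocates afresh; this is the rewrite clippy's `assigning_clones` lint suggests. In isolation:

    // clone_into reuses the destination's buffer when it can, unlike
    // `dst = src.to_owned()`, which always allocates a new String.
    fn main() {
        let mut dst = String::with_capacity(32);
        let cap_before = dst.capacity();
        "record".clone_into(&mut dst);
        assert_eq!(dst, "record");
        assert!(dst.capacity() >= cap_before); // buffer reused, not reallocated
    }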
@@ -4,15 +4,14 @@

 use base::strutil::{decode_size, encode_size};
 use base::Error;
+use base::Mutex;
 use cursive::traits::{Nameable, Resizable};
 use cursive::view::Scrollable;
 use cursive::Cursive;
 use cursive::{views, With};
 use db::writer;
-use std::cell::RefCell;
 use std::collections::BTreeMap;
 use std::path::Path;
-use std::rc::Rc;
 use std::sync::Arc;
 use tracing::{debug, trace};

@@ -58,10 +57,8 @@ fn update_limits(model: &Model, siv: &mut Cursive) {
     }
 }

-fn edit_limit(model: &RefCell<Model>, siv: &mut Cursive, id: i32, content: &str) {
+fn edit_limit(model: &mut Model, siv: &mut Cursive, id: i32, content: &str) {
     debug!("on_edit called for id {}", id);
-    let mut model = model.borrow_mut();
-    let model: &mut Model = &mut model;
     let stream = model.streams.get_mut(&id).unwrap();
     let new_value = decode_size(content).ok();
     let delta = new_value.unwrap_or(0) - stream.retain.unwrap_or(0);
@@ -96,14 +93,12 @@ fn edit_limit(model: &RefCell<Model>, siv: &mut Cursive, id: i32, content: &str)
     }
 }

-fn edit_record(model: &RefCell<Model>, id: i32, record: bool) {
-    let mut model = model.borrow_mut();
-    let model: &mut Model = &mut model;
+fn edit_record(model: &mut Model, id: i32, record: bool) {
     let stream = model.streams.get_mut(&id).unwrap();
     stream.record = record;
 }

-fn confirm_deletion(model: &RefCell<Model>, siv: &mut Cursive, to_delete: i64) {
+fn confirm_deletion(model: &Mutex<Model>, siv: &mut Cursive, to_delete: i64) {
     let typed = siv
         .find_name::<views::EditView>("confirm")
         .unwrap()
@@ -124,8 +119,8 @@ fn confirm_deletion(model: &RefCell<Model>, siv: &mut Cursive, to_delete: i64) {
     }
 }

-fn actually_delete(model: &RefCell<Model>, siv: &mut Cursive) {
-    let model = &*model.borrow();
+fn actually_delete(model: &Mutex<Model>, siv: &mut Cursive) {
+    let model = model.lock();
     let new_limits: Vec<_> = model
         .streams
         .iter()
@@ -147,20 +142,21 @@ fn actually_delete(model: &RefCell<Model>, siv: &mut Cursive) {
             .dismiss_button("Abort"),
         );
     } else {
-        update_limits(model, siv);
+        update_limits(&model, siv);
     }
 }

-fn press_change(model: &Rc<RefCell<Model>>, siv: &mut Cursive) {
-    if model.borrow().errors > 0 {
-        return;
-    }
-    let to_delete = model
-        .borrow()
-        .streams
-        .values()
-        .map(|s| ::std::cmp::max(s.used - s.retain.unwrap(), 0))
-        .sum();
+fn press_change(model: &Arc<Mutex<Model>>, siv: &mut Cursive) {
+    let to_delete = {
+        let l = model.lock();
+        if l.errors > 0 {
+            return;
+        }
+        l.streams
+            .values()
+            .map(|s| ::std::cmp::max(s.used - s.retain.unwrap(), 0))
+            .sum()
+    };
     debug!("change press, to_delete={}", to_delete);
     if to_delete > 0 {
         let prompt = format!(
@@ -190,7 +186,7 @@ fn press_change(model: &Rc<RefCell<Model>>, siv: &mut Cursive) {
         siv.add_layer(dialog);
     } else {
         siv.pop_layer();
-        update_limits(&model.borrow(), siv);
+        update_limits(&model.lock(), siv);
     }
 }

@@ -367,7 +363,7 @@ fn edit_dir_dialog(db: &Arc<db::Database>, siv: &mut Cursive, dir_id: i32) {
         fs_capacity = stat.block_size() as i64 * stat.blocks_available() as i64 + total_used;
         path = dir.path.clone();
     }
-    Rc::new(RefCell::new(Model {
+    Arc::new(Mutex::new(Model {
         dir_id,
         db: db.clone(),
         fs_capacity,
@@ -389,12 +385,13 @@ fn edit_dir_dialog(db: &Arc<db::Database>, siv: &mut Cursive, dir_id: i32) {
             .child(views::TextView::new("usage").fixed_width(BYTES_WIDTH))
             .child(views::TextView::new("limit").fixed_width(BYTES_WIDTH)),
     );
-    for (&id, stream) in &model.borrow().streams {
+    let l = model.lock();
+    for (&id, stream) in &l.streams {
         let mut record_cb = views::Checkbox::new();
         record_cb.set_checked(stream.record);
         record_cb.set_on_change({
             let model = model.clone();
-            move |_siv, record| edit_record(&model, id, record)
+            move |_siv, record| edit_record(&mut model.lock(), id, record)
         });
         list.add_child(
             &stream.label,
@@ -406,7 +403,9 @@ fn edit_dir_dialog(db: &Arc<db::Database>, siv: &mut Cursive, dir_id: i32) {
                     .content(encode_size(stream.retain.unwrap()))
                     .on_edit({
                         let model = model.clone();
-                        move |siv, content, _pos| edit_limit(&model, siv, id, content)
+                        move |siv, content, _pos| {
+                            edit_limit(&mut model.lock(), siv, id, content)
+                        }
                     })
                     .on_submit({
                         let model = model.clone();
@@ -421,17 +420,14 @@ fn edit_dir_dialog(db: &Arc<db::Database>, siv: &mut Cursive, dir_id: i32) {
                 ),
         );
     }
-    let over = model.borrow().total_retain > model.borrow().fs_capacity;
+    let over = l.total_retain > l.fs_capacity;
     list.add_child(
         "total",
         views::LinearLayout::horizontal()
             .child(views::DummyView {}.fixed_width(RECORD_WIDTH))
-            .child(
-                views::TextView::new(encode_size(model.borrow().total_used))
-                    .fixed_width(BYTES_WIDTH),
-            )
+            .child(views::TextView::new(encode_size(l.total_used)).fixed_width(BYTES_WIDTH))
             .child(
-                views::TextView::new(encode_size(model.borrow().total_retain))
+                views::TextView::new(encode_size(l.total_retain))
                     .with_name("total_retain")
                     .fixed_width(BYTES_WIDTH),
             )
@@ -442,11 +438,9 @@ fn edit_dir_dialog(db: &Arc<db::Database>, siv: &mut Cursive, dir_id: i32) {
         views::LinearLayout::horizontal()
             .child(views::DummyView {}.fixed_width(RECORD_WIDTH))
            .child(views::DummyView {}.fixed_width(BYTES_WIDTH))
-            .child(
-                views::TextView::new(encode_size(model.borrow().fs_capacity))
-                    .fixed_width(BYTES_WIDTH),
-            ),
+            .child(views::TextView::new(encode_size(l.fs_capacity)).fixed_width(BYTES_WIDTH)),
     );
+    drop(l);
     let mut change_button = views::Button::new("Change", move |siv| press_change(&model, siv));
     change_button.set_enabled(!over);
     let mut buttons = views::LinearLayout::horizontal().child(views::DummyView.full_width());

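The Rc<RefCell<Model>> to Arc<Mutex<Model>> conversion above follows from cursive callbacks now needing to be Send + Sync. A minimal sketch of the shape (std's Mutex here; the diff's base::Mutex appears to return the guard directly, without a poison Result):

use std::sync::{Arc, Mutex};

struct Model {
    errors: usize,
}

fn main() {
    let model = Arc::new(Mutex::new(Model { errors: 0 }));
    // Each callback clones the Arc and locks instead of borrow_mut();
    // the resulting closure is Send + Sync, which Rc<RefCell<_>> never is.
    let on_edit = {
        let model = Arc::clone(&model);
        move || model.lock().unwrap().errors += 1
    };
    on_edit();
    assert_eq!(model.lock().unwrap().errors, 1);
}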
@@ -2,7 +2,8 @@
 // Copyright (C) 2020 The Moonfire NVR Authors; see AUTHORS and LICENSE.txt.
 // SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception.

-use std::{cell::RefCell, rc::Rc};
+use base::Mutex;
+use std::sync::Arc;

 use cursive::{
     direction::Direction,
@@ -13,46 +14,49 @@ use cursive::{
     Printer, Rect, Vec2, View, With,
 };

-type TabCompleteFn = Rc<dyn Fn(&str) -> Vec<String>>;
+type TabCompleteFn = Arc<dyn Fn(&str) -> Vec<String> + Send + Sync>;

 pub struct TabCompleteEditView {
-    edit_view: Rc<RefCell<EditView>>,
+    edit_view: Arc<Mutex<EditView>>,
     tab_completer: Option<TabCompleteFn>,
 }

 impl TabCompleteEditView {
     pub fn new(edit_view: EditView) -> Self {
         Self {
-            edit_view: Rc::new(RefCell::new(edit_view)),
+            edit_view: Arc::new(Mutex::new(edit_view)),
             tab_completer: None,
         }
     }

-    pub fn on_tab_complete(mut self, handler: impl Fn(&str) -> Vec<String> + 'static) -> Self {
-        self.tab_completer = Some(Rc::new(handler));
+    pub fn on_tab_complete(
+        mut self,
+        handler: impl Fn(&str) -> Vec<String> + Send + Sync + 'static,
+    ) -> Self {
+        self.tab_completer = Some(Arc::new(handler));
         self
     }

-    pub fn get_content(&self) -> Rc<String> {
-        self.edit_view.borrow_mut().get_content()
+    pub fn get_content(&self) -> Arc<String> {
+        self.edit_view.lock().get_content()
     }
 }

 impl View for TabCompleteEditView {
     fn draw(&self, printer: &Printer) {
-        self.edit_view.borrow().draw(printer)
+        self.edit_view.lock().draw(printer)
     }

     fn layout(&mut self, size: Vec2) {
-        self.edit_view.borrow_mut().layout(size)
+        self.edit_view.lock().layout(size)
     }

     fn take_focus(&mut self, source: Direction) -> Result<EventResult, CannotFocus> {
-        self.edit_view.borrow_mut().take_focus(source)
+        self.edit_view.lock().take_focus(source)
     }

     fn on_event(&mut self, event: Event) -> EventResult {
-        if !self.edit_view.borrow().is_enabled() {
+        if !self.edit_view.lock().is_enabled() {
             return EventResult::Ignored;
         }

@@ -63,32 +67,32 @@ impl View for TabCompleteEditView {
                 EventResult::consumed()
             }
         } else {
-            self.edit_view.borrow_mut().on_event(event)
+            self.edit_view.lock().on_event(event)
         }
     }

     fn important_area(&self, view_size: Vec2) -> Rect {
-        self.edit_view.borrow().important_area(view_size)
+        self.edit_view.lock().important_area(view_size)
     }
 }

 fn tab_complete(
-    edit_view: Rc<RefCell<EditView>>,
+    edit_view: Arc<Mutex<EditView>>,
     tab_completer: TabCompleteFn,
     autofill_one: bool,
 ) -> EventResult {
-    let completions = tab_completer(edit_view.borrow().get_content().as_str());
+    let completions = tab_completer(edit_view.lock().get_content().as_str());
     EventResult::with_cb_once(move |siv| match *completions {
         [] => {}
-        [ref completion] if autofill_one => edit_view.borrow_mut().set_content(completion)(siv),
+        [ref completion] if autofill_one => edit_view.lock().set_content(completion)(siv),
         [..] => {
             siv.add_layer(TabCompletePopup {
-                popup: views::MenuPopup::new(Rc::new({
+                popup: views::MenuPopup::new(Arc::new({
                     menu::Tree::new().with(|tree| {
                         for completion in completions {
                             let edit_view = edit_view.clone();
-                            tree.add_leaf(&completion.clone(), move |siv| {
-                                edit_view.borrow_mut().set_content(&completion)(siv)
+                            tree.add_leaf(completion.clone(), move |siv| {
+                                edit_view.lock().set_content(&completion)(siv)
                             })
                         }
                     })
@@ -101,7 +105,7 @@ fn tab_complete(
 }

 struct TabCompletePopup {
-    edit_view: Rc<RefCell<EditView>>,
+    edit_view: Arc<Mutex<EditView>>,
     popup: MenuPopup,
     tab_completer: TabCompleteFn,
 }
@@ -111,7 +115,7 @@ impl TabCompletePopup {
         let tab_completer = self.tab_completer.clone();
         EventResult::with_cb_once(move |s| {
             s.pop_layer();
-            edit_view.borrow_mut().on_event(event).process(s);
+            edit_view.lock().on_event(event).process(s);
             tab_complete(edit_view, tab_completer, false).process(s);
         })
     }

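A hypothetical completer satisfying the new `TabCompleteFn` bound (the candidate list is illustrative):

use std::sync::Arc;

type TabCompleteFn = Arc<dyn Fn(&str) -> Vec<String> + Send + Sync>;

fn main() {
    let candidates = ["/etc/localtime", "/etc/timezone", "/etc/zoneinfo"];
    // Send + Sync is now required so the completer can be shared across
    // threads; a plain capture of a static list satisfies it trivially.
    let complete: TabCompleteFn = Arc::new(move |prefix| {
        candidates
            .iter()
            .filter(|c| c.starts_with(prefix))
            .map(|c| c.to_string())
            .collect()
    });
    assert_eq!(complete("/etc/l").len(), 1);
}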
@@ -189,7 +189,7 @@ fn edit_user_dialog(db: &Arc<db::Database>, siv: &mut Cursive, item: Option<i32>
     ] {
         let mut checkbox = views::Checkbox::new();
         checkbox.set_checked(*b);
-        perms.add_child(name, checkbox.with_name(format!("perm_{name}")));
+        perms.add_child(*name, checkbox.with_name(format!("perm_{name}")));
     }
     layout.add_child(perms);


@@ -78,7 +78,7 @@ pub fn run(args: Args) -> Result<i32, Error> {
         .map(db::Permissions::from)
         .unwrap_or_else(|| u.permissions.clone());
     let creation = db::auth::Request {
-        when_sec: Some(db.clocks().realtime().sec),
+        when_sec: Some(db.clocks().realtime().as_secs()),
         user_agent: None,
         addr: None,
     };

@@ -44,7 +44,7 @@ pub struct ConfigFile {
 #[serde(rename_all = "camelCase", untagged)]
 pub enum UiDir {
     FromFilesystem(PathBuf),
-    Bundled(BundledUi),
+    Bundled(#[allow(unused)] BundledUi),
 }

 impl Default for UiDir {
@@ -99,7 +99,7 @@ pub struct BindConfig {
     pub own_uid_is_privileged: bool,
 }

-#[derive(Debug, Deserialize)]
+#[derive(Clone, Debug, Deserialize)]
 #[serde(deny_unknown_fields)]
 #[serde(rename_all = "camelCase")]
 pub enum AddressConfig {
@@ -116,5 +116,16 @@ pub enum AddressConfig {
     ///
     /// See [systemd.socket(5) manual
     /// page](https://www.freedesktop.org/software/systemd/man/systemd.socket.html).
-    Systemd(String),
+    Systemd(#[cfg_attr(not(target_os = "linux"), allow(unused))] String),
 }
+
+impl std::fmt::Display for AddressConfig {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            AddressConfig::Ipv4(addr) => write!(f, "ipv4:{}", addr),
+            AddressConfig::Ipv6(addr) => write!(f, "ipv6:{}", addr),
+            AddressConfig::Unix(path) => write!(f, "unix:{}", path.display()),
+            AddressConfig::Systemd(name) => write!(f, "systemd:{name}"),
+        }
+    }
+}

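A sketch of how the untagged `UiDir` accepts either a bare path or a structured value, using serde_json in place of the real config format (any fields beyond those shown are hypothetical):

use serde::Deserialize;
use std::path::PathBuf;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase", untagged)]
enum UiDir {
    FromFilesystem(PathBuf),
    Bundled(BundledUi),
}

#[derive(Debug, Deserialize)]
struct BundledUi {}

fn main() {
    // A plain string picks the filesystem variant; an object picks Bundled.
    let from_fs: UiDir = serde_json::from_str(r#""/usr/local/lib/moonfire-nvr/ui""#).unwrap();
    let bundled: UiDir = serde_json::from_str("{}").unwrap();
    println!("{from_fs:?} / {bundled:?}");
}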
@@ -11,7 +11,7 @@ use base::FastHashMap;
 use base::{bail, Error};
 use bpaf::Bpaf;
 use db::{dir, writer};
-use hyper::service::{make_service_fn, service_fn};
+use hyper::service::service_fn;
 use itertools::Itertools;
 use retina::client::SessionGroup;
 use std::net::SocketAddr;
@@ -44,89 +44,6 @@ pub struct Args {
     read_only: bool,
 }

-// These are used in a hack to get the name of the current time zone (e.g. America/Los_Angeles).
-// They seem to be correct for Linux and macOS at least.
-const LOCALTIME_PATH: &str = "/etc/localtime";
-const TIMEZONE_PATH: &str = "/etc/timezone";
-
-// Some well-known zone paths looks like the following:
-// /usr/share/zoneinfo/* for Linux and macOS < High Sierra
-// /var/db/timezone/zoneinfo/* for macOS High Sierra
-// /etc/zoneinfo/* for NixOS
-fn zoneinfo_name(path: &str) -> Option<&str> {
-    path.rsplit_once("/zoneinfo/").map(|(_, name)| name)
-}
-
-/// Attempt to resolve the timezone of the server.
-/// The Javascript running in the browser needs this to match the server's timezone calculations.
-fn resolve_zone() -> Result<String, Error> {
-    // If the environmental variable `TZ` exists, is valid UTF-8, and doesn't just reference
-    // `/etc/localtime/`, use that.
-    if let Ok(tz) = ::std::env::var("TZ") {
-        let mut p: &str = &tz;
-
-        // Strip of an initial `:` if present. Having `TZ` set in this way is a trick to avoid
-        // repeated `tzset` calls:
-        // https://blog.packagecloud.io/eng/2017/02/21/set-environment-variable-save-thousands-of-system-calls/
-        if p.starts_with(':') {
-            p = &p[1..];
-        }
-
-        if let Some(p) = zoneinfo_name(p) {
-            return Ok(p.to_owned());
-        }
-
-        if !p.starts_with('/') {
-            return Ok(p.to_owned());
-        }
-
-        if p != LOCALTIME_PATH {
-            bail!(
-                FailedPrecondition,
-                msg("unable to resolve env TZ={tz} to a timezone")
-            );
-        }
-    }
-
-    // If `LOCALTIME_PATH` is a symlink, use that. On some systems, it's instead a copy of the
-    // desired timezone, which unfortunately doesn't contain its own name.
-    match ::std::fs::read_link(LOCALTIME_PATH) {
-        Ok(localtime_dest) => {
-            let localtime_dest = match localtime_dest.to_str() {
-                Some(d) => d,
-                None => bail!(
-                    FailedPrecondition,
-                    msg("{LOCALTIME_PATH} symlink destination is invalid UTF-8")
-                ),
-            };
-            if let Some(p) = zoneinfo_name(localtime_dest) {
-                return Ok(p.to_owned());
-            }
-            bail!(
-                FailedPrecondition,
-                msg("unable to resolve {LOCALTIME_PATH} symlink destination {localtime_dest} to a timezone"),
-            );
-        }
-        Err(e) => {
-            use ::std::io::ErrorKind;
-            if e.kind() != ErrorKind::NotFound && e.kind() != ErrorKind::InvalidInput {
-                bail!(e, msg("unable to read {LOCALTIME_PATH} symlink"));
-            }
-        }
-    };
-
-    // If `TIMEZONE_PATH` is a file, use its contents as the zone name, trimming whitespace.
-    match ::std::fs::read_to_string(TIMEZONE_PATH) {
-        Ok(z) => Ok(z.trim().to_owned()),
-        Err(e) => {
-            bail!(
-                e,
-                msg("unable to resolve timezone from TZ env, {LOCALTIME_PATH}, or {TIMEZONE_PATH}"),
-            );
-        }
-    }
-}
-
 struct Syncer {
     dir: Arc<dir::SampleFileDir>,
     channel: writer::SyncerChannel<::std::fs::File>,
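All of the deleted resolve_zone() machinery collapses into a jiff lookup in the new code; a minimal sketch (assuming the jiff crate, which consults $TZ and /etc/localtime much like the removed logic did):

fn main() {
    let zone = jiff::tz::TimeZone::system();
    match zone.iana_name() {
        Some(name) => println!("resolved time zone: {name}"),
        None => eprintln!("no IANA name available; check $TZ and /etc/localtime"),
    }
}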
@@ -337,7 +254,13 @@ async fn inner(
     }
     info!("Directories are opened.");

-    let time_zone_name = resolve_zone()?;
+    let zone = base::time::global_zone();
+    let Some(time_zone_name) = zone.iana_name() else {
+        bail!(
+            Unknown,
+            msg("unable to get IANA time zone name; check your $TZ and /etc/localtime")
+        );
+    };
     info!("Resolved timezone: {}", &time_zone_name);

     // Start a streamer for each stream.

@@ -448,35 +371,43 @@ async fn inner(
     // Start the web interface(s).
     let own_euid = nix::unistd::Uid::effective();
     let mut preopened = get_preopened_sockets()?;
-    let web_handles: Result<Vec<_>, Error> = config
-        .binds
-        .iter()
-        .map(|b| {
-            let svc = Arc::new(web::Service::new(web::Config {
-                db: db.clone(),
-                ui_dir: Some(&config.ui_dir),
-                allow_unauthenticated_permissions: b
-                    .allow_unauthenticated_permissions
-                    .clone()
-                    .map(db::Permissions::from),
-                trust_forward_hdrs: b.trust_forward_headers,
-                time_zone_name: time_zone_name.clone(),
-                privileged_unix_uid: b.own_uid_is_privileged.then_some(own_euid),
-            })?);
-            let make_svc = make_service_fn(move |conn: &crate::web::accept::Conn| {
-                let svc = Arc::clone(&svc);
-                let conn_data = *conn.data();
-                futures::future::ok::<_, std::convert::Infallible>(service_fn({
-                    let svc = Arc::clone(&svc);
-                    move |req| Arc::clone(&svc).serve(req, conn_data)
-                }))
-            });
-            let listener = make_listener(&b.address, &mut preopened)?;
-            let server = ::hyper::Server::builder(listener).serve(make_svc);
-            let server = server.with_graceful_shutdown(shutdown_rx.future());
-            Ok(tokio::spawn(server))
-        })
-        .collect();
-    let web_handles = web_handles?;
+    for bind in &config.binds {
+        let svc = Arc::new(web::Service::new(web::Config {
+            db: db.clone(),
+            ui_dir: Some(&config.ui_dir),
+            allow_unauthenticated_permissions: bind
+                .allow_unauthenticated_permissions
+                .clone()
+                .map(db::Permissions::from),
+            trust_forward_hdrs: bind.trust_forward_headers,
+            time_zone_name: time_zone_name.to_owned(),
+            privileged_unix_uid: bind.own_uid_is_privileged.then_some(own_euid),
+        })?);
+        let mut listener = make_listener(&bind.address, &mut preopened)?;
+        let addr = bind.address.clone();
+        tokio::spawn(async move {
+            loop {
+                let conn = match listener.accept().await {
+                    Ok(c) => c,
+                    Err(e) => {
+                        error!(err = %e, listener = %addr, "accept failed; will retry in 1 sec");
+                        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+                        continue;
+                    }
+                };
+                let svc = Arc::clone(&svc);
+                let conn_data = *conn.data();
+                let io = hyper_util::rt::TokioIo::new(conn);
+                let svc_fn = service_fn(move |req| Arc::clone(&svc).serve(req, conn_data));
+                tokio::spawn(
+                    hyper::server::conn::http1::Builder::new()
+                        .serve_connection(io, svc_fn)
+                        .with_upgrades(),
+                );
+            }
+        });
+    }
     if !preopened.is_empty() {
         warn!(
             "ignoring systemd sockets not referenced in config: {}",
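The hunk above replaces hyper 0.14's make_service_fn/Server::builder with hyper 1.x's per-connection model. A self-contained sketch of that accept loop (hyper 1, hyper-util, http-body-util, and tokio assumed as dependencies):

use std::convert::Infallible;

use http_body_util::Full;
use hyper::body::Bytes;
use hyper::service::service_fn;
use hyper::{Request, Response};
use hyper_util::rt::TokioIo;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?;
    loop {
        let (conn, _addr) = listener.accept().await?;
        // hyper 1.x no longer owns the accept loop: the application accepts
        // each connection, wraps it for hyper's IO traits, and spawns a task
        // that drives the HTTP/1 state machine for that one connection.
        let io = TokioIo::new(conn);
        tokio::spawn(async move {
            let svc = service_fn(|_req: Request<hyper::body::Incoming>| async {
                Ok::<_, Infallible>(Response::new(Full::new(Bytes::from("hello"))))
            });
            let _ = hyper::server::conn::http1::Builder::new()
                .serve_connection(io, svc)
                .await;
        });
    }
}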
@@ -524,15 +455,6 @@ async fn inner(
         .await
         .map_err(|e| err!(Unknown, source(e)))?;

-    db.lock().clear_watches();
-
-    info!("Waiting for HTTP requests to finish.");
-    for h in web_handles {
-        h.await
-            .map_err(|e| err!(Unknown, source(e)))?
-            .map_err(|e| err!(Unknown, source(e)))?;
-    }
-
     info!("Waiting for TEARDOWN requests to complete.");
     for g in session_groups_by_camera.values() {
         if let Err(err) = g.await_teardown().await {

@@ -1,235 +0,0 @@
-// This file is part of Moonfire NVR, a security camera network video recorder.
-// Copyright (C) 2021 The Moonfire NVR Authors; see AUTHORS and LICENSE.txt.
-// SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception.
-
-//! H.264 decoding
-//!
-//! For the most part, Moonfire NVR does not try to understand the video codec. However, H.264 has
-//! two byte stream encodings: ISO/IEC 14496-10 Annex B, and ISO/IEC 14496-15 AVC access units.
-//! When streaming from RTSP, ffmpeg supplies the former. We need the latter to stick into `.mp4`
-//! files. This file manages the conversion, both for the ffmpeg "extra data" (which should become
-//! the ISO/IEC 14496-15 section 5.2.4.1 `AVCDecoderConfigurationRecord`) and the actual samples.
-//!
-//! See the [wiki page on standards and
-//! specifications](https://github.com/scottlamb/moonfire-nvr/wiki/Standards-and-specifications)
-//! for help finding a copy of the relevant standards. This code won't make much sense without them!
-//!
-//! ffmpeg of course has logic to do the same thing, but unfortunately it is not exposed except
-//! through ffmpeg's own generated `.mp4` file. Extracting just this part of their `.mp4` files
-//! would be more trouble than it's worth.
-
-use base::{bail, err, Error};
-use byteorder::{BigEndian, ByteOrder, WriteBytesExt};
-use db::VideoSampleEntryToInsert;
-use std::convert::TryFrom;
-
-// For certain common sub stream anamorphic resolutions, add a pixel aspect ratio box.
-// Assume the camera is 16x9. These are just the standard wide mode; default_pixel_aspect_ratio
-// tries the transpose also.
-const PIXEL_ASPECT_RATIOS: [((u16, u16), (u16, u16)); 6] = [
-    ((320, 240), (4, 3)),
-    ((352, 240), (40, 33)),
-    ((640, 352), (44, 45)),
-    ((640, 480), (4, 3)),
-    ((704, 480), (40, 33)),
-    ((720, 480), (32, 27)),
-];
-
-/// Get the pixel aspect ratio to use if none is specified.
-///
-/// The Dahua IPC-HDW5231R-Z sets the aspect ratio in the H.264 SPS (correctly) for both square and
-/// non-square pixels. The Hikvision DS-2CD2032-I doesn't set it, even though the sub stream's
-/// pixels aren't square. So define a default based on the pixel dimensions to use if the camera
-/// doesn't tell us what to do.
-///
-/// Note that at least in the case of .mp4 muxing, we don't need to fix up the underlying SPS.
-/// PixelAspectRatioBox's definition says that it overrides the H.264-level declaration.
-fn default_pixel_aspect_ratio(width: u16, height: u16) -> (u16, u16) {
-    if width >= height {
-        PIXEL_ASPECT_RATIOS
-            .iter()
-            .find(|r| r.0 == (width, height))
-            .map(|r| r.1)
-            .unwrap_or((1, 1))
-    } else {
-        PIXEL_ASPECT_RATIOS
-            .iter()
-            .find(|r| r.0 == (height, width))
-            .map(|r| (r.1 .1, r.1 .0))
-            .unwrap_or((1, 1))
-    }
-}
-
-/// Parses the `AvcDecoderConfigurationRecord` in the "extra data".
-pub fn parse_extra_data(extradata: &[u8]) -> Result<VideoSampleEntryToInsert, Error> {
-    let avcc =
-        h264_reader::avcc::AvcDecoderConfigurationRecord::try_from(extradata).map_err(|e| {
-            err!(
-                InvalidArgument,
-                msg("bad AvcDecoderConfigurationRecord: {:?}", e)
-            )
-        })?;
-    if avcc.num_of_sequence_parameter_sets() != 1 {
-        bail!(Unimplemented, msg("multiple SPSs!"));
-    }
-    let ctx = avcc
-        .create_context()
-        .map_err(|e| err!(Unknown, msg("can't load SPS+PPS: {:?}", e)))?;
-    let sps = ctx
-        .sps_by_id(h264_reader::nal::pps::ParamSetId::from_u32(0).unwrap())
-        .ok_or_else(|| err!(Unimplemented, msg("no SPS 0")))?;
-    let pixel_dimensions = sps.pixel_dimensions().map_err(|e| {
-        err!(
-            InvalidArgument,
-            msg("SPS has invalid pixel dimensions: {:?}", e)
-        )
-    })?;
-    let (Ok(width), Ok(height)) = (
-        u16::try_from(pixel_dimensions.0),
-        u16::try_from(pixel_dimensions.1),
-    ) else {
-        bail!(
-            InvalidArgument,
-            msg(
-                "bad dimensions {}x{}",
-                pixel_dimensions.0,
-                pixel_dimensions.1
-            )
-        );
-    };
-
-    let mut sample_entry = Vec::with_capacity(256);
-
-    // This is a concatenation of the following boxes/classes.
-
-    // SampleEntry, ISO/IEC 14496-12 section 8.5.2.
-    let avc1_len_pos = sample_entry.len();
-    // length placeholder + type + reserved + data_reference_index = 1
-    sample_entry.extend_from_slice(b"\x00\x00\x00\x00avc1\x00\x00\x00\x00\x00\x00\x00\x01");
-
-    // VisualSampleEntry, ISO/IEC 14496-12 section 12.1.3.
-    sample_entry.extend_from_slice(&[0; 16]); // pre-defined + reserved
-    sample_entry.write_u16::<BigEndian>(width)?;
-    sample_entry.write_u16::<BigEndian>(height)?;
-    sample_entry.extend_from_slice(&[
-        0x00, 0x48, 0x00, 0x00, // horizresolution
-        0x00, 0x48, 0x00, 0x00, // vertresolution
-        0x00, 0x00, 0x00, 0x00, // reserved
-        0x00, 0x01, // frame count
-        0x00, 0x00, 0x00, 0x00, // compressorname
-        0x00, 0x00, 0x00, 0x00, //
-        0x00, 0x00, 0x00, 0x00, //
-        0x00, 0x00, 0x00, 0x00, //
-        0x00, 0x00, 0x00, 0x00, //
-        0x00, 0x00, 0x00, 0x00, //
-        0x00, 0x00, 0x00, 0x00, //
-        0x00, 0x00, 0x00, 0x00, //
-        0x00, 0x18, 0xff, 0xff, // depth + pre_defined
-    ]);
-
-    // AVCSampleEntry, ISO/IEC 14496-15 section 5.3.4.1.
-    // AVCConfigurationBox, ISO/IEC 14496-15 section 5.3.4.1.
-    let avcc_len_pos = sample_entry.len();
-    sample_entry.extend_from_slice(b"\x00\x00\x00\x00avcC");
-    sample_entry.extend_from_slice(extradata);
-
-    // Fix up avc1 and avcC box lengths.
-    let cur_pos = sample_entry.len();
-    BigEndian::write_u32(
-        &mut sample_entry[avcc_len_pos..avcc_len_pos + 4],
-        u32::try_from(cur_pos - avcc_len_pos).map_err(|_| err!(OutOfRange))?,
-    );
-
-    // PixelAspectRatioBox, ISO/IEC 14496-12 section 12.1.4.2.
-    // Write a PixelAspectRatioBox if necessary, as the sub streams can be be anamorphic.
-    let pasp = sps
-        .vui_parameters
-        .as_ref()
-        .and_then(|v| v.aspect_ratio_info.as_ref())
-        .and_then(|a| a.clone().get())
-        .unwrap_or_else(|| default_pixel_aspect_ratio(width, height));
-    if pasp != (1, 1) {
-        sample_entry.extend_from_slice(b"\x00\x00\x00\x10pasp"); // length + box name
-        sample_entry.write_u32::<BigEndian>(pasp.0.into())?;
-        sample_entry.write_u32::<BigEndian>(pasp.1.into())?;
-    }
-
-    let cur_pos = sample_entry.len();
-    BigEndian::write_u32(
-        &mut sample_entry[avc1_len_pos..avc1_len_pos + 4],
-        u32::try_from(cur_pos - avc1_len_pos).map_err(|_| err!(OutOfRange))?,
-    );
-
-    let profile_idc = sample_entry[103];
-    let constraint_flags = sample_entry[104];
-    let level_idc = sample_entry[105];
-
-    let rfc6381_codec = format!("avc1.{profile_idc:02x}{constraint_flags:02x}{level_idc:02x}");
-    Ok(VideoSampleEntryToInsert {
-        data: sample_entry,
-        rfc6381_codec,
-        width,
-        height,
-        pasp_h_spacing: pasp.0,
-        pasp_v_spacing: pasp.1,
-    })
-}
-
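The deleted parse_extra_data relied on a write-then-patch pattern for MP4 box lengths; a standalone sketch of just that pattern (byteorder crate):

use byteorder::{BigEndian, ByteOrder, WriteBytesExt};

fn main() -> std::io::Result<()> {
    let mut buf = Vec::new();
    // Reserve a 4-byte length placeholder, then write the box type and body.
    let len_pos = buf.len();
    buf.extend_from_slice(b"\x00\x00\x00\x00pasp");
    buf.write_u32::<BigEndian>(4)?; // hSpacing
    buf.write_u32::<BigEndian>(3)?; // vSpacing
    // Patch the placeholder with the box's final length.
    let total = u32::try_from(buf.len() - len_pos).unwrap();
    BigEndian::write_u32(&mut buf[len_pos..len_pos + 4], total);
    assert_eq!(buf.len(), 16);
    Ok(())
}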
-#[cfg(test)]
-mod tests {
-    use db::testutil;
-
-    #[rustfmt::skip]
-    const AVC_DECODER_CONFIG_TEST_INPUT: [u8; 38] = [
-        0x01, 0x4d, 0x00, 0x1f, 0xff, 0xe1, 0x00, 0x17,
-        0x67, 0x4d, 0x00, 0x1f, 0x9a, 0x66, 0x02, 0x80,
-        0x2d, 0xff, 0x35, 0x01, 0x01, 0x01, 0x40, 0x00,
-        0x00, 0xfa, 0x00, 0x00, 0x1d, 0x4c, 0x01, 0x01,
-        0x00, 0x04, 0x68, 0xee, 0x3c, 0x80,
-    ];
-
-    #[rustfmt::skip]
-    const TEST_OUTPUT: [u8; 132] = [
-        0x00, 0x00, 0x00, 0x84, 0x61, 0x76, 0x63, 0x31,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x05, 0x00, 0x02, 0xd0, 0x00, 0x48, 0x00, 0x00,
-        0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-        0x00, 0x00, 0x00, 0x18, 0xff, 0xff, 0x00, 0x00,
-        0x00, 0x2e, 0x61, 0x76, 0x63, 0x43, 0x01, 0x4d,
-        0x00, 0x1f, 0xff, 0xe1, 0x00, 0x17, 0x67, 0x4d,
-        0x00, 0x1f, 0x9a, 0x66, 0x02, 0x80, 0x2d, 0xff,
-        0x35, 0x01, 0x01, 0x01, 0x40, 0x00, 0x00, 0xfa,
-        0x00, 0x00, 0x1d, 0x4c, 0x01, 0x01, 0x00, 0x04,
-        0x68, 0xee, 0x3c, 0x80,
-    ];
-
-    #[test]
-    fn test_sample_entry_from_avc_decoder_config() {
-        testutil::init();
-        let e = super::parse_extra_data(&AVC_DECODER_CONFIG_TEST_INPUT).unwrap();
-        assert_eq!(&e.data[..], &TEST_OUTPUT[..]);
-        assert_eq!(e.width, 1280);
-        assert_eq!(e.height, 720);
-        assert_eq!(e.rfc6381_codec, "avc1.4d001f");
-    }
-
-    #[test]
-    fn pixel_aspect_ratios() {
-        use super::default_pixel_aspect_ratio;
-        use num_rational::Ratio;
-        for &((w, h), _) in &super::PIXEL_ASPECT_RATIOS {
-            let (h_spacing, v_spacing) = default_pixel_aspect_ratio(w, h);
-            assert_eq!(Ratio::new(w * h_spacing, h * v_spacing), Ratio::new(16, 9));
-
-            // 90 or 270 degree rotation.
-            let (h_spacing, v_spacing) = default_pixel_aspect_ratio(h, w);
-            assert_eq!(Ratio::new(h * h_spacing, w * v_spacing), Ratio::new(9, 16));
-        }
-    }
-}
@@ -376,7 +376,7 @@ struct SignalDayValue<'a> {
     pub states: &'a [u64],
 }

-impl<'a> TopLevel<'a> {
+impl TopLevel<'_> {
     /// Serializes cameras as a list (rather than a map), optionally including the `days` and
     /// `cameras` fields.
     fn serialize_cameras<S>(
@@ -440,7 +440,7 @@ pub struct ListRecordings<'a> {
     pub video_sample_entries: (&'a db::LockedDatabase, Vec<i32>),
 }

-impl<'a> ListRecordings<'a> {
+impl ListRecordings<'_> {
     fn serialize_video_sample_entries<S>(
         video_sample_entries: &(&db::LockedDatabase, Vec<i32>),
         serializer: S,
@@ -483,6 +483,9 @@ pub struct Recording {

     #[serde(skip_serializing_if = "Not::not")]
     pub has_trailing_zero: bool,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub end_reason: Option<String>,
 }

 #[derive(Debug, Serialize)]

@@ -12,7 +12,6 @@ use tracing::{debug, error};

 mod body;
 mod cmds;
-mod h264;
 mod json;
 mod mp4;
 mod slices;
@@ -71,13 +70,14 @@ fn main() {
     // anything (with timestamps...) so we can print a helpful error.
     if let Err(e) = nix::time::clock_gettime(nix::time::ClockId::CLOCK_MONOTONIC) {
         eprintln!(
-            "clock_gettime failed: {e}\n\n\
+            "clock_gettime(CLOCK_MONOTONIC) failed: {e}\n\n\
             This indicates a broken environment. See the troubleshooting guide."
         );
         std::process::exit(1);
     }

     base::tracing_setup::install();
+    base::time::init_zone(jiff::tz::TimeZone::system);
     base::ensure_malloc_used();

     // Get the program name from the OS (e.g. if invoked as `target/debug/nvr`: `nvr`),
     // falling back to the crate name if conversion to a path/UTF-8 string fails.
@@ -94,7 +94,10 @@ fn main() {
         .run_inner(bpaf::Args::current_args().set_name(progname))
     {
         Ok(a) => a,
-        Err(e) => std::process::exit(e.exit_code()),
+        Err(e) => {
+            e.print_message(100);
+            std::process::exit(e.exit_code())
+        }
     };
     tracing::trace!("Parsed command-line arguments: {args:#?}");


@@ -61,28 +61,27 @@ use byteorder::{BigEndian, ByteOrder, WriteBytesExt};
 use bytes::BytesMut;
 use db::dir;
 use db::recording::{self, rescale, TIME_UNITS_PER_SEC};
 use futures::stream::{self, TryStreamExt};
 use futures::Stream;
 use http::header::HeaderValue;
 use hyper::body::Buf;
 use pin_project::pin_project;
 use reffers::ARefss;
 use smallvec::SmallVec;
-use std::cell::UnsafeCell;
 use std::cmp;
 use std::convert::TryFrom;
 use std::fmt;
 use std::io;
 use std::mem;
 use std::ops::Range;
-use std::sync::Arc;
-use std::sync::Once;
+use std::pin::Pin;
+use std::sync::{Arc, OnceLock};
 use std::time::SystemTime;
 use tracing::{debug, error, trace, warn};

 /// This value should be incremented any time a change is made to this file that causes different
 /// bytes or headers to be output for a particular set of `FileBuilder` options. Incrementing this
 /// value will cause the etag to change as well.
-const FORMAT_VERSION: [u8; 1] = [0x09];
+const FORMAT_VERSION: [u8; 1] = [0x0a];

 /// An `ftyp` (ISO/IEC 14496-12 section 4.3 `FileType`) box.
 const NORMAL_FTYP_BOX: &[u8] = &[
@@ -328,19 +327,18 @@ struct Segment {
     /// * _media_ time: as described in design/glossary.md and design/time.md.
     rel_media_range_90k: Range<i32>,

-    /// If generated, the `.mp4`-format sample indexes, accessed only through `get_index`:
+    /// If generated, the `.mp4`-format sample indexes, accessed only through `index`:
     /// 1. stts: `slice[.. stsz_start]`
     /// 2. stsz: `slice[stsz_start .. stss_start]`
     /// 3. stss: `slice[stss_start ..]`
-    index: UnsafeCell<Result<Box<[u8]>, ()>>,
-    index_once: Once,
+    index: OnceLock<Result<Box<[u8]>, ()>>,

     /// The 1-indexed frame number in the `File` of the first frame in this segment.
     first_frame_num: u32,
     num_subtitle_samples: u16,
 }

-// Manually implement Debug because `index` and `index_once` are not Debug.
+// Manually implement `Debug` skipping the obnoxiously-long `index` field.
 impl fmt::Debug for Segment {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
         fmt.debug_struct("mp4::Segment")
@@ -361,8 +359,6 @@ impl fmt::Debug for Segment {
     }
 }

-unsafe impl Sync for Segment {}
-
 impl Segment {
     fn new(
         db: &db::LockedDatabase,
@@ -378,8 +374,7 @@ impl Segment {
             recording_wall_duration_90k: row.wall_duration_90k,
             recording_media_duration_90k: row.media_duration_90k,
             rel_media_range_90k,
-            index: UnsafeCell::new(Err(())),
-            index_once: Once::new(),
+            index: OnceLock::new(),
             first_frame_num,
             num_subtitle_samples: 0,
         })
@@ -401,24 +396,18 @@ impl Segment {
         )
     }

-    fn get_index<'a, F>(&'a self, db: &db::Database, f: F) -> Result<&'a [u8], Error>
-    where
-        F: FnOnce(&[u8], SegmentLengths) -> &[u8],
-    {
-        self.index_once.call_once(|| {
-            let index = unsafe { &mut *self.index.get() };
-            *index = db
-                .lock()
-                .with_recording_playback(self.s.id, &mut |playback| self.build_index(playback))
-                .map_err(|err| {
-                    error!(%err, recording_id = %self.s.id, "unable to build index for segment");
-                });
-        });
-        let index: &'a _ = unsafe { &*self.index.get() };
-        match *index {
-            Ok(ref b) => Ok(f(&b[..], self.lens())),
-            Err(()) => bail!(Unknown, msg("unable to build index; see logs")),
-        }
+    fn index<'a>(&'a self, db: &db::Database) -> Result<&'a [u8], Error> {
+        self.index
+            .get_or_init(|| {
+                db
+                    .lock()
+                    .with_recording_playback(self.s.id, &mut |playback| self.build_index(playback))
+                    .map_err(|err| {
+                        error!(err = %err.chain(), recording_id = %self.s.id, "unable to build index for segment");
+                    })
+            })
+            .as_deref()
+            .map_err(|()| err!(Unknown, msg("unable to build index; see logs")))
     }

     fn lens(&self) -> SegmentLengths {
@@ -727,7 +716,9 @@ impl Slice {
         let p = self.p();
         Ok(mp4
             .try_map(|mp4| {
-                let i = mp4.segments[p].get_index(&mp4.db, f)?;
+                let segment = &mp4.segments[p];
+                let i = segment.index(&mp4.db)?;
+                let i = f(i, segment.lens());
                 if u64::try_from(i.len()).unwrap() != len {
                     bail!(Internal, msg("expected len {} got {}", len, i.len()));
                 }
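The UnsafeCell + Once pair above collapses into std's OnceLock, which caches even the failed outcome; a minimal sketch of the same shape:

use std::sync::OnceLock;

struct Segment {
    index: OnceLock<Result<Box<[u8]>, ()>>,
}

impl Segment {
    // Build at most once, remember a failure as Err(()), and hand out a
    // borrowed slice on success, mirroring the diff's index() method.
    fn index(&self) -> Result<&[u8], &'static str> {
        self.index
            .get_or_init(|| build_index().map_err(|_| ()))
            .as_deref()
            .map_err(|()| "unable to build index; see logs")
    }
}

fn build_index() -> Result<Box<[u8]>, String> {
    Ok(vec![0u8; 4].into_boxed_slice())
}

fn main() {
    let s = Segment { index: OnceLock::new() };
    assert_eq!(s.index().unwrap().len(), 4);
}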
@@ -767,19 +758,37 @@ impl Slice {
     }
 }

+#[pin_project(project = SliceStreamProj)]
+enum SliceStream {
+    Once(Option<Result<Chunk, Error>>),
+    File(#[pin] db::dir::reader::FileStream),
+}
+
+impl futures::stream::Stream for SliceStream {
+    type Item = Result<Chunk, BoxedError>;
+
+    fn poll_next(
+        self: Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<Option<Self::Item>> {
+        match self.project() {
+            SliceStreamProj::Once(o) => {
+                std::task::Poll::Ready(o.take().map(|r| r.map_err(wrap_error)))
+            }
+            SliceStreamProj::File(f) => f.poll_next(cx).map_ok(Chunk::from).map_err(wrap_error),
+        }
+    }
+}
+
 impl slices::Slice for Slice {
     type Ctx = File;
     type Chunk = Chunk;
+    type Stream = SliceStream;

     fn end(&self) -> u64 {
         self.0 & 0xFF_FF_FF_FF_FF
     }
-    fn get_range(
-        &self,
-        f: &File,
-        range: Range<u64>,
-        len: u64,
-    ) -> Box<dyn Stream<Item = Result<Self::Chunk, BoxedError>> + Send + Sync> {
+    fn get_range(&self, f: &File, range: Range<u64>, len: u64) -> SliceStream {
         trace!("getting mp4 slice {:?}'s range {:?} / {}", self, range, len);
         let p = self.p();
         let res = match self.t() {
@@ -811,22 +820,20 @@ impl slices::Slice for Slice {
             SliceType::SubtitleSampleData => f.0.get_subtitle_sample_data(p, range.clone(), len),
             SliceType::Truns => self.wrap_truns(f, range.clone(), len as usize),
         };
-        Box::new(stream::once(futures::future::ready(
-            res.map_err(wrap_error).and_then(move |c| {
-                if c.remaining() != (range.end - range.start) as usize {
-                    return Err(wrap_error(err!(
-                        Internal,
-                        msg(
-                            "{:?} range {:?} produced incorrect len {}",
-                            self,
-                            range,
-                            c.remaining()
-                        )
-                    )));
-                }
-                Ok(c)
-            }),
-        )))
+        SliceStream::Once(Some(res.and_then(move |c| {
+            if c.remaining() != (range.end - range.start) as usize {
+                bail!(
+                    Internal,
+                    msg(
+                        "{:?} range {:?} produced incorrect len {}",
+                        self,
+                        range,
+                        c.remaining()
+                    )
+                );
+            }
+            Ok(c)
+        })))
     }

     fn get_slices(ctx: &File) -> &Slices<Self> {
@@ -927,7 +934,7 @@ impl FileBuilder {
     pub fn append(
         &mut self,
         db: &db::LockedDatabase,
-        row: db::ListRecordingsRow,
+        row: &db::ListRecordingsRow,
         rel_media_range_90k: Range<i32>,
         start_at_key: bool,
     ) -> Result<(), Error> {
@@ -951,7 +958,7 @@ impl FileBuilder {
         }
         let s = Segment::new(
             db,
-            &row,
+            row,
             rel_media_range_90k,
             self.next_frame_num,
             start_at_key,
@@ -1028,7 +1035,7 @@ impl FileBuilder {
                 let start_sec = wall.start.unix_seconds();
                 let end_sec =
                     (wall.end + recording::Duration(TIME_UNITS_PER_SEC - 1)).unix_seconds();
-                s.num_subtitle_samples = (end_sec - start_sec) as u16;
+                s.num_subtitle_samples = (end_sec - start_sec + 1) as u16;
                 self.num_subtitle_samples += s.num_subtitle_samples as u32;
             }

@@ -1071,7 +1078,7 @@ impl FileBuilder {

         // If the segment is > 4 GiB, the 32-bit trun data offsets are untrustworthy.
         // We'd need multiple moof+mdat sequences to support large media segments properly.
-        if self.body.slices.len() > u32::max_value() as u64 {
+        if self.body.slices.len() > u64::from(u32::MAX) {
             bail!(
                 OutOfRange,
                 msg(
@@ -1805,32 +1812,20 @@ impl FileInner {
             .into())
     }

-    /// Gets a `Chunk` of video sample data from disk.
-    /// This works by `mmap()`ing in the data. There are a couple caveats:
-    ///
-    /// * The thread which reads the resulting slice is likely to experience major page faults.
-    ///   Eventually this will likely be rewritten to `mmap()` the memory in another thread, and
-    ///   `mlock()` and send chunks of it to be read and `munlock()`ed to avoid this problem.
-    ///
-    /// * If the backing file is truncated, the program will crash with `SIGBUS`. This shouldn't
-    ///   happen because nothing should be touching Moonfire NVR's files but itself.
-    fn get_video_sample_data(
-        &self,
-        i: usize,
-        r: Range<u64>,
-    ) -> Box<dyn Stream<Item = Result<Chunk, BoxedError>> + Send + Sync> {
+    /// Gets a stream representing a range of segment `i`'s sample data from disk.
+    fn get_video_sample_data(&self, i: usize, r: Range<u64>) -> SliceStream {
         let s = &self.segments[i];
         let sr = s.s.sample_file_range();
         let f = match self.dirs_by_stream_id.get(&s.s.id.stream()) {
             None => {
-                return Box::new(stream::iter(std::iter::once(Err(wrap_error(err!(
+                return SliceStream::Once(Some(Err(err!(
                     NotFound,
                     msg("{}: stream not found", s.s.id)
-                ))))))
+                ))))
             }
             Some(d) => d.open_file(s.s.id, (r.start + sr.start)..(r.end + sr.start)),
         };
-        Box::new(f.map_ok(Chunk::from).map_err(wrap_error))
+        SliceStream::File(f)
     }

     fn get_subtitle_sample_data(&self, i: usize, r: Range<u64>, len: u64) -> Result<Chunk, Error> {
@@ -1839,24 +1834,26 @@ impl FileInner {
         let wd = s.wall(md.start)..s.wall(md.end);
         let start_sec =
             (s.recording_start + recording::Duration(i64::from(wd.start))).unix_seconds();
-        let end_sec = (s.recording_start
+        let end_inclusive_sec = (s.recording_start
             + recording::Duration(i64::from(wd.end) + TIME_UNITS_PER_SEC - 1))
         .unix_seconds();
         let len = usize::try_from(len).unwrap();
         let mut v = Vec::with_capacity(len);
-        // TODO(slamb): is this right?!? might have an off-by-one here.
-        for ts in start_sec..end_sec {
+        let mut tm = jiff::Zoned::new(
+            jiff::Timestamp::from_second(start_sec).expect("timestamp is valid"),
+            base::time::global_zone(),
+        );
+        let mut cur_sec = start_sec;
+        loop {
             v.write_u16::<BigEndian>(SUBTITLE_LENGTH as u16)
                 .expect("Vec write shouldn't fail");
-            let tm = time::at(time::Timespec { sec: ts, nsec: 0 });
-            use std::io::Write;
-            write!(
-                v,
-                "{}",
-                tm.strftime(SUBTITLE_TEMPLATE)
-                    .err_kind(ErrorKind::Internal)?
-            )
-            .expect("Vec write shouldn't fail");
+            use std::io::Write as _;
+            write!(v, "{}", tm.strftime(SUBTITLE_TEMPLATE)).expect("Vec write shouldn't fail");
+            if cur_sec == end_inclusive_sec {
+                break;
+            }
+            tm += std::time::Duration::from_secs(1);
+            cur_sec += 1;
         }
         assert_eq!(len, v.len());
         Ok(ARefss::new(v)
@@ -1880,7 +1877,7 @@ impl File {
                     ),
                 )
             })?);
-        let mut b = std::pin::Pin::from(self.get_range(0..self.len()));
+        let mut b = self.get_range(0..self.len());
         loop {
             use futures::stream::StreamExt;
             match b.next().await {
@@ -1971,7 +1968,7 @@ impl http_serve::Entity for File {
     fn get_range(
         &self,
         range: Range<u64>,
-    ) -> Box<dyn Stream<Item = Result<Self::Data, Self::Error>> + Send + Sync> {
+    ) -> Pin<Box<dyn Stream<Item = Result<Self::Data, Self::Error>> + Send + Sync>> {
         self.0.slices.get_range(self, range)
     }
 }
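A sketch of the jiff arithmetic the new subtitle loop relies on: start from a timestamp in some zone and step one second at a time (jiff crate assumed; the zone and timestamp here are arbitrary):

fn main() -> Result<(), jiff::Error> {
    let mut tm = jiff::Zoned::new(
        jiff::Timestamp::from_second(1_700_000_000)?,
        jiff::tz::TimeZone::UTC,
    );
    for _ in 0..3 {
        // strftime formats in the zone; += advances by absolute time,
        // so DST transitions are handled for us.
        println!("{}", tm.strftime("%Y-%m-%d %H:%M:%S"));
        tm += std::time::Duration::from_secs(1);
    }
    Ok(())
}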
@@ -2012,7 +2009,6 @@ mod tests {
     use std::fs;
     use std::ops::Range;
     use std::path::Path;
-    use std::pin::Pin;
     use std::str;
     use tracing::info;

@@ -2021,7 +2017,7 @@ mod tests {
         E::Error: ::std::fmt::Debug,
     {
         let mut p = 0;
-        Pin::from(e.get_range(start..start + slice.len() as u64))
+        e.get_range(start..start + slice.len() as u64)
             .try_for_each(|mut chunk| {
                 let len = chunk.remaining();
                 chunk.copy_to_slice(&mut slice[p..p + len]);
@@ -2047,7 +2043,7 @@ mod tests {
             hasher.update(&b"\r\n"[..]);
         }
         hasher.update(&b"\r\n"[..]);
-        Pin::from(e.get_range(0..e.len()))
+        e.get_range(0..e.len())
             .try_fold(hasher, |mut hasher, mut chunk| {
                 while chunk.has_remaining() {
                     let c = chunk.chunk();
@@ -2160,8 +2156,7 @@ mod tests {
         let interior = self.stack.last().expect("at root").interior.clone();
         let len = (interior.end - interior.start) as usize;
         trace!("get_all: start={}, len={}", interior.start, len);
-        let mut out = Vec::with_capacity(len);
-        unsafe { out.set_len(len) };
+        let mut out = vec![0; len];
         fill_slice(&mut out[..], &self.mp4, interior.start).await;
         out
     }
@@ -2353,7 +2348,7 @@ mod tests {
         builder
             .include_timestamp_subtitle_track(include_subtitles)
             .unwrap();
-        let all_time = recording::Time(i64::min_value())..recording::Time(i64::max_value());
+        let all_time = recording::Time(i64::MIN)..recording::Time(i64::MAX);
         {
             let db = tdb.db.lock();
             db.list_recordings_by_time(TEST_STREAM_ID, all_time, &mut |r| {
@@ -2364,7 +2359,7 @@ mod tests {
                     "skip_90k={skip_90k} shorten_90k={shorten_90k} r={r:?}"
                 );
                 builder
-                    .append(&db, r, skip_90k..d - shorten_90k, true)
+                    .append(&db, &r, skip_90k..d - shorten_90k, true)
                     .unwrap();
                 Ok(())
             })
@@ -2384,7 +2379,7 @@ mod tests {
             .open(&filename)
             .unwrap();
         use ::std::io::Write;
-        Pin::from(mp4.get_range(0..mp4.len()))
+        mp4.get_range(0..mp4.len())
             .try_for_each(|mut chunk| {
                 while chunk.has_remaining() {
                     let c = chunk.chunk();
@@ -2492,7 +2487,7 @@ mod tests {
         };
         duration_so_far += row.media_duration_90k;
         builder
-            .append(&db.db.lock(), row, d_start..d_end, start_at_key)
+            .append(&db.db.lock(), &row, d_start..d_end, start_at_key)
             .unwrap();
     }
     builder.build(db.db.clone(), db.dirs_by_stream_id.clone())
@@ -2844,11 +2839,11 @@ mod tests {
         // combine ranges from the new format with ranges from the old format.
         let hash = digest(&mp4).await;
         assert_eq!(
-            "64f23b856692702b13d1811cd02dc83395b3d501dead7fd16f175eb26b4d8eee",
+            "123e2cf075125c81e80820bffa412d38729aff05c252c7ea2ab3384905903bb7",
             hash.to_hex().as_str()
         );
         const EXPECTED_ETAG: &str =
-            "\"791114c469130970608dd999b0ecf5861d077ec33fad2f0b040996e4aae4e30f\"";
+            "\"37c89bda9f0513acdc2ab95f48f03b3f797dfa3fb30bbefa6549fdc7296afed2\"";
         assert_eq!(
             Some(HeaderValue::from_str(EXPECTED_ETAG).unwrap()),
             mp4.etag()
@@ -2873,11 +2868,11 @@ mod tests {
         // combine ranges from the new format with ranges from the old format.
         let hash = digest(&mp4).await;
         assert_eq!(
-            "f9e4ed946187b2dd22ef049c4c1869d0f6c4f377ef08f8f53570850b61a06701",
+            "8f17df9b43dc55654a1e4e00126e7477f43234693d4f1fae72185798a09479d7",
             hash.to_hex().as_str()
         );
         const EXPECTED_ETAG: &str =
-            "\"85703b9abadd4292e119f2f7b0d6a16e99acf8b3ba98fcb6498e60ac5cb0b0b7\"";
+            "\"d4af0554a50f6dfff2f7f95a14e16f720bd5fe36a9570cd4fd32f6664f1487c4\"";
         assert_eq!(
             Some(HeaderValue::from_str(EXPECTED_ETAG).unwrap()),
             mp4.etag()
@@ -2902,11 +2897,11 @@ mod tests {
         // combine ranges from the new format with ranges from the old format.
         let hash = digest(&mp4).await;
         assert_eq!(
-            "f913d46d0119a03291e85459455b9a75a84cc9a1a5e3b88ca7e93eb718d73190",
+            "1debe76fc6277546209454919550ff4c3a379560f481fa0ce78378cbf3c646f8",
             hash.to_hex().as_str()
        );
         const EXPECTED_ETAG: &str =
-            "\"3d2031124fb995bf2fc4930e7affdcd51add396e062cfab97e1001224c5ee42c\"";
+            "\"7165c1a866451b7e714a8ad47f4a0022a3749212e945321b35b2f8aaee8aea5c\"";
         assert_eq!(
             Some(HeaderValue::from_str(EXPECTED_ETAG).unwrap()),
             mp4.etag()
@@ -2932,11 +2927,11 @@ mod tests {
         // combine ranges from the new format with ranges from the old format.
         let hash = digest(&mp4).await;
         assert_eq!(
-            "64cc763fa2533118bc6bf0b01249f02524ae87e0c97815079447b235722c1e2d",
+            "caf8b23f3b6ee959981687ff0bcbf8d6b01db9daef35695b2600ffb9f8b54fe1",
             hash.to_hex().as_str()
         );
         const EXPECTED_ETAG: &str =
-            "\"aa9bb2f63787a7d21227981135326c948db3e0b3dae5d0d39c77df69d0baf504\"";
+            "\"167ad6b44502cb09eb15d08fdd2c360e4e54e521251eceeebddf74c4041b0b38\"";
         assert_eq!(
             Some(HeaderValue::from_str(EXPECTED_ETAG).unwrap()),
             mp4.etag()
@@ -2961,11 +2956,11 @@ mod tests {
         // combine ranges from the new format with ranges from the old format.
         let hash = digest(&mp4).await;
         assert_eq!(
-            "6886b36ae6df9ce538f6db7ebd6159e68c2936b9d43307f7728fe75e0b62cad2",
+            "e06b5627788828b73b98726dfb6466d32305df64af0acbe6164fc8ab296de473",
             hash.to_hex().as_str()
         );
         const EXPECTED_ETAG: &str =
-            "\"0a6accaa7b583c94209eba58b00b39a804a5c4a8c99043e58e72fed7acd8dfc6\"";
+            "\"2c591788cf06f09b55450cd98cb07c670d580413359260f2d18b9595bd0b430d\"";
         assert_eq!(
             Some(HeaderValue::from_str(EXPECTED_ETAG).unwrap()),
             mp4.etag()
@@ -2980,13 +2975,16 @@
 mod bench {
     extern crate test;

+    use std::convert::Infallible;
+    use std::net::SocketAddr;
+
     use super::tests::create_mp4_from_db;
     use base::clock::RealClocks;
     use db::recording;
     use db::testutil::{self, TestDb};
-    use futures::future;
-    use http_serve;
-    use hyper;
+    use hyper::service::service_fn;
     use url::Url;

     /// An HTTP server for benchmarking.
@@ -3007,28 +3005,35 @@ mod bench {
         testutil::add_dummy_recordings_to_db(&db.db, 60);
         let mp4 = create_mp4_from_db(&db, 0, 0, false);
         let p = mp4.0.initial_sample_byte_pos;
-        let make_svc = hyper::service::make_service_fn(move |_conn| {
-            future::ok::<_, std::convert::Infallible>(hyper::service::service_fn({
-                let mp4 = mp4.clone();
-                move |req| {
-                    future::ok::<hyper::Response<crate::body::Body>, hyper::Error>(
-                        http_serve::serve(mp4.clone(), &req),
-                    )
-                }
-            }))
-        });
-        let rt = tokio::runtime::Runtime::new().unwrap();
-        let srv = {
-            let _guard = rt.enter();
-            let addr = ([127, 0, 0, 1], 0).into();
-            hyper::server::Server::bind(&addr)
-                .tcp_nodelay(true)
-                .serve(make_svc)
-        };
-        let addr = srv.local_addr(); // resolve port 0 to a real ephemeral port number.
-        ::std::thread::spawn(move || {
-            rt.block_on(srv).unwrap();
-        });
+        let addr: SocketAddr = ([127, 0, 0, 1], 0).into();
+        let listener = std::net::TcpListener::bind(addr).unwrap();
+        listener.set_nonblocking(true).unwrap();
+        let addr = listener.local_addr().unwrap(); // resolve port 0 to a real ephemeral port number.
+        let srv = async move {
+            let listener = tokio::net::TcpListener::from_std(listener).unwrap();
+            loop {
+                let (conn, _remote_addr) = listener.accept().await.unwrap();
+                conn.set_nodelay(true).unwrap();
+                let io = hyper_util::rt::TokioIo::new(conn);
+                let mp4 = mp4.clone();
+                let svc_fn = service_fn(move |req| {
+                    futures::future::ok::<_, Infallible>(http_serve::serve(mp4.clone(), &req))
+                });
+                tokio::spawn(
+                    hyper::server::conn::http1::Builder::new().serve_connection(io, svc_fn),
+                );
+            }
+        };
+        std::thread::Builder::new()
+            .name("bench-server".to_owned())
+            .spawn(move || {
+                let rt = tokio::runtime::Builder::new_current_thread()
+                    .enable_all()
+                    .build()
+                    .unwrap();
+                rt.block_on(srv)
+            })
+            .unwrap();
         BenchServer {
             url: Url::parse(&format!("http://{}:{}/", addr.ip(), addr.port())).unwrap(),
             generated_len: p,
@@ -3060,7 +3065,11 @@ mod bench {
         db.with_recording_playback(segment.s.id, &mut |playback| {
             let v = segment.build_index(playback).unwrap(); // warm.
             b.bytes = v.len() as u64; // define the benchmark performance in terms of output bytes.
-            b.iter(|| segment.build_index(playback).unwrap());
+            b.iter(|| {
+                for _i in 0..100 {
+                    segment.build_index(playback).unwrap();
+                }
+            });
             Ok(())
         })
         .unwrap();
@@ -3074,17 +3083,25 @@ mod bench {
         let p = server.generated_len;
         b.bytes = p;
         let client = reqwest::Client::new();
-        let rt = tokio::runtime::Runtime::new().unwrap();
+        let rt = tokio::runtime::Builder::new_current_thread()
+            .enable_all()
+            .build()
+            .unwrap();
         let run = || {
             rt.block_on(async {
-                let resp = client
-                    .get(server.url.clone())
-                    .header(reqwest::header::RANGE, format!("bytes=0-{}", p - 1))
-                    .send()
-                    .await
-                    .unwrap();
-                let b = resp.bytes().await.unwrap();
-                assert_eq!(p, b.len() as u64);
+                for _i in 0..100 {
+                    let mut resp = client
+                        .get(server.url.clone())
+                        .header(reqwest::header::RANGE, format!("bytes=0-{}", p - 1))
+                        .send()
+                        .await
+                        .unwrap();
+                    let mut size = 0u64;
+                    while let Some(b) = resp.chunk().await.unwrap() {
+                        size += u64::try_from(b.len()).unwrap();
+                    }
+                    assert_eq!(p, size);
+                }
             });
         };
         run(); // warm.
@@ -3097,7 +3114,9 @@ mod bench {
         let db = TestDb::new(RealClocks {});
         testutil::add_dummy_recordings_to_db(&db.db, 60);
         b.iter(|| {
-            create_mp4_from_db(&db, 0, 0, false);
+            for _i in 0..100 {
+                create_mp4_from_db(&db, 0, 0, false);
+            }
         });
     }
 }

@ -17,7 +17,8 @@ use tracing_futures::Instrument;
|
||||
/// Each `Slice` instance belongs to a single `Slices`.
pub trait Slice: fmt::Debug + Sized + Sync + 'static {
type Ctx: Send + Sync + Clone;
type Chunk: Send + Sync;
type Chunk: Send + Sync + 'static;
type Stream: Stream<Item = Result<Self::Chunk, BoxedError>> + Send + Sync;

/// The byte position (relative to the start of the `Slices`) of the end of this slice,
/// exclusive. Note the starting position (and thus length) are inferred from the previous
@@ -27,12 +28,10 @@ pub trait Slice: fmt::Debug + Sized + Sync + 'static {
/// Gets the body bytes indicated by `r`, which is relative to this slice's start.
/// The additional argument `ctx` is as supplied to the `Slices`.
/// The additional argument `l` is the length of this slice, as determined by the `Slices`.
fn get_range(
&self,
ctx: &Self::Ctx,
r: Range<u64>,
len: u64,
) -> Box<dyn Stream<Item = Result<Self::Chunk, BoxedError>> + Sync + Send>;
///
/// Note that unlike [`http_entity::Entity::get_range`], this is called many times per request,
/// so it's worth defining a custom stream type to avoid allocation overhead.
fn get_range(&self, ctx: &Self::Ctx, r: Range<u64>, len: u64) -> Self::Stream;

fn get_slices(ctx: &Self::Ctx) -> &Slices<Self>;
}
@@ -127,15 +126,15 @@ where
}

/// Writes `range` to `out`.
/// This interface mirrors `http_serve::Entity::write_to`, with the additional `ctx` argument.
/// This interface mirrors `http_serve::Entity::get_range`, with the additional `ctx` argument.
pub fn get_range(
&self,
ctx: &S::Ctx,
range: Range<u64>,
) -> Box<dyn Stream<Item = Result<S::Chunk, BoxedError>> + Sync + Send> {
) -> Pin<Box<dyn Stream<Item = Result<S::Chunk, BoxedError>> + Sync + Send>> {
#[allow(clippy::suspicious_operation_groupings)]
if range.start > range.end || range.end > self.len {
return Box::new(stream::once(futures::future::err(wrap_error(err!(
return Box::pin(stream::once(futures::future::err(wrap_error(err!(
Internal,
msg("bad range {:?} for slice of length {}", range, self.len),
)))));
@@ -170,10 +169,10 @@ where
let l = s_end - slice_start;
body = s.get_range(&c, start_pos..min_end - slice_start, l);
};
futures::future::ready(Some((Pin::from(body), (c, i + 1, 0, min_end))))
futures::future::ready(Some((body, (c, i + 1, 0, min_end))))
},
);
Box::new(bodies.flatten().in_current_span())
Box::pin(bodies.flatten().in_current_span())
}
}

@@ -182,9 +181,8 @@ mod tests {
use super::{Slice, Slices};
use crate::body::BoxedError;
use db::testutil;
use futures::stream::{self, Stream, TryStreamExt};
use futures::stream::{self, TryStreamExt};
use std::ops::Range;
use std::pin::Pin;

#[derive(Debug, Eq, PartialEq)]
pub struct FakeChunk {
@@ -201,6 +199,7 @@ mod tests {
impl Slice for FakeSlice {
type Ctx = &'static Slices<FakeSlice>;
type Chunk = FakeChunk;
type Stream = stream::Once<futures::future::Ready<Result<FakeChunk, BoxedError>>>;

fn end(&self) -> u64 {
self.end
@@ -211,11 +210,11 @@ mod tests {
_ctx: &&'static Slices<FakeSlice>,
r: Range<u64>,
_l: u64,
) -> Box<dyn Stream<Item = Result<FakeChunk, BoxedError>> + Send + Sync> {
Box::new(stream::once(futures::future::ok(FakeChunk {
) -> Self::Stream {
stream::once(futures::future::ok(FakeChunk {
slice: self.name,
range: r,
})))
}))
}

fn get_slices(ctx: &&'static Slices<FakeSlice>) -> &'static Slices<Self> {
@@ -241,10 +240,7 @@ mod tests {

async fn get_range(r: Range<u64>) -> Vec<FakeChunk> {
let slices = slices();
Pin::from(slices.get_range(&slices, r))
.try_collect()
.await
.unwrap()
slices.get_range(&slices, r).try_collect().await.unwrap()
}

#[test]

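The change above replaces the boxed `dyn Stream` return with an associated `Stream` type, so each `get_range` call returns a concrete value instead of a fresh heap allocation. A minimal sketch of the pattern (the `StaticSlice` type here is hypothetical, not part of this commit):

use futures::{future, stream};

// Hypothetical implementor: returning a concrete `stream::Once` value means the
// caller gets a plain struct, not a freshly boxed `dyn Stream`, which matters
// when `get_range` runs once per slice per request.
struct StaticSlice(&'static [u8]);

impl StaticSlice {
    fn get_range(
        &self,
        r: std::ops::Range<usize>,
    ) -> stream::Once<future::Ready<Result<&'static [u8], std::io::Error>>> {
        stream::once(future::ok(&self.0[r]))
    }
}
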
@@ -2,19 +2,54 @@
// Copyright (C) 2016 The Moonfire NVR Authors; see AUTHORS and LICENSE.txt.
// SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception.

use crate::h264;
use base::{bail, err, Error};
use bytes::Bytes;
use futures::StreamExt;
use retina::client::Demuxed;
use retina::codec::CodecItem;
use std::pin::Pin;
use std::result::Result;
use tracing::Instrument;
use url::Url;

static RETINA_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);

// For certain common sub stream anamorphic resolutions, add a pixel aspect ratio box.
// Assume the camera is 16x9. These are just the standard wide mode; default_pixel_aspect_ratio
// tries the transpose also.
const PIXEL_ASPECT_RATIOS: [((u16, u16), (u16, u16)); 6] = [
((320, 240), (4, 3)),
((352, 240), (40, 33)),
((640, 352), (44, 45)),
((640, 480), (4, 3)),
((704, 480), (40, 33)),
((720, 480), (32, 27)),
];

/// Gets the pixel aspect ratio to use if none is specified.
///
/// The Dahua IPC-HDW5231R-Z sets the aspect ratio in the H.264 SPS (correctly) for both square and
/// non-square pixels. The Hikvision DS-2CD2032-I doesn't set it, even though the sub stream's
/// pixels aren't square. So define a default based on the pixel dimensions to use if the camera
/// doesn't tell us what to do.
///
/// Note that at least in the case of .mp4 muxing, we don't need to fix up the underlying SPS.
/// PixelAspectRatioBox's definition says that it overrides the H.264-level declaration.
fn default_pixel_aspect_ratio(width: u16, height: u16) -> (u16, u16) {
if width >= height {
PIXEL_ASPECT_RATIOS
.iter()
.find(|r| r.0 == (width, height))
.map(|r| r.1)
.unwrap_or((1, 1))
} else {
PIXEL_ASPECT_RATIOS
.iter()
.find(|r| r.0 == (height, width))
.map(|r| (r.1 .1, r.1 .0))
.unwrap_or((1, 1))
}
}

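As a worked check of the table and fallback above (arithmetic mine, matching the test at the end of this file): a 704x480 sub stream displayed 16:9 needs a 40:33 pixel aspect ratio, since 704 * 40 : 480 * 33 = 28160 : 15840 = 16 : 9.

// Hypothetical one-off check; `default_pixel_aspect_ratio` is the function above.
assert_eq!(default_pixel_aspect_ratio(704, 480), (40, 33));
assert_eq!(default_pixel_aspect_ratio(480, 704), (33, 40)); // transposed (portrait) case
assert_eq!(704 * 40 * 9, 480 * 33 * 16); // both sides are 253440
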
pub struct Options {
pub session: retina::client::SessionOptions,
pub setup: retina::client::SetupOptions,
@@ -34,6 +69,7 @@ pub struct VideoFrame {

/// An estimate of the duration of the frame, or zero.
/// This can be deceptive and is only used by some testing code.
#[cfg(test)]
pub duration: i32,

pub is_key: bool,
@@ -74,7 +110,13 @@ impl Opener for RealOpener {
),
)
.expect("RetinaStream::play task panicked, see earlier error")
.map_err(|e| err!(Unknown, source(e)))??;
.map_err(|e| {
err!(
DeadlineExceeded,
msg("unable to play stream and get first frame within {RETINA_TIMEOUT:?}"),
source(e),
)
})??;
Ok(Box::new(RetinaStream {
inner: Some(inner),
rt_handle,
@@ -114,6 +156,27 @@ struct RetinaStreamInner {
video_sample_entry: db::VideoSampleEntryToInsert,
}

fn params_to_sample_entry(
params: &retina::codec::VideoParameters,
) -> Result<db::VideoSampleEntryToInsert, Error> {
let (width, height) = params.pixel_dimensions();
let width = u16::try_from(width).map_err(|e| err!(Unknown, source(e)))?;
let height = u16::try_from(height).map_err(|e| err!(Unknown, source(e)))?;
let aspect = default_pixel_aspect_ratio(width, height);
Ok(db::VideoSampleEntryToInsert {
data: params
.mp4_sample_entry()
.with_aspect_ratio(aspect)
.build()
.map_err(|e| err!(Unknown, source(e)))?,
rfc6381_codec: params.rfc6381_codec().to_owned(),
width,
height,
pasp_h_spacing: aspect.0,
pasp_v_spacing: aspect.1,
})
}

impl RetinaStreamInner {
/// Plays to first frame. No timeout; that's the caller's responsibility.
async fn play(
@@ -128,8 +191,15 @@ impl RetinaStreamInner {
let video_i = session
.streams()
.iter()
.position(|s| s.media() == "video" && s.encoding_name() == "h264")
.ok_or_else(|| err!(FailedPrecondition, msg("couldn't find H.264 video stream")))?;
.position(|s| {
s.media() == "video" && matches!(s.encoding_name(), "h264" | "h265" | "jpeg")
})
.ok_or_else(|| {
err!(
FailedPrecondition,
msg("couldn't find supported video stream")
)
})?;
session
.setup(video_i, options.setup)
.await
@@ -156,9 +226,9 @@ impl RetinaStreamInner {
let video_params = match session.streams()[video_i].parameters() {
Some(retina::codec::ParametersRef::Video(v)) => v.clone(),
Some(_) => unreachable!(),
None => bail!(Unknown, msg("couldn't find H.264 parameters")),
None => bail!(Unknown, msg("couldn't find video parameters")),
};
let video_sample_entry = h264::parse_extra_data(video_params.extra_data())?;
let video_sample_entry = params_to_sample_entry(&video_params)?;
let self_ = Box::new(Self {
label,
session,
@@ -239,13 +309,13 @@ impl Stream for RetinaStream {
.map_err(|e| {
err!(
DeadlineExceeded,
msg("timeout getting next frame"),
msg("unable to get next frame within {RETINA_TIMEOUT:?}"),
source(e)
)
})??;
let mut new_video_sample_entry = false;
if let Some(p) = new_parameters {
let video_sample_entry = h264::parse_extra_data(p.extra_data())?;
let video_sample_entry = params_to_sample_entry(&p)?;
if video_sample_entry != inner.video_sample_entry {
tracing::debug!(
"{}: parameter change:\nold: {:?}\nnew: {:?}",
@@ -262,6 +332,7 @@ impl Stream for RetinaStream {
})?;
Ok(VideoFrame {
pts: frame.timestamp().elapsed(),
#[cfg(test)]
duration: 0,
is_key: frame.is_random_access_point(),
data: frame.into_data().into(),
@@ -272,6 +343,8 @@ impl Stream for RetinaStream {

#[cfg(test)]
pub mod testutil {
use mp4::mp4box::WriteBox as _;

use super::*;
use std::convert::TryFrom;
use std::io::Cursor;
@@ -298,14 +371,35 @@ pub mod testutil {
.values()
.find(|t| matches!(t.media_type(), Ok(mp4::MediaType::H264)))
{
None => bail!(InvalidArgument, msg("expected a H.264 track")),
None => bail!(
InvalidArgument,
msg(
"expected a H.264 track, tracks were: {:#?}",
reader.tracks()
)
),
Some(t) => t,
};
let video_sample_entry = h264::parse_extra_data(
&h264_track
.extra_data()
.map_err(|e| err!(Unknown, source(e)))?[..],
)?;
let mut data = Vec::new();
h264_track
.trak
.mdia
.minf
.stbl
.stsd
.avc1
.as_ref()
.unwrap()
.write_box(&mut data)
.unwrap();
let video_sample_entry = db::VideoSampleEntryToInsert {
data,
rfc6381_codec: "avc1.4d401e".to_string(),
width: h264_track.width(),
height: h264_track.height(),
pasp_h_spacing: 1,
pasp_v_spacing: 1,
};
let h264_track_id = h264_track.track_id();
let stream = Mp4Stream {
reader,
@@ -345,6 +439,7 @@ pub mod testutil {
self.next_sample_id += 1;
Ok(VideoFrame {
pts: sample.start_time as i64,
#[cfg(test)]
duration: sample.duration as i32,
is_key: sample.is_sync,
data: sample.bytes,
@@ -357,3 +452,23 @@ pub mod testutil {
}
}
}

#[cfg(test)]
mod tests {
use db::testutil;

#[test]
fn pixel_aspect_ratios() {
testutil::init();
use super::default_pixel_aspect_ratio;
use num_rational::Ratio;
for &((w, h), _) in &super::PIXEL_ASPECT_RATIOS {
let (h_spacing, v_spacing) = default_pixel_aspect_ratio(w, h);
assert_eq!(Ratio::new(w * h_spacing, h * v_spacing), Ratio::new(16, 9));

// 90 or 270 degree rotation.
let (h_spacing, v_spacing) = default_pixel_aspect_ratio(h, w);
assert_eq!(Ratio::new(h * h_spacing, w * v_spacing), Ratio::new(9, 16));
}
}
}

@@ -6,7 +6,6 @@ use crate::stream;
use base::clock::{Clocks, TimerGuard};
use base::{bail, err, Error};
use db::{dir, recording, writer, Camera, Database, Stream};
use std::result::Result;
use std::str::FromStr;
use std::sync::Arc;
use tracing::{debug, info, trace, warn, Instrument};
@@ -120,7 +119,7 @@ where
pub fn run(&mut self) {
while self.shutdown_rx.check().is_ok() {
if let Err(err) = self.run_once() {
let sleep_time = time::Duration::seconds(1);
let sleep_time = base::clock::Duration::from_secs(1);
warn!(
err = %err.chain(),
"sleeping for 1 s after error"
@@ -181,7 +180,7 @@ where
self.opener
.open(self.short_name.clone(), self.url.clone(), options)?
};
let realtime_offset = self.db.clocks().realtime() - clocks.monotonic();
let realtime_offset = self.db.clocks().realtime().0 - clocks.monotonic().0;
let mut video_sample_entry_id = {
let _t = TimerGuard::new(&clocks, || "inserting video sample entry");
self.db
@@ -204,7 +203,7 @@ where
let frame = match frame {
Ok(f) => f,
Err(e) => {
let _ = w.close(None, Some(e.to_string()));
let _ = w.close(None, Some(e.chain().to_string()));
return Err(e);
}
};
@@ -214,10 +213,10 @@ where
debug!("have first key frame");
seen_key_frame = true;
}
let frame_realtime = clocks.monotonic() + realtime_offset;
let local_time = recording::Time::new(frame_realtime);
let frame_realtime = base::clock::SystemTime(realtime_offset + clocks.monotonic().0);
let local_time = recording::Time::from(frame_realtime);
rotate = if let Some(r) = rotate {
if frame_realtime.sec > r && frame.is_key {
if frame_realtime.as_secs() > r && frame.is_key {
trace!("close on normal rotation");
let _t = TimerGuard::new(&clocks, || "closing writer");
w.close(Some(frame.pts), None)?;
@@ -245,7 +244,7 @@ where
let r = match rotate {
Some(r) => r,
None => {
let sec = frame_realtime.sec;
let sec = frame_realtime.as_secs();
let r = sec - (sec % self.rotate_interval_sec) + self.rotate_offset_sec;
let r = r + if r <= sec {
self.rotate_interval_sec
@@ -289,19 +288,19 @@ where
mod tests {
use crate::stream::{self, Stream};
use base::clock::{self, Clocks};
use base::Mutex;
use base::{bail, Error};
use db::{recording, testutil, CompositeId};
use std::cmp;
use std::convert::TryFrom;
use std::sync::Arc;
use std::sync::Mutex;
use tracing::trace;

struct ProxyingStream {
clocks: clock::SimulatedClocks,
inner: Box<dyn stream::Stream>,
buffered: time::Duration,
slept: time::Duration,
buffered: base::clock::Duration,
slept: base::clock::Duration,
ts_offset: i64,
ts_offset_pkts_left: u32,
pkts_left: u32,
@@ -310,7 +309,7 @@ mod tests {
impl ProxyingStream {
fn new(
clocks: clock::SimulatedClocks,
buffered: time::Duration,
buffered: base::clock::Duration,
inner: Box<dyn stream::Stream>,
) -> ProxyingStream {
clocks.sleep(buffered);
@@ -318,7 +317,7 @@ mod tests {
clocks,
inner,
buffered,
slept: time::Duration::seconds(0),
slept: base::clock::Duration::default(),
ts_offset: 0,
ts_offset_pkts_left: 0,
pkts_left: 0,
@@ -349,13 +348,14 @@ mod tests {
// Avoid accumulating conversion error by tracking the total amount to sleep and how
// much we've already slept, rather than considering each frame in isolation.
{
let goal = frame.pts + i64::from(frame.duration);
let goal = time::Duration::nanoseconds(
goal * 1_000_000_000 / recording::TIME_UNITS_PER_SEC,
let goal =
u64::try_from(frame.pts).unwrap() + u64::try_from(frame.duration).unwrap();
let goal = base::clock::Duration::from_nanos(
goal * 1_000_000_000 / u64::try_from(recording::TIME_UNITS_PER_SEC).unwrap(),
);
let duration = goal - self.slept;
let buf_part = cmp::min(self.buffered, duration);
self.buffered = self.buffered - buf_part;
self.buffered -= buf_part;
self.clocks.sleep(duration - buf_part);
self.slept = goal;
}
@@ -388,7 +388,7 @@ mod tests {
_options: stream::Options,
) -> Result<Box<dyn stream::Stream>, Error> {
assert_eq!(&url, &self.expected_url);
let mut l = self.streams.lock().unwrap();
let mut l = self.streams.lock();
match l.pop() {
Some(stream) => {
trace!("MockOpener returning next stream");
@@ -396,7 +396,7 @@ mod tests {
}
None => {
trace!("MockOpener shutting down");
self.shutdown_tx.lock().unwrap().take();
self.shutdown_tx.lock().take();
bail!(Cancelled, msg("done"))
}
}
@@ -430,15 +430,18 @@ mod tests {
async fn basic() {
testutil::init();
// 2015-04-25 00:00:00 UTC
let clocks = clock::SimulatedClocks::new(time::Timespec::new(1429920000, 0));
clocks.sleep(time::Duration::seconds(86400)); // to 2015-04-26 00:00:00 UTC
let clocks = clock::SimulatedClocks::new(clock::SystemTime::new(1429920000, 0));
clocks.sleep(clock::Duration::from_secs(86400)); // to 2015-04-26 00:00:00 UTC

let stream = stream::testutil::Mp4Stream::open("src/testdata/clip.mp4").unwrap();
let mut stream =
ProxyingStream::new(clocks.clone(), time::Duration::seconds(2), Box::new(stream));
let mut stream = ProxyingStream::new(
clocks.clone(),
clock::Duration::from_secs(2),
Box::new(stream),
);
stream.ts_offset = 123456; // starting pts of the input should be irrelevant
stream.ts_offset_pkts_left = u32::max_value();
stream.pkts_left = u32::max_value();
stream.ts_offset_pkts_left = u32::MAX;
stream.pkts_left = u32::MAX;
let (shutdown_tx, shutdown_rx) = base::shutdown::channel();
let opener = MockOpener {
expected_url: url::Url::parse("rtsp://test-camera/main").unwrap(),
@@ -475,7 +478,7 @@ mod tests {
.unwrap();
}
stream.run();
assert!(opener.streams.lock().unwrap().is_empty());
assert!(opener.streams.lock().is_empty());
db.syncer_channel.flush();
let db = db.db.lock();

@@ -513,7 +516,6 @@ mod tests {
assert_eq!(recording::Time(128700576719993), recordings[1].start);
assert_eq!(db::RecordingFlags::TrailingZero as i32, recordings[1].flags);

drop(env);
drop(opener);
}
}

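The rotation calculation in the streamer diff above rounds the current second down to a multiple of `rotate_interval_sec`, adds `rotate_offset_sec`, and pushes the result forward one interval if it isn't strictly in the future. A worked example with made-up values:

// With interval = 60 s and offset = 5 s, a frame at sec = 127:
let (interval, offset, sec) = (60i64, 5i64, 127i64);
let r = sec - (sec % interval) + offset; // 127 - 7 + 5 = 125
let r = r + if r <= sec { interval } else { 0 }; // 125 <= 127, so r = 185
assert_eq!(r, 185); // the writer closes at the first key frame past sec 185
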
@@ -2,51 +2,40 @@
// Copyright (C) 2021 The Moonfire NVR Authors; see AUTHORS and LICENSE.txt.
// SPDX-License-Identifier: GPL-v3.0-or-later WITH GPL-3.0-linking-exception.

//! Unified [`hyper::server::accept::Accept`] impl for TCP and Unix sockets.
//! Unified connection handling for TCP and Unix sockets.

use std::pin::Pin;

use hyper::server::accept::Accept;

pub enum Listener {
Tcp(tokio::net::TcpListener),
Unix(tokio::net::UnixListener),
}

impl Accept for Listener {
type Conn = Conn;
type Error = std::io::Error;

fn poll_accept(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Result<Self::Conn, Self::Error>>> {
match Pin::into_inner(self) {
Listener::Tcp(l) => Pin::new(l).poll_accept(cx)?.map(|(s, a)| {
if let Err(e) = s.set_nodelay(true) {
return Some(Err(e));
}
Some(Ok(Conn {
impl Listener {
pub async fn accept(&mut self) -> std::io::Result<Conn> {
match self {
Listener::Tcp(l) => {
let (s, a) = l.accept().await?;
s.set_nodelay(true)?;
Ok(Conn {
stream: Stream::Tcp(s),
data: ConnData {
client_unix_uid: None,
client_addr: Some(a),
},
}))
}),
Listener::Unix(l) => Pin::new(l).poll_accept(cx)?.map(|(s, _a)| {
let ucred = match s.peer_cred() {
Err(e) => return Some(Err(e)),
Ok(ucred) => ucred,
};
Some(Ok(Conn {
})
}
Listener::Unix(l) => {
let (s, _a) = l.accept().await?;
let ucred = s.peer_cred()?;
Ok(Conn {
stream: Stream::Unix(s),
data: ConnData {
client_unix_uid: Some(nix::unistd::Uid::from_raw(ucred.uid())),
client_addr: None,
},
}))
}),
})
}
}
}
}

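For context, hyper 1.x dropped the `Accept` trait, which is why the poll-based impl above collapses into a plain `async fn accept`. A minimal sketch of how a caller might drive it (the surrounding server wiring is assumed, not shown in this diff):

async fn run(mut listener: Listener) -> std::io::Result<()> {
    loop {
        let conn = listener.accept().await?;
        tokio::spawn(async move {
            // hand `conn` off to the per-connection HTTP handler here
            let _ = conn;
        });
    }
}
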
@@ -7,19 +7,28 @@
use std::sync::Arc;

use base::{bail, err, Error};
use futures::{future::Either, SinkExt, StreamExt};
use bytes::Bytes;
use futures::SinkExt;
use http::header;
use tokio_tungstenite::{tungstenite, WebSocketStream};
use tokio::sync::broadcast::error::RecvError;
use tokio_tungstenite::tungstenite;
use uuid::Uuid;

use crate::mp4;

use super::{Caller, Service};
use super::{websocket::WebSocketStream, Caller, Service};

/// Interval at which to send keepalives if there are no frames.
///
/// Chrome appears to time out WebSockets after 60 seconds of inactivity.
/// If the camera is disconnected or not sending frames, we'd like to keep
/// the connection open so everything will recover when the camera comes back.
const KEEPALIVE_AFTER_IDLE: tokio::time::Duration = tokio::time::Duration::from_secs(30);

impl Service {
pub(super) async fn stream_live_m4s(
self: Arc<Self>,
ws: &mut WebSocketStream<hyper::upgrade::Upgraded>,
ws: &mut WebSocketStream,
caller: Result<Caller, Error>,
uuid: Uuid,
stream_type: db::StreamType,
@@ -31,8 +40,7 @@ impl Service {

let stream_id;
let open_id;
let (sub_tx, sub_rx) = futures::channel::mpsc::unbounded();
{
let mut sub_rx = {
let mut db = self.db.lock();
open_id = match db.open {
None => {
@@ -48,45 +56,49 @@ impl Service {
.ok_or_else(|| err!(NotFound, msg("no such camera {uuid}")))?;
stream_id = camera.streams[stream_type.index()]
.ok_or_else(|| err!(NotFound, msg("no such stream {uuid}/{stream_type}")))?;
db.watch_live(
stream_id,
Box::new(move |l| sub_tx.unbounded_send(l).is_ok()),
)
.expect("stream_id refed by camera");
}
db.watch_live(stream_id).expect("stream_id refed by camera")
};

let keepalive = tokio_stream::wrappers::IntervalStream::new(tokio::time::interval(
std::time::Duration::new(30, 0),
));
let mut combo = futures::stream::select(
sub_rx.map(Either::Left),
keepalive.map(|_| Either::Right(())),
);
let mut keepalive = tokio::time::interval(KEEPALIVE_AFTER_IDLE);
keepalive.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);

// On the first LiveSegment, send all the data from the previous key frame onward.
// For LiveSegments, it's okay to send a single non-key frame at a time.
// On the first LiveFrame, send all the data from the previous key frame
// onward. Afterward, send a single (often non-key) frame at a time.
let mut start_at_key = true;
loop {
let next = combo
.next()
.await
.unwrap_or_else(|| unreachable!("timer stream never ends"));
match next {
Either::Left(live) => {
if !self
.stream_live_m4s_chunk(open_id, stream_id, ws, live, start_at_key)
.await?
{
return Ok(());
tokio::select! {
biased;

next = sub_rx.recv() => {
match next {
Ok(l) => {
keepalive.reset_after(KEEPALIVE_AFTER_IDLE);
if !self.stream_live_m4s_chunk(
open_id,
stream_id,
ws,
l,
start_at_key,
).await? {
return Ok(());
}
start_at_key = false;
}
Err(RecvError::Closed) => {
bail!(Internal, msg("live stream closed unexpectedly"));
}
Err(RecvError::Lagged(frames)) => {
bail!(
ResourceExhausted,
msg("subscriber {frames} frames further behind than allowed; \
this typically indicates insufficient bandwidth"),
)
}
}
start_at_key = false;
}
Either::Right(_) => {
if ws
.send(tungstenite::Message::Ping(Vec::new()))
.await
.is_err()
{

_ = keepalive.tick() => {
if ws.send(tungstenite::Message::Ping(Bytes::new())).await.is_err() {
return Ok(());
}
}
@@ -100,8 +112,8 @@ impl Service {
&self,
open_id: u32,
stream_id: i32,
ws: &mut tokio_tungstenite::WebSocketStream<hyper::upgrade::Upgraded>,
live: db::LiveSegment,
ws: &mut WebSocketStream,
live: db::LiveFrame,
start_at_key: bool,
) -> Result<bool, Error> {
let mut builder = mp4::FileBuilder::new(mp4::Type::MediaSegment);
@@ -111,8 +123,8 @@ impl Service {
let mut rows = 0;
db.list_recordings_by_id(stream_id, live.recording..live.recording + 1, &mut |r| {
rows += 1;
builder.append(&db, &r, live.media_off_90k.clone(), start_at_key)?;
row = Some(r);
builder.append(&db, r, live.media_off_90k.clone(), start_at_key)?;
Ok(())
})?;
}
@@ -143,6 +155,9 @@ impl Service {
);
let mut v = hdr.into_bytes();
mp4.append_into_vec(&mut v).await?;
Ok(ws.send(tungstenite::Message::Binary(v)).await.is_ok())
Ok(ws
.send(tungstenite::Message::Binary(v.into()))
.await
.is_ok())
}
}

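The loop above replaces an unbounded mpsc subscription with a bounded, broadcast-style receiver: a subscriber that falls too far behind sees `RecvError::Lagged` instead of consuming unbounded memory. A self-contained sketch of that semantics using `tokio::sync::broadcast` (assuming `watch_live` now returns such a receiver):

use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (tx, mut rx) = broadcast::channel(2); // capacity 2
    for i in 0..4 {
        tx.send(i).unwrap(); // values 0 and 1 are overwritten
    }
    // The receiver learns how far behind it fell rather than buffering forever.
    assert!(matches!(
        rx.recv().await,
        Err(broadcast::error::RecvError::Lagged(2))
    ));
    assert_eq!(rx.recv().await.unwrap(), 2); // oldest retained value
}
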
@@ -37,26 +37,6 @@ use tracing::Instrument;
use url::form_urlencoded;
use uuid::Uuid;

/// An HTTP error response.
/// This is a thin wrapper over the hyper response type; it doesn't even verify
/// that the response actually uses a non-2xx status code. Its purpose is to
/// allow automatic conversion from `base::Error`. Rust's orphan rule prevents
/// this crate from defining a direct conversion from `base::Error` to
/// `hyper::Response`.
struct HttpError(Response<Body>);

impl From<Response<Body>> for HttpError {
fn from(response: Response<Body>) -> Self {
HttpError(response)
}
}

impl From<base::Error> for HttpError {
fn from(err: base::Error) -> Self {
HttpError(from_base_error(&err))
}
}

fn plain_response<B: Into<Body>>(status: http::StatusCode, body: B) -> Response<Body> {
Response::builder()
.status(status)
@@ -86,7 +66,10 @@ struct Caller {

type ResponseResult = Result<Response<Body>, base::Error>;

fn serve_json<T: serde::ser::Serialize>(req: &Request<hyper::Body>, out: &T) -> ResponseResult {
fn serve_json<R: http_serve::AsRequest, T: serde::ser::Serialize>(
req: &R,
out: &T,
) -> ResponseResult {
let (mut resp, writer) = http_serve::streaming_body(req).build();
resp.headers_mut().insert(
header::CONTENT_TYPE,
@@ -101,18 +84,18 @@ fn serve_json<T: serde::ser::Serialize>(req: &Request<hyper::Body>, out: &T) ->
fn csrf_matches(csrf: &str, session: auth::SessionHash) -> bool {
let mut b64 = [0u8; 32];
session.encode_base64(&mut b64);
::ring::constant_time::verify_slices_are_equal(&b64[..], csrf.as_bytes()).is_ok()
use subtle::ConstantTimeEq as _;
b64.ct_eq(csrf.as_bytes()).into()
}

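The `subtle` swap above keeps the CSRF comparison constant-time after dropping the `ring::constant_time` helper. The same pattern in isolation (a sketch, not this crate's API):

use subtle::ConstantTimeEq as _;

// The comparison takes time independent of where the first mismatching byte
// is, so an attacker can't recover the token one byte at a time. Unequal
// lengths return false immediately, which leaks only the length.
fn tokens_match(expected: &[u8], supplied: &[u8]) -> bool {
    expected.ct_eq(supplied).into()
}
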
/// Extracts `s` cookie from the HTTP request. Does not authenticate.
fn extract_sid(req: &Request<hyper::Body>) -> Option<auth::RawSessionId> {
for hdr in req.headers().get_all(header::COOKIE) {
/// Extracts `s` cookie from the HTTP request headers. Does not authenticate.
fn extract_sid(req_hdrs: &http::HeaderMap) -> Option<auth::RawSessionId> {
for hdr in req_hdrs.get_all(header::COOKIE) {
for mut cookie in hdr.as_bytes().split(|&b| b == b';') {
if cookie.starts_with(b" ") {
cookie = &cookie[1..];
}
if cookie.starts_with(b"s=") {
let s = &cookie[2..];
if let Some(s) = cookie.strip_prefix(b"s=") {
if let Ok(s) = auth::RawSessionId::decode_base64(s) {
return Some(s);
}
@@ -127,7 +110,9 @@ fn extract_sid(req: &Request<hyper::Body>) -> Option<auth::RawSessionId> {
/// This returns the request body as bytes rather than performing
/// deserialization. Keeping the bytes allows the caller to use a `Deserialize`
/// that borrows from the bytes.
async fn extract_json_body(req: &mut Request<hyper::Body>) -> Result<Bytes, base::Error> {
async fn into_json_body(
req: Request<hyper::body::Incoming>,
) -> Result<(http::request::Parts, Bytes), base::Error> {
let correct_mime_type = match req.headers().get(header::CONTENT_TYPE) {
Some(t) if t == "application/json" => true,
Some(t) if t == "application/json; charset=UTF-8" => true,
@@ -139,10 +124,12 @@ async fn extract_json_body(req: &mut Request<hyper::Body>) -> Result<Bytes, base
msg("expected application/json request body")
);
}
let b = ::std::mem::replace(req.body_mut(), hyper::Body::empty());
hyper::body::to_bytes(b)
let (parts, b) = req.into_parts();
let b = http_body_util::BodyExt::collect(b)
.await
.map_err(|e| err!(Unavailable, msg("unable to read request body"), source(e)))
.map_err(|e| err!(Unavailable, msg("unable to read request body"), source(e)))?
.to_bytes();
Ok((parts, b))
}

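A sketch of how a handler uses the new signature (hypothetical caller; `json::LoginRequest` stands in for any request type): since hyper 1.x bodies are consumed by value, the function hands the `Parts` back so headers remain usable after the body is read.

async fn handle(req: Request<hyper::body::Incoming>) -> Result<(), base::Error> {
    let (parts, body) = into_json_body(req).await?;
    let r: json::LoginRequest = parse_json_body(&body)?;
    let _host = parts.headers.get(header::HOST); // headers still available
    let _ = r;
    Ok(())
}
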
fn parse_json_body<'a, T: serde::Deserialize<'a>>(body: &'a [u8]) -> Result<T, base::Error> {
@@ -227,7 +214,7 @@ impl Service {
/// as well as returning it to the HTTP client.
async fn serve_inner(
self: Arc<Self>,
req: Request<::hyper::Body>,
req: Request<::hyper::body::Incoming>,
authreq: auth::Request,
conn_data: ConnData,
) -> ResponseResult {
@@ -330,12 +317,12 @@ impl Service {
/// them to hyper as `Ok` results.
pub async fn serve(
self: Arc<Self>,
req: Request<::hyper::Body>,
req: Request<::hyper::body::Incoming>,
conn_data: ConnData,
) -> Result<Response<Body>, std::convert::Infallible> {
let request_id = ulid::Ulid::new();
let request_id = uuid::Uuid::now_v7();
let authreq = auth::Request {
when_sec: Some(self.db.clocks().realtime().sec),
when_sec: Some(self.db.clocks().realtime().as_secs()),
addr: if self.trust_forward_hdrs {
req.headers()
.get("X-Real-IP")
@@ -354,7 +341,7 @@ impl Service {
// https://opentelemetry.io/docs/reference/specification/trace/semantic_conventions/http/
let span = tracing::info_span!(
"request",
%request_id,
request_id = %data_encoding::BASE32_NOPAD.encode_display(request_id.as_bytes()),
net.sock.peer.uid = conn_data.client_unix_uid.map(tracing::field::display),
http.client_ip = authreq.addr.map(tracing::field::display),
http.method = %req.method(),
@@ -398,7 +385,7 @@ impl Service {
Ok(response)
}

fn top_level(&self, req: &Request<::hyper::Body>, caller: Caller) -> ResponseResult {
fn top_level(&self, req: &Request<::hyper::body::Incoming>, caller: Caller) -> ResponseResult {
let mut days = false;
let mut camera_configs = false;
if let Some(q) = req.uri().query() {
@@ -431,7 +418,7 @@ impl Service {
)
}

fn camera(&self, req: &Request<::hyper::Body>, uuid: Uuid) -> ResponseResult {
fn camera(&self, req: &Request<::hyper::body::Incoming>, uuid: Uuid) -> ResponseResult {
let db = self.db.lock();
let camera = db
.get_camera(uuid)
@@ -444,13 +431,13 @@ impl Service {

fn stream_recordings(
&self,
req: &Request<::hyper::Body>,
req: &Request<::hyper::body::Incoming>,
uuid: Uuid,
type_: db::StreamType,
) -> ResponseResult {
let (r, split) = {
let mut time = recording::Time::min_value()..recording::Time::max_value();
let mut split = recording::Duration(i64::max_value());
let mut time = recording::Time::MIN..recording::Time::MAX;
let mut split = recording::Duration(i64::MAX);
if let Some(q) = req.uri().query() {
for (key, value) in form_urlencoded::parse(q.as_bytes()) {
let (key, value) = (key.borrow(), value.borrow());
@@ -506,6 +493,7 @@ impl Service {
video_sample_entry_id: row.video_sample_entry_id,
growing: row.growing,
has_trailing_zero: row.has_trailing_zero,
end_reason: row.end_reason.clone(),
});
if !out
.video_sample_entries
@@ -520,7 +508,12 @@ impl Service {
serve_json(req, &out)
}

fn init_segment(&self, id: i32, debug: bool, req: &Request<::hyper::Body>) -> ResponseResult {
fn init_segment(
&self,
id: i32,
debug: bool,
req: &Request<::hyper::body::Incoming>,
) -> ResponseResult {
let mut builder = mp4::FileBuilder::new(mp4::Type::InitSegment);
let db = self.db.lock();
let Some(ent) = db.video_sample_entries_by_id().get(&id) else {
@@ -539,7 +532,7 @@ impl Service {

fn request(
&self,
req: &Request<::hyper::Body>,
req: &Request<::hyper::body::Incoming>,
authreq: &auth::Request,
caller: Caller,
) -> ResponseResult {
@@ -551,26 +544,26 @@ impl Service {
.user_agent
.as_ref()
.map(|u| String::from_utf8_lossy(&u[..]));
let when = authreq.when_sec.map(|sec| {
jiff::Timestamp::from_second(sec)
.expect("valid time")
.to_zoned(base::time::global_zone())
.strftime("%FT%T%:z")
});
Ok(plain_response(
StatusCode::OK,
format!(
"when: {}\n\
"when: {:?}\n\
host: {:?}\n\
addr: {:?}\n\
user_agent: {:?}\n\
secure: {:?}\n\
caller: {:?}\n",
time::at(time::Timespec {
sec: authreq.when_sec.unwrap(),
nsec: 0
})
.strftime("%FT%T")
.map(|f| f.to_string())
.unwrap_or_else(|e| e.to_string()),
when,
host.as_deref(),
&authreq.addr,
agent.as_deref(),
self.is_secure(req),
self.is_secure(req.headers()),
&caller,
),
))
@@ -580,10 +573,9 @@ impl Service {
/// Moonfire NVR currently doesn't directly serve `https`, but it supports
/// proxies which set the `X-Forwarded-Proto` header. See `guide/secure.md`
/// for more information.
fn is_secure(&self, req: &Request<::hyper::Body>) -> bool {
fn is_secure(&self, hdrs: &http::HeaderMap) -> bool {
self.trust_forward_hdrs
&& req
.headers()
&& hdrs
.get("X-Forwarded-Proto")
.map(|v| v.as_bytes() == b"https")
.unwrap_or(false)
@@ -605,12 +597,12 @@ impl Service {
/// performing.
fn authenticate(
&self,
req: &Request<hyper::Body>,
req: &Request<hyper::body::Incoming>,
authreq: &auth::Request,
conn_data: &ConnData,
unauth_path: bool,
) -> Result<Caller, base::Error> {
if let Some(sid) = extract_sid(req) {
if let Some(sid) = extract_sid(req.headers()) {
match self
.db
.lock()
@@ -671,8 +663,9 @@ impl Service {
#[cfg(test)]
mod tests {
use db::testutil::{self, TestDb};
use futures::future::FutureExt;
use http::{header, Request};
// use futures::future::FutureExt;
// use http::{header, Request};
use http::header;
use std::sync::Arc;

pub(super) struct Server {
@@ -698,36 +691,43 @@ mod tests {
})
.unwrap(),
);
let make_svc = hyper::service::make_service_fn(move |_conn| {
futures::future::ok::<_, std::convert::Infallible>(hyper::service::service_fn({
let s = Arc::clone(&service);
move |req| {
Arc::clone(&s).serve(
req,
super::accept::ConnData {
client_unix_uid: None,
client_addr: None,
},
)
}
}))
});
let (tx, rx) = std::sync::mpsc::channel();
let (addr_tx, addr_rx) = std::sync::mpsc::channel();
let handle = ::std::thread::spawn(move || {
let addr = ([127, 0, 0, 1], 0).into();
let rt = tokio::runtime::Runtime::new().unwrap();
let srv = {
let _guard = rt.enter();
hyper::server::Server::bind(&addr)
.tcp_nodelay(true)
.serve(make_svc)
};
let addr = srv.local_addr(); // resolve port 0 to a real ephemeral port number.
tx.send(addr).unwrap();
rt.block_on(srv.with_graceful_shutdown(shutdown_rx.map(|_| ())))
.unwrap();
let service = Arc::clone(&service);
rt.block_on(async move {
let addr = std::net::SocketAddr::from((std::net::Ipv4Addr::LOCALHOST, 0));
let listener = tokio::net::TcpListener::bind(&addr).await.unwrap();
let addr = listener.local_addr().unwrap();
let mut shutdown_rx = std::pin::pin!(shutdown_rx);
addr_tx.send(addr).unwrap();
loop {
let (tcp, _) = tokio::select! {
r = listener.accept() => r.unwrap(),
_ = shutdown_rx.as_mut() => return,
};
tcp.set_nodelay(true).unwrap();
let io = hyper_util::rt::TokioIo::new(tcp);
let service = Arc::clone(&service);
let serve = move |req| {
Arc::clone(&service).serve(
req,
super::accept::ConnData {
client_unix_uid: None,
client_addr: None,
},
)
};
tokio::task::spawn(async move {
hyper::server::conn::http1::Builder::new()
.serve_connection(io, hyper::service::service_fn(serve))
.await
.unwrap();
});
}
});
});
let addr = rx.recv().unwrap();
let addr = addr_rx.recv().unwrap();

// Create a user.
let mut c = db::UserChange::add_user("slamb".to_owned());
@@ -745,7 +745,7 @@ mod tests {

impl Drop for Server {
fn drop(&mut self) {
self.shutdown_tx.take().unwrap().send(()).unwrap();
let _ = self.shutdown_tx.take().unwrap().send(());
self.handle.take().unwrap().join().unwrap()
}
}
@@ -756,7 +756,7 @@ mod tests {
let s = Server::new(None);
let cli = reqwest::Client::new();
let resp = cli
.get(&format!("{}/api/", &s.base_url))
.get(format!("{}/api/", &s.base_url))
.send()
.await
.unwrap();
@@ -765,15 +765,15 @@ mod tests {

#[test]
fn test_extract_sid() {
let req = Request::builder()
.header(header::COOKIE, "foo=asdf; bar=asdf")
.header(
header::COOKIE,
"s=OsL6Cg4ikLw6UIXOT28tI+vPez3qWACovI+nLHWyjsW1ERX83qRrOR3guKedc8IP",
)
.body(hyper::Body::empty())
.unwrap();
let sid = super::extract_sid(&req).unwrap();
let mut hdrs = http::HeaderMap::new();
hdrs.append(header::COOKIE, "foo=asdf; bar=asdf".parse().unwrap());
hdrs.append(
header::COOKIE,
"s=OsL6Cg4ikLw6UIXOT28tI+vPez3qWACovI+nLHWyjsW1ERX83qRrOR3guKedc8IP"
.parse()
.unwrap(),
);
let sid = super::extract_sid(&hdrs).unwrap();
assert_eq!(sid.as_ref(), &b":\xc2\xfa\n\x0e\"\x90\xbc:P\x85\xceOo-#\xeb\xcf{=\xeaX\x00\xa8\xbc\x8f\xa7,u\xb2\x8e\xc5\xb5\x11\x15\xfc\xde\xa4k9\x1d\xe0\xb8\xa7\x9ds\xc2\x0f"[..]);
}
}
@@ -783,8 +783,11 @@ mod bench {
extern crate test;

use db::testutil::{self, TestDb};
use hyper;
use std::sync::{Arc, OnceLock};
use hyper::{self, service::service_fn};
use std::{
net::SocketAddr,
sync::{Arc, OnceLock},
};
use uuid::Uuid;

struct Server {
@@ -808,32 +811,41 @@ mod bench {
})
.unwrap(),
);
let make_svc = hyper::service::make_service_fn(move |_conn| {
futures::future::ok::<_, std::convert::Infallible>(hyper::service::service_fn({
let s = Arc::clone(&service);
move |req| {
Arc::clone(&s).serve(
let addr: SocketAddr = ([127, 0, 0, 1], 0).into();
let listener = std::net::TcpListener::bind(addr).unwrap();
listener.set_nonblocking(true).unwrap();
let addr = listener.local_addr().unwrap(); // resolve port 0 to a real ephemeral port number.
let srv = async move {
let listener = tokio::net::TcpListener::from_std(listener).unwrap();
loop {
let (conn, _remote_addr) = listener.accept().await.unwrap();
conn.set_nodelay(true).unwrap();
let io = hyper_util::rt::TokioIo::new(conn);
let service = Arc::clone(&service);
let svc_fn = service_fn(move |req| {
Arc::clone(&service).serve(
req,
super::accept::ConnData {
client_unix_uid: None,
client_addr: None,
},
)
}
}))
});
let rt = tokio::runtime::Runtime::new().unwrap();
let srv = {
let _guard = rt.enter();
let addr = ([127, 0, 0, 1], 0).into();
hyper::server::Server::bind(&addr)
.tcp_nodelay(true)
.serve(make_svc)
});
tokio::spawn(
hyper::server::conn::http1::Builder::new().serve_connection(io, svc_fn),
);
}
};
let addr = srv.local_addr(); // resolve port 0 to a real ephemeral port number.
::std::thread::spawn(move || {
rt.block_on(srv).unwrap();
});
std::thread::Builder::new()
.name("bench-server".to_owned())
.spawn(move || {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
rt.block_on(srv)
})
.unwrap();
Server {
base_url: format!("http://{}:{}", addr.ip(), addr.port()),
test_camera_uuid,
@@ -853,13 +865,18 @@ mod bench {
))
.unwrap();
let client = reqwest::Client::new();
let rt = tokio::runtime::Runtime::new().unwrap();
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
let f = || {
rt.block_on(async {
let resp = client.get(url.clone()).send().await.unwrap();
assert_eq!(resp.status(), reqwest::StatusCode::OK);
let _b = resp.bytes().await.unwrap();
});
for _i in 0..100 {
rt.block_on(async {
let resp = client.get(url.clone()).send().await.unwrap();
assert_eq!(resp.status(), reqwest::StatusCode::OK);
let _b = resp.bytes().await.unwrap();
});
}
};
f(); // warm.
b.iter(f);

@@ -13,15 +13,13 @@ use tracing::{info, warn};

use crate::{json, web::parse_json_body};

use super::{
csrf_matches, extract_json_body, extract_sid, plain_response, ResponseResult, Service,
};
use super::{csrf_matches, extract_sid, into_json_body, plain_response, ResponseResult, Service};
use std::convert::TryFrom;

impl Service {
pub(super) async fn login(
&self,
mut req: Request<::hyper::Body>,
req: Request<::hyper::body::Incoming>,
authreq: auth::Request,
) -> ResponseResult {
if *req.method() != Method::POST {
@@ -30,9 +28,9 @@ impl Service {
"POST expected",
));
}
let r = extract_json_body(&mut req).await?;
let r: json::LoginRequest = parse_json_body(&r)?;
let Some(host) = req.headers().get(header::HOST) else {
let (parts, b) = into_json_body(req).await?;
let r: json::LoginRequest = parse_json_body(&b)?;
let Some(host) = parts.headers.get(header::HOST) else {
bail!(InvalidArgument, msg("missing Host header"));
};
let host = host.as_bytes();
@@ -45,7 +43,7 @@ impl Service {

// If the request came in over https, tell the browser to only send the cookie on https
// requests also.
let is_secure = self.is_secure(&req);
let is_secure = self.is_secure(&parts.headers);

// Use SameSite=Lax rather than SameSite=Strict. Safari apparently doesn't send
// SameSite=Strict cookies on WebSocket upgrade requests. There's no real security
@@ -76,7 +74,7 @@ impl Service {

pub(super) async fn logout(
&self,
mut req: Request<hyper::Body>,
req: Request<hyper::body::Incoming>,
authreq: auth::Request,
) -> ResponseResult {
if *req.method() != Method::POST {
@@ -85,11 +83,11 @@ impl Service {
"POST expected",
));
}
let r = extract_json_body(&mut req).await?;
let r: json::LogoutRequest = parse_json_body(&r)?;
let (parts, b) = into_json_body(req).await?;
let r: json::LogoutRequest = parse_json_body(&b)?;

let mut res = Response::new(b""[..].into());
if let Some(sid) = extract_sid(&req) {
if let Some(sid) = extract_sid(&parts.headers) {
let mut l = self.db.lock();
let hash = sid.hash();
match l.authenticate_session(authreq.clone(), &hash) {
@@ -177,7 +175,7 @@ mod tests {
info!("header: {}", cookie.header());

let resp = cli
.get(&format!("{}/api/", &s.base_url))
.get(format!("{}/api/", &s.base_url))
.header(reqwest::header::COOKIE, cookie.header())
.send()
.await
@@ -194,7 +192,7 @@ mod tests {
p.insert("username", "slamb");
p.insert("password", "hunter2");
let resp = cli
.post(&format!("{}/api/login", &s.base_url))
.post(format!("{}/api/login", &s.base_url))
.json(&p)
.send()
.await
@@ -204,7 +202,7 @@ mod tests {

// A GET shouldn't work.
let resp = cli
.get(&format!("{}/api/logout", &s.base_url))
.get(format!("{}/api/logout", &s.base_url))
.header(reqwest::header::COOKIE, cookie.header())
.send()
.await
@@ -213,7 +211,7 @@ mod tests {

// Neither should a POST without a csrf token.
let resp = cli
.post(&format!("{}/api/logout", &s.base_url))
.post(format!("{}/api/logout", &s.base_url))
.header(reqwest::header::COOKIE, cookie.header())
.send()
.await
@@ -223,7 +221,7 @@ mod tests {
// But it should work with the csrf token.
// Retrieve that from the toplevel API request.
let toplevel: serde_json::Value = cli
.post(&format!("{}/api/", &s.base_url))
.post(format!("{}/api/", &s.base_url))
.header(reqwest::header::COOKIE, cookie.header())
.send()
.await
@@ -242,7 +240,7 @@ mod tests {
let mut p = FastHashMap::default();
p.insert("csrf", csrf);
let resp = cli
.post(&format!("{}/api/logout", &s.base_url))
.post(format!("{}/api/logout", &s.base_url))
.header(reqwest::header::COOKIE, cookie.header())
.json(&p)
.send()
@@ -257,7 +255,7 @@ mod tests {

// It should also be invalidated server-side.
let resp = cli
.get(&format!("{}/api/", &s.base_url))
.get(format!("{}/api/", &s.base_url))
.header(reqwest::header::COOKIE, cookie.header())
.send()
.await

@@ -12,8 +12,8 @@ use url::form_urlencoded;
use crate::json;

use super::{
extract_json_body, parse_json_body, plain_response, require_csrf_if_session, serve_json,
Caller, ResponseResult, Service,
into_json_body, parse_json_body, plain_response, require_csrf_if_session, serve_json, Caller,
ResponseResult, Service,
};

use std::borrow::Borrow;
@@ -21,7 +21,7 @@ use std::borrow::Borrow;
impl Service {
pub(super) async fn signals(
&self,
req: Request<hyper::Body>,
req: Request<hyper::body::Incoming>,
caller: Caller,
) -> ResponseResult {
match *req.method() {
@@ -34,14 +34,18 @@ impl Service {
}
}

async fn post_signals(&self, mut req: Request<hyper::Body>, caller: Caller) -> ResponseResult {
async fn post_signals(
&self,
req: Request<hyper::body::Incoming>,
caller: Caller,
) -> ResponseResult {
if !caller.permissions.update_signals {
bail!(PermissionDenied, msg("update_signals required"));
}
let r = extract_json_body(&mut req).await?;
let r: json::PostSignalsRequest = parse_json_body(&r)?;
let (parts, b) = into_json_body(req).await?;
let r: json::PostSignalsRequest = parse_json_body(&b)?;
require_csrf_if_session(&caller, r.csrf)?;
let now = recording::Time::new(self.db.clocks().realtime());
let now = recording::Time::from(self.db.clocks().realtime());
let mut l = self.db.lock();
let start = match r.start {
json::PostSignalsTimeBase::Epoch(t) => t,
@@ -52,11 +56,11 @@ impl Service {
json::PostSignalsTimeBase::Now(d) => now + d,
};
l.update_signals(start..end, &r.signal_ids, &r.states)?;
serve_json(&req, &json::PostSignalsResponse { time_90k: now })
serve_json(&parts, &json::PostSignalsResponse { time_90k: now })
}

fn get_signals(&self, req: &Request<hyper::Body>) -> ResponseResult {
let mut time = recording::Time::min_value()..recording::Time::max_value();
fn get_signals(&self, req: &Request<hyper::body::Incoming>) -> ResponseResult {
let mut time = recording::Time::MIN..recording::Time::MAX;
if let Some(q) = req.uri().query() {
for (key, value) in form_urlencoded::parse(q.as_bytes()) {
let (key, value) = (key.borrow(), value.borrow());

@@ -49,7 +49,7 @@ impl Ui {
async fn serve(
&self,
path: &str,
req: &Request<hyper::Body>,
req: &Request<hyper::body::Incoming>,
cache_control: &'static str,
content_type: &'static str,
) -> ResponseResult {
@@ -89,7 +89,7 @@ impl Ui {

impl Service {
/// Serves a static file if possible.
pub(super) async fn static_file(&self, req: Request<hyper::Body>) -> ResponseResult {
pub(super) async fn static_file(&self, req: Request<hyper::body::Incoming>) -> ResponseResult {
let Some(static_req) = StaticFileRequest::parse(req.uri().path()) else {
bail!(NotFound, msg("static file not found"));
};
@@ -129,10 +129,7 @@ impl<'a> StaticFileRequest<'a> {
p => (p, true),
};

let last_dot = match path.rfind('.') {
None => return None,
Some(d) => d,
};
let last_dot = path.rfind('.')?;
let ext = &path[last_dot + 1..];
let mime = match ext {
"css" => "text/css",

@@ -10,12 +10,16 @@ use http::{Method, Request, StatusCode};
use crate::json::{self, PutUsersResponse, UserSubset, UserWithId};

use super::{
extract_json_body, parse_json_body, plain_response, require_csrf_if_session, serve_json,
Caller, ResponseResult, Service,
into_json_body, parse_json_body, plain_response, require_csrf_if_session, serve_json, Caller,
ResponseResult, Service,
};

impl Service {
pub(super) async fn users(&self, req: Request<hyper::Body>, caller: Caller) -> ResponseResult {
pub(super) async fn users(
&self,
req: Request<hyper::body::Incoming>,
caller: Caller,
) -> ResponseResult {
match *req.method() {
Method::GET | Method::HEAD => self.get_users(req, caller).await,
Method::POST => self.post_users(req, caller).await,
@@ -26,7 +30,11 @@ impl Service {
}
}

async fn get_users(&self, req: Request<hyper::Body>, caller: Caller) -> ResponseResult {
async fn get_users(
&self,
req: Request<hyper::body::Incoming>,
caller: Caller,
) -> ResponseResult {
if !caller.permissions.admin_users {
bail!(Unauthenticated, msg("must have admin_users permission"));
}
@@ -42,12 +50,16 @@ impl Service {
serve_json(&req, &json::GetUsersResponse { users })
}

async fn post_users(&self, mut req: Request<hyper::Body>, caller: Caller) -> ResponseResult {
async fn post_users(
&self,
req: Request<hyper::body::Incoming>,
caller: Caller,
) -> ResponseResult {
if !caller.permissions.admin_users {
bail!(Unauthenticated, msg("must have admin_users permission"));
}
let r = extract_json_body(&mut req).await?;
let mut r: json::PutUsers = parse_json_body(&r)?;
let (parts, b) = into_json_body(req).await?;
let mut r: json::PutUsers = parse_json_body(&b)?;
require_csrf_if_session(&caller, r.csrf)?;
let username = r
.user
@@ -69,12 +81,12 @@ impl Service {
}
let mut l = self.db.lock();
let user = l.apply_user_change(change)?;
serve_json(&req, &PutUsersResponse { id: user.id })
serve_json(&parts, &PutUsersResponse { id: user.id })
}

pub(super) async fn user(
&self,
req: Request<hyper::Body>,
req: Request<hyper::body::Incoming>,
caller: Caller,
id: i32,
) -> ResponseResult {
@@ -89,7 +101,12 @@ impl Service {
}
}

async fn get_user(&self, req: Request<hyper::Body>, caller: Caller, id: i32) -> ResponseResult {
async fn get_user(
&self,
req: Request<hyper::body::Incoming>,
caller: Caller,
id: i32,
) -> ResponseResult {
require_same_or_admin(&caller, id)?;
let db = self.db.lock();
let user = db
@@ -101,15 +118,15 @@ impl Service {

async fn delete_user(
&self,
mut req: Request<hyper::Body>,
req: Request<hyper::body::Incoming>,
caller: Caller,
id: i32,
) -> ResponseResult {
if !caller.permissions.admin_users {
bail!(Unauthenticated, msg("must have admin_users permission"));
}
let r = extract_json_body(&mut req).await?;
let r: json::DeleteUser = parse_json_body(&r)?;
let (_parts, b) = into_json_body(req).await?;
let r: json::DeleteUser = parse_json_body(&b)?;
require_csrf_if_session(&caller, r.csrf)?;
let mut l = self.db.lock();
l.delete_user(id)?;
@@ -118,13 +135,13 @@ impl Service {

async fn patch_user(
&self,
mut req: Request<hyper::Body>,
req: Request<hyper::body::Incoming>,
caller: Caller,
id: i32,
) -> ResponseResult {
require_same_or_admin(&caller, id)?;
let r = extract_json_body(&mut req).await?;
let r: json::PostUser = parse_json_body(&r)?;
let (_parts, b) = into_json_body(req).await?;
let r: json::PostUser = parse_json_body(&b)?;
let mut db = self.db.lock();
let user = db
.get_user_by_id_mut(id)

@@ -28,7 +28,7 @@ use super::{Caller, ResponseResult, Service};
impl Service {
pub(super) fn stream_view_mp4(
&self,
req: &Request<::hyper::Body>,
req: &Request<::hyper::body::Incoming>,
caller: Caller,
uuid: Uuid,
stream_type: db::StreamType,
@@ -39,6 +39,11 @@ impl Service {
bail!(PermissionDenied, msg("view_video required"));
}
let (stream_id, camera_name);

// False positive: on Rust 1.78.0, clippy erroneously suggests calling `clone_from` on the
// uninitialized `camera_name`.
// Apparently fixed in rustc 1.80.0-nightly (ada5e2c7b 2024-05-31).
#[allow(clippy::assigning_clones)]
{
let db = self.db.lock();
let camera = db
@@ -112,7 +117,7 @@ impl Service {

// Add a segment for the relevant part of the recording, if any.
// Note all calculations here are in wall times / wall durations.
let end_time = s.end_time.unwrap_or(i64::max_value());
let end_time = s.end_time.unwrap_or(i64::MAX);
let wd = i64::from(r.wall_duration_90k);
if s.start_time <= cur_off + wd && cur_off < end_time {
let start = cmp::max(0, s.start_time - cur_off);
@@ -136,7 +141,7 @@ impl Service {
r.wall_duration_90k,
r.media_duration_90k,
);
builder.append(&db, r, mr, true)?;
builder.append(&db, &r, mr, true)?;
} else {
trace!("...skipping recording {} wall dur {}", r.id, wd);
}
@@ -175,10 +180,10 @@ impl Service {
}
}
if let Some(start) = start_time_for_filename {
let tm = time::at(time::Timespec {
sec: start.unix_seconds(),
nsec: 0,
});
let zone = base::time::global_zone();
let tm = jiff::Timestamp::from_second(start.unix_seconds())
.expect("valid start")
.to_zoned(zone);
let stream_abbrev = if stream_type == db::StreamType::Main {
"main"
} else {
@@ -191,7 +196,7 @@ impl Service {
};
builder.set_filename(&format!(
"{}-{}-{}.{}",
tm.strftime("%Y%m%d%H%M%S").unwrap(),
tm.strftime("%Y%m%d%H%M%S"),
camera_name,
stream_abbrev,
suffix
@@ -286,7 +291,7 @@ mod tests {
let s = Server::new(Some(permissions));
let cli = reqwest::Client::new();
let resp = cli
.get(&format!(
.get(format!(
"{}/api/cameras/{}/main/view.mp4",
&s.base_url, s.db.test_camera_uuid
))

@@ -11,20 +11,23 @@ use crate::body::Body;
 use base::{bail, err};
 use futures::{Future, SinkExt};
 use http::{header, Request, Response};
-use tokio_tungstenite::{tungstenite, WebSocketStream};
+use tokio_tungstenite::tungstenite;
 use tracing::Instrument;
 
+pub type WebSocketStream =
+    tokio_tungstenite::WebSocketStream<hyper_util::rt::TokioIo<hyper::upgrade::Upgraded>>;
+
 /// Upgrades to WebSocket and runs the supplied stream handler in a separate tokio task.
 ///
 /// Fails on `Origin` mismatch with an HTTP-level error. If the handler returns
 /// an error, tries to send it to the client before dropping the stream.
 pub(super) fn upgrade<H>(
-    req: Request<::hyper::Body>,
+    req: Request<::hyper::body::Incoming>,
     handler: H,
 ) -> Result<Response<Body>, base::Error>
 where
     for<'a> H: FnOnce(
-            &'a mut WebSocketStream<hyper::upgrade::Upgraded>,
+            &'a mut WebSocketStream,
         ) -> Pin<Box<dyn Future<Output = Result<(), base::Error>> + Send + 'a>>
         + Send
         + 'static,
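The new `WebSocketStream` alias pins down hyper 1.0's extra wrapping: `hyper::upgrade::Upgraded` no longer implements tokio's I/O traits itself, so it is adapted via `hyper_util::rt::TokioIo`. A minimal caller sketch under the signatures above; `serve_live` and the greeting payload are hypothetical, not Moonfire's API:

```rust
// Hypothetical caller sketch: `upgrade` returns the 101 handshake response
// immediately and runs the closure on its own tokio task.
fn serve_live(req: Request<::hyper::body::Incoming>) -> Result<Response<Body>, base::Error> {
    upgrade(req, |ws| {
        Box::pin(async move {
            // Text payloads are `Utf8Bytes` in recent tungstenite versions;
            // `.into()` converts from `&str`/`String`.
            let _ = ws.send(tungstenite::Message::Text("hello".into())).await;
            Ok(())
        })
    })
}
```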
@@ -35,10 +38,8 @@ where
     check_origin(req.headers())?;
 
     // Otherwise, upgrade and handle the rest in a separate task.
-    let response =
-        tungstenite::handshake::server::create_response_with_body(&req, hyper::Body::empty)
-            .map_err(|e| err!(InvalidArgument, source(e)))?;
-    let (parts, _) = response.into_parts();
+    let response = tungstenite::handshake::server::create_response_with_body(&req, Body::empty)
+        .map_err(|e| err!(InvalidArgument, source(e)))?;
     let span = tracing::info_span!("websocket");
     tokio::spawn(
         async move {
@@ -49,7 +50,9 @@
                     return;
                 }
             };
-            let mut ws = tokio_tungstenite::WebSocketStream::from_raw_socket(
+            let upgraded = hyper_util::rt::TokioIo::new(upgraded);
+
+            let mut ws = WebSocketStream::from_raw_socket(
                 upgraded,
                 tungstenite::protocol::Role::Server,
                 None,
@@ -57,8 +60,10 @@
             .await;
             if let Err(err) = handler(&mut ws).await {
                 // TODO: use a nice JSON message format for errors.
-                tracing::error!(%err, "closing with error");
-                let _ = ws.send(tungstenite::Message::Text(err.to_string())).await;
+                tracing::error!(err = %err.chain(), "closing with error");
+                let _ = ws
+                    .send(tungstenite::Message::Text(err.to_string().into()))
+                    .await;
             } else {
                 tracing::info!("closing");
             };
@@ -66,7 +71,7 @@
         }
         .instrument(span),
     );
-    Ok(Response::from_parts(parts, Body::from("")))
+    Ok(response)
 }
 
 /// Checks the `Host` and `Origin` headers match, if the latter is supplied.
5 ui/.gitignore vendored
@@ -9,10 +9,7 @@
 # testing
 /coverage
 
-# production, current path
-/build
-
-# production, old path
+# production
 /dist
 
 # misc
34 ui/.prettierignore Normal file
@@ -0,0 +1,34 @@
+
+#-------------------------------------------------------------------------------------------------------------------
+# Keep this section in sync with .gitignore
+#-------------------------------------------------------------------------------------------------------------------
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.js
+/.idea
+
+# testing
+/coverage
+
+# production
+/dist
+
+# misc
+.DS_Store
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+.eslintcache
+
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+#-------------------------------------------------------------------------------------------------------------------
+# Prettier-specific overrides
+#-------------------------------------------------------------------------------------------------------------------
+
+pnpm-lock.yaml
@@ -21,10 +21,10 @@
     "react-router-dom": "^6.22.3"
   },
   "scripts": {
-    "check-format": "prettier --check --ignore-path .gitignore .",
+    "check-format": "prettier --check --ignore-path .prettierignore .",
     "dev": "vite",
     "build": "tsc && vite build",
-    "format": "prettier --write --ignore-path .gitignore .",
+    "format": "prettier --write --ignore-path .prettierignore .",
     "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
     "preview": "vite preview",
     "test": "vitest"
12729 ui/pnpm-lock.yaml generated
File diff suppressed because it is too large.
@@ -11,6 +11,9 @@ import TableCell from "@mui/material/TableCell";
 import TableRow, { TableRowProps } from "@mui/material/TableRow";
 import Skeleton from "@mui/material/Skeleton";
 import Alert from "@mui/material/Alert";
+import Tooltip from "@mui/material/Tooltip";
+import ErrorIcon from "@mui/icons-material/Error";
+import Icon from "@mui/material/Icon";
 
 interface Props {
   stream: Stream;
@@ -40,6 +43,7 @@ export interface CombinedRecording {
   height: number;
   aspectWidth: number;
   aspectHeight: number;
+  endReason?: string;
 }
 
 /**
@@ -58,7 +62,7 @@ export function combine(
   for (const r of response.recordings) {
     const vse = response.videoSampleEntries[r.videoSampleEntryId];
 
-    // Combine `r` into `cur` if `r` precedes r, shouldn't be split, and
+    // Combine `r` into `cur` if `r` precedes `cur`, shouldn't be split, and
     // has similar resolution. It doesn't have to have exactly the same
     // video sample entry; minor changes to encoding can be seamlessly
     // combined into one `.mp4` file.
@@ -74,7 +78,9 @@ export function combine(
       (split90k === undefined || cur.endTime90k - r.startTime90k <= split90k)
     ) {
       cur.startId = r.startId;
-      cur.firstUncommitted == r.firstUncommitted ?? cur.firstUncommitted;
+      if (r.firstUncommitted !== undefined) {
+        cur.firstUncommitted = r.firstUncommitted;
+      }
       cur.startTime90k = r.startTime90k;
       cur.videoSamples += r.videoSamples;
       cur.sampleFileBytes += r.sampleFileBytes;
@@ -100,6 +106,7 @@ export function combine(
         height: vse.height,
         aspectWidth: vse.aspectWidth,
         aspectHeight: vse.aspectHeight,
+        endReason: r.endReason,
       };
     }
   if (cur !== null) {
@@ -129,6 +136,7 @@ interface State {
 interface RowProps extends TableRowProps {
   start: React.ReactNode;
   end: React.ReactNode;
+  endReason?: string;
   resolution: React.ReactNode;
   fps: React.ReactNode;
   storage: React.ReactNode;
@@ -138,6 +146,7 @@ interface RowProps extends TableRowProps {
 const Row = ({
   start,
   end,
+  endReason,
   resolution,
   fps,
   storage,
@@ -146,7 +155,18 @@
 }: RowProps) => (
   <TableRow {...rest}>
     <TableCell align="right">{start}</TableCell>
-    <TableCell align="right">{end}</TableCell>
+    <TableCell align="right">
+      {end}
+      {endReason !== undefined ? (
+        <Tooltip title={endReason}>
+          <Icon sx={{ verticalAlign: "bottom", marginLeft: ".5em" }}>
+            <ErrorIcon />
+          </Icon>
+        </Tooltip>
+      ) : (
+        <Icon sx={{ verticalAlign: "bottom", marginLeft: ".5em" }} />
+      )}
+    </TableCell>
     <TableCell align="right" className="opt">
       {resolution}
     </TableCell>
@@ -268,6 +288,7 @@ const VideoList = ({
           onClick={() => setActiveRecording([stream, r])}
           start={formatTime(start)}
           end={formatTime(end)}
+          endReason={r.endReason}
           resolution={`${r.width}x${r.height}`}
           fps={frameRateFmt.format(r.videoSamples / durationSec)}
           storage={`${sizeFmt.format(r.sampleFileBytes / 1048576)} MiB`}
@@ -475,7 +475,7 @@ const LiveCamera = ({ mediaSourceApi, camera, chooser }: LiveCameraProps) => {
           <Alert severity="error">{playbackState.message}</Alert>
         </div>
       )}
-      <video ref={videoRef} muted autoPlay />
+      <video ref={videoRef} muted autoPlay playsInline />
     </Box>
   );
 };
@@ -405,6 +405,11 @@ export interface Recording {
    * the number of bytes of video in this recording.
    */
   sampleFileBytes: number;
+
+  /**
+   * the reason this recording ended, if any/known.
+   */
+  endReason?: string;
 }
 
 export interface VideoSampleEntry {
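On the server side, the optional `endReason` field corresponds to a value that is simply omitted from the JSON when unknown. A minimal sketch of that shape, assuming serde's camelCase renaming; this is a hypothetical struct for illustration, not Moonfire's actual type:

```rust
// Hypothetical sketch of the JSON shape for the new field: serde's camelCase
// renaming yields `endReason`, and `skip_serializing_if` drops the key
// entirely when the reason is unknown, matching the optional TS field above.
#[derive(serde::Serialize)]
#[serde(rename_all = "camelCase")]
struct Recording {
    sample_file_bytes: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    end_reason: Option<String>,
}
```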