Mirror of https://github.com/blakeblackshear/frigate.git (synced 2026-02-13 15:02:01 -05:00)
Compare commits
54 Commits
fix-log-le ... ai-chat
| SHA1 |
|---|
| e57447813e |
| 89a713baaa |
| ae45e8f873 |
| 80b20898f9 |
| 058da7ac1a |
| f7ce3914ce |
| 325b9d6407 |
| 7c235bb86e |
| 57366db37e |
| 2ab6fe6f7b |
| a084b63a96 |
| 7ea8a7597f |
| e1ddef4de2 |
| f0e0d820c1 |
| b89cdbc730 |
| 2d3028f523 |
| a6335204ba |
| 9827363761 |
| 3f234bbd36 |
| e0559fc335 |
| 3798e28b13 |
| 1ddacb8bbb |
| 785bdf0ea5 |
| e84dbb9f3f |
| 6c93a9a1b3 |
| e74c2540f2 |
| 6dc6b7b575 |
| e9848b227f |
| 1cc4b8bafa |
| a3f72c1540 |
| cd4494a702 |
| 8797949f83 |
| ab105c6ae5 |
| e7b4b0b9c0 |
| b804750aaf |
| 28f925926c |
| 5da58d12b5 |
| e63ef5afac |
| b425bb4ba4 |
| 67e3f8eefa |
| e1005ac2a5 |
| 6accc38275 |
| ff20be58b4 |
| fc3f798bd6 |
| 44e695362a |
| 9fbc854bf5 |
| 334acd6078 |
| 92c503070c |
| ecd7d04228 |
| 11576e9e68 |
| 2cfb118981 |
| e1c273be8d |
| ea1533f456 |
| 41b983a133 |
Makefile (5 changed lines)
@@ -1,7 +1,7 @@
default_target: local

COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.17.0
VERSION = 0.18.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
BOARDS= #Initialized empty
@@ -49,7 +49,8 @@ push: push-boards
    --push

run: local
    docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest
    docker run --rm --publish=5000:5000 --publish=8971:8971 \
        --volume=${PWD}/config:/config frigate:latest

run_tests: local
    docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \
@@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM scratch AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc

FROM wget AS tempio
ARG TARGETARCH
@@ -10,7 +10,8 @@ echo "[INFO] Starting certsync..."
lefile="/etc/letsencrypt/live/frigate/fullchain.pem"

tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled`
tls_enabled=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .tls.enabled`
listen_external_port=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .listen.external_port`

while true
do
@@ -34,7 +35,7 @@ do
;;
esac

liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:8971 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'`
liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:$listen_external_port 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'`

case "$liveprint" in
*Fingerprint*)
@@ -55,4 +56,4 @@ do

done

exit 0
exit 0
@@ -80,14 +80,14 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain.
fi

# build templates for optional FRIGATE_BASE_PATH environment variable
python3 /usr/local/nginx/get_base_path.py | \
python3 /usr/local/nginx/get_nginx_settings.py | \
tempio -template /usr/local/nginx/templates/base_path.gotmpl \
-out /usr/local/nginx/conf/base_path.conf
-out /usr/local/nginx/conf/base_path.conf

# build templates for optional TLS support
python3 /usr/local/nginx/get_listen_settings.py | \
tempio -template /usr/local/nginx/templates/listen.gotmpl \
-out /usr/local/nginx/conf/listen.conf
# build templates for additional network settings
python3 /usr/local/nginx/get_nginx_settings.py | \
tempio -template /usr/local/nginx/templates/listen.gotmpl \
-out /usr/local/nginx/conf/listen.conf

# Replace the bash process with the NGINX process, redirecting stderr to stdout
exec 2>&1
@@ -1,11 +0,0 @@
"""Prints the base path as json to stdout."""

import json
import os
from typing import Any

base_path = os.environ.get("FRIGATE_BASE_PATH", "")

result: dict[str, Any] = {"base_path": base_path}

print(json.dumps(result))
@@ -1,35 +0,0 @@
"""Prints the tls config as json to stdout."""

import json
import sys
from typing import Any

from ruamel.yaml import YAML

sys.path.insert(0, "/opt/frigate")
from frigate.util.config import find_config_file

sys.path.remove("/opt/frigate")

yaml = YAML()

config_file = find_config_file()

try:
    with open(config_file) as f:
        raw_config = f.read()

    if config_file.endswith((".yaml", ".yml")):
        config: dict[str, Any] = yaml.load(raw_config)
    elif config_file.endswith(".json"):
        config: dict[str, Any] = json.loads(raw_config)
except FileNotFoundError:
    config: dict[str, Any] = {}

tls_config: dict[str, any] = config.get("tls", {"enabled": True})
networking_config = config.get("networking", {})
ipv6_config = networking_config.get("ipv6", {"enabled": False})

output = {"tls": tls_config, "ipv6": ipv6_config}

print(json.dumps(output))
docker/main/rootfs/usr/local/nginx/get_nginx_settings.py (new file, 62 lines)
@@ -0,0 +1,62 @@
"""Prints the nginx settings as json to stdout."""

import json
import os
import sys
from typing import Any

from ruamel.yaml import YAML

sys.path.insert(0, "/opt/frigate")
from frigate.util.config import find_config_file

sys.path.remove("/opt/frigate")

yaml = YAML()

config_file = find_config_file()

try:
    with open(config_file) as f:
        raw_config = f.read()

    if config_file.endswith((".yaml", ".yml")):
        config: dict[str, Any] = yaml.load(raw_config)
    elif config_file.endswith(".json"):
        config: dict[str, Any] = json.loads(raw_config)
except FileNotFoundError:
    config: dict[str, Any] = {}

tls_config: dict[str, Any] = config.get("tls", {})
tls_config.setdefault("enabled", True)

networking_config: dict[str, Any] = config.get("networking", {})
ipv6_config: dict[str, Any] = networking_config.get("ipv6", {})
ipv6_config.setdefault("enabled", False)

listen_config: dict[str, Any] = networking_config.get("listen", {})
listen_config.setdefault("internal", 5000)
listen_config.setdefault("external", 8971)

# handle case where internal port is a string with ip:port
internal_port = listen_config["internal"]
if type(internal_port) is str:
    internal_port = int(internal_port.split(":")[-1])
listen_config["internal_port"] = internal_port

# handle case where external port is a string with ip:port
external_port = listen_config["external"]
if type(external_port) is str:
    external_port = int(external_port.split(":")[-1])
listen_config["external_port"] = external_port

base_path = os.environ.get("FRIGATE_BASE_PATH", "")

result: dict[str, Any] = {
    "tls": tls_config,
    "ipv6": ipv6_config,
    "listen": listen_config,
    "base_path": base_path,
}

print(json.dumps(result))
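For reference, a minimal sketch (not part of the change) of the structure this script prints when no config file can be opened and `FRIGATE_BASE_PATH` is unset; every value follows the defaults set above:

```python
# Illustrative only: expected output of get_nginx_settings.py with an absent
# config file and FRIGATE_BASE_PATH unset (all values come from the defaults above).
expected = {
    "tls": {"enabled": True},
    "ipv6": {"enabled": False},
    "listen": {
        "internal": 5000,
        "external": 8971,
        "internal_port": 5000,
        "external_port": 8971,
    },
    "base_path": "",
}
```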
@@ -7,7 +7,7 @@ location ^~ {{ .base_path }}/ {
    # remove base_url from the path before passing upstream
    rewrite ^{{ .base_path }}/(.*) /$1 break;

    proxy_pass $scheme://127.0.0.1:8971;
    proxy_pass $scheme://127.0.0.1:{{ .listen.external_port }};
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
@@ -1,45 +1,36 @@
|
||||
|
||||
# Internal (IPv4 always; IPv6 optional)
|
||||
listen 5000;
|
||||
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }}
|
||||
|
||||
listen {{ .listen.internal }};
|
||||
{{ if .ipv6.enabled }}listen [::]:{{ .listen.internal_port }};{{ end }}
|
||||
|
||||
# intended for external traffic, protected by auth
|
||||
{{ if .tls }}
|
||||
{{ if .tls.enabled }}
|
||||
# external HTTPS (IPv4 always; IPv6 optional)
|
||||
listen 8971 ssl;
|
||||
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }}
|
||||
{{ if .tls.enabled }}
|
||||
# external HTTPS (IPv4 always; IPv6 optional)
|
||||
listen {{ .listen.external }} ssl;
|
||||
{{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }} ssl;{{ end }}
|
||||
|
||||
ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
|
||||
ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
|
||||
|
||||
# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
|
||||
# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
|
||||
ssl_session_timeout 1d;
|
||||
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
|
||||
ssl_session_tickets off;
|
||||
# generated 2024-06-01, Mozilla Guideline v5.7, nginx 1.25.3, OpenSSL 1.1.1w, modern configuration, no OCSP
|
||||
# https://ssl-config.mozilla.org/#server=nginx&version=1.25.3&config=modern&openssl=1.1.1w&ocsp=false&guideline=5.7
|
||||
ssl_session_timeout 1d;
|
||||
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
|
||||
ssl_session_tickets off;
|
||||
|
||||
# modern configuration
|
||||
ssl_protocols TLSv1.3;
|
||||
ssl_prefer_server_ciphers off;
|
||||
# modern configuration
|
||||
ssl_protocols TLSv1.3;
|
||||
ssl_prefer_server_ciphers off;
|
||||
|
||||
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
|
||||
add_header Strict-Transport-Security "max-age=63072000" always;
|
||||
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
|
||||
add_header Strict-Transport-Security "max-age=63072000" always;
|
||||
|
||||
# ACME challenge location
|
||||
location /.well-known/acme-challenge/ {
|
||||
default_type "text/plain";
|
||||
root /etc/letsencrypt/www;
|
||||
}
|
||||
{{ else }}
|
||||
# external HTTP (IPv4 always; IPv6 optional)
|
||||
listen 8971;
|
||||
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
|
||||
{{ end }}
|
||||
# ACME challenge location
|
||||
location /.well-known/acme-challenge/ {
|
||||
default_type "text/plain";
|
||||
root /etc/letsencrypt/www;
|
||||
}
|
||||
{{ else }}
|
||||
# (No tls section) default to HTTP (IPv4 always; IPv6 optional)
|
||||
listen 8971;
|
||||
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
|
||||
# (No tls) default to HTTP (IPv4 always; IPv6 optional)
|
||||
listen {{ .listen.external }};
|
||||
{{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }};{{ end }}
|
||||
{{ end }}
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ ARG ROCM
RUN apt update -qq && \
    apt install -y wget gpg && \
    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \
    wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.2/ubuntu/jammy/amdgpu-install_7.2.70200-1_all.deb && \
    apt install -y ./rocm.deb && \
    apt update && \
    apt install -qq -y rocm
@@ -56,6 +56,8 @@ FROM scratch AS rocm-dist

ARG ROCM

# Copy HIP headers required for MIOpen JIT (BuildHip) / HIPRTC at runtime
COPY --from=rocm /opt/rocm-${ROCM}/include/ /opt/rocm-${ROCM}/include/
COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
# Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3)
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/

@@ -1 +1 @@
onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl
onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.2.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl
@@ -1,5 +1,5 @@
variable "ROCM" {
  default = "7.1.1"
  default = "7.2.0"
}
variable "HSA_OVERRIDE_GFX_VERSION" {
  default = ""
@@ -155,34 +155,33 @@ services:
### Enabling IPv6

IPv6 is disabled by default, to enable IPv6 listen.gotmpl needs to be bind mounted with IPv6 enabled. For example:
IPv6 is disabled by default. To enable IPv6, modify your Frigate configuration as follows:

```
{{ if not .enabled }}
# intended for external traffic, protected by auth
listen 8971;
{{ else }}
# intended for external traffic, protected by auth
listen 8971 ssl;

# intended for internal traffic, not protected by auth
listen 5000;
```yaml
networking:
  ipv6:
    enabled: True
```

becomes
### Listen on different ports

```
{{ if not .enabled }}
# intended for external traffic, protected by auth
listen [::]:8971 ipv6only=off;
{{ else }}
# intended for external traffic, protected by auth
listen [::]:8971 ipv6only=off ssl;
You can change the ports Nginx listens on via Frigate's configuration file. The internal port (unauthenticated) and the external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful, for example, to avoid exposing the internal port outside the container.

# intended for internal traffic, not protected by auth
listen [::]:5000 ipv6only=off;
For example:

```yaml
networking:
  listen:
    internal: 127.0.0.1:5000
    external: 8971
```

:::warning

This setting is for advanced users. For the majority of use cases it's recommended to change the `ports` section of your Docker Compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. Changing Frigate's ports may break some integrations.

:::
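For context, the port is taken from the last colon-separated field of an `ip:port` value, so a bare port keeps working unchanged. A minimal sketch of that handling, mirroring the logic added in `get_nginx_settings.py` earlier in this diff (the helper name is illustrative):

```python
def bare_port(value: int | str) -> int:
    """Return the port number from either 8971 or '127.0.0.1:8971'."""
    if isinstance(value, str):
        return int(value.split(":")[-1])
    return value

assert bare_port(8971) == 8971
assert bare_port("127.0.0.1:5000") == 5000
```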
## Base path

By default, Frigate runs at the root path (`/`). However, some setups require running Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing.
@@ -234,7 +233,7 @@ To do this:

### Custom go2rtc version

Frigate currently includes go2rtc v1.9.10, there may be certain cases where you want to run a different version of go2rtc.
Frigate currently includes go2rtc v1.9.13, but there may be certain cases where you want to run a different version of go2rtc.

To do this:
@@ -166,6 +166,10 @@ In this example:
- If no mapping matches, Frigate falls back to `default_role` if configured.
- If `role_map` is not defined, Frigate assumes the role header directly contains `admin`, `viewer`, or a custom role name.

**Note on matching semantics:**

- Admin precedence: if the `admin` mapping matches, Frigate resolves the session to `admin` to avoid an accidental downgrade when a user belongs to multiple groups (for example both `admin` and `viewer` groups). A sketch of this precedence is shown below.
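A minimal sketch of that precedence, assuming a `role_map` keyed by role name with lists of group claims; it mirrors the `resolve_role` change later in this diff, and the names used here are illustrative:

```python
def resolve(groups: list[str], role_map: dict[str, list[str]],
            config_roles: list[str], default_role: str) -> str:
    # Roles whose configured group lists intersect the group claims from the header.
    matched = {role for role, allowed in role_map.items()
               if any(group in allowed for group in groups)}
    # Admin short-circuits so membership in a lower-privilege group cannot downgrade it.
    if "admin" in matched and "admin" in config_roles:
        return "admin"
    return next((role for role in config_roles if role in matched), default_role)

# A user in both an admin group and a viewer group resolves to admin.
assert resolve(["ops", "family"],
               {"admin": ["ops"], "viewer": ["family"]},
               ["admin", "viewer"], "viewer") == "admin"
```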
#### Port Considerations

**Authenticated Port (8971)**
@@ -244,7 +244,7 @@ go2rtc:
    - rtspx://192.168.1.1:7441/abcdefghijk
```

[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp)
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-rtsp)

In the Unifi 2.0 update, Unifi Protect cameras changed their audio sample rate, which causes issues for ffmpeg. The input rate needs to be set for `record` if used directly with Unifi Protect.
@@ -5,7 +5,7 @@ title: Configuring Generative AI
## Configuration

A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.

To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
@@ -77,8 +77,46 @@ genai:
  provider: ollama
  base_url: http://localhost:11434
  model: qwen3-vl:4b
  provider_options: # other Ollama client options can be defined
    keep_alive: -1
    options:
      num_ctx: 8192 # make sure the context matches other services that are using ollama
```
## llama.cpp

[llama.cpp](https://github.com/ggml-org/llama.cpp) is a C++ implementation of LLaMA that provides a high-performance inference server. Using llama.cpp directly gives you access to all native llama.cpp options and parameters.

:::warning

Using llama.cpp on CPU is not recommended; high inference times make using Generative AI impractical.

:::

It is highly recommended to host the llama.cpp server on a machine with a discrete graphics card, or on an Apple silicon Mac, for best performance.

### Supported Models

You must use a vision-capable model with Frigate. The llama.cpp server supports various vision models in GGUF format.

### Configuration

```yaml
genai:
  provider: llamacpp
  base_url: http://localhost:8080
  model: your-model-name
  provider_options:
    temperature: 0.7
    repeat_penalty: 1.05
    top_p: 0.8
    top_k: 40
    min_p: 0.05
    seed: -1
```

All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters.

## Google Gemini

Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API; however, the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation.
@@ -11,7 +11,7 @@ By default, descriptions will be generated for all tracked objects and all zones
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.

Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).
Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt#frigatecamera_nameobject_descriptionsset).
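A minimal sketch of toggling this from a script, assuming the `paho-mqtt` package, a broker at `mqtt.local`, and a camera named `front_door`; the `ON`/`OFF` payload values are an assumption here, not stated in this diff:

```python
import paho.mqtt.publish as publish

# Disable object descriptions for one camera; publish "ON" to re-enable them.
# Payload values are assumed; broker host and camera name are placeholders.
publish.single(
    "frigate/front_door/object_descriptions/set",
    payload="OFF",
    hostname="mqtt.local",
)
```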
## Usage and Best Practices
@@ -7,7 +7,7 @@ Generative AI can be used to automatically generate structured summaries of revi
Summaries are requested automatically from your AI provider for alert review items when the activity has ended; they can optionally be enabled for detections as well.

Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset).
Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt#frigatecamera_namereview_descriptionsset).

## Review Summary Usage and Best Practices
@@ -139,7 +139,13 @@ record:
:::tip

When using `hwaccel_args` globally hardware encoding is used for time lapse generation. The encoder determines its own behavior so the resulting file size may be undesirably large.
When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras.<camera>.record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or an empty string will fall back to software encoding (libx264).

:::

:::tip

The encoder determines its own behavior, so the resulting file size may be undesirably large.
To reduce the output file size, the ffmpeg parameter `-qp n` can be used (where `n` is the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario.

:::
@@ -148,19 +154,16 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe

Apple devices running the Safari browser may fail to play back h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices.

## Syncing Recordings With Disk
## Syncing Media Files With Disk

In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist.
Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk.

```yaml
record:
  sync_recordings: True
```
Normal operation may leave small numbers of orphaned files until Frigate's scheduled cleanup, but crashes, configuration changes, or upgrades may cause more orphaned files that Frigate does not clean up. This feature checks the file system for media files and removes any that are not referenced in the database.

This feature is meant to fix variations in files, not completely delete entries in the database. If you delete all of your media, don't use `sync_recordings`, just stop Frigate, delete the `frigate.db` database, and restart.
The Maintenance pane in the Frigate UI or the `POST /api/media/sync` API endpoint can be used to trigger a media sync. When using the API, a job ID is returned and the operation continues on the server. Status can be checked with the `/api/media/sync/status/{job_id}` endpoint.

:::warning

The sync operation uses considerable CPU resources and in most cases is not needed, only enable when necessary.
This operation uses considerable CPU resources and includes a safety threshold that aborts if more than 50% of files would be deleted. Only run it when necessary. If you set `force: true`, the safety threshold is bypassed; do not use `force` unless you are certain the deletions are intended.

:::
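A minimal sketch of driving the sync API from a script, assuming the `requests` package and a Frigate instance reached on the internal (unauthenticated) port with the usual `/api` prefix; the terminal status names checked below are assumptions beyond the `queued` state named in this diff:

```python
import time

import requests

BASE = "http://frigate.local:5000/api"  # assumed host and path prefix

# Queue a dry run over all media types; a 409 response means a job is already running.
resp = requests.post(f"{BASE}/media/sync", json={"dry_run": True, "media_types": ["all"]})
resp.raise_for_status()
job_id = resp.json()["job"]["id"]

# Poll the job until it reports something other than queued/running
# (status field names inside the job payload are assumed here).
while True:
    job = requests.get(f"{BASE}/media/sync/status/{job_id}").json()["job"]
    if job.get("status") not in ("queued", "running"):
        break
    time.sleep(5)

print(job)
```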
@@ -73,11 +73,19 @@ tls:
|
||||
# Optional: Enable TLS for port 8971 (default: shown below)
|
||||
enabled: True
|
||||
|
||||
# Optional: IPv6 configuration
|
||||
# Optional: Networking configuration
|
||||
networking:
|
||||
# Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below)
|
||||
ipv6:
|
||||
enabled: False
|
||||
# Optional: Override ports Frigate uses for listening (defaults: shown below)
|
||||
# An IP address may also be provided to bind to a specific interface, e.g. ip:port
|
||||
# NOTE: This setting is for advanced users and may break some integrations. The majority
|
||||
# of users should change ports in the docker compose file
|
||||
# or use the docker run `--publish` option to select a different port.
|
||||
listen:
|
||||
internal: 5000
|
||||
external: 8971
|
||||
|
||||
# Optional: Proxy configuration
|
||||
proxy:
|
||||
@@ -510,8 +518,6 @@ record:
|
||||
# Optional: Number of minutes to wait between cleanup runs (default: shown below)
|
||||
# This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
|
||||
expire_interval: 60
|
||||
# Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below).
|
||||
sync_recordings: False
|
||||
# Optional: Continuous retention settings
|
||||
continuous:
|
||||
# Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below)
|
||||
@@ -534,6 +540,8 @@ record:
|
||||
# The -r (framerate) dictates how smooth the output video is.
|
||||
# So the args would be -vf setpts=0.02*PTS -r 30 in that case.
|
||||
timelapse_args: "-vf setpts=0.04*PTS -r 30"
|
||||
# Optional: Global hardware acceleration settings for timelapse exports. (default: inherit)
|
||||
hwaccel_args: auto
|
||||
# Optional: Recording Preview Settings
|
||||
preview:
|
||||
# Optional: Quality of recording preview (default: shown below).
|
||||
@@ -752,7 +760,7 @@ classification:
|
||||
interval: None
|
||||
|
||||
# Optional: Restream configuration
|
||||
# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
|
||||
# Uses https://github.com/AlexxIT/go2rtc (v1.9.13)
|
||||
# NOTE: The default go2rtc API port (1984) must be used,
|
||||
# changing this port for the integrated go2rtc instance is not supported.
|
||||
go2rtc:
|
||||
@@ -838,6 +846,11 @@ cameras:
|
||||
# Optional: camera specific output args (default: inherit)
|
||||
# output_args:
|
||||
|
||||
# Optional: camera specific hwaccel args for timelapse export (default: inherit)
|
||||
# record:
|
||||
# export:
|
||||
# hwaccel_args:
|
||||
|
||||
# Optional: timeout for highest scoring image before allowing it
|
||||
# to be replaced by a newer image. (default: shown below)
|
||||
best_image_timeout: 60
|
||||
|
||||
@@ -7,7 +7,7 @@ title: Restream
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.

Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.13) to provide its restream and MSE/WebRTC capabilities. The go2rtc config lives under the `go2rtc` section of the config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration) for more advanced configurations and features.

:::note
@@ -206,7 +206,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g

## Advanced Restream Configurations

The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:

:::warning

@@ -11,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect

## Setup a go2rtc stream

First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp.
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#module-streams), not just rtsp.

:::tip

@@ -47,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
- Check Video Codec:

  - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
  - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation.
  - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
  - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#codecs-madness) in go2rtc documentation.
  - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.

```yaml
go2rtc:
  streams:
@@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = {
    {
      type: "link",
      label: "Go2RTC Configuration Reference",
      href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration",
      href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration",
    } as PropSidebarItemLink,
  ],
  Detectors: [
docs/static/frigate-api.yaml (vendored, 60 changed lines)
@@ -331,6 +331,59 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/media/sync:
|
||||
post:
|
||||
tags:
|
||||
- App
|
||||
summary: Start media sync job
|
||||
description: |-
|
||||
Start an asynchronous media sync job to find and (optionally) remove orphaned media files.
|
||||
Returns 202 with job details when queued, or 409 if a job is already running.
|
||||
operationId: sync_media_media_sync_post
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
responses:
|
||||
"202":
|
||||
description: Accepted - Job queued
|
||||
"409":
|
||||
description: Conflict - Job already running
|
||||
"422":
|
||||
description: Validation Error
|
||||
|
||||
/media/sync/current:
|
||||
get:
|
||||
tags:
|
||||
- App
|
||||
summary: Get current media sync job
|
||||
description: |-
|
||||
Retrieve the current running media sync job, if any. Returns the job details or null when no job is active.
|
||||
operationId: get_media_sync_current_media_sync_current_get
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
"422":
|
||||
description: Validation Error
|
||||
|
||||
/media/sync/status/{job_id}:
|
||||
get:
|
||||
tags:
|
||||
- App
|
||||
summary: Get media sync job status
|
||||
description: |-
|
||||
Get status and results for the specified media sync job id. Returns 200 with job details including results, or 404 if the job is not found.
|
||||
operationId: get_media_sync_status_media_sync_status__job_id__get
|
||||
parameters:
|
||||
- name: job_id
|
||||
in: path
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
"404":
|
||||
description: Not Found - Job not found
|
||||
"422":
|
||||
description: Validation Error
|
||||
/faces/train/{name}/classify:
|
||||
post:
|
||||
tags:
|
||||
@@ -3147,6 +3200,7 @@ paths:
|
||||
duration: 30
|
||||
include_recording: true
|
||||
draw: {}
|
||||
pre_capture: null
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
@@ -4949,6 +5003,12 @@ components:
|
||||
- type: "null"
|
||||
title: Draw
|
||||
default: {}
|
||||
pre_capture:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: "null"
|
||||
title: Pre Capture Seconds
|
||||
default: null
|
||||
type: object
|
||||
title: EventsCreateBody
|
||||
EventsDeleteBody:
|
||||
|
||||
@@ -30,15 +30,22 @@ from frigate.api.auth import (
|
||||
require_role,
|
||||
)
|
||||
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
|
||||
from frigate.api.defs.request.app_body import AppConfigSetBody
|
||||
from frigate.api.defs.request.app_body import AppConfigSetBody, MediaSyncBody
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.config.camera.updater import (
|
||||
CameraConfigUpdateEnum,
|
||||
CameraConfigUpdateTopic,
|
||||
)
|
||||
from frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector
|
||||
from frigate.jobs.media_sync import (
|
||||
get_current_media_sync_job,
|
||||
get_media_sync_job_by_id,
|
||||
start_media_sync_job,
|
||||
)
|
||||
from frigate.models import Event, Timeline
|
||||
from frigate.stats.prometheus import get_metrics, update_metrics
|
||||
from frigate.types import JobStatusTypesEnum
|
||||
from frigate.util.builtin import (
|
||||
clean_camera_user_pass,
|
||||
flatten_config_data,
|
||||
@@ -463,7 +470,15 @@ def config_set(request: Request, body: AppConfigSetBody):
|
||||
|
||||
@router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())])
|
||||
def vainfo():
|
||||
vainfo = vainfo_hwaccel()
|
||||
# Use LibvaGpuSelector to pick an appropriate libva device (if available)
|
||||
selected_gpu = ""
|
||||
try:
|
||||
selected_gpu = _gpu_selector.get_gpu_arg(FFMPEG_HWACCEL_VAAPI, 0) or ""
|
||||
except Exception:
|
||||
selected_gpu = ""
|
||||
|
||||
# If selected_gpu is empty, pass None to vainfo_hwaccel to run plain `vainfo`.
|
||||
vainfo = vainfo_hwaccel(device_name=selected_gpu or None)
|
||||
return JSONResponse(
|
||||
content={
|
||||
"return_code": vainfo.returncode,
|
||||
@@ -598,6 +613,98 @@ def restart():
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/media/sync",
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Start media sync job",
|
||||
description="""Start an asynchronous media sync job to find and (optionally) remove orphaned media files.
|
||||
Returns 202 with job details when queued, or 409 if a job is already running.""",
|
||||
)
|
||||
def sync_media(body: MediaSyncBody = Body(...)):
|
||||
"""Start async media sync job - remove orphaned files.
|
||||
|
||||
Syncs specified media types: event snapshots, event thumbnails, review thumbnails,
|
||||
previews, exports, and/or recordings. Job runs in background; use /media/sync/current
|
||||
or /media/sync/status/{job_id} to check status.
|
||||
|
||||
Args:
|
||||
body: MediaSyncBody with dry_run flag and media_types list.
|
||||
media_types can include: 'all', 'event_snapshots', 'event_thumbnails',
|
||||
'review_thumbnails', 'previews', 'exports', 'recordings'
|
||||
|
||||
Returns:
|
||||
202 Accepted with job_id, or 409 Conflict if job already running.
|
||||
"""
|
||||
job_id = start_media_sync_job(
|
||||
dry_run=body.dry_run, media_types=body.media_types, force=body.force
|
||||
)
|
||||
|
||||
if job_id is None:
|
||||
# A job is already running
|
||||
current = get_current_media_sync_job()
|
||||
return JSONResponse(
|
||||
content={
|
||||
"error": "A media sync job is already running",
|
||||
"current_job_id": current.id if current else None,
|
||||
},
|
||||
status_code=409,
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
content={
|
||||
"job": {
|
||||
"job_type": "media_sync",
|
||||
"status": JobStatusTypesEnum.queued,
|
||||
"id": job_id,
|
||||
}
|
||||
},
|
||||
status_code=202,
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/media/sync/current",
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Get current media sync job",
|
||||
description="""Retrieve the current running media sync job, if any. Returns the job details
|
||||
or null when no job is active.""",
|
||||
)
|
||||
def get_media_sync_current():
|
||||
"""Get the current running media sync job, if any."""
|
||||
job = get_current_media_sync_job()
|
||||
|
||||
if job is None:
|
||||
return JSONResponse(content={"job": None}, status_code=200)
|
||||
|
||||
return JSONResponse(
|
||||
content={"job": job.to_dict()},
|
||||
status_code=200,
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/media/sync/status/{job_id}",
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Get media sync job status",
|
||||
description="""Get status and results for the specified media sync job id. Returns 200 with
|
||||
job details including results, or 404 if the job is not found.""",
|
||||
)
|
||||
def get_media_sync_status(job_id: str):
|
||||
"""Get the status of a specific media sync job."""
|
||||
job = get_media_sync_job_by_id(job_id)
|
||||
|
||||
if job is None:
|
||||
return JSONResponse(
|
||||
content={"error": "Job not found"},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
content={"job": job.to_dict()},
|
||||
status_code=200,
|
||||
)
|
||||
|
||||
|
||||
@router.get("/labels", dependencies=[Depends(allow_any_authenticated())])
|
||||
def get_labels(camera: str = ""):
|
||||
try:
|
||||
|
||||
@@ -26,7 +26,7 @@ from frigate.api.defs.request.app_body import (
|
||||
AppPutRoleBody,
|
||||
)
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.config import AuthConfig, ProxyConfig
|
||||
from frigate.config import AuthConfig, NetworkingConfig, ProxyConfig
|
||||
from frigate.const import CONFIG_DIR, JWT_SECRET_ENV_VAR, PASSWORD_HASH_ALGORITHM
|
||||
from frigate.models import User
|
||||
|
||||
@@ -41,7 +41,7 @@ def require_admin_by_default():
|
||||
endpoints require admin access unless explicitly overridden with
|
||||
allow_public(), allow_any_authenticated(), or require_role().
|
||||
|
||||
Port 5000 (internal) always has admin role set by the /auth endpoint,
|
||||
Internal port always has admin role set by the /auth endpoint,
|
||||
so this check passes automatically for internal requests.
|
||||
|
||||
Certain paths are exempted from the global admin check because they must
|
||||
@@ -130,7 +130,7 @@ def require_admin_by_default():
|
||||
pass
|
||||
|
||||
# For all other paths, require admin role
|
||||
# Port 5000 (internal) requests have admin role set automatically
|
||||
# Internal port requests have admin role set automatically
|
||||
role = request.headers.get("remote-role")
|
||||
if role == "admin":
|
||||
return
|
||||
@@ -143,6 +143,17 @@ def require_admin_by_default():
|
||||
return admin_checker
|
||||
|
||||
|
||||
def _is_authenticated(request: Request) -> bool:
|
||||
"""
|
||||
Helper to determine if a request is from an authenticated user.
|
||||
|
||||
Returns True if the request has a valid authenticated user (not anonymous).
|
||||
Internal port requests are considered anonymous despite having admin role.
|
||||
"""
|
||||
username = request.headers.get("remote-user")
|
||||
return username is not None and username != "anonymous"
|
||||
|
||||
|
||||
def allow_public():
|
||||
"""
|
||||
Override dependency to allow unauthenticated access to an endpoint.
|
||||
@@ -171,6 +182,7 @@ def allow_any_authenticated():
|
||||
|
||||
Rejects:
|
||||
- Requests with no remote-user header (did not pass through /auth endpoint)
|
||||
- External port requests with anonymous user (auth disabled, no proxy auth)
|
||||
|
||||
Example:
|
||||
@router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())])
|
||||
@@ -179,8 +191,14 @@ def allow_any_authenticated():
|
||||
async def auth_checker(request: Request):
|
||||
# Ensure a remote-user has been set by the /auth endpoint
|
||||
username = request.headers.get("remote-user")
|
||||
if username is None:
|
||||
raise HTTPException(status_code=401, detail="Authentication required")
|
||||
|
||||
# Internal port requests have admin role and should be allowed
|
||||
role = request.headers.get("remote-role")
|
||||
|
||||
if role != "admin":
|
||||
if username is None or not _is_authenticated(request):
|
||||
raise HTTPException(status_code=401, detail="Authentication required")
|
||||
|
||||
return
|
||||
|
||||
return auth_checker
|
||||
@@ -439,10 +457,11 @@ def resolve_role(
|
||||
Determine the effective role for a request based on proxy headers and configuration.
|
||||
|
||||
Order of resolution:
|
||||
1. If a role header is defined in proxy_config.header_map.role:
|
||||
- If a role_map is configured, treat the header as group claims
|
||||
(split by proxy_config.separator) and map to roles.
|
||||
- If no role_map is configured, treat the header as role names directly.
|
||||
1. If a role header is defined in proxy_config.header_map.role:
|
||||
- If a role_map is configured, treat the header as group claims
|
||||
(split by proxy_config.separator) and map to roles.
|
||||
Admin matches short-circuit to admin.
|
||||
- If no role_map is configured, treat the header as role names directly.
|
||||
2. If no valid role is found, return proxy_config.default_role if it's valid in config_roles, else 'viewer'.
|
||||
|
||||
Args:
|
||||
@@ -492,6 +511,12 @@ def resolve_role(
|
||||
}
|
||||
logger.debug("Matched roles from role_map: %s", matched_roles)
|
||||
|
||||
# If admin matches, prioritize it to avoid accidental downgrade when
|
||||
# users belong to both admin and lower-privilege groups.
|
||||
if "admin" in matched_roles and "admin" in config_roles:
|
||||
logger.debug("Resolved role (with role_map) to 'admin'.")
|
||||
return "admin"
|
||||
|
||||
if matched_roles:
|
||||
resolved = next(
|
||||
(r for r in config_roles if r in matched_roles), validated_default
|
||||
@@ -563,12 +588,18 @@ def resolve_role(
|
||||
def auth(request: Request):
|
||||
auth_config: AuthConfig = request.app.frigate_config.auth
|
||||
proxy_config: ProxyConfig = request.app.frigate_config.proxy
|
||||
networking_config: NetworkingConfig = request.app.frigate_config.networking
|
||||
|
||||
success_response = Response("", status_code=202)
|
||||
|
||||
# handle case where internal port is a string with ip:port
|
||||
internal_port = networking_config.listen.internal
|
||||
if type(internal_port) is str:
|
||||
internal_port = int(internal_port.split(":")[-1])
|
||||
|
||||
# dont require auth if the request is on the internal port
|
||||
# this header is set by Frigate's nginx proxy, so it cant be spoofed
|
||||
if int(request.headers.get("x-server-port", default=0)) == 5000:
|
||||
if int(request.headers.get("x-server-port", default=0)) == internal_port:
|
||||
success_response.headers["remote-user"] = "anonymous"
|
||||
success_response.headers["remote-role"] = "admin"
|
||||
return success_response
|
||||
|
||||
frigate/api/chat.py (new file, 717 lines)
@@ -0,0 +1,717 @@
|
||||
"""Chat and LLM tool calling APIs."""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import cv2
|
||||
from fastapi import APIRouter, Body, Depends, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
|
||||
from frigate.api.auth import (
|
||||
allow_any_authenticated,
|
||||
get_allowed_cameras_for_filter,
|
||||
)
|
||||
from frigate.api.defs.query.events_query_parameters import EventsQueryParams
|
||||
from frigate.api.defs.request.chat_body import ChatCompletionRequest
|
||||
from frigate.api.defs.response.chat_response import (
|
||||
ChatCompletionResponse,
|
||||
ChatMessageResponse,
|
||||
ToolCall,
|
||||
)
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.api.event import events
|
||||
from frigate.genai import get_genai_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(tags=[Tags.chat])
|
||||
|
||||
|
||||
def _format_events_with_local_time(
|
||||
events_list: List[Dict[str, Any]],
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Add human-readable local start/end times to each event for the LLM."""
|
||||
result = []
|
||||
for evt in events_list:
|
||||
if not isinstance(evt, dict):
|
||||
result.append(evt)
|
||||
continue
|
||||
copy_evt = dict(evt)
|
||||
try:
|
||||
start_ts = evt.get("start_time")
|
||||
end_ts = evt.get("end_time")
|
||||
if start_ts is not None:
|
||||
dt_start = datetime.fromtimestamp(start_ts)
|
||||
copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %I:%M:%S %p")
|
||||
if end_ts is not None:
|
||||
dt_end = datetime.fromtimestamp(end_ts)
|
||||
copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %I:%M:%S %p")
|
||||
except (TypeError, ValueError, OSError):
|
||||
pass
|
||||
result.append(copy_evt)
|
||||
return result
|
||||
|
||||
|
||||
class ToolExecuteRequest(BaseModel):
|
||||
"""Request model for tool execution."""
|
||||
|
||||
tool_name: str
|
||||
arguments: Dict[str, Any]
|
||||
|
||||
|
||||
def get_tool_definitions() -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get OpenAI-compatible tool definitions for Frigate.
|
||||
|
||||
Returns a list of tool definitions that can be used with OpenAI-compatible
|
||||
function calling APIs.
|
||||
"""
|
||||
return [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "search_objects",
|
||||
"description": (
|
||||
"Search for detected objects in Frigate by camera, object label, time range, "
|
||||
"zones, and other filters. Use this to answer questions about when "
|
||||
"objects were detected, what objects appeared, or to find specific object detections. "
|
||||
"An 'object' in Frigate represents a tracked detection (e.g., a person, package, car)."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"camera": {
|
||||
"type": "string",
|
||||
"description": "Camera name to filter by (optional). Use 'all' for all cameras.",
|
||||
},
|
||||
"label": {
|
||||
"type": "string",
|
||||
"description": "Object label to filter by (e.g., 'person', 'package', 'car').",
|
||||
},
|
||||
"after": {
|
||||
"type": "string",
|
||||
"description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').",
|
||||
},
|
||||
"before": {
|
||||
"type": "string",
|
||||
"description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').",
|
||||
},
|
||||
"zones": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "List of zone names to filter by.",
|
||||
},
|
||||
"limit": {
|
||||
"type": "integer",
|
||||
"description": "Maximum number of objects to return (default: 10).",
|
||||
"default": 10,
|
||||
},
|
||||
},
|
||||
},
|
||||
"required": [],
|
||||
},
|
||||
},
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "get_live_context",
|
||||
"description": (
|
||||
"Get the current detection information for a camera: objects being tracked, "
|
||||
"zones, timestamps. Use this to understand what is visible in the live view. "
|
||||
"Call this when the user has included a live image (via include_live_image) or "
|
||||
"when answering questions about what is happening right now on a specific camera."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"camera": {
|
||||
"type": "string",
|
||||
"description": "Camera name to get live context for.",
|
||||
},
|
||||
},
|
||||
"required": ["camera"],
|
||||
},
|
||||
},
|
||||
},
|
||||
]
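A minimal sketch of wiring these definitions into an OpenAI-compatible client and forwarding the resulting tool call to the `/chat/execute` endpoint defined below; the host, `/api` prefix, model name, and LLM server URL are assumptions, while the endpoint paths and request shape come from this file:

```python
import json

import requests
from openai import OpenAI

FRIGATE = "http://frigate.local:5000/api"  # assumed host and path prefix
tools = requests.get(f"{FRIGATE}/chat/tools").json()["tools"]

# Any OpenAI-compatible server works here; the base_url and model are placeholders.
llm = OpenAI(base_url="http://localhost:11434/v1", api_key="unused")
resp = llm.chat.completions.create(
    model="qwen3-vl:4b",
    messages=[{"role": "user", "content": "Any people on the driveway today?"}],
    tools=tools,
)

# Forward each requested tool call back to Frigate and print the raw results.
for call in resp.choices[0].message.tool_calls or []:
    result = requests.post(
        f"{FRIGATE}/chat/execute",
        json={
            "tool_name": call.function.name,
            "arguments": json.loads(call.function.arguments),
        },
    ).json()
    print(call.function.name, result)
```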
|
||||
|
||||
|
||||
@router.get(
|
||||
"/chat/tools",
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
summary="Get available tools",
|
||||
description="Returns OpenAI-compatible tool definitions for function calling.",
|
||||
)
|
||||
def get_tools(request: Request) -> JSONResponse:
|
||||
"""Get list of available tools for LLM function calling."""
|
||||
tools = get_tool_definitions()
|
||||
return JSONResponse(content={"tools": tools})
|
||||
|
||||
|
||||
async def _execute_search_objects(
|
||||
request: Request,
|
||||
arguments: Dict[str, Any],
|
||||
allowed_cameras: List[str],
|
||||
) -> JSONResponse:
|
||||
"""
|
||||
Execute the search_objects tool.
|
||||
|
||||
This searches for detected objects (events) in Frigate using the same
|
||||
logic as the events API endpoint.
|
||||
"""
|
||||
# Parse after/before as server local time; convert to Unix timestamp
|
||||
after = arguments.get("after")
|
||||
before = arguments.get("before")
|
||||
|
||||
def _parse_as_local_timestamp(s: str):
|
||||
s = s.replace("Z", "").strip()[:19]
|
||||
dt = datetime.strptime(s, "%Y-%m-%dT%H:%M:%S")
|
||||
return time.mktime(dt.timetuple())
|
||||
|
||||
if after:
|
||||
try:
|
||||
after = _parse_as_local_timestamp(after)
|
||||
except (ValueError, AttributeError, TypeError):
|
||||
logger.warning(f"Invalid 'after' timestamp format: {after}")
|
||||
after = None
|
||||
|
||||
if before:
|
||||
try:
|
||||
before = _parse_as_local_timestamp(before)
|
||||
except (ValueError, AttributeError, TypeError):
|
||||
logger.warning(f"Invalid 'before' timestamp format: {before}")
|
||||
before = None
|
||||
|
||||
# Convert zones array to comma-separated string if provided
|
||||
zones = arguments.get("zones")
|
||||
if isinstance(zones, list):
|
||||
zones = ",".join(zones)
|
||||
elif zones is None:
|
||||
zones = "all"
|
||||
|
||||
# Build query parameters compatible with EventsQueryParams
|
||||
query_params = EventsQueryParams(
|
||||
camera=arguments.get("camera", "all"),
|
||||
cameras=arguments.get("camera", "all"),
|
||||
label=arguments.get("label", "all"),
|
||||
labels=arguments.get("label", "all"),
|
||||
zones=zones,
|
||||
zone=zones,
|
||||
after=after,
|
||||
before=before,
|
||||
limit=arguments.get("limit", 10),
|
||||
)
|
||||
|
||||
try:
|
||||
# Call the events endpoint function directly
|
||||
# The events function is synchronous and takes params and allowed_cameras
|
||||
response = events(query_params, allowed_cameras)
|
||||
|
||||
# The response is already a JSONResponse with event data
|
||||
# Return it as-is for the LLM
|
||||
return response
|
||||
except Exception as e:
|
||||
logger.error(f"Error executing search_objects: {e}", exc_info=True)
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": f"Error searching objects: {str(e)}",
|
||||
},
|
||||
status_code=500,
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/chat/execute",
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
summary="Execute a tool",
|
||||
description="Execute a tool function call from an LLM.",
|
||||
)
|
||||
async def execute_tool(
|
||||
request: Request,
|
||||
body: ToolExecuteRequest = Body(...),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
) -> JSONResponse:
|
||||
"""
|
||||
Execute a tool function call.
|
||||
|
||||
This endpoint receives tool calls from LLMs and executes the corresponding
|
||||
Frigate operations, returning results in a format the LLM can understand.
|
||||
"""
|
||||
tool_name = body.tool_name
|
||||
arguments = body.arguments
|
||||
|
||||
logger.debug(f"Executing tool: {tool_name} with arguments: {arguments}")
|
||||
|
||||
if tool_name == "search_objects":
|
||||
return await _execute_search_objects(request, arguments, allowed_cameras)
|
||||
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": f"Unknown tool: {tool_name}",
|
||||
"tool": tool_name,
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
|
||||
async def _execute_get_live_context(
|
||||
request: Request,
|
||||
camera: str,
|
||||
allowed_cameras: List[str],
|
||||
) -> Dict[str, Any]:
|
||||
if camera not in allowed_cameras:
|
||||
return {
|
||||
"error": f"Camera '{camera}' not found or access denied",
|
||||
}
|
||||
|
||||
if camera not in request.app.frigate_config.cameras:
|
||||
return {
|
||||
"error": f"Camera '{camera}' not found",
|
||||
}
|
||||
|
||||
try:
|
||||
frame_processor = request.app.detected_frames_processor
|
||||
camera_state = frame_processor.camera_states.get(camera)
|
||||
|
||||
if camera_state is None:
|
||||
return {
|
||||
"error": f"Camera '{camera}' state not available",
|
||||
}
|
||||
|
||||
tracked_objects_dict = {}
|
||||
with camera_state.current_frame_lock:
|
||||
tracked_objects = camera_state.tracked_objects.copy()
|
||||
frame_time = camera_state.current_frame_time
|
||||
|
||||
for obj_id, tracked_obj in tracked_objects.items():
|
||||
obj_dict = tracked_obj.to_dict()
|
||||
if obj_dict.get("frame_time") == frame_time:
|
||||
tracked_objects_dict[obj_id] = {
|
||||
"label": obj_dict.get("label"),
|
||||
"zones": obj_dict.get("current_zones", []),
|
||||
"sub_label": obj_dict.get("sub_label"),
|
||||
"stationary": obj_dict.get("stationary", False),
|
||||
}
|
||||
|
||||
return {
|
||||
"camera": camera,
|
||||
"timestamp": frame_time,
|
||||
"detections": list(tracked_objects_dict.values()),
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error executing get_live_context: {e}", exc_info=True)
|
||||
return {
|
||||
"error": f"Error getting live context: {str(e)}",
|
||||
}
|
||||
|
||||
|
||||
async def _get_live_frame_image_url(
|
||||
request: Request,
|
||||
camera: str,
|
||||
allowed_cameras: List[str],
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Fetch the current live frame for a camera as a base64 data URL.
|
||||
|
||||
Returns None if the frame cannot be retrieved. Used when include_live_image
|
||||
is set to attach the image to the first user message.
|
||||
"""
|
||||
if (
|
||||
camera not in allowed_cameras
|
||||
or camera not in request.app.frigate_config.cameras
|
||||
):
|
||||
return None
|
||||
try:
|
||||
frame_processor = request.app.detected_frames_processor
|
||||
if camera not in frame_processor.camera_states:
|
||||
return None
|
||||
frame = frame_processor.get_current_frame(camera, {})
|
||||
if frame is None:
|
||||
return None
|
||||
height, width = frame.shape[:2]
|
||||
max_dimension = 1024
|
||||
if height > max_dimension or width > max_dimension:
|
||||
scale = max_dimension / max(height, width)
|
||||
frame = cv2.resize(
|
||||
frame,
|
||||
(int(width * scale), int(height * scale)),
|
||||
interpolation=cv2.INTER_AREA,
|
||||
)
|
||||
_, img_encoded = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 85])
|
||||
b64 = base64.b64encode(img_encoded.tobytes()).decode("utf-8")
|
||||
return f"data:image/jpeg;base64,{b64}"
|
||||
except Exception as e:
|
||||
logger.debug("Failed to get live frame for %s: %s", camera, e)
|
||||
return None
|
||||
|
||||
|
||||
async def _execute_tool_internal(
|
||||
tool_name: str,
|
||||
arguments: Dict[str, Any],
|
||||
request: Request,
|
||||
allowed_cameras: List[str],
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Internal helper to execute a tool and return the result as a dict.
|
||||
|
||||
This is used by the chat completion endpoint to execute tools.
|
||||
"""
|
||||
if tool_name == "search_objects":
|
||||
response = await _execute_search_objects(request, arguments, allowed_cameras)
|
||||
try:
|
||||
if hasattr(response, "body"):
|
||||
body_str = response.body.decode("utf-8")
|
||||
return json.loads(body_str)
|
||||
elif hasattr(response, "content"):
|
||||
return response.content
|
||||
else:
|
||||
return {}
|
||||
except (json.JSONDecodeError, AttributeError) as e:
|
||||
logger.warning(f"Failed to extract tool result: {e}")
|
||||
return {"error": "Failed to parse tool result"}
|
||||
elif tool_name == "get_live_context":
|
||||
camera = arguments.get("camera")
|
||||
if not camera:
|
||||
return {"error": "Camera parameter is required"}
|
||||
return await _execute_get_live_context(request, camera, allowed_cameras)
|
||||
else:
|
||||
return {"error": f"Unknown tool: {tool_name}"}
|
||||
|
||||
|
||||
@router.post(
|
||||
"/chat/completion",
|
||||
response_model=ChatCompletionResponse,
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
summary="Chat completion with tool calling",
|
||||
description=(
|
||||
"Send a chat message to the configured GenAI provider with tool calling support. "
|
||||
"The LLM can call Frigate tools to answer questions about your cameras and events."
|
||||
),
|
||||
)
|
||||
async def chat_completion(
|
||||
request: Request,
|
||||
body: ChatCompletionRequest = Body(...),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
) -> JSONResponse:
|
||||
"""
|
||||
Chat completion endpoint with tool calling support.
|
||||
|
||||
This endpoint:
|
||||
1. Gets the configured GenAI client
|
||||
2. Gets tool definitions
|
||||
3. Sends messages + tools to LLM
|
||||
4. Handles tool_calls if present
|
||||
5. Executes tools and sends results back to LLM
|
||||
6. Repeats until final answer
|
||||
7. Returns response to user
|
||||
"""
|
||||
genai_client = get_genai_client(request.app.frigate_config)
|
||||
if not genai_client:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"error": "GenAI is not configured. Please configure a GenAI provider in your Frigate config.",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
tools = get_tool_definitions()
|
||||
conversation = []
|
||||
|
||||
current_datetime = datetime.now()
|
||||
current_date_str = current_datetime.strftime("%Y-%m-%d")
|
||||
current_time_str = current_datetime.strftime("%I:%M:%S %p")
|
||||
|
||||
cameras_info = []
|
||||
config = request.app.frigate_config
|
||||
for camera_id in allowed_cameras:
|
||||
if camera_id not in config.cameras:
|
||||
continue
|
||||
camera_config = config.cameras[camera_id]
|
||||
friendly_name = (
|
||||
camera_config.friendly_name
|
||||
if camera_config.friendly_name
|
||||
else camera_id.replace("_", " ").title()
|
||||
)
|
||||
cameras_info.append(f" - {friendly_name} (ID: {camera_id})")
|
||||
|
||||
cameras_section = ""
|
||||
if cameras_info:
|
||||
cameras_section = (
|
||||
"\n\nAvailable cameras:\n"
|
||||
+ "\n".join(cameras_info)
|
||||
+ "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls."
|
||||
)
|
||||
|
||||
live_image_note = ""
|
||||
if body.include_live_image:
|
||||
live_image_note = (
|
||||
f"\n\nThe first user message includes a live image from camera "
|
||||
f"'{body.include_live_image}'. Use get_live_context for that camera to get "
|
||||
"current detection details (objects, zones) to aid in understanding the image."
|
||||
)
|
||||
|
||||
system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events.
|
||||
|
||||
Current server local date and time: {current_date_str} at {current_time_str}
|
||||
|
||||
Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields.
|
||||
When users ask about "today", "yesterday", "this week", etc., use the current date above as reference.
|
||||
When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
|
||||
Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}"""
|
||||
|
||||
conversation.append(
|
||||
{
|
||||
"role": "system",
|
||||
"content": system_prompt,
|
||||
}
|
||||
)
|
||||
|
||||
first_user_message_seen = False
|
||||
for msg in body.messages:
|
||||
msg_dict = {
|
||||
"role": msg.role,
|
||||
"content": msg.content,
|
||||
}
|
||||
if msg.tool_call_id:
|
||||
msg_dict["tool_call_id"] = msg.tool_call_id
|
||||
if msg.name:
|
||||
msg_dict["name"] = msg.name
|
||||
|
||||
if (
|
||||
msg.role == "user"
|
||||
and not first_user_message_seen
|
||||
and body.include_live_image
|
||||
):
|
||||
first_user_message_seen = True
|
||||
image_url = await _get_live_frame_image_url(
|
||||
request, body.include_live_image, allowed_cameras
|
||||
)
|
||||
if image_url:
|
||||
msg_dict["content"] = [
|
||||
{"type": "text", "text": msg.content},
|
||||
{"type": "image_url", "image_url": {"url": image_url}},
|
||||
]
|
||||
|
||||
conversation.append(msg_dict)
|
||||
|
||||
tool_iterations = 0
|
||||
tool_calls: List[ToolCall] = []
|
||||
max_iterations = body.max_tool_iterations
|
||||
|
||||
logger.debug(
|
||||
f"Starting chat completion with {len(conversation)} message(s), "
|
||||
f"{len(tools)} tool(s) available, max_iterations={max_iterations}"
|
||||
)
|
||||
|
||||
try:
|
||||
while tool_iterations < max_iterations:
|
||||
logger.debug(
|
||||
f"Calling LLM (iteration {tool_iterations + 1}/{max_iterations}) "
|
||||
f"with {len(conversation)} message(s) in conversation"
|
||||
)
|
||||
response = genai_client.chat_with_tools(
|
||||
messages=conversation,
|
||||
tools=tools if tools else None,
|
||||
tool_choice="auto",
|
||||
)
|
||||
|
||||
if response.get("finish_reason") == "error":
|
||||
logger.error("GenAI client returned an error")
|
||||
return JSONResponse(
|
||||
content={
|
||||
"error": "An error occurred while processing your request.",
|
||||
},
|
||||
status_code=500,
|
||||
)
|
||||
|
||||
assistant_message = {
|
||||
"role": "assistant",
|
||||
"content": response.get("content"),
|
||||
}
|
||||
if response.get("tool_calls"):
|
||||
assistant_message["tool_calls"] = [
|
||||
{
|
||||
"id": tc["id"],
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": tc["name"],
|
||||
"arguments": json.dumps(tc["arguments"]),
|
||||
},
|
||||
}
|
||||
for tc in response["tool_calls"]
|
||||
]
|
||||
conversation.append(assistant_message)
|
||||
|
||||
pending_tool_calls = response.get("tool_calls")
|
||||
if not pending_tool_calls:
|
||||
logger.debug(
|
||||
f"Chat completion finished with final answer (iterations: {tool_iterations})"
|
||||
)
|
||||
return JSONResponse(
|
||||
content=ChatCompletionResponse(
|
||||
message=ChatMessageResponse(
|
||||
role="assistant",
|
||||
content=response.get("content"),
|
||||
tool_calls=None,
|
||||
),
|
||||
finish_reason=response.get("finish_reason", "stop"),
|
||||
tool_iterations=tool_iterations,
|
||||
tool_calls=tool_calls,
|
||||
).model_dump(),
|
||||
)
|
||||
|
||||
# Execute tools
|
||||
tool_iterations += 1
|
||||
logger.debug(
|
||||
f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): "
|
||||
f"{len(pending_tool_calls)} tool(s) to execute"
|
||||
)
|
||||
tool_results = []
|
||||
|
||||
for tool_call in pending_tool_calls:
|
||||
tool_name = tool_call["name"]
|
||||
tool_args = tool_call["arguments"]
|
||||
tool_call_id = tool_call["id"]
|
||||
|
||||
logger.debug(
|
||||
f"Executing tool: {tool_name} (id: {tool_call_id}) with arguments: {json.dumps(tool_args, indent=2)}"
|
||||
)
|
||||
|
||||
try:
|
||||
tool_result = await _execute_tool_internal(
|
||||
tool_name, tool_args, request, allowed_cameras
|
||||
)
|
||||
|
||||
# Add local time fields to search_objects results so the LLM doesn't hallucinate timestamps
|
||||
if tool_name == "search_objects" and isinstance(tool_result, list):
|
||||
tool_result = _format_events_with_local_time(tool_result)
|
||||
_keys = {
|
||||
"id",
|
||||
"camera",
|
||||
"label",
|
||||
"zones",
|
||||
"start_time_local",
|
||||
"end_time_local",
|
||||
"sub_label",
|
||||
"event_count",
|
||||
}
|
||||
tool_result = [
|
||||
{k: evt[k] for k in _keys if k in evt}
|
||||
for evt in tool_result
|
||||
if isinstance(evt, dict)
|
||||
]
|
||||
|
||||
if isinstance(tool_result, dict):
|
||||
result_content = json.dumps(tool_result)
|
||||
result_summary = tool_result
|
||||
if isinstance(tool_result, dict) and isinstance(
|
||||
tool_result.get("content"), list
|
||||
):
|
||||
result_count = len(tool_result.get("content", []))
|
||||
result_summary = {
|
||||
"count": result_count,
|
||||
"sample": tool_result.get("content", [])[:2]
|
||||
if result_count > 0
|
||||
else [],
|
||||
}
|
||||
logger.debug(
|
||||
f"Tool {tool_name} (id: {tool_call_id}) completed successfully. "
|
||||
f"Result: {json.dumps(result_summary, indent=2)}"
|
||||
)
|
||||
elif isinstance(tool_result, list):
|
||||
result_content = json.dumps(tool_result)
|
||||
logger.debug(
|
||||
f"Tool {tool_name} (id: {tool_call_id}) completed successfully. "
|
||||
f"Result: {len(tool_result)} item(s)"
|
||||
)
|
||||
elif isinstance(tool_result, str):
|
||||
result_content = tool_result
|
||||
logger.debug(
|
||||
f"Tool {tool_name} (id: {tool_call_id}) completed successfully. "
|
||||
f"Result length: {len(result_content)} characters"
|
||||
)
|
||||
else:
|
||||
result_content = str(tool_result)
|
||||
logger.debug(
|
||||
f"Tool {tool_name} (id: {tool_call_id}) completed successfully. "
|
||||
f"Result type: {type(tool_result).__name__}"
|
||||
)
|
||||
|
||||
tool_calls.append(
|
||||
ToolCall(
|
||||
name=tool_name,
|
||||
arguments=tool_args or {},
|
||||
response=result_content,
|
||||
)
|
||||
)
|
||||
tool_results.append(
|
||||
{
|
||||
"role": "tool",
|
||||
"tool_call_id": tool_call_id,
|
||||
"content": result_content,
|
||||
}
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error executing tool {tool_name} (id: {tool_call_id}): {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
error_content = json.dumps(
|
||||
{"error": f"Tool execution failed: {str(e)}"}
|
||||
)
|
||||
tool_calls.append(
|
||||
ToolCall(
|
||||
name=tool_name,
|
||||
arguments=tool_args or {},
|
||||
response=error_content,
|
||||
)
|
||||
)
|
||||
tool_results.append(
|
||||
{
|
||||
"role": "tool",
|
||||
"tool_call_id": tool_call_id,
|
||||
"content": error_content,
|
||||
}
|
||||
)
|
||||
logger.debug(
|
||||
f"Tool {tool_name} (id: {tool_call_id}) failed. Error result added to conversation."
|
||||
)
|
||||
|
||||
conversation.extend(tool_results)
|
||||
logger.debug(
|
||||
f"Added {len(tool_results)} tool result(s) to conversation. "
|
||||
f"Continuing with next LLM call..."
|
||||
)
|
||||
|
||||
logger.warning(
|
||||
f"Max tool iterations ({max_iterations}) reached. Returning partial response."
|
||||
)
|
||||
return JSONResponse(
|
||||
content=ChatCompletionResponse(
|
||||
message=ChatMessageResponse(
|
||||
role="assistant",
|
||||
content="I reached the maximum number of tool call iterations. Please try rephrasing your question.",
|
||||
tool_calls=None,
|
||||
),
|
||||
finish_reason="length",
|
||||
tool_iterations=tool_iterations,
|
||||
tool_calls=tool_calls,
|
||||
).model_dump(),
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in chat completion: {e}", exc_info=True)
|
||||
return JSONResponse(
|
||||
content={
|
||||
"error": "An error occurred while processing your request.",
|
||||
},
|
||||
status_code=500,
|
||||
)
@@ -1,8 +1,7 @@
from enum import Enum
from typing import Optional, Union
from typing import Optional

from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema


class Extension(str, Enum):
@@ -48,15 +47,3 @@ class MediaMjpegFeedQueryParams(BaseModel):
    mask: Optional[int] = None
    motion: Optional[int] = None
    regions: Optional[int] = None


class MediaRecordingsSummaryQueryParams(BaseModel):
    timezone: str = "utc"
    cameras: Optional[str] = "all"


class MediaRecordingsAvailabilityQueryParams(BaseModel):
    cameras: str = "all"
    before: Union[float, SkipJsonSchema[None]] = None
    after: Union[float, SkipJsonSchema[None]] = None
    scale: int = 30

21
frigate/api/defs/query/recordings_query_parameters.py
Normal file
@@ -0,0 +1,21 @@
from typing import Optional, Union

from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema


class MediaRecordingsSummaryQueryParams(BaseModel):
    timezone: str = "utc"
    cameras: Optional[str] = "all"


class MediaRecordingsAvailabilityQueryParams(BaseModel):
    cameras: str = "all"
    before: Union[float, SkipJsonSchema[None]] = None
    after: Union[float, SkipJsonSchema[None]] = None
    scale: int = 30


class RecordingsDeleteQueryParams(BaseModel):
    keep: Optional[str] = None
    cameras: Optional[str] = "all"
@@ -1,6 +1,6 @@
from typing import Any, Dict, Optional
from typing import Any, Dict, List, Optional

from pydantic import BaseModel
from pydantic import BaseModel, Field


class AppConfigSetBody(BaseModel):
@@ -27,3 +27,16 @@ class AppPostLoginBody(BaseModel):

class AppPutRoleBody(BaseModel):
    role: str


class MediaSyncBody(BaseModel):
    dry_run: bool = Field(
        default=True, description="If True, only report orphans without deleting them"
    )
    media_types: List[str] = Field(
        default=["all"],
        description="Types of media to sync: 'all', 'event_snapshots', 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'",
    )
    force: bool = Field(
        default=False, description="If True, bypass safety threshold checks"
    )

41
frigate/api/defs/request/chat_body.py
Normal file
@@ -0,0 +1,41 @@
"""Chat API request models."""

from typing import Optional

from pydantic import BaseModel, Field


class ChatMessage(BaseModel):
    """A single message in a chat conversation."""

    role: str = Field(
        description="Message role: 'user', 'assistant', 'system', or 'tool'"
    )
    content: str = Field(description="Message content")
    tool_call_id: Optional[str] = Field(
        default=None, description="For tool messages, the ID of the tool call"
    )
    name: Optional[str] = Field(
        default=None, description="For tool messages, the tool name"
    )


class ChatCompletionRequest(BaseModel):
    """Request for chat completion with tool calling."""

    messages: list[ChatMessage] = Field(
        description="List of messages in the conversation"
    )
    max_tool_iterations: int = Field(
        default=5,
        ge=1,
        le=10,
        description="Maximum number of tool call iterations (default: 5)",
    )
    include_live_image: Optional[str] = Field(
        default=None,
        description=(
            "If set, the current live frame from this camera is attached to the first "
            "user message as multimodal content. Use with get_live_context for detection info."
        ),
    )
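For orientation, here is a minimal sketch, not part of the diff, of a payload matching the request models above; the message text and camera ID are illustrative assumptions only:

# Illustrative sketch only (not part of this diff): constructing a body for the
# chat completion endpoint using the ChatCompletionRequest model defined above.
example_request = ChatCompletionRequest(
    messages=[
        ChatMessage(role="user", content="What was detected on the back deck today?")
    ],
    max_tool_iterations=5,
    include_live_image="back_deck_cam",  # hypothetical camera ID
)
payload = example_request.model_dump()  # dict ready to POST as JSON
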
@@ -41,6 +41,7 @@ class EventsCreateBody(BaseModel):
    duration: Optional[int] = 30
    include_recording: Optional[bool] = True
    draw: Optional[dict] = {}
    pre_capture: Optional[int] = None


class EventsEndBody(BaseModel):

35
frigate/api/defs/request/export_case_body.py
Normal file
@@ -0,0 +1,35 @@
from typing import Optional

from pydantic import BaseModel, Field


class ExportCaseCreateBody(BaseModel):
    """Request body for creating a new export case."""

    name: str = Field(max_length=100, description="Friendly name of the export case")
    description: Optional[str] = Field(
        default=None, description="Optional description of the export case"
    )


class ExportCaseUpdateBody(BaseModel):
    """Request body for updating an existing export case."""

    name: Optional[str] = Field(
        default=None,
        max_length=100,
        description="Updated friendly name of the export case",
    )
    description: Optional[str] = Field(
        default=None, description="Updated description of the export case"
    )


class ExportCaseAssignBody(BaseModel):
    """Request body for assigning or unassigning an export to a case."""

    export_case_id: Optional[str] = Field(
        default=None,
        max_length=30,
        description="Case ID to assign to the export, or null to unassign",
    )
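As a rough sketch, not part of the diff, the case bodies above would be used along the following lines; the names and IDs are made up for illustration:

# Illustrative sketch only (not part of this diff): request bodies for the
# export case endpoints, built from the models defined above.
create_body = ExportCaseCreateBody(
    name="Porch incident", description="Clips gathered for an insurance claim"
)
assign_body = ExportCaseAssignBody(export_case_id="abc123def456")  # hypothetical case ID
unassign_body = ExportCaseAssignBody(export_case_id=None)  # None/null unassigns the export
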
@@ -3,18 +3,47 @@ from typing import Optional, Union
from pydantic import BaseModel, Field
from pydantic.json_schema import SkipJsonSchema

from frigate.record.export import (
    PlaybackFactorEnum,
    PlaybackSourceEnum,
)
from frigate.record.export import PlaybackSourceEnum


class ExportRecordingsBody(BaseModel):
    playback: PlaybackFactorEnum = Field(
        default=PlaybackFactorEnum.realtime, title="Playback factor"
    )
    source: PlaybackSourceEnum = Field(
        default=PlaybackSourceEnum.recordings, title="Playback source"
    )
    name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)
    image_path: Union[str, SkipJsonSchema[None]] = None
    export_case_id: Optional[str] = Field(
        default=None,
        title="Export case ID",
        max_length=30,
        description="ID of the export case to assign this export to",
    )


class ExportRecordingsCustomBody(BaseModel):
    source: PlaybackSourceEnum = Field(
        default=PlaybackSourceEnum.recordings, title="Playback source"
    )
    name: str = Field(title="Friendly name", default=None, max_length=256)
    image_path: Union[str, SkipJsonSchema[None]] = None
    export_case_id: Optional[str] = Field(
        default=None,
        title="Export case ID",
        max_length=30,
        description="ID of the export case to assign this export to",
    )
    ffmpeg_input_args: Optional[str] = Field(
        default=None,
        title="FFmpeg input arguments",
        description="Custom FFmpeg input arguments. If not provided, defaults to timelapse input args.",
    )
    ffmpeg_output_args: Optional[str] = Field(
        default=None,
        title="FFmpeg output arguments",
        description="Custom FFmpeg output arguments. If not provided, defaults to timelapse output args.",
    )
    cpu_fallback: bool = Field(
        default=False,
        title="CPU Fallback",
        description="If true, retry export without hardware acceleration if the initial export fails.",
    )

54
frigate/api/defs/response/chat_response.py
Normal file
@@ -0,0 +1,54 @@
"""Chat API response models."""

from typing import Any, Optional

from pydantic import BaseModel, Field


class ToolCallInvocation(BaseModel):
    """A tool call requested by the LLM (before execution)."""

    id: str = Field(description="Unique identifier for this tool call")
    name: str = Field(description="Tool name to call")
    arguments: dict[str, Any] = Field(description="Arguments for the tool call")


class ChatMessageResponse(BaseModel):
    """A message in the chat response."""

    role: str = Field(description="Message role")
    content: Optional[str] = Field(
        default=None, description="Message content (None if tool calls present)"
    )
    tool_calls: Optional[list[ToolCallInvocation]] = Field(
        default=None, description="Tool calls if LLM wants to call tools"
    )


class ToolCall(BaseModel):
    """A tool that was executed during the completion, with its response."""

    name: str = Field(description="Tool name that was called")
    arguments: dict[str, Any] = Field(
        default_factory=dict, description="Arguments passed to the tool"
    )
    response: str = Field(
        default="",
        description="The response or result returned from the tool execution",
    )


class ChatCompletionResponse(BaseModel):
    """Response from chat completion."""

    message: ChatMessageResponse = Field(description="The assistant's message")
    finish_reason: str = Field(
        description="Reason generation stopped: 'stop', 'tool_calls', 'length', 'error'"
    )
    tool_iterations: int = Field(
        default=0, description="Number of tool call iterations performed"
    )
    tool_calls: list[ToolCall] = Field(
        default_factory=list,
        description="List of tool calls that were executed during this completion",
    )
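A hedged sketch, not part of the diff, showing how the response models above nest once a tool call has been executed; the tool argument names and content values are illustrative assumptions only:

# Illustrative sketch only (not part of this diff): a completed response in which
# one search_objects call was executed before the final assistant answer.
example_response = ChatCompletionResponse(
    message=ChatMessageResponse(
        role="assistant", content="One person was seen on the back deck at 3:15 PM."
    ),
    finish_reason="stop",
    tool_iterations=1,
    tool_calls=[
        ToolCall(
            name="search_objects",
            arguments={"cameras": "back_deck_cam", "label": "person"},  # hypothetical arguments
            response='[{"label": "person", "start_time_local": "2024-01-01 15:15:00"}]',
        )
    ],
)
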
22
frigate/api/defs/response/export_case_response.py
Normal file
@@ -0,0 +1,22 @@
from typing import List, Optional

from pydantic import BaseModel, Field


class ExportCaseModel(BaseModel):
    """Model representing a single export case."""

    id: str = Field(description="Unique identifier for the export case")
    name: str = Field(description="Friendly name of the export case")
    description: Optional[str] = Field(
        default=None, description="Optional description of the export case"
    )
    created_at: float = Field(
        description="Unix timestamp when the export case was created"
    )
    updated_at: float = Field(
        description="Unix timestamp when the export case was last updated"
    )


ExportCasesResponse = List[ExportCaseModel]
@@ -15,6 +15,9 @@ class ExportModel(BaseModel):
    in_progress: bool = Field(
        description="Whether the export is currently being processed"
    )
    export_case_id: Optional[str] = Field(
        default=None, description="ID of the export case this export belongs to"
    )


class StartExportResponse(BaseModel):
@@ -3,13 +3,15 @@ from enum import Enum

class Tags(Enum):
    app = "App"
    auth = "Auth"
    camera = "Camera"
    preview = "Preview"
    chat = "Chat"
    events = "Events"
    export = "Export"
    classification = "Classification"
    logs = "Logs"
    media = "Media"
    notifications = "Notifications"
    preview = "Preview"
    recordings = "Recordings"
    review = "Review"
    export = "Export"
    events = "Events"
    classification = "Classification"
    auth = "Auth"

@@ -1772,6 +1772,7 @@ def create_event(
            body.duration,
            "api",
            body.draw,
            body.pre_capture,
        ),
        EventMetadataTypeEnum.manual_event_create.value,
    )

@@ -4,10 +4,10 @@ import logging
|
||||
import random
|
||||
import string
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
from typing import List, Optional
|
||||
|
||||
import psutil
|
||||
from fastapi import APIRouter, Depends, Request
|
||||
from fastapi import APIRouter, Depends, Query, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
from pathvalidate import sanitize_filepath
|
||||
from peewee import DoesNotExist
|
||||
@@ -19,8 +19,20 @@ from frigate.api.auth import (
|
||||
require_camera_access,
|
||||
require_role,
|
||||
)
|
||||
from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
|
||||
from frigate.api.defs.request.export_case_body import (
|
||||
ExportCaseAssignBody,
|
||||
ExportCaseCreateBody,
|
||||
ExportCaseUpdateBody,
|
||||
)
|
||||
from frigate.api.defs.request.export_recordings_body import (
|
||||
ExportRecordingsBody,
|
||||
ExportRecordingsCustomBody,
|
||||
)
|
||||
from frigate.api.defs.request.export_rename_body import ExportRenameBody
|
||||
from frigate.api.defs.response.export_case_response import (
|
||||
ExportCaseModel,
|
||||
ExportCasesResponse,
|
||||
)
|
||||
from frigate.api.defs.response.export_response import (
|
||||
ExportModel,
|
||||
ExportsResponse,
|
||||
@@ -29,9 +41,9 @@ from frigate.api.defs.response.export_response import (
|
||||
from frigate.api.defs.response.generic_response import GenericResponse
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.const import CLIPS_DIR, EXPORT_DIR
|
||||
from frigate.models import Export, Previews, Recordings
|
||||
from frigate.models import Export, ExportCase, Previews, Recordings
|
||||
from frigate.record.export import (
|
||||
PlaybackFactorEnum,
|
||||
DEFAULT_TIME_LAPSE_FFMPEG_ARGS,
|
||||
PlaybackSourceEnum,
|
||||
RecordingExporter,
|
||||
)
|
||||
@@ -52,17 +64,182 @@ router = APIRouter(tags=[Tags.export])
|
||||
)
|
||||
def get_exports(
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
export_case_id: Optional[str] = None,
|
||||
cameras: Optional[str] = Query(default="all"),
|
||||
start_date: Optional[float] = None,
|
||||
end_date: Optional[float] = None,
|
||||
):
|
||||
exports = (
|
||||
Export.select()
|
||||
.where(Export.camera << allowed_cameras)
|
||||
.order_by(Export.date.desc())
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
query = Export.select().where(Export.camera << allowed_cameras)
|
||||
|
||||
if export_case_id is not None:
|
||||
if export_case_id == "unassigned":
|
||||
query = query.where(Export.export_case.is_null(True))
|
||||
else:
|
||||
query = query.where(Export.export_case == export_case_id)
|
||||
|
||||
if cameras and cameras != "all":
|
||||
requested = set(cameras.split(","))
|
||||
filtered_cameras = list(requested.intersection(allowed_cameras))
|
||||
if not filtered_cameras:
|
||||
return JSONResponse(content=[])
|
||||
query = query.where(Export.camera << filtered_cameras)
|
||||
|
||||
if start_date is not None:
|
||||
query = query.where(Export.date >= start_date)
|
||||
|
||||
if end_date is not None:
|
||||
query = query.where(Export.date <= end_date)
|
||||
|
||||
exports = query.order_by(Export.date.desc()).dicts().iterator()
|
||||
return JSONResponse(content=[e for e in exports])
|
||||
|
||||
|
||||
@router.get(
|
||||
"/cases",
|
||||
response_model=ExportCasesResponse,
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
summary="Get export cases",
|
||||
description="Gets all export cases from the database.",
|
||||
)
|
||||
def get_export_cases():
|
||||
cases = (
|
||||
ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator()
|
||||
)
|
||||
return JSONResponse(content=[c for c in cases])
|
||||
|
||||
|
||||
@router.post(
|
||||
"/cases",
|
||||
response_model=ExportCaseModel,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Create export case",
|
||||
description="Creates a new export case.",
|
||||
)
|
||||
def create_export_case(body: ExportCaseCreateBody):
|
||||
case = ExportCase.create(
|
||||
id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)),
|
||||
name=body.name,
|
||||
description=body.description,
|
||||
created_at=Path().stat().st_mtime,
|
||||
updated_at=Path().stat().st_mtime,
|
||||
)
|
||||
return JSONResponse(content=model_to_dict(case))
|
||||
|
||||
|
||||
@router.get(
|
||||
"/cases/{case_id}",
|
||||
response_model=ExportCaseModel,
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
summary="Get a single export case",
|
||||
description="Gets a specific export case by ID.",
|
||||
)
|
||||
def get_export_case(case_id: str):
|
||||
try:
|
||||
case = ExportCase.get(ExportCase.id == case_id)
|
||||
return JSONResponse(content=model_to_dict(case))
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Export case not found"},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
|
||||
@router.patch(
|
||||
"/cases/{case_id}",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Update export case",
|
||||
description="Updates an existing export case.",
|
||||
)
|
||||
def update_export_case(case_id: str, body: ExportCaseUpdateBody):
|
||||
try:
|
||||
case = ExportCase.get(ExportCase.id == case_id)
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Export case not found"},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
if body.name is not None:
|
||||
case.name = body.name
|
||||
if body.description is not None:
|
||||
case.description = body.description
|
||||
|
||||
case.save()
|
||||
|
||||
return JSONResponse(
|
||||
content={"success": True, "message": "Successfully updated export case."}
|
||||
)
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/cases/{case_id}",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Delete export case",
|
||||
description="""Deletes an export case.\n Exports that reference this case will have their export_case set to null.\n """,
|
||||
)
|
||||
def delete_export_case(case_id: str):
|
||||
try:
|
||||
case = ExportCase.get(ExportCase.id == case_id)
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Export case not found"},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
# Unassign exports from this case but keep the exports themselves
|
||||
Export.update(export_case=None).where(Export.export_case == case).execute()
|
||||
|
||||
case.delete_instance()
|
||||
|
||||
return JSONResponse(
|
||||
content={"success": True, "message": "Successfully deleted export case."}
|
||||
)
|
||||
|
||||
|
||||
@router.patch(
|
||||
"/export/{export_id}/case",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Assign export to case",
|
||||
description=(
|
||||
"Assigns an export to a case, or unassigns it if export_case_id is null."
|
||||
),
|
||||
)
|
||||
async def assign_export_case(
|
||||
export_id: str,
|
||||
body: ExportCaseAssignBody,
|
||||
request: Request,
|
||||
):
|
||||
try:
|
||||
export: Export = Export.get(Export.id == export_id)
|
||||
await require_camera_access(export.camera, request=request)
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Export not found."},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
if body.export_case_id is not None:
|
||||
try:
|
||||
ExportCase.get(ExportCase.id == body.export_case_id)
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Export case not found."},
|
||||
status_code=404,
|
||||
)
|
||||
export.export_case = body.export_case_id
|
||||
else:
|
||||
export.export_case = None
|
||||
|
||||
export.save()
|
||||
|
||||
return JSONResponse(
|
||||
content={"success": True, "message": "Successfully updated export case."}
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/export/{camera_name}/start/{start_time}/end/{end_time}",
|
||||
response_model=StartExportResponse,
|
||||
@@ -88,11 +265,20 @@ def export_recording(
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
playback_factor = body.playback
|
||||
playback_source = body.source
|
||||
friendly_name = body.name
|
||||
existing_image = sanitize_filepath(body.image_path) if body.image_path else None
|
||||
|
||||
export_case_id = body.export_case_id
|
||||
if export_case_id is not None:
|
||||
try:
|
||||
ExportCase.get(ExportCase.id == export_case_id)
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Export case not found"},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
# Ensure that existing_image is a valid path
|
||||
if existing_image and not existing_image.startswith(CLIPS_DIR):
|
||||
return JSONResponse(
|
||||
@@ -151,16 +337,12 @@ def export_recording(
|
||||
existing_image,
|
||||
int(start_time),
|
||||
int(end_time),
|
||||
(
|
||||
PlaybackFactorEnum[playback_factor]
|
||||
if playback_factor in PlaybackFactorEnum.__members__.values()
|
||||
else PlaybackFactorEnum.realtime
|
||||
),
|
||||
(
|
||||
PlaybackSourceEnum[playback_source]
|
||||
if playback_source in PlaybackSourceEnum.__members__.values()
|
||||
else PlaybackSourceEnum.recordings
|
||||
),
|
||||
export_case_id,
|
||||
)
|
||||
exporter.start()
|
||||
return JSONResponse(
|
||||
@@ -271,6 +453,138 @@ async def export_delete(event_id: str, request: Request):
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/export/custom/{camera_name}/start/{start_time}/end/{end_time}",
|
||||
response_model=StartExportResponse,
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
summary="Start custom recording export",
|
||||
description="""Starts an export of a recording for the specified time range using custom FFmpeg arguments.
|
||||
The export can be from recordings or preview footage. Returns the export ID if
|
||||
successful, or an error message if the camera is invalid or no recordings/previews
|
||||
are found for the time range. If ffmpeg_input_args and ffmpeg_output_args are not provided,
|
||||
defaults to timelapse export settings.""",
|
||||
)
|
||||
def export_recording_custom(
|
||||
request: Request,
|
||||
camera_name: str,
|
||||
start_time: float,
|
||||
end_time: float,
|
||||
body: ExportRecordingsCustomBody,
|
||||
):
|
||||
if not camera_name or not request.app.frigate_config.cameras.get(camera_name):
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{"success": False, "message": f"{camera_name} is not a valid camera."}
|
||||
),
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
playback_source = body.source
|
||||
friendly_name = body.name
|
||||
existing_image = sanitize_filepath(body.image_path) if body.image_path else None
|
||||
ffmpeg_input_args = body.ffmpeg_input_args
|
||||
ffmpeg_output_args = body.ffmpeg_output_args
|
||||
cpu_fallback = body.cpu_fallback
|
||||
|
||||
export_case_id = body.export_case_id
|
||||
if export_case_id is not None:
|
||||
try:
|
||||
ExportCase.get(ExportCase.id == export_case_id)
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Export case not found"},
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
# Ensure that existing_image is a valid path
|
||||
if existing_image and not existing_image.startswith(CLIPS_DIR):
|
||||
return JSONResponse(
|
||||
content=({"success": False, "message": "Invalid image path"}),
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
if playback_source == "recordings":
|
||||
recordings_count = (
|
||||
Recordings.select()
|
||||
.where(
|
||||
Recordings.start_time.between(start_time, end_time)
|
||||
| Recordings.end_time.between(start_time, end_time)
|
||||
| (
|
||||
(start_time > Recordings.start_time)
|
||||
& (end_time < Recordings.end_time)
|
||||
)
|
||||
)
|
||||
.where(Recordings.camera == camera_name)
|
||||
.count()
|
||||
)
|
||||
|
||||
if recordings_count <= 0:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{"success": False, "message": "No recordings found for time range"}
|
||||
),
|
||||
status_code=400,
|
||||
)
|
||||
else:
|
||||
previews_count = (
|
||||
Previews.select()
|
||||
.where(
|
||||
Previews.start_time.between(start_time, end_time)
|
||||
| Previews.end_time.between(start_time, end_time)
|
||||
| ((start_time > Previews.start_time) & (end_time < Previews.end_time))
|
||||
)
|
||||
.where(Previews.camera == camera_name)
|
||||
.count()
|
||||
)
|
||||
|
||||
if not is_current_hour(start_time) and previews_count <= 0:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{"success": False, "message": "No previews found for time range"}
|
||||
),
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
|
||||
|
||||
# Set default values if not provided (timelapse defaults)
|
||||
if ffmpeg_input_args is None:
|
||||
ffmpeg_input_args = ""
|
||||
|
||||
if ffmpeg_output_args is None:
|
||||
ffmpeg_output_args = DEFAULT_TIME_LAPSE_FFMPEG_ARGS
|
||||
|
||||
exporter = RecordingExporter(
|
||||
request.app.frigate_config,
|
||||
export_id,
|
||||
camera_name,
|
||||
friendly_name,
|
||||
existing_image,
|
||||
int(start_time),
|
||||
int(end_time),
|
||||
(
|
||||
PlaybackSourceEnum[playback_source]
|
||||
if playback_source in PlaybackSourceEnum.__members__.values()
|
||||
else PlaybackSourceEnum.recordings
|
||||
),
|
||||
export_case_id,
|
||||
ffmpeg_input_args,
|
||||
ffmpeg_output_args,
|
||||
cpu_fallback,
|
||||
)
|
||||
exporter.start()
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{
|
||||
"success": True,
|
||||
"message": "Starting export of recording.",
|
||||
"export_id": export_id,
|
||||
}
|
||||
),
|
||||
status_code=200,
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/exports/{export_id}",
|
||||
response_model=ExportModel,
|
||||
|
||||
@@ -16,12 +16,14 @@ from frigate.api import app as main_app
from frigate.api import (
    auth,
    camera,
    chat,
    classification,
    event,
    export,
    media,
    notification,
    preview,
    record,
    review,
)
from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default
@@ -120,6 +122,7 @@ def create_fastapi_app(
    # Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters
    app.include_router(auth.router)
    app.include_router(camera.router)
    app.include_router(chat.router)
    app.include_router(classification.router)
    app.include_router(review.router)
    app.include_router(main_app.router)
@@ -128,6 +131,7 @@ def create_fastapi_app(
    app.include_router(export.router)
    app.include_router(event.router)
    app.include_router(media.router)
    app.include_router(record.router)
    # App Properties
    app.frigate_config = frigate_config
    app.embeddings = embeddings

@@ -8,9 +8,8 @@ import os
|
||||
import subprocess as sp
|
||||
import time
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from functools import reduce
|
||||
from pathlib import Path as FilePath
|
||||
from typing import Any, List
|
||||
from typing import Any
|
||||
from urllib.parse import unquote
|
||||
|
||||
import cv2
|
||||
@@ -19,12 +18,11 @@ import pytz
|
||||
from fastapi import APIRouter, Depends, Path, Query, Request, Response
|
||||
from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
|
||||
from pathvalidate import sanitize_filename
|
||||
from peewee import DoesNotExist, fn, operator
|
||||
from peewee import DoesNotExist, fn
|
||||
from tzlocal import get_localzone_name
|
||||
|
||||
from frigate.api.auth import (
|
||||
allow_any_authenticated,
|
||||
get_allowed_cameras_for_filter,
|
||||
require_camera_access,
|
||||
)
|
||||
from frigate.api.defs.query.media_query_parameters import (
|
||||
@@ -32,8 +30,6 @@ from frigate.api.defs.query.media_query_parameters import (
|
||||
MediaEventsSnapshotQueryParams,
|
||||
MediaLatestFrameQueryParams,
|
||||
MediaMjpegFeedQueryParams,
|
||||
MediaRecordingsAvailabilityQueryParams,
|
||||
MediaRecordingsSummaryQueryParams,
|
||||
)
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.camera.state import CameraState
|
||||
@@ -44,13 +40,12 @@ from frigate.const import (
|
||||
INSTALL_DIR,
|
||||
MAX_SEGMENT_DURATION,
|
||||
PREVIEW_FRAME_TYPE,
|
||||
RECORD_DIR,
|
||||
)
|
||||
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
|
||||
from frigate.output.preview import get_most_recent_preview_frame
|
||||
from frigate.track.object_processing import TrackedObjectProcessor
|
||||
from frigate.util.file import get_event_thumbnail_bytes
|
||||
from frigate.util.image import get_image_from_recording
|
||||
from frigate.util.time import get_dst_transitions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -131,7 +126,9 @@ async def camera_ptz_info(request: Request, camera_name: str):
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{camera_name}/latest.{extension}", dependencies=[Depends(require_camera_access)]
|
||||
"/{camera_name}/latest.{extension}",
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). Falls back to preview frames if the camera is offline.",
|
||||
)
|
||||
async def latest_frame(
|
||||
request: Request,
|
||||
@@ -165,20 +162,37 @@ async def latest_frame(
|
||||
or 10
|
||||
)
|
||||
|
||||
is_offline = False
|
||||
if frame is None or datetime.now().timestamp() > (
|
||||
frame_processor.get_current_frame_time(camera_name) + retry_interval
|
||||
):
|
||||
if request.app.camera_error_image is None:
|
||||
error_image = glob.glob(
|
||||
os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
|
||||
)
|
||||
last_frame_time = frame_processor.get_current_frame_time(camera_name)
|
||||
preview_path = get_most_recent_preview_frame(
|
||||
camera_name, before=last_frame_time
|
||||
)
|
||||
|
||||
if len(error_image) > 0:
|
||||
request.app.camera_error_image = cv2.imread(
|
||||
error_image[0], cv2.IMREAD_UNCHANGED
|
||||
if preview_path:
|
||||
logger.debug(f"Using most recent preview frame for {camera_name}")
|
||||
frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED)
|
||||
|
||||
if frame is not None:
|
||||
is_offline = True
|
||||
|
||||
if frame is None or not is_offline:
|
||||
logger.debug(
|
||||
f"No live or preview frame available for {camera_name}. Using error image."
|
||||
)
|
||||
if request.app.camera_error_image is None:
|
||||
error_image = glob.glob(
|
||||
os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
|
||||
)
|
||||
|
||||
frame = request.app.camera_error_image
|
||||
if len(error_image) > 0:
|
||||
request.app.camera_error_image = cv2.imread(
|
||||
error_image[0], cv2.IMREAD_UNCHANGED
|
||||
)
|
||||
|
||||
frame = request.app.camera_error_image
|
||||
|
||||
height = int(params.height or str(frame.shape[0]))
|
||||
width = int(height * frame.shape[1] / frame.shape[0])
|
||||
@@ -200,14 +214,18 @@ async def latest_frame(
|
||||
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
|
||||
|
||||
_, img = cv2.imencode(f".{extension.value}", frame, quality_params)
|
||||
|
||||
headers = {
|
||||
"Cache-Control": "no-store" if not params.store else "private, max-age=60",
|
||||
}
|
||||
|
||||
if is_offline:
|
||||
headers["X-Frigate-Offline"] = "true"
|
||||
|
||||
return Response(
|
||||
content=img.tobytes(),
|
||||
media_type=extension.get_mime_type(),
|
||||
headers={
|
||||
"Cache-Control": "no-store"
|
||||
if not params.store
|
||||
else "private, max-age=60",
|
||||
},
|
||||
headers=headers,
|
||||
)
|
||||
elif (
|
||||
camera_name == "birdseye"
|
||||
@@ -397,333 +415,6 @@ async def submit_recording_snapshot_to_plus(
|
||||
)
|
||||
|
||||
|
||||
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
|
||||
def get_recordings_storage_usage(request: Request):
|
||||
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
|
||||
"storage"
|
||||
][RECORD_DIR]
|
||||
|
||||
if not recording_stats:
|
||||
return JSONResponse({})
|
||||
|
||||
total_mb = recording_stats["total"]
|
||||
|
||||
camera_usages: dict[str, dict] = (
|
||||
request.app.storage_maintainer.calculate_camera_usages()
|
||||
)
|
||||
|
||||
for camera_name in camera_usages.keys():
|
||||
if camera_usages.get(camera_name, {}).get("usage"):
|
||||
camera_usages[camera_name]["usage_percent"] = (
|
||||
camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
|
||||
) * 100
|
||||
|
||||
return JSONResponse(content=camera_usages)
|
||||
|
||||
|
||||
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
|
||||
def all_recordings_summary(
|
||||
request: Request,
|
||||
params: MediaRecordingsSummaryQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Returns true/false by day indicating if recordings exist"""
|
||||
|
||||
cameras = params.cameras
|
||||
if cameras != "all":
|
||||
requested = set(unquote(cameras).split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
if not filtered:
|
||||
return JSONResponse(content={})
|
||||
camera_list = list(filtered)
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
time_range_query = (
|
||||
Recordings.select(
|
||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||
)
|
||||
.where(Recordings.camera << camera_list)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content={})
|
||||
|
||||
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
|
||||
|
||||
days: dict[str, bool] = {}
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
period_query = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("day")
|
||||
)
|
||||
.where(
|
||||
(Recordings.camera << camera_list)
|
||||
& (Recordings.end_time >= period_start)
|
||||
& (Recordings.start_time <= period_end)
|
||||
)
|
||||
.group_by(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
)
|
||||
)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
for g in period_query:
|
||||
days[g.day] = True
|
||||
|
||||
return JSONResponse(content=dict(sorted(days.items())))
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
|
||||
)
|
||||
async def recordings_summary(camera_name: str, timezone: str = "utc"):
|
||||
"""Returns hourly summary for recordings of given camera"""
|
||||
|
||||
time_range_query = (
|
||||
Recordings.select(
|
||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||
)
|
||||
.where(Recordings.camera == camera_name)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
days: dict[str, dict] = {}
|
||||
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content=list(days.values()))
|
||||
|
||||
dst_periods = get_dst_transitions(timezone, min_time, max_time)
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
recording_groups = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.SUM(Recordings.duration).alias("duration"),
|
||||
fn.SUM(Recordings.motion).alias("motion"),
|
||||
fn.SUM(Recordings.objects).alias("objects"),
|
||||
)
|
||||
.where(
|
||||
(Recordings.camera == camera_name)
|
||||
& (Recordings.end_time >= period_start)
|
||||
& (Recordings.start_time <= period_end)
|
||||
)
|
||||
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
event_groups = (
|
||||
Event.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Event.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.COUNT(Event.id).alias("count"),
|
||||
)
|
||||
.where(Event.camera == camera_name, Event.has_clip)
|
||||
.where(
|
||||
(Event.start_time >= period_start) & (Event.start_time <= period_end)
|
||||
)
|
||||
.group_by((Event.start_time + period_offset).cast("int") / 3600)
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
event_map = {g.hour: g.count for g in event_groups}
|
||||
|
||||
for recording_group in recording_groups:
|
||||
parts = recording_group.hour.split()
|
||||
hour = parts[1]
|
||||
day = parts[0]
|
||||
events_count = event_map.get(recording_group.hour, 0)
|
||||
hour_data = {
|
||||
"hour": hour,
|
||||
"events": events_count,
|
||||
"motion": recording_group.motion,
|
||||
"objects": recording_group.objects,
|
||||
"duration": round(recording_group.duration),
|
||||
}
|
||||
if day in days:
|
||||
# merge counts if already present (edge-case at DST boundary)
|
||||
days[day]["events"] += events_count or 0
|
||||
days[day]["hours"].append(hour_data)
|
||||
else:
|
||||
days[day] = {
|
||||
"events": events_count or 0,
|
||||
"hours": [hour_data],
|
||||
"day": day,
|
||||
}
|
||||
|
||||
return JSONResponse(content=list(days.values()))
|
||||
|
||||
|
||||
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
|
||||
async def recordings(
|
||||
camera_name: str,
|
||||
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
|
||||
before: float = datetime.now().timestamp(),
|
||||
):
|
||||
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
|
||||
recordings = (
|
||||
Recordings.select(
|
||||
Recordings.id,
|
||||
Recordings.start_time,
|
||||
Recordings.end_time,
|
||||
Recordings.segment_size,
|
||||
Recordings.motion,
|
||||
Recordings.objects,
|
||||
Recordings.duration,
|
||||
)
|
||||
.where(
|
||||
Recordings.camera == camera_name,
|
||||
Recordings.end_time >= after,
|
||||
Recordings.start_time <= before,
|
||||
)
|
||||
.order_by(Recordings.start_time)
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
return JSONResponse(content=list(recordings))
|
||||
|
||||
|
||||
@router.get(
|
||||
"/recordings/unavailable",
|
||||
response_model=list[dict],
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
async def no_recordings(
|
||||
request: Request,
|
||||
params: MediaRecordingsAvailabilityQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Get time ranges with no recordings."""
|
||||
cameras = params.cameras
|
||||
if cameras != "all":
|
||||
requested = set(unquote(cameras).split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
if not filtered:
|
||||
return JSONResponse(content=[])
|
||||
cameras = ",".join(filtered)
|
||||
else:
|
||||
cameras = allowed_cameras
|
||||
|
||||
before = params.before or datetime.datetime.now().timestamp()
|
||||
after = (
|
||||
params.after
|
||||
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
|
||||
)
|
||||
scale = params.scale
|
||||
|
||||
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
|
||||
if cameras != "all":
|
||||
camera_list = cameras.split(",")
|
||||
clauses.append((Recordings.camera << camera_list))
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
# Get recording start times
|
||||
data: list[Recordings] = (
|
||||
Recordings.select(Recordings.start_time, Recordings.end_time)
|
||||
.where(reduce(operator.and_, clauses))
|
||||
.order_by(Recordings.start_time.asc())
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
# Convert recordings to list of (start, end) tuples
|
||||
recordings = [(r["start_time"], r["end_time"]) for r in data]
|
||||
|
||||
# Iterate through time segments and check if each has any recording
|
||||
no_recording_segments = []
|
||||
current = after
|
||||
current_gap_start = None
|
||||
|
||||
while current < before:
|
||||
segment_end = min(current + scale, before)
|
||||
|
||||
# Check if this segment overlaps with any recording
|
||||
has_recording = any(
|
||||
rec_start < segment_end and rec_end > current
|
||||
for rec_start, rec_end in recordings
|
||||
)
|
||||
|
||||
if not has_recording:
|
||||
# This segment has no recordings
|
||||
if current_gap_start is None:
|
||||
current_gap_start = current # Start a new gap
|
||||
else:
|
||||
# This segment has recordings
|
||||
if current_gap_start is not None:
|
||||
# End the current gap and append it
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(current)}
|
||||
)
|
||||
current_gap_start = None
|
||||
|
||||
current = segment_end
|
||||
|
||||
# Append the last gap if it exists
|
||||
if current_gap_start is not None:
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(before)}
|
||||
)
|
||||
|
||||
return JSONResponse(content=no_recording_segments)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
@@ -1070,7 +761,7 @@ async def event_snapshot(
|
||||
if event_id in camera_state.tracked_objects:
|
||||
tracked_obj = camera_state.tracked_objects.get(event_id)
|
||||
if tracked_obj is not None:
|
||||
jpg_bytes = tracked_obj.get_img_bytes(
|
||||
jpg_bytes, frame_time = tracked_obj.get_img_bytes(
|
||||
ext="jpg",
|
||||
timestamp=params.timestamp,
|
||||
bounding_box=params.bbox,
|
||||
@@ -1099,6 +790,7 @@ async def event_snapshot(
|
||||
headers = {
|
||||
"Content-Type": "image/jpeg",
|
||||
"Cache-Control": "private, max-age=31536000" if event_complete else "no-store",
|
||||
"X-Frame-Time": frame_time,
|
||||
}
|
||||
|
||||
if params.download:
|
||||
|
||||
479
frigate/api/record.py
Normal file
@@ -0,0 +1,479 @@
|
||||
"""Recording APIs."""
|
||||
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from functools import reduce
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
from urllib.parse import unquote
|
||||
|
||||
from fastapi import APIRouter, Depends, Request
|
||||
from fastapi import Path as PathParam
|
||||
from fastapi.responses import JSONResponse
|
||||
from peewee import fn, operator
|
||||
|
||||
from frigate.api.auth import (
|
||||
allow_any_authenticated,
|
||||
get_allowed_cameras_for_filter,
|
||||
require_camera_access,
|
||||
require_role,
|
||||
)
|
||||
from frigate.api.defs.query.recordings_query_parameters import (
|
||||
MediaRecordingsAvailabilityQueryParams,
|
||||
MediaRecordingsSummaryQueryParams,
|
||||
RecordingsDeleteQueryParams,
|
||||
)
|
||||
from frigate.api.defs.response.generic_response import GenericResponse
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.const import RECORD_DIR
|
||||
from frigate.models import Event, Recordings
|
||||
from frigate.util.time import get_dst_transitions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(tags=[Tags.recordings])
|
||||
|
||||
|
||||
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
|
||||
def get_recordings_storage_usage(request: Request):
|
||||
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
|
||||
"storage"
|
||||
][RECORD_DIR]
|
||||
|
||||
if not recording_stats:
|
||||
return JSONResponse({})
|
||||
|
||||
total_mb = recording_stats["total"]
|
||||
|
||||
camera_usages: dict[str, dict] = (
|
||||
request.app.storage_maintainer.calculate_camera_usages()
|
||||
)
|
||||
|
||||
for camera_name in camera_usages.keys():
|
||||
if camera_usages.get(camera_name, {}).get("usage"):
|
||||
camera_usages[camera_name]["usage_percent"] = (
|
||||
camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
|
||||
) * 100
|
||||
|
||||
return JSONResponse(content=camera_usages)
|
||||
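The storage endpoint above derives each camera's share of the recordings volume from the maintainer's per-camera usage figures. A minimal sketch of the percentage calculation with made-up numbers (the dict shape mirrors calculate_camera_usages(); the values are illustrative):

total_mb = 500_000
camera_usages = {"driveway": {"usage": 125_000}, "porch": {"usage": 0}}

for name, info in camera_usages.items():
    if info.get("usage"):
        info["usage_percent"] = info["usage"] / total_mb * 100

print(camera_usages["driveway"].get("usage_percent"))  # 25.0
print("usage_percent" in camera_usages["porch"])       # False: zero-usage cameras are skipped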
|
||||
|
||||
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
|
||||
def all_recordings_summary(
|
||||
request: Request,
|
||||
params: MediaRecordingsSummaryQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Returns true/false by day indicating if recordings exist"""
|
||||
|
||||
cameras = params.cameras
|
||||
if cameras != "all":
|
||||
requested = set(unquote(cameras).split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
if not filtered:
|
||||
return JSONResponse(content={})
|
||||
camera_list = list(filtered)
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
time_range_query = (
|
||||
Recordings.select(
|
||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||
)
|
||||
.where(Recordings.camera << camera_list)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content={})
|
||||
|
||||
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
|
||||
|
||||
days: dict[str, bool] = {}
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
period_query = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("day")
|
||||
)
|
||||
.where(
|
||||
(Recordings.camera << camera_list)
|
||||
& (Recordings.end_time >= period_start)
|
||||
& (Recordings.start_time <= period_end)
|
||||
)
|
||||
.group_by(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
)
|
||||
)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
for g in period_query:
|
||||
days[g.day] = True
|
||||
|
||||
return JSONResponse(content=dict(sorted(days.items())))
|
||||
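all_recordings_summary groups rows per DST period by converting each period's UTC offset into SQLite datetime() modifiers. A small sketch of that conversion (the offsets shown are only example timezones):

def offset_modifiers(period_offset: int) -> tuple[str, str]:
    """Convert a UTC offset in seconds into SQLite datetime() modifiers."""
    # int() truncates toward zero, so both parts keep the sign of the offset
    hours_offset = int(period_offset / 60 / 60)
    minutes_offset = int(period_offset / 60 - hours_offset * 60)
    return f"{hours_offset} hour", f"{minutes_offset} minute"

print(offset_modifiers(-18000))  # US Eastern (EST):    ('-5 hour', '0 minute')
print(offset_modifiers(19800))   # India (IST):         ('5 hour', '30 minute')
print(offset_modifiers(-12600))  # Newfoundland (NST):  ('-3 hour', '-30 minute')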
|
||||
|
||||
@router.get(
|
||||
"/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
|
||||
)
|
||||
async def recordings_summary(camera_name: str, timezone: str = "utc"):
|
||||
"""Returns hourly summary for recordings of given camera"""
|
||||
|
||||
time_range_query = (
|
||||
Recordings.select(
|
||||
fn.MIN(Recordings.start_time).alias("min_time"),
|
||||
fn.MAX(Recordings.start_time).alias("max_time"),
|
||||
)
|
||||
.where(Recordings.camera == camera_name)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
min_time = time_range_query.get("min_time")
|
||||
max_time = time_range_query.get("max_time")
|
||||
|
||||
days: dict[str, dict] = {}
|
||||
|
||||
if min_time is None or max_time is None:
|
||||
return JSONResponse(content=list(days.values()))
|
||||
|
||||
dst_periods = get_dst_transitions(timezone, min_time, max_time)
|
||||
|
||||
for period_start, period_end, period_offset in dst_periods:
|
||||
hours_offset = int(period_offset / 60 / 60)
|
||||
minutes_offset = int(period_offset / 60 - hours_offset * 60)
|
||||
period_hour_modifier = f"{hours_offset} hour"
|
||||
period_minute_modifier = f"{minutes_offset} minute"
|
||||
|
||||
recording_groups = (
|
||||
Recordings.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Recordings.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.SUM(Recordings.duration).alias("duration"),
|
||||
fn.SUM(Recordings.motion).alias("motion"),
|
||||
fn.SUM(Recordings.objects).alias("objects"),
|
||||
)
|
||||
.where(
|
||||
(Recordings.camera == camera_name)
|
||||
& (Recordings.end_time >= period_start)
|
||||
& (Recordings.start_time <= period_end)
|
||||
)
|
||||
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
|
||||
.order_by(Recordings.start_time.desc())
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
event_groups = (
|
||||
Event.select(
|
||||
fn.strftime(
|
||||
"%Y-%m-%d %H",
|
||||
fn.datetime(
|
||||
Event.start_time,
|
||||
"unixepoch",
|
||||
period_hour_modifier,
|
||||
period_minute_modifier,
|
||||
),
|
||||
).alias("hour"),
|
||||
fn.COUNT(Event.id).alias("count"),
|
||||
)
|
||||
.where(Event.camera == camera_name, Event.has_clip)
|
||||
.where(
|
||||
(Event.start_time >= period_start) & (Event.start_time <= period_end)
|
||||
)
|
||||
.group_by((Event.start_time + period_offset).cast("int") / 3600)
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
event_map = {g.hour: g.count for g in event_groups}
|
||||
|
||||
for recording_group in recording_groups:
|
||||
parts = recording_group.hour.split()
|
||||
hour = parts[1]
|
||||
day = parts[0]
|
||||
events_count = event_map.get(recording_group.hour, 0)
|
||||
hour_data = {
|
||||
"hour": hour,
|
||||
"events": events_count,
|
||||
"motion": recording_group.motion,
|
||||
"objects": recording_group.objects,
|
||||
"duration": round(recording_group.duration),
|
||||
}
|
||||
if day in days:
|
||||
# merge counts if already present (edge-case at DST boundary)
|
||||
days[day]["events"] += events_count or 0
|
||||
days[day]["hours"].append(hour_data)
|
||||
else:
|
||||
days[day] = {
|
||||
"events": events_count or 0,
|
||||
"hours": [hour_data],
|
||||
"day": day,
|
||||
}
|
||||
|
||||
return JSONResponse(content=list(days.values()))
|
||||
|
||||
|
||||
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
|
||||
async def recordings(
|
||||
camera_name: str,
|
||||
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
|
||||
before: float = datetime.now().timestamp(),
|
||||
):
|
||||
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
|
||||
recordings = (
|
||||
Recordings.select(
|
||||
Recordings.id,
|
||||
Recordings.start_time,
|
||||
Recordings.end_time,
|
||||
Recordings.segment_size,
|
||||
Recordings.motion,
|
||||
Recordings.objects,
|
||||
Recordings.duration,
|
||||
)
|
||||
.where(
|
||||
Recordings.camera == camera_name,
|
||||
Recordings.end_time >= after,
|
||||
Recordings.start_time <= before,
|
||||
)
|
||||
.order_by(Recordings.start_time)
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
return JSONResponse(content=list(recordings))
|
||||
|
||||
|
||||
@router.get(
|
||||
"/recordings/unavailable",
|
||||
response_model=list[dict],
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
async def no_recordings(
|
||||
request: Request,
|
||||
params: MediaRecordingsAvailabilityQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Get time ranges with no recordings."""
|
||||
cameras = params.cameras
|
||||
if cameras != "all":
|
||||
requested = set(unquote(cameras).split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
if not filtered:
|
||||
return JSONResponse(content=[])
|
||||
cameras = ",".join(filtered)
|
||||
else:
|
||||
cameras = allowed_cameras
|
||||
|
||||
before = params.before or datetime.datetime.now().timestamp()
|
||||
after = (
|
||||
params.after
|
||||
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
|
||||
)
|
||||
scale = params.scale
|
||||
|
||||
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
|
||||
if cameras != "all":
|
||||
camera_list = cameras.split(",")
|
||||
clauses.append((Recordings.camera << camera_list))
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
# Get recording start times
|
||||
data: list[Recordings] = (
|
||||
Recordings.select(Recordings.start_time, Recordings.end_time)
|
||||
.where(reduce(operator.and_, clauses))
|
||||
.order_by(Recordings.start_time.asc())
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
# Convert recordings to list of (start, end) tuples
|
||||
recordings = [(r["start_time"], r["end_time"]) for r in data]
|
||||
|
||||
# Iterate through time segments and check if each has any recording
|
||||
no_recording_segments = []
|
||||
current = after
|
||||
current_gap_start = None
|
||||
|
||||
while current < before:
|
||||
segment_end = min(current + scale, before)
|
||||
|
||||
# Check if this segment overlaps with any recording
|
||||
has_recording = any(
|
||||
rec_start < segment_end and rec_end > current
|
||||
for rec_start, rec_end in recordings
|
||||
)
|
||||
|
||||
if not has_recording:
|
||||
# This segment has no recordings
|
||||
if current_gap_start is None:
|
||||
current_gap_start = current # Start a new gap
|
||||
else:
|
||||
# This segment has recordings
|
||||
if current_gap_start is not None:
|
||||
# End the current gap and append it
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(current)}
|
||||
)
|
||||
current_gap_start = None
|
||||
|
||||
current = segment_end
|
||||
|
||||
# Append the last gap if it exists
|
||||
if current_gap_start is not None:
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(before)}
|
||||
)
|
||||
|
||||
return JSONResponse(content=no_recording_segments)
|
||||
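The /recordings/unavailable handler above scans the requested window in scale-sized steps and coalesces uncovered segments into gap ranges. A standalone sketch of the same merging logic (find_gaps and the sample timestamps are illustrative, not part of the API):

def find_gaps(recordings, after, before, scale):
    """Return [{'start_time', 'end_time'}] for stretches with no recordings."""
    gaps = []
    current = after
    gap_start = None
    while current < before:
        segment_end = min(current + scale, before)
        covered = any(s < segment_end and e > current for s, e in recordings)
        if not covered and gap_start is None:
            gap_start = current  # open a new gap
        elif covered and gap_start is not None:
            gaps.append({"start_time": int(gap_start), "end_time": int(current)})
            gap_start = None  # close the gap at the first covered segment
        current = segment_end
    if gap_start is not None:
        gaps.append({"start_time": int(gap_start), "end_time": int(before)})
    return gaps

# e.g. one recording from t=100 to t=200 inside a 0..400 window, 50s segments
print(find_gaps([(100, 200)], after=0, before=400, scale=50))
# -> [{'start_time': 0, 'end_time': 100}, {'start_time': 200, 'end_time': 400}]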
|
||||
|
||||
@router.delete(
|
||||
"/recordings/start/{start}/end/{end}",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Delete recordings",
|
||||
description="""Deletes recordings within the specified time range.
|
||||
Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
|
||||
""",
|
||||
)
|
||||
async def delete_recordings(
|
||||
start: float = PathParam(..., description="Start timestamp (unix)"),
|
||||
end: float = PathParam(..., description="End timestamp (unix)"),
|
||||
params: RecordingsDeleteQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Delete recordings in the specified time range."""
|
||||
if start >= end:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Start time must be less than end time.",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
cameras = params.cameras
|
||||
|
||||
if cameras != "all":
|
||||
requested = set(cameras.split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
|
||||
if not filtered:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "No valid cameras found in the request.",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
camera_list = list(filtered)
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
# Parse keep parameter
|
||||
keep_set = set()
|
||||
|
||||
if params.keep:
|
||||
keep_set = set(params.keep.split(","))
|
||||
|
||||
# Build query to find overlapping recordings
|
||||
clauses = [
|
||||
(
|
||||
Recordings.start_time.between(start, end)
|
||||
| Recordings.end_time.between(start, end)
|
||||
| ((start > Recordings.start_time) & (end < Recordings.end_time))
|
||||
),
|
||||
(Recordings.camera << camera_list),
|
||||
]
|
||||
|
||||
keep_clauses = []
|
||||
|
||||
if "motion" in keep_set:
|
||||
keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
|
||||
|
||||
if "object" in keep_set:
|
||||
keep_clauses.append(
|
||||
Recordings.objects.is_null(False) & (Recordings.objects > 0)
|
||||
)
|
||||
|
||||
if "audio" in keep_set:
|
||||
keep_clauses.append(Recordings.dBFS.is_null(False))
|
||||
|
||||
if keep_clauses:
|
||||
keep_condition = reduce(operator.or_, keep_clauses)
|
||||
clauses.append(~keep_condition)
|
||||
|
||||
recordings_to_delete = (
|
||||
Recordings.select(Recordings.id, Recordings.path)
|
||||
.where(reduce(operator.and_, clauses))
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
recording_ids = []
|
||||
deleted_count = 0
|
||||
error_count = 0
|
||||
|
||||
for recording in recordings_to_delete:
|
||||
recording_ids.append(recording["id"])
|
||||
|
||||
try:
|
||||
Path(recording["path"]).unlink(missing_ok=True)
|
||||
deleted_count += 1
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to delete recording file {recording['path']}: {e}")
|
||||
error_count += 1
|
||||
|
||||
if recording_ids:
|
||||
max_deletes = 100000
|
||||
recording_ids_list = list(recording_ids)
|
||||
|
||||
for i in range(0, len(recording_ids_list), max_deletes):
|
||||
Recordings.delete().where(
|
||||
Recordings.id << recording_ids_list[i : i + max_deletes]
|
||||
).execute()
|
||||
|
||||
message = f"Successfully deleted {deleted_count} recording(s)."
|
||||
|
||||
if error_count > 0:
|
||||
message += f" {error_count} file deletion error(s) occurred."
|
||||
|
||||
return JSONResponse(
|
||||
content={"success": True, "message": message},
|
||||
status_code=200,
|
||||
)
|
||||
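delete_recordings builds the keep filter by OR-ing the requested attributes and then negating the whole expression, so any recording matching at least one keep condition survives. A plain-Python sketch of the equivalent predicate (should_delete is an illustrative helper, not Frigate code):

from functools import reduce
import operator

def should_delete(rec: dict, keep: set[str]) -> bool:
    """Mirror of the peewee keep-clause logic on plain dicts (illustrative only)."""
    keep_checks = []
    if "motion" in keep:
        keep_checks.append(rec.get("motion") is not None and rec["motion"] > 0)
    if "object" in keep:
        keep_checks.append(rec.get("objects") is not None and rec["objects"] > 0)
    if "audio" in keep:
        keep_checks.append(rec.get("dBFS") is not None)
    # delete unless at least one "keep" condition matches
    return not (reduce(operator.or_, keep_checks) if keep_checks else False)

print(should_delete({"motion": 5, "objects": 0, "dBFS": None}, {"motion"}))  # False (kept)
print(should_delete({"motion": 0, "objects": 0, "dBFS": None}, {"motion"}))  # True (deleted)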
@@ -19,6 +19,8 @@ class CameraMetrics:
|
||||
process_pid: Synchronized
|
||||
capture_process_pid: Synchronized
|
||||
ffmpeg_pid: Synchronized
|
||||
reconnects_last_hour: Synchronized
|
||||
stalls_last_hour: Synchronized
|
||||
|
||||
def __init__(self, manager: SyncManager):
|
||||
self.camera_fps = manager.Value("d", 0)
|
||||
@@ -35,6 +37,8 @@ class CameraMetrics:
|
||||
self.process_pid = manager.Value("i", 0)
|
||||
self.capture_process_pid = manager.Value("i", 0)
|
||||
self.ffmpeg_pid = manager.Value("i", 0)
|
||||
self.reconnects_last_hour = manager.Value("i", 0)
|
||||
self.stalls_last_hour = manager.Value("i", 0)
|
||||
|
||||
|
||||
class PTZMetrics:
|
||||
|
||||
@@ -28,6 +28,7 @@ from frigate.const import (
|
||||
UPDATE_CAMERA_ACTIVITY,
|
||||
UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
|
||||
UPDATE_EVENT_DESCRIPTION,
|
||||
UPDATE_JOB_STATE,
|
||||
UPDATE_MODEL_STATE,
|
||||
UPDATE_REVIEW_DESCRIPTION,
|
||||
UPSERT_REVIEW_SEGMENT,
|
||||
@@ -60,6 +61,7 @@ class Dispatcher:
|
||||
self.camera_activity = CameraActivityManager(config, self.publish)
|
||||
self.audio_activity = AudioActivityManager(config, self.publish)
|
||||
self.model_state: dict[str, ModelStatusTypesEnum] = {}
|
||||
self.job_state: dict[str, dict[str, Any]] = {} # {job_type: job_data}
|
||||
self.embeddings_reindex: dict[str, Any] = {}
|
||||
self.birdseye_layout: dict[str, Any] = {}
|
||||
self.audio_transcription_state: str = "idle"
|
||||
@@ -180,6 +182,19 @@ class Dispatcher:
|
||||
def handle_model_state() -> None:
|
||||
self.publish("model_state", json.dumps(self.model_state.copy()))
|
||||
|
||||
def handle_update_job_state() -> None:
|
||||
if payload and isinstance(payload, dict):
|
||||
job_type = payload.get("job_type")
|
||||
if job_type:
|
||||
self.job_state[job_type] = payload
|
||||
self.publish(
|
||||
"job_state",
|
||||
json.dumps(self.job_state),
|
||||
)
|
||||
|
||||
def handle_job_state() -> None:
|
||||
self.publish("job_state", json.dumps(self.job_state.copy()))
|
||||
|
||||
def handle_update_audio_transcription_state() -> None:
|
||||
if payload:
|
||||
self.audio_transcription_state = payload
|
||||
@@ -277,6 +292,7 @@ class Dispatcher:
|
||||
UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
|
||||
UPDATE_REVIEW_DESCRIPTION: handle_update_review_description,
|
||||
UPDATE_MODEL_STATE: handle_update_model_state,
|
||||
UPDATE_JOB_STATE: handle_update_job_state,
|
||||
UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
|
||||
UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout,
|
||||
UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state,
|
||||
@@ -284,6 +300,7 @@ class Dispatcher:
|
||||
"restart": handle_restart,
|
||||
"embeddingsReindexProgress": handle_embeddings_reindex_progress,
|
||||
"modelState": handle_model_state,
|
||||
"jobState": handle_job_state,
|
||||
"audioTranscriptionState": handle_audio_transcription_state,
|
||||
"birdseyeLayout": handle_birdseye_layout,
|
||||
"onConnect": handle_on_connect,
|
||||
|
||||
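The dispatcher changes above add an UPDATE_JOB_STATE topic keyed by job_type and a jobState replay handler for newly connected WebSocket clients. A sketch of the payload that flows through it (field values are illustrative; the shape follows the Job.to_dict() introduced later in this diff):

payload = {
    "id": "a1b2c3d4e5f6",
    "job_type": "media_sync",
    "status": "running",
    "results": None,
    "start_time": 1717171717.0,
    "end_time": None,
    "error_message": None,
}
# handle_update_job_state() stores this as self.job_state["media_sync"] and
# republishes the full mapping on "job_state"; the "jobState" command replays it.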
@@ -8,6 +8,7 @@ from .config import * # noqa: F403
|
||||
from .database import * # noqa: F403
|
||||
from .logger import * # noqa: F403
|
||||
from .mqtt import * # noqa: F403
|
||||
from .network import * # noqa: F403
|
||||
from .proxy import * # noqa: F403
|
||||
from .telemetry import * # noqa: F403
|
||||
from .tls import * # noqa: F403
|
||||
|
||||
@@ -14,6 +14,7 @@ class GenAIProviderEnum(str, Enum):
|
||||
azure_openai = "azure_openai"
|
||||
gemini = "gemini"
|
||||
ollama = "ollama"
|
||||
llamacpp = "llamacpp"
|
||||
|
||||
|
||||
class GenAIConfig(FrigateBaseModel):
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
from typing import Optional, Union
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
@@ -19,8 +19,6 @@ __all__ = [
|
||||
"RetainModeEnum",
|
||||
]
|
||||
|
||||
DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30"
|
||||
|
||||
|
||||
class RecordRetainConfig(FrigateBaseModel):
|
||||
days: float = Field(default=0, ge=0, title="Default retention period.")
|
||||
@@ -67,16 +65,13 @@ class RecordPreviewConfig(FrigateBaseModel):
|
||||
|
||||
|
||||
class RecordExportConfig(FrigateBaseModel):
|
||||
timelapse_args: str = Field(
|
||||
default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args"
|
||||
hwaccel_args: Union[str, list[str]] = Field(
|
||||
default="auto", title="Export-specific FFmpeg hardware acceleration arguments."
|
||||
)
|
||||
|
||||
|
||||
class RecordConfig(FrigateBaseModel):
|
||||
enabled: bool = Field(default=False, title="Enable record on all cameras.")
|
||||
sync_recordings: bool = Field(
|
||||
default=False, title="Sync recordings with disk on startup and once a day."
|
||||
)
|
||||
expire_interval: int = Field(
|
||||
default=60,
|
||||
title="Number of minutes to wait between cleanup runs.",
|
||||
|
||||
@@ -525,6 +525,14 @@ class FrigateConfig(FrigateBaseModel):
|
||||
if camera_config.ffmpeg.hwaccel_args == "auto":
|
||||
camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args
|
||||
|
||||
# Resolve export hwaccel_args: camera export -> camera ffmpeg -> global ffmpeg
|
||||
# This allows per-camera override for exports (e.g., when camera resolution
|
||||
# exceeds hardware encoder limits)
|
||||
if camera_config.record.export.hwaccel_args == "auto":
|
||||
camera_config.record.export.hwaccel_args = (
|
||||
camera_config.ffmpeg.hwaccel_args
|
||||
)
|
||||
|
||||
for input in camera_config.ffmpeg.inputs:
|
||||
need_detect_dimensions = "detect" in input.roles and (
|
||||
camera_config.detect.height is None
|
||||
|
||||
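The export resolution above lets record.export.hwaccel_args fall back through the "auto" sentinel to the camera's (and therefore the global) ffmpeg hwaccel_args, so exports can override hardware encoding only where needed. A compressed sketch of that fallback (resolve_export_hwaccel is an illustrative helper; the preset names are only examples):

def resolve_export_hwaccel(export_args, camera_ffmpeg_args):
    # "auto" means: inherit whatever the camera's ffmpeg hwaccel_args resolved to
    return camera_ffmpeg_args if export_args == "auto" else export_args

print(resolve_export_hwaccel("auto", "preset-vaapi"))                 # inherits camera/global value
print(resolve_export_hwaccel("preset-nvidia-h264", "preset-vaapi"))  # explicit export override wins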
@@ -1,13 +1,27 @@
|
||||
from typing import Union
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from .base import FrigateBaseModel
|
||||
|
||||
__all__ = ["IPv6Config", "NetworkingConfig"]
|
||||
__all__ = ["IPv6Config", "ListenConfig", "NetworkingConfig"]
|
||||
|
||||
|
||||
class IPv6Config(FrigateBaseModel):
|
||||
enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971")
|
||||
|
||||
|
||||
class ListenConfig(FrigateBaseModel):
|
||||
internal: Union[int, str] = Field(
|
||||
default=5000, title="Internal listening port for Frigate"
|
||||
)
|
||||
external: Union[int, str] = Field(
|
||||
default=8971, title="External listening port for Frigate"
|
||||
)
|
||||
|
||||
|
||||
class NetworkingConfig(FrigateBaseModel):
|
||||
ipv6: IPv6Config = Field(default_factory=IPv6Config, title="Network configuration")
|
||||
ipv6: IPv6Config = Field(default_factory=IPv6Config, title="IPv6 configuration")
|
||||
listen: ListenConfig = Field(
|
||||
default_factory=ListenConfig, title="Listening ports configuration"
|
||||
)
|
||||
|
||||
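ListenConfig replaces the previously hard-coded 5000/8971 ports with configurable values. A quick sketch of how the new models behave with defaults and overrides (plain pydantic usage, not a Frigate config file):

net = NetworkingConfig()
print(net.listen.internal, net.listen.external)  # 5000 8971 by default

custom = NetworkingConfig(listen=ListenConfig(internal=5001, external="8443"))
print(custom.listen.external)                    # Union[int, str] also accepts strings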
@@ -14,7 +14,6 @@ RECORD_DIR = f"{BASE_DIR}/recordings"
|
||||
TRIGGER_DIR = f"{CLIPS_DIR}/triggers"
|
||||
BIRDSEYE_PIPE = "/tmp/cache/birdseye"
|
||||
CACHE_DIR = "/tmp/cache"
|
||||
FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
|
||||
PLUS_ENV_VAR = "PLUS_API_KEY"
|
||||
PLUS_API_HOST = "https://api.frigate.video"
|
||||
|
||||
@@ -122,6 +121,7 @@ UPDATE_REVIEW_DESCRIPTION = "update_review_description"
|
||||
UPDATE_MODEL_STATE = "update_model_state"
|
||||
UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress"
|
||||
UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout"
|
||||
UPDATE_JOB_STATE = "update_job_state"
|
||||
NOTIFICATION_TEST = "notification_test"
|
||||
|
||||
# IO Nice Values
|
||||
|
||||
@@ -601,7 +601,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
|
||||
if consensus_label is not None:
|
||||
camera = obj_data["camera"]
|
||||
logger.info(
|
||||
logger.debug(
|
||||
f"{self.model_config.name}: Publishing sub_label={consensus_label} for {obj_data['label']} object {object_id} on {camera}"
|
||||
)
|
||||
|
||||
@@ -658,6 +658,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
|
||||
def handle_request(self, topic, request_data):
|
||||
if topic == EmbeddingsRequestEnum.reload_classification_model.value:
|
||||
if request_data.get("model_name") == self.model_config.name:
|
||||
self.__build_detector()
|
||||
logger.info(
|
||||
f"Successfully loaded updated model for {self.model_config.name}"
|
||||
)
|
||||
|
||||
@@ -131,10 +131,8 @@ class ONNXModelRunner(BaseModelRunner):
|
||||
|
||||
return model_type in [
|
||||
EnrichmentModelTypeEnum.paddleocr.value,
|
||||
EnrichmentModelTypeEnum.yolov9_license_plate.value,
|
||||
EnrichmentModelTypeEnum.jina_v1.value,
|
||||
EnrichmentModelTypeEnum.jina_v2.value,
|
||||
EnrichmentModelTypeEnum.facenet.value,
|
||||
EnrichmentModelTypeEnum.arcface.value,
|
||||
ModelTypeEnum.rfdetr.value,
|
||||
ModelTypeEnum.dfine.value,
|
||||
]
|
||||
|
||||
@@ -69,7 +69,7 @@ class GenAIClient:
|
||||
return "\n- (No objects detected)"
|
||||
|
||||
context_prompt = f"""
|
||||
Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"]} security camera.
|
||||
Your task is to analyze a sequence of images taken in chronological order from a security camera.
|
||||
|
||||
## Normal Activity Patterns for This Property
|
||||
|
||||
@@ -108,7 +108,8 @@ Your response MUST be a flat JSON object with:
|
||||
|
||||
## Sequence Details
|
||||
|
||||
- Frame 1 = earliest, Frame {len(thumbnails)} = latest
|
||||
- Camera: {review_data["camera"]}
|
||||
- Total frames: {len(thumbnails)} (Frame 1 = earliest, Frame {len(thumbnails)} = latest)
|
||||
- Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds
|
||||
- Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"}
|
||||
|
||||
@@ -292,6 +293,64 @@ Guidelines:
|
||||
"""Get the context window size for this provider in tokens."""
|
||||
return 4096
|
||||
|
||||
def chat_with_tools(
|
||||
self,
|
||||
messages: list[dict[str, Any]],
|
||||
tools: Optional[list[dict[str, Any]]] = None,
|
||||
tool_choice: Optional[str] = "auto",
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Send chat messages to LLM with optional tool definitions.
|
||||
|
||||
This method handles conversation-style interactions with the LLM,
|
||||
including function calling/tool usage capabilities.
|
||||
|
||||
Args:
|
||||
messages: List of message dictionaries. Each message should have:
|
||||
- 'role': str - One of 'user', 'assistant', 'system', or 'tool'
|
||||
- 'content': str - The message content
|
||||
- 'tool_call_id': Optional[str] - For tool responses, the ID of the tool call
|
||||
- 'name': Optional[str] - For tool messages, the tool name
|
||||
tools: Optional list of tool definitions in OpenAI-compatible format.
|
||||
Each tool should have 'type': 'function' and 'function' with:
|
||||
- 'name': str - Tool name
|
||||
- 'description': str - Tool description
|
||||
- 'parameters': dict - JSON schema for parameters
|
||||
tool_choice: How the model should handle tools:
|
||||
- 'auto': Model decides whether to call tools
|
||||
- 'none': Model must not call tools
|
||||
- 'required': Model must call at least one tool
|
||||
- Or a dict specifying a specific tool to call
|
||||
**kwargs: Additional provider-specific parameters.
|
||||
|
||||
Returns:
|
||||
Dictionary with:
|
||||
- 'content': Optional[str] - The text response from the LLM, None if tool calls
|
||||
- 'tool_calls': Optional[List[Dict]] - List of tool calls if LLM wants to call tools.
|
||||
Each tool call dict has:
|
||||
- 'id': str - Unique identifier for this tool call
|
||||
- 'name': str - Tool name to call
|
||||
- 'arguments': dict - Arguments for the tool call (parsed JSON)
|
||||
- 'finish_reason': str - Reason generation stopped:
|
||||
- 'stop': Normal completion
|
||||
- 'tool_calls': LLM wants to call tools
|
||||
- 'length': Hit token limit
|
||||
- 'error': An error occurred
|
||||
|
||||
Raises:
|
||||
NotImplementedError: If the provider doesn't implement this method.
|
||||
"""
|
||||
# Base implementation - each provider should override this
|
||||
logger.warning(
|
||||
f"{self.__class__.__name__} does not support chat_with_tools. "
|
||||
"This method should be overridden by the provider implementation."
|
||||
)
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
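The base chat_with_tools contract above defines OpenAI-style message, tool, and result shapes that each provider implements. A usage sketch against that contract (the get_camera_events tool and the client variable are placeholders, not part of Frigate's shipped tooling):

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_camera_events",
            "description": "Look up recent tracked objects for a camera.",
            "parameters": {
                "type": "object",
                "properties": {
                    "camera": {"type": "string", "description": "Camera name"},
                    "limit": {"type": "integer", "description": "Max results"},
                },
                "required": ["camera"],
            },
        },
    }
]

messages = [
    {"role": "system", "content": "You answer questions about camera activity."},
    {"role": "user", "content": "What happened on the driveway camera today?"},
]

result = client.chat_with_tools(messages, tools=tools, tool_choice="auto")
if result["finish_reason"] == "tool_calls":
    for call in result["tool_calls"]:
        print(call["name"], call["arguments"])  # e.g. get_camera_events {'camera': 'driveway'}
else:
    print(result["content"])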
|
||||
|
||||
def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]:
|
||||
"""Get the GenAI client."""
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
"""Azure OpenAI Provider for Frigate AI."""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional
|
||||
from typing import Any, Optional
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
|
||||
from openai import AzureOpenAI
|
||||
@@ -76,3 +77,93 @@ class OpenAIClient(GenAIClient):
|
||||
def get_context_size(self) -> int:
|
||||
"""Get the context window size for Azure OpenAI."""
|
||||
return 128000
|
||||
|
||||
def chat_with_tools(
|
||||
self,
|
||||
messages: list[dict[str, Any]],
|
||||
tools: Optional[list[dict[str, Any]]] = None,
|
||||
tool_choice: Optional[str] = "auto",
|
||||
) -> dict[str, Any]:
|
||||
try:
|
||||
openai_tool_choice = None
|
||||
if tool_choice:
|
||||
if tool_choice == "none":
|
||||
openai_tool_choice = "none"
|
||||
elif tool_choice == "auto":
|
||||
openai_tool_choice = "auto"
|
||||
elif tool_choice == "required":
|
||||
openai_tool_choice = "required"
|
||||
|
||||
request_params = {
|
||||
"model": self.genai_config.model,
|
||||
"messages": messages,
|
||||
"timeout": self.timeout,
|
||||
}
|
||||
|
||||
if tools:
|
||||
request_params["tools"] = tools
|
||||
if openai_tool_choice is not None:
|
||||
request_params["tool_choice"] = openai_tool_choice
|
||||
|
||||
result = self.provider.chat.completions.create(**request_params)
|
||||
|
||||
if (
|
||||
result is None
|
||||
or not hasattr(result, "choices")
|
||||
or len(result.choices) == 0
|
||||
):
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
choice = result.choices[0]
|
||||
message = choice.message
|
||||
|
||||
content = message.content.strip() if message.content else None
|
||||
|
||||
tool_calls = None
|
||||
if message.tool_calls:
|
||||
tool_calls = []
|
||||
for tool_call in message.tool_calls:
|
||||
try:
|
||||
arguments = json.loads(tool_call.function.arguments)
|
||||
except (json.JSONDecodeError, AttributeError) as e:
|
||||
logger.warning(
|
||||
f"Failed to parse tool call arguments: {e}, "
|
||||
f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}"
|
||||
)
|
||||
arguments = {}
|
||||
|
||||
tool_calls.append(
|
||||
{
|
||||
"id": tool_call.id if hasattr(tool_call, "id") else "",
|
||||
"name": tool_call.function.name
|
||||
if hasattr(tool_call.function, "name")
|
||||
else "",
|
||||
"arguments": arguments,
|
||||
}
|
||||
)
|
||||
|
||||
finish_reason = "error"
|
||||
if hasattr(choice, "finish_reason") and choice.finish_reason:
|
||||
finish_reason = choice.finish_reason
|
||||
elif tool_calls:
|
||||
finish_reason = "tool_calls"
|
||||
elif content:
|
||||
finish_reason = "stop"
|
||||
|
||||
return {
|
||||
"content": content,
|
||||
"tool_calls": tool_calls,
|
||||
"finish_reason": finish_reason,
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.warning("Azure OpenAI returned an error: %s", str(e))
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
"""Gemini Provider for Frigate AI."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional
|
||||
from typing import Any, Optional
|
||||
|
||||
from google import genai
|
||||
from google.genai import errors, types
|
||||
@@ -76,3 +77,188 @@ class GeminiClient(GenAIClient):
|
||||
"""Get the context window size for Gemini."""
|
||||
# Gemini Pro Vision has a 1M token context window
|
||||
return 1000000
|
||||
|
||||
def chat_with_tools(
|
||||
self,
|
||||
messages: list[dict[str, Any]],
|
||||
tools: Optional[list[dict[str, Any]]] = None,
|
||||
tool_choice: Optional[str] = "auto",
|
||||
) -> dict[str, Any]:
|
||||
try:
|
||||
if tools:
|
||||
function_declarations = []
|
||||
for tool in tools:
|
||||
if tool.get("type") == "function":
|
||||
func_def = tool.get("function", {})
|
||||
function_declarations.append(
|
||||
genai.protos.FunctionDeclaration(
|
||||
name=func_def.get("name"),
|
||||
description=func_def.get("description"),
|
||||
parameters=genai.protos.Schema(
|
||||
type=genai.protos.Type.OBJECT,
|
||||
properties={
|
||||
prop_name: genai.protos.Schema(
|
||||
type=_convert_json_type_to_gemini(
|
||||
prop.get("type")
|
||||
),
|
||||
description=prop.get("description"),
|
||||
)
|
||||
for prop_name, prop in func_def.get(
|
||||
"parameters", {}
|
||||
)
|
||||
.get("properties", {})
|
||||
.items()
|
||||
},
|
||||
required=func_def.get("parameters", {}).get(
|
||||
"required", []
|
||||
),
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
tool_config = genai.protos.Tool(
|
||||
function_declarations=function_declarations
|
||||
)
|
||||
|
||||
if tool_choice == "none":
|
||||
function_calling_config = genai.protos.FunctionCallingConfig(
|
||||
mode=genai.protos.FunctionCallingConfig.Mode.NONE
|
||||
)
|
||||
elif tool_choice == "required":
|
||||
function_calling_config = genai.protos.FunctionCallingConfig(
|
||||
mode=genai.protos.FunctionCallingConfig.Mode.ANY
|
||||
)
|
||||
else:
|
||||
function_calling_config = genai.protos.FunctionCallingConfig(
|
||||
mode=genai.protos.FunctionCallingConfig.Mode.AUTO
|
||||
)
|
||||
else:
|
||||
tool_config = None
|
||||
function_calling_config = None
|
||||
|
||||
contents = []
|
||||
for msg in messages:
|
||||
role = msg.get("role")
|
||||
content = msg.get("content", "")
|
||||
|
||||
if role == "system":
|
||||
continue
|
||||
elif role == "user":
|
||||
contents.append({"role": "user", "parts": [content]})
|
||||
elif role == "assistant":
|
||||
parts = [content] if content else []
|
||||
if "tool_calls" in msg:
|
||||
for tc in msg["tool_calls"]:
|
||||
parts.append(
|
||||
genai.protos.FunctionCall(
|
||||
name=tc["function"]["name"],
|
||||
args=json.loads(tc["function"]["arguments"]),
|
||||
)
|
||||
)
|
||||
contents.append({"role": "model", "parts": parts})
|
||||
elif role == "tool":
|
||||
tool_name = msg.get("name", "")
|
||||
tool_result = (
|
||||
json.loads(content) if isinstance(content, str) else content
|
||||
)
|
||||
contents.append(
|
||||
{
|
||||
"role": "function",
|
||||
"parts": [
|
||||
genai.protos.FunctionResponse(
|
||||
name=tool_name,
|
||||
response=tool_result,
|
||||
)
|
||||
],
|
||||
}
|
||||
)
|
||||
|
||||
generation_config = genai.types.GenerationConfig(
|
||||
candidate_count=1,
|
||||
)
|
||||
if function_calling_config:
|
||||
generation_config.function_calling_config = function_calling_config
|
||||
|
||||
response = self.provider.generate_content(
|
||||
contents,
|
||||
tools=[tool_config] if tool_config else None,
|
||||
generation_config=generation_config,
|
||||
request_options=genai.types.RequestOptions(timeout=self.timeout),
|
||||
)
|
||||
|
||||
content = None
|
||||
tool_calls = None
|
||||
|
||||
if response.candidates and response.candidates[0].content:
|
||||
parts = response.candidates[0].content.parts
|
||||
text_parts = [p.text for p in parts if hasattr(p, "text") and p.text]
|
||||
if text_parts:
|
||||
content = " ".join(text_parts).strip()
|
||||
|
||||
function_calls = [
|
||||
p.function_call
|
||||
for p in parts
|
||||
if hasattr(p, "function_call") and p.function_call
|
||||
]
|
||||
if function_calls:
|
||||
tool_calls = []
|
||||
for fc in function_calls:
|
||||
tool_calls.append(
|
||||
{
|
||||
"id": f"call_{hash(fc.name)}",
|
||||
"name": fc.name,
|
||||
"arguments": dict(fc.args)
|
||||
if hasattr(fc, "args")
|
||||
else {},
|
||||
}
|
||||
)
|
||||
|
||||
finish_reason = "error"
|
||||
if response.candidates:
|
||||
finish_reason_map = {
|
||||
genai.types.FinishReason.STOP: "stop",
|
||||
genai.types.FinishReason.MAX_TOKENS: "length",
|
||||
genai.types.FinishReason.SAFETY: "stop",
|
||||
genai.types.FinishReason.RECITATION: "stop",
|
||||
genai.types.FinishReason.OTHER: "error",
|
||||
}
|
||||
finish_reason = finish_reason_map.get(
|
||||
response.candidates[0].finish_reason, "error"
|
||||
)
|
||||
elif tool_calls:
|
||||
finish_reason = "tool_calls"
|
||||
elif content:
|
||||
finish_reason = "stop"
|
||||
|
||||
return {
|
||||
"content": content,
|
||||
"tool_calls": tool_calls,
|
||||
"finish_reason": finish_reason,
|
||||
}
|
||||
|
||||
except GoogleAPICallError as e:
|
||||
logger.warning("Gemini returned an error: %s", str(e))
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
except Exception as e:
|
||||
logger.warning("Unexpected error in Gemini chat_with_tools: %s", str(e))
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
|
||||
def _convert_json_type_to_gemini(json_type: str) -> genai.protos.Type:
|
||||
type_map = {
|
||||
"string": genai.protos.Type.STRING,
|
||||
"integer": genai.protos.Type.INTEGER,
|
||||
"number": genai.protos.Type.NUMBER,
|
||||
"boolean": genai.protos.Type.BOOLEAN,
|
||||
"array": genai.protos.Type.ARRAY,
|
||||
"object": genai.protos.Type.OBJECT,
|
||||
}
|
||||
return type_map.get(json_type, genai.protos.Type.STRING)
|
||||
|
||||
238
frigate/genai/llama_cpp.py
Normal file
@@ -0,0 +1,238 @@
|
||||
"""llama.cpp Provider for Frigate AI."""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Optional
|
||||
|
||||
import requests
|
||||
|
||||
from frigate.config import GenAIProviderEnum
|
||||
from frigate.genai import GenAIClient, register_genai_provider
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@register_genai_provider(GenAIProviderEnum.llamacpp)
|
||||
class LlamaCppClient(GenAIClient):
|
||||
"""Generative AI client for Frigate using llama.cpp server."""
|
||||
|
||||
LOCAL_OPTIMIZED_OPTIONS = {
|
||||
"temperature": 0.7,
|
||||
"repeat_penalty": 1.05,
|
||||
"top_p": 0.8,
|
||||
}
|
||||
|
||||
provider: str # base_url
|
||||
provider_options: dict[str, Any]
|
||||
|
||||
def _init_provider(self):
|
||||
"""Initialize the client."""
|
||||
self.provider_options = {
|
||||
**self.LOCAL_OPTIMIZED_OPTIONS,
|
||||
**self.genai_config.provider_options,
|
||||
}
|
||||
return (
|
||||
self.genai_config.base_url.rstrip("/")
|
||||
if self.genai_config.base_url
|
||||
else None
|
||||
)
|
||||
|
||||
def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
|
||||
"""Submit a request to llama.cpp server."""
|
||||
if self.provider is None:
|
||||
logger.warning(
|
||||
"llama.cpp provider has not been initialized, a description will not be generated. Check your llama.cpp configuration."
|
||||
)
|
||||
return None
|
||||
|
||||
try:
|
||||
content = []
|
||||
for image in images:
|
||||
encoded_image = base64.b64encode(image).decode("utf-8")
|
||||
content.append(
|
||||
{
|
||||
"type": "image_url",
|
||||
"image_url": {
|
||||
"url": f"data:image/jpeg;base64,{encoded_image}",
|
||||
},
|
||||
}
|
||||
)
|
||||
content.append(
|
||||
{
|
||||
"type": "text",
|
||||
"text": prompt,
|
||||
}
|
||||
)
|
||||
|
||||
# Build request payload with llama.cpp native options
|
||||
payload = {
|
||||
"messages": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": content,
|
||||
},
|
||||
],
|
||||
**self.provider_options,
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{self.provider}/v1/chat/completions",
|
||||
json=payload,
|
||||
timeout=self.timeout,
|
||||
)
|
||||
response.raise_for_status()
|
||||
result = response.json()
|
||||
|
||||
if (
|
||||
result is not None
|
||||
and "choices" in result
|
||||
and len(result["choices"]) > 0
|
||||
):
|
||||
choice = result["choices"][0]
|
||||
if "message" in choice and "content" in choice["message"]:
|
||||
return choice["message"]["content"].strip()
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.warning("llama.cpp returned an error: %s", str(e))
|
||||
return None
|
||||
|
||||
def get_context_size(self) -> int:
|
||||
"""Get the context window size for llama.cpp."""
|
||||
return self.genai_config.provider_options.get("context_size", 4096)
|
||||
|
||||
def chat_with_tools(
|
||||
self,
|
||||
messages: list[dict[str, Any]],
|
||||
tools: Optional[list[dict[str, Any]]] = None,
|
||||
tool_choice: Optional[str] = "auto",
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Send chat messages to llama.cpp server with optional tool definitions.
|
||||
|
||||
Uses the OpenAI-compatible endpoint but passes through all native llama.cpp
|
||||
parameters (like slot_id, temperature, etc.) via provider_options.
|
||||
"""
|
||||
if self.provider is None:
|
||||
logger.warning(
|
||||
"llama.cpp provider has not been initialized. Check your llama.cpp configuration."
|
||||
)
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
try:
|
||||
openai_tool_choice = None
|
||||
if tool_choice:
|
||||
if tool_choice == "none":
|
||||
openai_tool_choice = "none"
|
||||
elif tool_choice == "auto":
|
||||
openai_tool_choice = "auto"
|
||||
elif tool_choice == "required":
|
||||
openai_tool_choice = "required"
|
||||
|
||||
payload = {
|
||||
"messages": messages,
|
||||
}
|
||||
|
||||
if tools:
|
||||
payload["tools"] = tools
|
||||
if openai_tool_choice is not None:
|
||||
payload["tool_choice"] = openai_tool_choice
|
||||
|
||||
provider_opts = {
|
||||
k: v for k, v in self.provider_options.items() if k != "context_size"
|
||||
}
|
||||
payload.update(provider_opts)
|
||||
|
||||
response = requests.post(
|
||||
f"{self.provider}/v1/chat/completions",
|
||||
json=payload,
|
||||
timeout=self.timeout,
|
||||
)
|
||||
response.raise_for_status()
|
||||
result = response.json()
|
||||
|
||||
if result is None or "choices" not in result or len(result["choices"]) == 0:
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
choice = result["choices"][0]
|
||||
message = choice.get("message", {})
|
||||
|
||||
content = message.get("content")
|
||||
if content:
|
||||
content = content.strip()
|
||||
else:
|
||||
content = None
|
||||
|
||||
tool_calls = None
|
||||
if "tool_calls" in message and message["tool_calls"]:
|
||||
tool_calls = []
|
||||
for tool_call in message["tool_calls"]:
|
||||
try:
|
||||
function_data = tool_call.get("function", {})
|
||||
arguments_str = function_data.get("arguments", "{}")
|
||||
arguments = json.loads(arguments_str)
|
||||
except (json.JSONDecodeError, KeyError, TypeError) as e:
|
||||
logger.warning(
|
||||
f"Failed to parse tool call arguments: {e}, "
|
||||
f"tool: {function_data.get('name', 'unknown')}"
|
||||
)
|
||||
arguments = {}
|
||||
|
||||
tool_calls.append(
|
||||
{
|
||||
"id": tool_call.get("id", ""),
|
||||
"name": function_data.get("name", ""),
|
||||
"arguments": arguments,
|
||||
}
|
||||
)
|
||||
|
||||
finish_reason = "error"
|
||||
if "finish_reason" in choice and choice["finish_reason"]:
|
||||
finish_reason = choice["finish_reason"]
|
||||
elif tool_calls:
|
||||
finish_reason = "tool_calls"
|
||||
elif content:
|
||||
finish_reason = "stop"
|
||||
|
||||
return {
|
||||
"content": content,
|
||||
"tool_calls": tool_calls,
|
||||
"finish_reason": finish_reason,
|
||||
}
|
||||
|
||||
except requests.exceptions.Timeout as e:
|
||||
logger.warning("llama.cpp request timed out: %s", str(e))
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
except requests.exceptions.RequestException as e:
|
||||
error_detail = str(e)
|
||||
if hasattr(e, "response") and e.response is not None:
|
||||
try:
|
||||
error_body = e.response.text
|
||||
error_detail = f"{str(e)} - Response: {error_body[:500]}"
|
||||
except Exception:
|
||||
pass
|
||||
logger.warning("llama.cpp returned an error: %s", error_detail)
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
except Exception as e:
|
||||
logger.warning("Unexpected error in llama.cpp chat_with_tools: %s", str(e))
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Ollama Provider for Frigate AI."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Optional
|
||||
|
||||
@@ -86,3 +87,120 @@ class OllamaClient(GenAIClient):
|
||||
return self.genai_config.provider_options.get("options", {}).get(
|
||||
"num_ctx", 4096
|
||||
)
|
||||
|
||||
def chat_with_tools(
|
||||
self,
|
||||
messages: list[dict[str, Any]],
|
||||
tools: Optional[list[dict[str, Any]]] = None,
|
||||
tool_choice: Optional[str] = "auto",
|
||||
) -> dict[str, Any]:
|
||||
if self.provider is None:
|
||||
logger.warning(
|
||||
"Ollama provider has not been initialized. Check your Ollama configuration."
|
||||
)
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
try:
|
||||
request_messages = []
|
||||
for msg in messages:
|
||||
msg_dict = {
|
||||
"role": msg.get("role"),
|
||||
"content": msg.get("content", ""),
|
||||
}
|
||||
if msg.get("tool_call_id"):
|
||||
msg_dict["tool_call_id"] = msg["tool_call_id"]
|
||||
if msg.get("name"):
|
||||
msg_dict["name"] = msg["name"]
|
||||
if msg.get("tool_calls"):
|
||||
msg_dict["tool_calls"] = msg["tool_calls"]
|
||||
request_messages.append(msg_dict)
|
||||
|
||||
request_params = {
|
||||
"model": self.genai_config.model,
|
||||
"messages": request_messages,
|
||||
}
|
||||
|
||||
if tools:
|
||||
request_params["tools"] = tools
|
||||
if tool_choice:
|
||||
if tool_choice == "none":
|
||||
request_params["tool_choice"] = "none"
|
||||
elif tool_choice == "required":
|
||||
request_params["tool_choice"] = "required"
|
||||
elif tool_choice == "auto":
|
||||
request_params["tool_choice"] = "auto"
|
||||
|
||||
request_params.update(self.provider_options)
|
||||
|
||||
response = self.provider.chat(**request_params)
|
||||
|
||||
if not response or "message" not in response:
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
message = response["message"]
|
||||
content = (
|
||||
message.get("content", "").strip() if message.get("content") else None
|
||||
)
|
||||
|
||||
tool_calls = None
|
||||
if "tool_calls" in message and message["tool_calls"]:
|
||||
tool_calls = []
|
||||
for tool_call in message["tool_calls"]:
|
||||
try:
|
||||
function_data = tool_call.get("function", {})
|
||||
arguments_str = function_data.get("arguments", "{}")
|
||||
arguments = json.loads(arguments_str)
|
||||
except (json.JSONDecodeError, KeyError, TypeError) as e:
|
||||
logger.warning(
|
||||
f"Failed to parse tool call arguments: {e}, "
|
||||
f"tool: {function_data.get('name', 'unknown')}"
|
||||
)
|
||||
arguments = {}
|
||||
|
||||
tool_calls.append(
|
||||
{
|
||||
"id": tool_call.get("id", ""),
|
||||
"name": function_data.get("name", ""),
|
||||
"arguments": arguments,
|
||||
}
|
||||
)
|
||||
|
||||
finish_reason = "error"
|
||||
if "done" in response and response["done"]:
|
||||
if tool_calls:
|
||||
finish_reason = "tool_calls"
|
||||
elif content:
|
||||
finish_reason = "stop"
|
||||
elif tool_calls:
|
||||
finish_reason = "tool_calls"
|
||||
elif content:
|
||||
finish_reason = "stop"
|
||||
|
||||
return {
|
||||
"content": content,
|
||||
"tool_calls": tool_calls,
|
||||
"finish_reason": finish_reason,
|
||||
}
|
||||
|
||||
except (TimeoutException, ResponseError, ConnectionError) as e:
|
||||
logger.warning("Ollama returned an error: %s", str(e))
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
except Exception as e:
|
||||
logger.warning("Unexpected error in Ollama chat_with_tools: %s", str(e))
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
"""OpenAI Provider for Frigate AI."""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional
|
||||
from typing import Any, Optional
|
||||
|
||||
from httpx import TimeoutException
|
||||
from openai import OpenAI
|
||||
@@ -116,3 +117,113 @@ class OpenAIClient(GenAIClient):
|
||||
f"Using default context size {self.context_size} for model {self.genai_config.model}"
|
||||
)
|
||||
return self.context_size
|
||||
|
||||
def chat_with_tools(
|
||||
self,
|
||||
messages: list[dict[str, Any]],
|
||||
tools: Optional[list[dict[str, Any]]] = None,
|
||||
tool_choice: Optional[str] = "auto",
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Send chat messages to OpenAI with optional tool definitions.
|
||||
|
||||
Implements function calling/tool usage for OpenAI models.
|
||||
"""
|
||||
try:
|
||||
openai_tool_choice = None
|
||||
if tool_choice:
|
||||
if tool_choice == "none":
|
||||
openai_tool_choice = "none"
|
||||
elif tool_choice == "auto":
|
||||
openai_tool_choice = "auto"
|
||||
elif tool_choice == "required":
|
||||
openai_tool_choice = "required"
|
||||
|
||||
request_params = {
|
||||
"model": self.genai_config.model,
|
||||
"messages": messages,
|
||||
"timeout": self.timeout,
|
||||
}
|
||||
|
||||
if tools:
|
||||
request_params["tools"] = tools
|
||||
if openai_tool_choice is not None:
|
||||
request_params["tool_choice"] = openai_tool_choice
|
||||
|
||||
if isinstance(self.genai_config.provider_options, dict):
|
||||
excluded_options = {"context_size"}
|
||||
provider_opts = {
|
||||
k: v
|
||||
for k, v in self.genai_config.provider_options.items()
|
||||
if k not in excluded_options
|
||||
}
|
||||
request_params.update(provider_opts)
|
||||
|
||||
result = self.provider.chat.completions.create(**request_params)
|
||||
|
||||
if (
|
||||
result is None
|
||||
or not hasattr(result, "choices")
|
||||
or len(result.choices) == 0
|
||||
):
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
choice = result.choices[0]
|
||||
message = choice.message
|
||||
content = message.content.strip() if message.content else None
|
||||
|
||||
tool_calls = None
|
||||
if message.tool_calls:
|
||||
tool_calls = []
|
||||
for tool_call in message.tool_calls:
|
||||
try:
|
||||
arguments = json.loads(tool_call.function.arguments)
|
||||
except (json.JSONDecodeError, AttributeError) as e:
|
||||
logger.warning(
|
||||
f"Failed to parse tool call arguments: {e}, "
|
||||
f"tool: {tool_call.function.name if hasattr(tool_call.function, 'name') else 'unknown'}"
|
||||
)
|
||||
arguments = {}
|
||||
|
||||
tool_calls.append(
|
||||
{
|
||||
"id": tool_call.id if hasattr(tool_call, "id") else "",
|
||||
"name": tool_call.function.name
|
||||
if hasattr(tool_call.function, "name")
|
||||
else "",
|
||||
"arguments": arguments,
|
||||
}
|
||||
)
|
||||
|
||||
finish_reason = "error"
|
||||
if hasattr(choice, "finish_reason") and choice.finish_reason:
|
||||
finish_reason = choice.finish_reason
|
||||
elif tool_calls:
|
||||
finish_reason = "tool_calls"
|
||||
elif content:
|
||||
finish_reason = "stop"
|
||||
|
||||
return {
|
||||
"content": content,
|
||||
"tool_calls": tool_calls,
|
||||
"finish_reason": finish_reason,
|
||||
}
|
||||
|
||||
except TimeoutException as e:
|
||||
logger.warning("OpenAI request timed out: %s", str(e))
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
except Exception as e:
|
||||
logger.warning("OpenAI returned an error: %s", str(e))
|
||||
return {
|
||||
"content": None,
|
||||
"tool_calls": None,
|
||||
"finish_reason": "error",
|
||||
}
|
||||
|
||||
0
frigate/jobs/__init__.py
Normal file
21
frigate/jobs/job.py
Normal file
@@ -0,0 +1,21 @@
"""Generic base class for long-running background jobs."""

from dataclasses import asdict, dataclass, field
from typing import Any, Optional


@dataclass
class Job:
    """Base class for long-running background jobs."""

    id: str = field(default_factory=lambda: __import__("uuid").uuid4().__str__()[:12])
    job_type: str = ""  # Must be set by subclasses
    status: str = "queued"  # queued, running, success, failed, cancelled
    results: Optional[dict[str, Any]] = None
    start_time: Optional[float] = None
    end_time: Optional[float] = None
    error_message: Optional[str] = None

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for WebSocket transmission."""
        return asdict(self)
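Job is meant to be subclassed per job type, with to_dict() producing the payload broadcast on the job_state topic. A minimal sketch (ThumbnailRebuildJob is hypothetical and does not exist in Frigate):

from dataclasses import dataclass


@dataclass
class ThumbnailRebuildJob(Job):
    job_type: str = "thumbnail_rebuild"  # hypothetical job type, for illustration only
    camera: str = "all"


job = ThumbnailRebuildJob(camera="driveway")
print(job.status)     # "queued"
print(job.to_dict())  # JSON-friendly dict, ready for the job_state WebSocket topic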
70
frigate/jobs/manager.py
Normal file
@@ -0,0 +1,70 @@
"""Generic job management for long-running background tasks."""

import threading
from typing import Optional

from frigate.jobs.job import Job
from frigate.types import JobStatusTypesEnum

# Global state and locks for enforcing single concurrent job per job type
_job_locks: dict[str, threading.Lock] = {}
_current_jobs: dict[str, Optional[Job]] = {}
# Keep completed jobs for retrieval, keyed by (job_type, job_id)
_completed_jobs: dict[tuple[str, str], Job] = {}


def _get_lock(job_type: str) -> threading.Lock:
    """Get or create a lock for the specified job type."""
    if job_type not in _job_locks:
        _job_locks[job_type] = threading.Lock()
    return _job_locks[job_type]


def set_current_job(job: Job) -> None:
    """Set the current job for a given job type."""
    lock = _get_lock(job.job_type)
    with lock:
        # Store the previous job if it was completed
        old_job = _current_jobs.get(job.job_type)
        if old_job and old_job.status in (
            JobStatusTypesEnum.success,
            JobStatusTypesEnum.failed,
            JobStatusTypesEnum.cancelled,
        ):
            _completed_jobs[(job.job_type, old_job.id)] = old_job
        _current_jobs[job.job_type] = job


def clear_current_job(job_type: str, job_id: Optional[str] = None) -> None:
    """Clear the current job for a given job type, optionally checking the ID."""
    lock = _get_lock(job_type)
    with lock:
        if job_type in _current_jobs:
            current = _current_jobs[job_type]
            if current is None or (job_id is None or current.id == job_id):
                _current_jobs[job_type] = None


def get_current_job(job_type: str) -> Optional[Job]:
    """Get the current running/queued job for a given job type, if any."""
    lock = _get_lock(job_type)
    with lock:
        return _current_jobs.get(job_type)


def get_job_by_id(job_type: str, job_id: str) -> Optional[Job]:
    """Get job by ID. Checks current job first, then completed jobs."""
    lock = _get_lock(job_type)
    with lock:
        # Check if it's the current job
        current = _current_jobs.get(job_type)
        if current and current.id == job_id:
            return current
        # Check if it's a completed job
        return _completed_jobs.get((job_type, job_id))


def job_is_running(job_type: str) -> bool:
    """Check if a job of the given type is currently running or queued."""
    job = get_current_job(job_type)
    return job is not None and job.status in ("queued", "running")
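The manager enforces a single active job per job_type via per-type locks, parking finished jobs in _completed_jobs when a new one takes their place. A rough lifecycle sketch (the status strings are assumed to match the JobStatusTypesEnum values used by job_is_running):

job = Job(job_type="media_sync")

if not job_is_running("media_sync"):
    set_current_job(job)    # becomes the only active job for this type
    job.status = "running"  # normally updated by the background runner
    # ... do the actual work ...
    job.status = "success"

print(get_current_job("media_sync") is job)        # True until replaced or cleared
print(get_job_by_id("media_sync", job.id) is job)  # also resolvable by id
clear_current_job("media_sync", job.id)            # or let the next set_current_job() archive it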
135
frigate/jobs/media_sync.py
Normal file
@@ -0,0 +1,135 @@
|
||||
"""Media sync job management with background execution."""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.const import UPDATE_JOB_STATE
|
||||
from frigate.jobs.job import Job
|
||||
from frigate.jobs.manager import (
|
||||
get_current_job,
|
||||
get_job_by_id,
|
||||
job_is_running,
|
||||
set_current_job,
|
||||
)
|
||||
from frigate.types import JobStatusTypesEnum
|
||||
from frigate.util.media import sync_all_media
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class MediaSyncJob(Job):
|
||||
"""In-memory job state for media sync operations."""
|
||||
|
||||
job_type: str = "media_sync"
|
||||
dry_run: bool = False
|
||||
media_types: list[str] = field(default_factory=lambda: ["all"])
|
||||
force: bool = False
|
||||
|
||||
|
||||
class MediaSyncRunner(threading.Thread):
|
||||
"""Thread-based runner for media sync jobs."""
|
||||
|
||||
def __init__(self, job: MediaSyncJob) -> None:
|
||||
super().__init__(daemon=True, name="media_sync")
|
||||
self.job = job
|
||||
self.requestor = InterProcessRequestor()
|
||||
|
||||
def run(self) -> None:
|
||||
"""Execute the media sync job and broadcast status updates."""
|
||||
try:
|
||||
# Update job status to running
|
||||
self.job.status = JobStatusTypesEnum.running
|
||||
self.job.start_time = datetime.now().timestamp()
|
||||
self._broadcast_status()
|
||||
|
||||
# Execute sync with provided parameters
|
||||
logger.debug(
|
||||
f"Starting media sync job {self.job.id}: "
|
||||
f"media_types={self.job.media_types}, "
|
||||
f"dry_run={self.job.dry_run}, "
|
||||
f"force={self.job.force}"
|
||||
)
|
||||
|
||||
results = sync_all_media(
|
||||
dry_run=self.job.dry_run,
|
||||
media_types=self.job.media_types,
|
||||
force=self.job.force,
|
||||
)
|
||||
|
||||
# Store results and mark as complete
|
||||
self.job.results = results.to_dict()
|
||||
self.job.status = JobStatusTypesEnum.success
|
||||
self.job.end_time = datetime.now().timestamp()
|
||||
|
||||
logger.debug(f"Media sync job {self.job.id} completed successfully")
|
||||
self._broadcast_status()
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Media sync job {self.job.id} failed: {e}", exc_info=True)
|
||||
self.job.status = JobStatusTypesEnum.failed
|
||||
self.job.error_message = str(e)
|
||||
self.job.end_time = datetime.now().timestamp()
|
||||
self._broadcast_status()
|
||||
|
||||
finally:
|
||||
if self.requestor:
|
||||
self.requestor.stop()
|
||||
|
||||
def _broadcast_status(self) -> None:
|
||||
"""Broadcast job status update via IPC to all WebSocket subscribers."""
|
||||
try:
|
||||
self.requestor.send_data(
|
||||
UPDATE_JOB_STATE,
|
||||
self.job.to_dict(),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to broadcast media sync status: {e}")
|
||||
|
||||
|
||||
def start_media_sync_job(
|
||||
dry_run: bool = False,
|
||||
media_types: Optional[list[str]] = None,
|
||||
force: bool = False,
|
||||
) -> Optional[str]:
|
||||
"""Start a new media sync job if none is currently running.
|
||||
|
||||
Returns job ID on success, None if job already running.
|
||||
"""
|
||||
# Check if a job is already running
|
||||
if job_is_running("media_sync"):
|
||||
current = get_current_job("media_sync")
|
||||
logger.warning(
|
||||
f"Media sync job {current.id} is already running. Rejecting new request."
|
||||
)
|
||||
return None
|
||||
|
||||
# Create and start new job
|
||||
job = MediaSyncJob(
|
||||
dry_run=dry_run,
|
||||
media_types=media_types or ["all"],
|
||||
force=force,
|
||||
)
|
||||
|
||||
logger.debug(f"Creating new media sync job: {job.id}")
|
||||
set_current_job(job)
|
||||
|
||||
# Start the background runner
|
||||
runner = MediaSyncRunner(job)
|
||||
runner.start()
|
||||
|
||||
return job.id
|
||||
|
||||
|
||||
def get_current_media_sync_job() -> Optional[MediaSyncJob]:
|
||||
"""Get the current running/queued media sync job, if any."""
|
||||
return get_current_job("media_sync")
|
||||
|
||||
|
||||
def get_media_sync_job_by_id(job_id: str) -> Optional[MediaSyncJob]:
|
||||
"""Get media sync job by ID. Currently only tracks the current job."""
|
||||
return get_job_by_id("media_sync", job_id)
|
||||
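A hedged sketch of the intended calling flow for the helpers above, assuming frigate.jobs.media_sync is importable as laid out in this file; an API handler might drive it roughly like this:

from frigate.jobs.media_sync import get_media_sync_job_by_id, start_media_sync_job

# start a dry-run sync; returns None if a media sync job is already running
job_id = start_media_sync_job(dry_run=True)

if job_id is None:
    print("a media sync job is already in progress")
else:
    # poll by id; MediaSyncRunner also pushes status updates to WebSocket subscribers via IPC
    job = get_media_sync_job_by_id(job_id)
    if job is not None:
        print(job.status, job.results)  # results is populated by the runner on success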
@@ -80,6 +80,14 @@ class Recordings(Model):
|
||||
regions = IntegerField(null=True)
|
||||
|
||||
|
||||
class ExportCase(Model):
|
||||
id = CharField(null=False, primary_key=True, max_length=30)
|
||||
name = CharField(index=True, max_length=100)
|
||||
description = TextField(null=True)
|
||||
created_at = DateTimeField()
|
||||
updated_at = DateTimeField()
|
||||
|
||||
|
||||
class Export(Model):
|
||||
id = CharField(null=False, primary_key=True, max_length=30)
|
||||
camera = CharField(index=True, max_length=20)
|
||||
@@ -88,6 +96,12 @@ class Export(Model):
|
||||
video_path = CharField(unique=True)
|
||||
thumb_path = CharField(unique=True)
|
||||
in_progress = BooleanField()
|
||||
export_case = ForeignKeyField(
|
||||
ExportCase,
|
||||
null=True,
|
||||
backref="exports",
|
||||
column_name="export_case_id",
|
||||
)
|
||||
|
||||
|
||||
class ReviewSegment(Model):
|
||||
|
||||
@@ -57,6 +57,51 @@ def get_cache_image_name(camera: str, frame_time: float) -> str:
|
||||
)
|
||||
|
||||
|
||||
def get_most_recent_preview_frame(camera: str, before: float | None = None) -> str | None:
|
||||
"""Get the most recent preview frame for a camera."""
|
||||
if not os.path.exists(PREVIEW_CACHE_DIR):
|
||||
return None
|
||||
|
||||
try:
|
||||
# files are named preview_{camera}-{timestamp}.webp
|
||||
# we want the largest timestamp that is less than or equal to before
|
||||
preview_files = [
|
||||
f
|
||||
for f in os.listdir(PREVIEW_CACHE_DIR)
|
||||
if f.startswith(f"preview_{camera}-")
|
||||
and f.endswith(f".{PREVIEW_FRAME_TYPE}")
|
||||
]
|
||||
|
||||
if not preview_files:
|
||||
return None
|
||||
|
||||
# sort by timestamp in descending order
|
||||
# filenames are like preview_front-1712345678.901234.webp
|
||||
preview_files.sort(reverse=True)
|
||||
|
||||
if before is None:
|
||||
return os.path.join(PREVIEW_CACHE_DIR, preview_files[0])
|
||||
|
||||
for file_name in preview_files:
|
||||
try:
|
||||
# Extract timestamp: preview_front-1712345678.901234.webp
|
||||
# Split by dash and extension
|
||||
timestamp_part = file_name.split("-")[-1].split(
|
||||
f".{PREVIEW_FRAME_TYPE}"
|
||||
)[0]
|
||||
timestamp = float(timestamp_part)
|
||||
|
||||
if timestamp <= before:
|
||||
return os.path.join(PREVIEW_CACHE_DIR, file_name)
|
||||
except (ValueError, IndexError):
|
||||
continue
|
||||
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Error searching for most recent preview frame: {e}")
|
||||
return None
|
||||
|
||||
|
||||
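The lookup above leans on the cache filename convention preview_{camera}-{timestamp}.{PREVIEW_FRAME_TYPE}. A standalone sketch of the timestamp extraction it performs, assuming the frame type is webp as the tests later in this diff suggest (the filename here is hypothetical):

# Illustrative: extract the epoch timestamp from a preview cache filename.
file_name = "preview_front-1712345678.901234.webp"

timestamp_part = file_name.split("-")[-1].split(".webp")[0]
timestamp = float(timestamp_part)  # 1712345678.901234

assert timestamp <= 1712345679.0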
class FFMpegConverter(threading.Thread):
|
||||
"""Convert a list of still frames into a vfr mp4."""
|
||||
|
||||
|
||||
@@ -13,9 +13,8 @@ from playhouse.sqlite_ext import SqliteExtDatabase
|
||||
from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
|
||||
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
|
||||
from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
|
||||
from frigate.record.util import remove_empty_directories, sync_recordings
|
||||
from frigate.util.builtin import clear_and_unlink
|
||||
from frigate.util.time import get_tomorrow_at_time
|
||||
from frigate.util.media import remove_empty_directories
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -61,7 +60,7 @@ class RecordingCleanup(threading.Thread):
|
||||
db.execute_sql("PRAGMA wal_checkpoint(TRUNCATE);")
|
||||
db.close()
|
||||
|
||||
def expire_review_segments(self, config: CameraConfig, now: datetime) -> None:
|
||||
def expire_review_segments(self, config: CameraConfig, now: datetime) -> set[Path]:
|
||||
"""Delete review segments that are expired"""
|
||||
alert_expire_date = (
|
||||
now - datetime.timedelta(days=config.record.alerts.retain.days)
|
||||
@@ -85,9 +84,12 @@ class RecordingCleanup(threading.Thread):
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
maybe_empty_dirs = set()
|
||||
thumbs_to_delete = list(map(lambda x: x[1], expired_reviews))
|
||||
for thumb_path in thumbs_to_delete:
|
||||
Path(thumb_path).unlink(missing_ok=True)
|
||||
thumb_path = Path(thumb_path)
|
||||
thumb_path.unlink(missing_ok=True)
|
||||
maybe_empty_dirs.add(thumb_path.parent)
|
||||
|
||||
max_deletes = 100000
|
||||
deleted_reviews_list = list(map(lambda x: x[0], expired_reviews))
|
||||
@@ -100,13 +102,15 @@ class RecordingCleanup(threading.Thread):
|
||||
<< deleted_reviews_list[i : i + max_deletes]
|
||||
).execute()
|
||||
|
||||
return maybe_empty_dirs
|
||||
|
||||
def expire_existing_camera_recordings(
|
||||
self,
|
||||
continuous_expire_date: float,
|
||||
motion_expire_date: float,
|
||||
config: CameraConfig,
|
||||
reviews: ReviewSegment,
|
||||
) -> None:
|
||||
) -> set[Path]:
|
||||
"""Delete recordings for existing camera based on retention config."""
|
||||
# Get the timestamp for cutoff of retained days
|
||||
|
||||
@@ -137,6 +141,8 @@ class RecordingCleanup(threading.Thread):
|
||||
.iterator()
|
||||
)
|
||||
|
||||
maybe_empty_dirs = set()
|
||||
|
||||
# loop over recordings and see if they overlap with any non-expired reviews
|
||||
# TODO: expire segments based on segment stats according to config
|
||||
review_start = 0
|
||||
@@ -191,8 +197,10 @@ class RecordingCleanup(threading.Thread):
|
||||
)
|
||||
or (mode == RetainModeEnum.active_objects and recording.objects == 0)
|
||||
):
|
||||
Path(recording.path).unlink(missing_ok=True)
|
||||
recording_path = Path(recording.path)
|
||||
recording_path.unlink(missing_ok=True)
|
||||
deleted_recordings.add(recording.id)
|
||||
maybe_empty_dirs.add(recording_path.parent)
|
||||
else:
|
||||
kept_recordings.append((recording.start_time, recording.end_time))
|
||||
|
||||
@@ -253,8 +261,10 @@ class RecordingCleanup(threading.Thread):
|
||||
|
||||
# Delete previews without any relevant recordings
|
||||
if not keep:
|
||||
Path(preview.path).unlink(missing_ok=True)
|
||||
preview_path = Path(preview.path)
|
||||
preview_path.unlink(missing_ok=True)
|
||||
deleted_previews.add(preview.id)
|
||||
maybe_empty_dirs.add(preview_path.parent)
|
||||
|
||||
# expire previews
|
||||
logger.debug(f"Expiring {len(deleted_previews)} previews")
|
||||
@@ -266,7 +276,9 @@ class RecordingCleanup(threading.Thread):
|
||||
Previews.id << deleted_previews_list[i : i + max_deletes]
|
||||
).execute()
|
||||
|
||||
def expire_recordings(self) -> None:
|
||||
return maybe_empty_dirs
|
||||
|
||||
def expire_recordings(self) -> set[Path]:
|
||||
"""Delete recordings based on retention config."""
|
||||
logger.debug("Start expire recordings.")
|
||||
logger.debug("Start deleted cameras.")
|
||||
@@ -291,10 +303,14 @@ class RecordingCleanup(threading.Thread):
|
||||
.iterator()
|
||||
)
|
||||
|
||||
maybe_empty_dirs = set()
|
||||
|
||||
deleted_recordings = set()
|
||||
for recording in no_camera_recordings:
|
||||
Path(recording.path).unlink(missing_ok=True)
|
||||
recording_path = Path(recording.path)
|
||||
recording_path.unlink(missing_ok=True)
|
||||
deleted_recordings.add(recording.id)
|
||||
maybe_empty_dirs.add(recording_path.parent)
|
||||
|
||||
logger.debug(f"Expiring {len(deleted_recordings)} recordings")
|
||||
# delete up to 100,000 at a time
|
||||
@@ -311,7 +327,7 @@ class RecordingCleanup(threading.Thread):
|
||||
logger.debug(f"Start camera: {camera}.")
|
||||
now = datetime.datetime.now()
|
||||
|
||||
self.expire_review_segments(config, now)
|
||||
maybe_empty_dirs |= self.expire_review_segments(config, now)
|
||||
continuous_expire_date = (
|
||||
now - datetime.timedelta(days=config.record.continuous.days)
|
||||
).timestamp()
|
||||
@@ -341,7 +357,7 @@ class RecordingCleanup(threading.Thread):
|
||||
.namedtuples()
|
||||
)
|
||||
|
||||
self.expire_existing_camera_recordings(
|
||||
maybe_empty_dirs |= self.expire_existing_camera_recordings(
|
||||
continuous_expire_date, motion_expire_date, config, reviews
|
||||
)
|
||||
logger.debug(f"End camera: {camera}.")
|
||||
@@ -349,12 +365,9 @@ class RecordingCleanup(threading.Thread):
|
||||
logger.debug("End all cameras.")
|
||||
logger.debug("End expire recordings.")
|
||||
|
||||
def run(self) -> None:
|
||||
# on startup sync recordings with disk if enabled
|
||||
if self.config.record.sync_recordings:
|
||||
sync_recordings(limited=False)
|
||||
next_sync = get_tomorrow_at_time(3)
|
||||
return maybe_empty_dirs
|
||||
|
||||
def run(self) -> None:
|
||||
# Expire tmp clips every minute, recordings and clean directories every hour.
|
||||
for counter in itertools.cycle(range(self.config.record.expire_interval)):
|
||||
if self.stop_event.wait(60):
|
||||
@@ -363,16 +376,8 @@ class RecordingCleanup(threading.Thread):
|
||||
|
||||
self.clean_tmp_previews()
|
||||
|
||||
if (
|
||||
self.config.record.sync_recordings
|
||||
and datetime.datetime.now().astimezone(datetime.timezone.utc)
|
||||
> next_sync
|
||||
):
|
||||
sync_recordings(limited=True)
|
||||
next_sync = get_tomorrow_at_time(3)
|
||||
|
||||
if counter == 0:
|
||||
self.clean_tmp_clips()
|
||||
self.expire_recordings()
|
||||
remove_empty_directories(RECORD_DIR)
|
||||
maybe_empty_dirs = self.expire_recordings()
|
||||
remove_empty_directories(Path(RECORD_DIR), maybe_empty_dirs)
|
||||
self.truncate_wal()
|
||||
|
||||
@@ -33,6 +33,7 @@ from frigate.util.time import is_current_hour
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30"
|
||||
TIMELAPSE_DATA_INPUT_ARGS = "-an -skip_frame nokey"
|
||||
|
||||
|
||||
@@ -40,11 +41,6 @@ def lower_priority():
|
||||
os.nice(PROCESS_PRIORITY_LOW)
|
||||
|
||||
|
||||
class PlaybackFactorEnum(str, Enum):
|
||||
realtime = "realtime"
|
||||
timelapse_25x = "timelapse_25x"
|
||||
|
||||
|
||||
class PlaybackSourceEnum(str, Enum):
|
||||
recordings = "recordings"
|
||||
preview = "preview"
|
||||
@@ -62,8 +58,11 @@ class RecordingExporter(threading.Thread):
|
||||
image: Optional[str],
|
||||
start_time: int,
|
||||
end_time: int,
|
||||
playback_factor: PlaybackFactorEnum,
|
||||
playback_source: PlaybackSourceEnum,
|
||||
export_case_id: Optional[str] = None,
|
||||
ffmpeg_input_args: Optional[str] = None,
|
||||
ffmpeg_output_args: Optional[str] = None,
|
||||
cpu_fallback: bool = False,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self.config = config
|
||||
@@ -73,8 +72,11 @@ class RecordingExporter(threading.Thread):
|
||||
self.user_provided_image = image
|
||||
self.start_time = start_time
|
||||
self.end_time = end_time
|
||||
self.playback_factor = playback_factor
|
||||
self.playback_source = playback_source
|
||||
self.export_case_id = export_case_id
|
||||
self.ffmpeg_input_args = ffmpeg_input_args
|
||||
self.ffmpeg_output_args = ffmpeg_output_args
|
||||
self.cpu_fallback = cpu_fallback
|
||||
|
||||
# ensure export thumb dir
|
||||
Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True)
|
||||
@@ -179,9 +181,16 @@ class RecordingExporter(threading.Thread):
|
||||
|
||||
return thumb_path
|
||||
|
||||
def get_record_export_command(self, video_path: str) -> list[str]:
|
||||
def get_record_export_command(
|
||||
self, video_path: str, use_hwaccel: bool = True
|
||||
) -> list[str]:
|
||||
# handle case where internal port is a string with ip:port
|
||||
internal_port = self.config.networking.listen.internal
|
||||
if type(internal_port) is str:
|
||||
internal_port = int(internal_port.split(":")[-1])
|
||||
|
||||
if (self.end_time - self.start_time) <= MAX_PLAYLIST_SECONDS:
|
||||
playlist_lines = f"http://127.0.0.1:5000/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8"
|
||||
playlist_lines = f"http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{self.start_time}/end/{self.end_time}/index.m3u8"
|
||||
ffmpeg_input = (
|
||||
f"-y -protocol_whitelist pipe,file,http,tcp -i {playlist_lines}"
|
||||
)
|
||||
@@ -213,25 +222,30 @@ class RecordingExporter(threading.Thread):
|
||||
for page in range(1, num_pages + 1):
|
||||
playlist = export_recordings.paginate(page, page_size)
|
||||
playlist_lines.append(
|
||||
f"file 'http://127.0.0.1:5000/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'"
|
||||
f"file 'http://127.0.0.1:{internal_port}/vod/{self.camera}/start/{float(playlist[0].start_time)}/end/{float(playlist[-1].end_time)}/index.m3u8'"
|
||||
)
|
||||
|
||||
ffmpeg_input = "-y -protocol_whitelist pipe,file,http,tcp -f concat -safe 0 -i /dev/stdin"
|
||||
|
||||
if self.playback_factor == PlaybackFactorEnum.realtime:
|
||||
ffmpeg_cmd = (
|
||||
f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart"
|
||||
).split(" ")
|
||||
elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
|
||||
if self.ffmpeg_input_args is not None and self.ffmpeg_output_args is not None:
|
||||
hwaccel_args = (
|
||||
self.config.cameras[self.camera].record.export.hwaccel_args
|
||||
if use_hwaccel
|
||||
else None
|
||||
)
|
||||
ffmpeg_cmd = (
|
||||
parse_preset_hardware_acceleration_encode(
|
||||
self.config.ffmpeg.ffmpeg_path,
|
||||
self.config.ffmpeg.hwaccel_args,
|
||||
f"-an {ffmpeg_input}",
|
||||
f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart",
|
||||
hwaccel_args,
|
||||
f"{self.ffmpeg_input_args} -an {ffmpeg_input}".strip(),
|
||||
f"{self.ffmpeg_output_args} -movflags +faststart".strip(),
|
||||
EncodeTypeEnum.timelapse,
|
||||
)
|
||||
).split(" ")
|
||||
else:
|
||||
ffmpeg_cmd = (
|
||||
f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} -c copy -movflags +faststart"
|
||||
).split(" ")
|
||||
|
||||
# add metadata
|
||||
title = f"Frigate Recording for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}"
|
||||
@@ -241,7 +255,9 @@ class RecordingExporter(threading.Thread):
|
||||
|
||||
return ffmpeg_cmd, playlist_lines
|
||||
|
||||
def get_preview_export_command(self, video_path: str) -> list[str]:
|
||||
def get_preview_export_command(
|
||||
self, video_path: str, use_hwaccel: bool = True
|
||||
) -> list[str]:
|
||||
playlist_lines = []
|
||||
codec = "-c copy"
|
||||
|
||||
@@ -309,20 +325,25 @@ class RecordingExporter(threading.Thread):
|
||||
"-y -protocol_whitelist pipe,file,tcp -f concat -safe 0 -i /dev/stdin"
|
||||
)
|
||||
|
||||
if self.playback_factor == PlaybackFactorEnum.realtime:
|
||||
ffmpeg_cmd = (
|
||||
f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}"
|
||||
).split(" ")
|
||||
elif self.playback_factor == PlaybackFactorEnum.timelapse_25x:
|
||||
if self.ffmpeg_input_args is not None and self.ffmpeg_output_args is not None:
|
||||
hwaccel_args = (
|
||||
self.config.cameras[self.camera].record.export.hwaccel_args
|
||||
if use_hwaccel
|
||||
else None
|
||||
)
|
||||
ffmpeg_cmd = (
|
||||
parse_preset_hardware_acceleration_encode(
|
||||
self.config.ffmpeg.ffmpeg_path,
|
||||
self.config.ffmpeg.hwaccel_args,
|
||||
f"{TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}",
|
||||
f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}",
|
||||
hwaccel_args,
|
||||
f"{self.ffmpeg_input_args} {TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}".strip(),
|
||||
f"{self.ffmpeg_output_args} -movflags +faststart {video_path}".strip(),
|
||||
EncodeTypeEnum.timelapse,
|
||||
)
|
||||
).split(" ")
|
||||
else:
|
||||
ffmpeg_cmd = (
|
||||
f"{self.config.ffmpeg.ffmpeg_path} -hide_banner {ffmpeg_input} {codec} -movflags +faststart {video_path}"
|
||||
).split(" ")
|
||||
|
||||
# add metadata
|
||||
title = f"Frigate Preview for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}"
|
||||
@@ -348,17 +369,20 @@ class RecordingExporter(threading.Thread):
|
||||
video_path = f"{EXPORT_DIR}/{self.camera}_{filename_start_datetime}-{filename_end_datetime}_{cleaned_export_id}.mp4"
|
||||
thumb_path = self.save_thumbnail(self.export_id)
|
||||
|
||||
Export.insert(
|
||||
{
|
||||
Export.id: self.export_id,
|
||||
Export.camera: self.camera,
|
||||
Export.name: export_name,
|
||||
Export.date: self.start_time,
|
||||
Export.video_path: video_path,
|
||||
Export.thumb_path: thumb_path,
|
||||
Export.in_progress: True,
|
||||
}
|
||||
).execute()
|
||||
export_values = {
|
||||
Export.id: self.export_id,
|
||||
Export.camera: self.camera,
|
||||
Export.name: export_name,
|
||||
Export.date: self.start_time,
|
||||
Export.video_path: video_path,
|
||||
Export.thumb_path: thumb_path,
|
||||
Export.in_progress: True,
|
||||
}
|
||||
|
||||
if self.export_case_id is not None:
|
||||
export_values[Export.export_case] = self.export_case_id
|
||||
|
||||
Export.insert(export_values).execute()
|
||||
|
||||
try:
|
||||
if self.playback_source == PlaybackSourceEnum.recordings:
|
||||
@@ -376,6 +400,34 @@ class RecordingExporter(threading.Thread):
|
||||
capture_output=True,
|
||||
)
|
||||
|
||||
# If export failed and cpu_fallback is enabled, retry without hwaccel
|
||||
if (
|
||||
p.returncode != 0
|
||||
and self.cpu_fallback
|
||||
and self.ffmpeg_input_args is not None
|
||||
and self.ffmpeg_output_args is not None
|
||||
):
|
||||
logger.warning(
|
||||
f"Export with hardware acceleration failed, retrying without hwaccel for {self.export_id}"
|
||||
)
|
||||
|
||||
if self.playback_source == PlaybackSourceEnum.recordings:
|
||||
ffmpeg_cmd, playlist_lines = self.get_record_export_command(
|
||||
video_path, use_hwaccel=False
|
||||
)
|
||||
else:
|
||||
ffmpeg_cmd, playlist_lines = self.get_preview_export_command(
|
||||
video_path, use_hwaccel=False
|
||||
)
|
||||
|
||||
p = sp.run(
|
||||
ffmpeg_cmd,
|
||||
input="\n".join(playlist_lines),
|
||||
encoding="ascii",
|
||||
preexec_fn=lower_priority,
|
||||
capture_output=True,
|
||||
)
|
||||
|
||||
if p.returncode != 0:
|
||||
logger.error(
|
||||
f"Failed to export {self.playback_source.value} for command {' '.join(ffmpeg_cmd)}"
|
||||
|
||||
@@ -1,147 +0,0 @@
|
||||
"""Recordings Utilities."""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
|
||||
from peewee import DatabaseError, chunked
|
||||
|
||||
from frigate.const import RECORD_DIR
|
||||
from frigate.models import Recordings, RecordingsToDelete
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def remove_empty_directories(directory: str) -> None:
|
||||
# list all directories recursively and sort them by path,
|
||||
# longest first
|
||||
paths = sorted(
|
||||
[x[0] for x in os.walk(directory)],
|
||||
key=lambda p: len(str(p)),
|
||||
reverse=True,
|
||||
)
|
||||
for path in paths:
|
||||
# don't delete the parent
|
||||
if path == directory:
|
||||
continue
|
||||
if len(os.listdir(path)) == 0:
|
||||
os.rmdir(path)
|
||||
|
||||
|
||||
def sync_recordings(limited: bool) -> None:
|
||||
"""Check the db for stale recordings entries that don't exist in the filesystem."""
|
||||
|
||||
def delete_db_entries_without_file(check_timestamp: float) -> bool:
|
||||
"""Delete db entries where file was deleted outside of frigate."""
|
||||
|
||||
if limited:
|
||||
recordings = Recordings.select(Recordings.id, Recordings.path).where(
|
||||
Recordings.start_time >= check_timestamp
|
||||
)
|
||||
else:
|
||||
# get all recordings in the db
|
||||
recordings = Recordings.select(Recordings.id, Recordings.path)
|
||||
|
||||
# Use pagination to process records in chunks
|
||||
page_size = 1000
|
||||
num_pages = (recordings.count() + page_size - 1) // page_size
|
||||
recordings_to_delete = set()
|
||||
|
||||
for page in range(num_pages):
|
||||
for recording in recordings.paginate(page, page_size):
|
||||
if not os.path.exists(recording.path):
|
||||
recordings_to_delete.add(recording.id)
|
||||
|
||||
if len(recordings_to_delete) == 0:
|
||||
return True
|
||||
|
||||
logger.info(
|
||||
f"Deleting {len(recordings_to_delete)} recording DB entries with missing files"
|
||||
)
|
||||
|
||||
# convert back to list of dictionaries for insertion
|
||||
recordings_to_delete = [
|
||||
{"id": recording_id} for recording_id in recordings_to_delete
|
||||
]
|
||||
|
||||
if float(len(recordings_to_delete)) / max(1, recordings.count()) > 0.5:
|
||||
logger.warning(
|
||||
f"Deleting {(len(recordings_to_delete) / max(1, recordings.count()) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..."
|
||||
)
|
||||
return False
|
||||
|
||||
# create a temporary table for deletion
|
||||
RecordingsToDelete.create_table(temporary=True)
|
||||
|
||||
# insert ids to the temporary table
|
||||
max_inserts = 1000
|
||||
for batch in chunked(recordings_to_delete, max_inserts):
|
||||
RecordingsToDelete.insert_many(batch).execute()
|
||||
|
||||
try:
|
||||
# delete records in the main table that exist in the temporary table
|
||||
query = Recordings.delete().where(
|
||||
Recordings.id.in_(RecordingsToDelete.select(RecordingsToDelete.id))
|
||||
)
|
||||
query.execute()
|
||||
except DatabaseError as e:
|
||||
logger.error(f"Database error during recordings db cleanup: {e}")
|
||||
|
||||
return True
|
||||
|
||||
def delete_files_without_db_entry(files_on_disk: list[str]):
|
||||
"""Delete files where file is not inside frigate db."""
|
||||
files_to_delete = []
|
||||
|
||||
for file in files_on_disk:
|
||||
if not Recordings.select().where(Recordings.path == file).exists():
|
||||
files_to_delete.append(file)
|
||||
|
||||
if len(files_to_delete) == 0:
|
||||
return True
|
||||
|
||||
logger.info(
|
||||
f"Deleting {len(files_to_delete)} recordings files with missing DB entries"
|
||||
)
|
||||
|
||||
if float(len(files_to_delete)) / max(1, len(files_on_disk)) > 0.5:
|
||||
logger.debug(
|
||||
f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..."
|
||||
)
|
||||
return False
|
||||
|
||||
for file in files_to_delete:
|
||||
os.unlink(file)
|
||||
|
||||
return True
|
||||
|
||||
logger.debug("Start sync recordings.")
|
||||
|
||||
# start checking on the hour 36 hours ago
|
||||
check_point = datetime.datetime.now().replace(
|
||||
minute=0, second=0, microsecond=0
|
||||
).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36)
|
||||
db_success = delete_db_entries_without_file(check_point.timestamp())
|
||||
|
||||
# only try to cleanup files if db cleanup was successful
|
||||
if db_success:
|
||||
if limited:
|
||||
# get recording files from last 36 hours
|
||||
hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}"
|
||||
files_on_disk = {
|
||||
os.path.join(root, file)
|
||||
for root, _, files in os.walk(RECORD_DIR)
|
||||
for file in files
|
||||
if root > hour_check
|
||||
}
|
||||
else:
|
||||
# get all recordings files on disk and put them in a set
|
||||
files_on_disk = {
|
||||
os.path.join(root, file)
|
||||
for root, _, files in os.walk(RECORD_DIR)
|
||||
for file in files
|
||||
}
|
||||
|
||||
delete_files_without_db_entry(files_on_disk)
|
||||
|
||||
logger.debug("End sync recordings.")
|
||||
@@ -394,7 +394,11 @@ class ReviewSegmentMaintainer(threading.Thread):
|
||||
|
||||
if activity.has_activity_category(SeverityEnum.alert):
|
||||
# update current time for last alert activity
|
||||
segment.last_alert_time = frame_time
|
||||
if (
|
||||
segment.last_alert_time is None
|
||||
or frame_time > segment.last_alert_time
|
||||
):
|
||||
segment.last_alert_time = frame_time
|
||||
|
||||
if segment.severity != SeverityEnum.alert:
|
||||
# if segment is not alert category but current activity is
|
||||
@@ -404,7 +408,11 @@ class ReviewSegmentMaintainer(threading.Thread):
|
||||
should_update_image = True
|
||||
|
||||
if activity.has_activity_category(SeverityEnum.detection):
|
||||
segment.last_detection_time = frame_time
|
||||
if (
|
||||
segment.last_detection_time is None
|
||||
or frame_time > segment.last_detection_time
|
||||
):
|
||||
segment.last_detection_time = frame_time
|
||||
|
||||
for object in activity.get_all_objects():
|
||||
# Alert-level objects should always be added (they extend/upgrade the segment)
|
||||
@@ -695,17 +703,28 @@ class ReviewSegmentMaintainer(threading.Thread):
|
||||
current_segment.detections[manual_info["event_id"]] = (
|
||||
manual_info["label"]
|
||||
)
|
||||
if (
|
||||
topic == DetectionTypeEnum.api
|
||||
and self.config.cameras[camera].review.alerts.enabled
|
||||
):
|
||||
current_segment.severity = SeverityEnum.alert
|
||||
if topic == DetectionTypeEnum.api:
|
||||
# manual_info["label"] contains 'label: sub_label'
|
||||
# so split out the label without modifying manual_info
|
||||
if (
|
||||
self.config.cameras[camera].review.detections.enabled
|
||||
and manual_info["label"].split(": ")[0]
|
||||
in self.config.cameras[camera].review.detections.labels
|
||||
):
|
||||
current_segment.last_detection_time = manual_info[
|
||||
"end_time"
|
||||
]
|
||||
elif self.config.cameras[camera].review.alerts.enabled:
|
||||
current_segment.severity = SeverityEnum.alert
|
||||
current_segment.last_alert_time = manual_info[
|
||||
"end_time"
|
||||
]
|
||||
elif (
|
||||
topic == DetectionTypeEnum.lpr
|
||||
and self.config.cameras[camera].review.detections.enabled
|
||||
):
|
||||
current_segment.severity = SeverityEnum.detection
|
||||
current_segment.last_alert_time = manual_info["end_time"]
|
||||
current_segment.last_alert_time = manual_info["end_time"]
|
||||
elif manual_info["state"] == ManualEventState.start:
|
||||
self.indefinite_events[camera][manual_info["event_id"]] = (
|
||||
manual_info["label"]
|
||||
@@ -717,7 +736,18 @@ class ReviewSegmentMaintainer(threading.Thread):
|
||||
topic == DetectionTypeEnum.api
|
||||
and self.config.cameras[camera].review.alerts.enabled
|
||||
):
|
||||
current_segment.severity = SeverityEnum.alert
|
||||
# manual_info["label"] contains 'label: sub_label'
|
||||
# so split out the label without modifying manual_info
|
||||
if (
|
||||
not self.config.cameras[
|
||||
camera
|
||||
].review.detections.enabled
|
||||
or manual_info["label"].split(": ")[0]
|
||||
not in self.config.cameras[
|
||||
camera
|
||||
].review.detections.labels
|
||||
):
|
||||
current_segment.severity = SeverityEnum.alert
|
||||
elif (
|
||||
topic == DetectionTypeEnum.lpr
|
||||
and self.config.cameras[camera].review.detections.enabled
|
||||
@@ -789,11 +819,23 @@ class ReviewSegmentMaintainer(threading.Thread):
|
||||
detections,
|
||||
)
|
||||
elif topic == DetectionTypeEnum.api:
|
||||
if self.config.cameras[camera].review.alerts.enabled:
|
||||
severity = None
|
||||
# manual_info["label"] contains 'label: sub_label'
|
||||
# so split out the label without modifying manual_info
|
||||
if (
|
||||
self.config.cameras[camera].review.detections.enabled
|
||||
and manual_info["label"].split(": ")[0]
|
||||
in self.config.cameras[camera].review.detections.labels
|
||||
):
|
||||
severity = SeverityEnum.detection
|
||||
elif self.config.cameras[camera].review.alerts.enabled:
|
||||
severity = SeverityEnum.alert
|
||||
|
||||
if severity:
|
||||
self.active_review_segments[camera] = PendingReviewSegment(
|
||||
camera,
|
||||
frame_time,
|
||||
SeverityEnum.alert,
|
||||
severity,
|
||||
{manual_info["event_id"]: manual_info["label"]},
|
||||
{},
|
||||
[],
|
||||
@@ -820,7 +862,7 @@ class ReviewSegmentMaintainer(threading.Thread):
|
||||
].last_detection_time = manual_info["end_time"]
|
||||
else:
|
||||
logger.warning(
|
||||
f"Manual event API has been called for {camera}, but alerts are disabled. This manual event will not appear as an alert."
|
||||
f"Manual event API has been called for {camera}, but alerts and detections are disabled. This manual event will not appear as an alert or detection."
|
||||
)
|
||||
elif topic == DetectionTypeEnum.lpr:
|
||||
if self.config.cameras[camera].review.detections.enabled:
|
||||
|
||||
@@ -22,6 +22,7 @@ from frigate.util.services import (
|
||||
get_bandwidth_stats,
|
||||
get_cpu_stats,
|
||||
get_fs_type,
|
||||
get_hailo_temps,
|
||||
get_intel_gpu_stats,
|
||||
get_jetson_stats,
|
||||
get_nvidia_gpu_stats,
|
||||
@@ -90,9 +91,80 @@ def get_temperatures() -> dict[str, float]:
|
||||
if temp is not None:
|
||||
temps[apex] = temp
|
||||
|
||||
# Get temperatures for Hailo devices
|
||||
temps.update(get_hailo_temps())
|
||||
|
||||
return temps
|
||||
|
||||
|
||||
def get_detector_temperature(
|
||||
detector_type: str,
|
||||
detector_index_by_type: dict[str, int],
|
||||
) -> Optional[float]:
|
||||
"""Get temperature for a specific detector based on its type."""
|
||||
if detector_type == "edgetpu":
|
||||
# Get temperatures for all attached Corals
|
||||
base = "/sys/class/apex/"
|
||||
if os.path.isdir(base):
|
||||
apex_devices = sorted(os.listdir(base))
|
||||
index = detector_index_by_type.get("edgetpu", 0)
|
||||
if index < len(apex_devices):
|
||||
apex_name = apex_devices[index]
|
||||
temp = read_temperature(os.path.join(base, apex_name, "temp"))
|
||||
if temp is not None:
|
||||
return temp
|
||||
elif detector_type == "hailo8l":
|
||||
# Get temperatures for Hailo devices
|
||||
hailo_temps = get_hailo_temps()
|
||||
if hailo_temps:
|
||||
hailo_device_names = sorted(hailo_temps.keys())
|
||||
index = detector_index_by_type.get("hailo8l", 0)
|
||||
if index < len(hailo_device_names):
|
||||
device_name = hailo_device_names[index]
|
||||
return hailo_temps[device_name]
|
||||
elif detector_type == "rknn":
|
||||
# Rockchip temperatures are handled by the GPU / NPU stats
|
||||
# as there are not detector specific temperatures
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
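get_detector_stats below keeps a running index per detector type so that the Nth detector of a type is matched with the Nth device of that type (for example, the second edgetpu detector maps to the second apex device). A tiny illustrative sketch of that counting, with a hypothetical detector ordering:

detector_types = ["edgetpu", "edgetpu", "hailo8l"]  # hypothetical configuration order
type_indices: dict[str, int] = {}

for detector_type in detector_types:
    index = type_indices.get(detector_type, 0)
    type_indices[detector_type] = index + 1
    # prints: edgetpu 0, edgetpu 1, hailo8l 0
    print(detector_type, index)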
def get_detector_stats(
|
||||
stats_tracking: StatsTrackingTypes,
|
||||
) -> dict[str, dict[str, Any]]:
|
||||
"""Get stats for all detectors, including temperatures based on detector type."""
|
||||
detector_stats: dict[str, dict[str, Any]] = {}
|
||||
detector_type_indices: dict[str, int] = {}
|
||||
|
||||
for name, detector in stats_tracking["detectors"].items():
|
||||
pid = detector.detect_process.pid if detector.detect_process else None
|
||||
detector_type = detector.detector_config.type
|
||||
|
||||
# Keep track of the index for each detector type to match temperatures correctly
|
||||
current_index = detector_type_indices.get(detector_type, 0)
|
||||
detector_type_indices[detector_type] = current_index + 1
|
||||
|
||||
detector_stat = {
|
||||
"inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined]
|
||||
# issue https://github.com/python/typeshed/issues/8799
|
||||
# from mypy 0.981 onwards
|
||||
"detection_start": detector.detection_start.value, # type: ignore[attr-defined]
|
||||
# issue https://github.com/python/typeshed/issues/8799
|
||||
# from mypy 0.981 onwards
|
||||
"pid": pid,
|
||||
}
|
||||
|
||||
temp = get_detector_temperature(detector_type, {detector_type: current_index})
|
||||
|
||||
if temp is not None:
|
||||
detector_stat["temperature"] = round(temp, 1)
|
||||
|
||||
detector_stats[name] = detector_stat
|
||||
|
||||
return detector_stats
|
||||
|
||||
|
||||
def get_processing_stats(
|
||||
config: FrigateConfig, stats: dict[str, str], hwaccel_errors: list[str]
|
||||
) -> None:
|
||||
@@ -173,6 +245,7 @@ async def set_gpu_stats(
|
||||
"mem": str(round(float(nvidia_usage[i]["mem"]), 2)) + "%",
|
||||
"enc": str(round(float(nvidia_usage[i]["enc"]), 2)) + "%",
|
||||
"dec": str(round(float(nvidia_usage[i]["dec"]), 2)) + "%",
|
||||
"temp": str(nvidia_usage[i]["temp"]),
|
||||
}
|
||||
|
||||
else:
|
||||
@@ -278,6 +351,32 @@ def stats_snapshot(
|
||||
if camera_stats.capture_process_pid.value
|
||||
else None
|
||||
)
|
||||
# Calculate connection quality based on current state
|
||||
# This is computed at stats-collection time so offline cameras
|
||||
# correctly show as unusable rather than excellent
|
||||
expected_fps = config.cameras[name].detect.fps
|
||||
current_fps = camera_stats.camera_fps.value
|
||||
reconnects = camera_stats.reconnects_last_hour.value
|
||||
stalls = camera_stats.stalls_last_hour.value
|
||||
|
||||
if current_fps < 0.1:
|
||||
quality_str = "unusable"
|
||||
elif reconnects == 0 and current_fps >= 0.9 * expected_fps and stalls < 5:
|
||||
quality_str = "excellent"
|
||||
elif reconnects <= 2 and current_fps >= 0.6 * expected_fps:
|
||||
quality_str = "fair"
|
||||
elif reconnects > 10 or current_fps < 1.0 or stalls > 100:
|
||||
quality_str = "unusable"
|
||||
else:
|
||||
quality_str = "poor"
|
||||
|
||||
connection_quality = {
|
||||
"connection_quality": quality_str,
|
||||
"expected_fps": expected_fps,
|
||||
"reconnects_last_hour": reconnects,
|
||||
"stalls_last_hour": stalls,
|
||||
}
|
||||
|
||||
stats["cameras"][name] = {
|
||||
"camera_fps": round(camera_stats.camera_fps.value, 2),
|
||||
"process_fps": round(camera_stats.process_fps.value, 2),
|
||||
@@ -289,20 +388,10 @@ def stats_snapshot(
|
||||
"ffmpeg_pid": ffmpeg_pid,
|
||||
"audio_rms": round(camera_stats.audio_rms.value, 4),
|
||||
"audio_dBFS": round(camera_stats.audio_dBFS.value, 4),
|
||||
**connection_quality,
|
||||
}
|
||||
|
||||
stats["detectors"] = {}
|
||||
for name, detector in stats_tracking["detectors"].items():
|
||||
pid = detector.detect_process.pid if detector.detect_process else None
|
||||
stats["detectors"][name] = {
|
||||
"inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined]
|
||||
# issue https://github.com/python/typeshed/issues/8799
|
||||
# from mypy 0.981 onwards
|
||||
"detection_start": detector.detection_start.value, # type: ignore[attr-defined]
|
||||
# issue https://github.com/python/typeshed/issues/8799
|
||||
# from mypy 0.981 onwards
|
||||
"pid": pid,
|
||||
}
|
||||
stats["detectors"] = get_detector_stats(stats_tracking)
|
||||
stats["camera_fps"] = round(total_camera_fps, 2)
|
||||
stats["process_fps"] = round(total_process_fps, 2)
|
||||
stats["skipped_fps"] = round(total_skipped_fps, 2)
|
||||
@@ -388,7 +477,6 @@ def stats_snapshot(
|
||||
"version": VERSION,
|
||||
"latest_version": stats_tracking["latest_frigate_version"],
|
||||
"storage": {},
|
||||
"temperatures": get_temperatures(),
|
||||
"last_updated": int(time.time()),
|
||||
}
|
||||
|
||||
|
||||
107 frigate/test/http_api/test_http_latest_frame.py Normal file
@@ -0,0 +1,107 @@
|
||||
import os
|
||||
import shutil
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from frigate.output.preview import PREVIEW_CACHE_DIR, PREVIEW_FRAME_TYPE
|
||||
from frigate.test.http_api.base_http_test import AuthTestClient, BaseTestHttp
|
||||
|
||||
|
||||
class TestHttpLatestFrame(BaseTestHttp):
|
||||
def setUp(self):
|
||||
super().setUp([])
|
||||
self.app = super().create_app()
|
||||
self.app.detected_frames_processor = MagicMock()
|
||||
|
||||
if os.path.exists(PREVIEW_CACHE_DIR):
|
||||
shutil.rmtree(PREVIEW_CACHE_DIR)
|
||||
os.makedirs(PREVIEW_CACHE_DIR)
|
||||
|
||||
def tearDown(self):
|
||||
if os.path.exists(PREVIEW_CACHE_DIR):
|
||||
shutil.rmtree(PREVIEW_CACHE_DIR)
|
||||
super().tearDown()
|
||||
|
||||
def test_latest_frame_fallback_to_preview(self):
|
||||
camera = "front_door"
|
||||
# 1. Mock frame processor to return None (simulating offline/missing frame)
|
||||
self.app.detected_frames_processor.get_current_frame.return_value = None
|
||||
# Return a timestamp that is after our dummy preview frame
|
||||
self.app.detected_frames_processor.get_current_frame_time.return_value = (
|
||||
1234567891.0
|
||||
)
|
||||
|
||||
# 2. Create a dummy preview file
|
||||
dummy_frame = np.zeros((180, 320, 3), np.uint8)
|
||||
cv2.putText(
|
||||
dummy_frame,
|
||||
"PREVIEW",
|
||||
(50, 50),
|
||||
cv2.FONT_HERSHEY_SIMPLEX,
|
||||
1,
|
||||
(255, 255, 255),
|
||||
2,
|
||||
)
|
||||
preview_path = os.path.join(
|
||||
PREVIEW_CACHE_DIR, f"preview_{camera}-1234567890.0.{PREVIEW_FRAME_TYPE}"
|
||||
)
|
||||
cv2.imwrite(preview_path, dummy_frame)
|
||||
|
||||
with AuthTestClient(self.app) as client:
|
||||
response = client.get(f"/{camera}/latest.webp")
|
||||
assert response.status_code == 200
|
||||
assert response.headers.get("X-Frigate-Offline") == "true"
|
||||
# Verify we got an image (webp)
|
||||
assert response.headers.get("content-type") == "image/webp"
|
||||
|
||||
def test_latest_frame_no_fallback_when_live(self):
|
||||
camera = "front_door"
|
||||
# 1. Mock frame processor to return a live frame
|
||||
dummy_frame = np.zeros((180, 320, 3), np.uint8)
|
||||
self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame
|
||||
self.app.detected_frames_processor.get_current_frame_time.return_value = (
|
||||
2000000000.0 # Way in the future
|
||||
)
|
||||
|
||||
with AuthTestClient(self.app) as client:
|
||||
response = client.get(f"/{camera}/latest.webp")
|
||||
assert response.status_code == 200
|
||||
assert "X-Frigate-Offline" not in response.headers
|
||||
|
||||
def test_latest_frame_stale_falls_back_to_preview(self):
|
||||
camera = "front_door"
|
||||
# 1. Mock frame processor to return a stale frame
|
||||
dummy_frame = np.zeros((180, 320, 3), np.uint8)
|
||||
self.app.detected_frames_processor.get_current_frame.return_value = dummy_frame
|
||||
# Return a timestamp that is after our dummy preview frame, but way in the past
|
||||
self.app.detected_frames_processor.get_current_frame_time.return_value = 1000.0
|
||||
|
||||
# 2. Create a dummy preview file
|
||||
preview_path = os.path.join(
|
||||
PREVIEW_CACHE_DIR, f"preview_{camera}-999.0.{PREVIEW_FRAME_TYPE}"
|
||||
)
|
||||
cv2.imwrite(preview_path, dummy_frame)
|
||||
|
||||
with AuthTestClient(self.app) as client:
|
||||
response = client.get(f"/{camera}/latest.webp")
|
||||
assert response.status_code == 200
|
||||
assert response.headers.get("X-Frigate-Offline") == "true"
|
||||
|
||||
def test_latest_frame_no_preview_found(self):
|
||||
camera = "front_door"
|
||||
# 1. Mock frame processor to return None
|
||||
self.app.detected_frames_processor.get_current_frame.return_value = None
|
||||
|
||||
# 2. No preview file created
|
||||
|
||||
with AuthTestClient(self.app) as client:
|
||||
response = client.get(f"/{camera}/latest.webp")
|
||||
# Should fall back to camera-error.jpg (which might not exist in test env, but let's see)
|
||||
# If camera-error.jpg is not found, it returns 500 "Unable to get valid frame" in latest_frame
|
||||
# OR it uses request.app.camera_error_image if already loaded.
|
||||
|
||||
# Since we didn't provide camera-error.jpg, it might 500 if glob fails or return 500 if frame is None.
|
||||
assert response.status_code in [200, 500]
|
||||
assert "X-Frigate-Offline" not in response.headers
|
||||
80 frigate/test/test_preview_loader.py Normal file
@@ -0,0 +1,80 @@
|
||||
import os
|
||||
import shutil
|
||||
import unittest
|
||||
|
||||
from frigate.output.preview import (
|
||||
PREVIEW_CACHE_DIR,
|
||||
PREVIEW_FRAME_TYPE,
|
||||
get_most_recent_preview_frame,
|
||||
)
|
||||
|
||||
|
||||
class TestPreviewLoader(unittest.TestCase):
|
||||
def setUp(self):
|
||||
if os.path.exists(PREVIEW_CACHE_DIR):
|
||||
shutil.rmtree(PREVIEW_CACHE_DIR)
|
||||
os.makedirs(PREVIEW_CACHE_DIR)
|
||||
|
||||
def tearDown(self):
|
||||
if os.path.exists(PREVIEW_CACHE_DIR):
|
||||
shutil.rmtree(PREVIEW_CACHE_DIR)
|
||||
|
||||
def test_get_most_recent_preview_frame_missing(self):
|
||||
self.assertIsNone(get_most_recent_preview_frame("test_camera"))
|
||||
|
||||
def test_get_most_recent_preview_frame_exists(self):
|
||||
camera = "test_camera"
|
||||
# create dummy preview files
|
||||
for ts in ["1000.0", "2000.0", "1500.0"]:
|
||||
with open(
|
||||
os.path.join(
|
||||
PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}"
|
||||
),
|
||||
"w",
|
||||
) as f:
|
||||
f.write(f"test_{ts}")
|
||||
|
||||
expected_path = os.path.join(
|
||||
PREVIEW_CACHE_DIR, f"preview_{camera}-2000.0.{PREVIEW_FRAME_TYPE}"
|
||||
)
|
||||
self.assertEqual(get_most_recent_preview_frame(camera), expected_path)
|
||||
|
||||
def test_get_most_recent_preview_frame_before(self):
|
||||
camera = "test_camera"
|
||||
# create dummy preview files
|
||||
for ts in ["1000.0", "2000.0"]:
|
||||
with open(
|
||||
os.path.join(
|
||||
PREVIEW_CACHE_DIR, f"preview_{camera}-{ts}.{PREVIEW_FRAME_TYPE}"
|
||||
),
|
||||
"w",
|
||||
) as f:
|
||||
f.write(f"test_{ts}")
|
||||
|
||||
# Test finding frame before or at 1500
|
||||
expected_path = os.path.join(
|
||||
PREVIEW_CACHE_DIR, f"preview_{camera}-1000.0.{PREVIEW_FRAME_TYPE}"
|
||||
)
|
||||
self.assertEqual(
|
||||
get_most_recent_preview_frame(camera, before=1500.0), expected_path
|
||||
)
|
||||
|
||||
# Test finding frame before or at 999
|
||||
self.assertIsNone(get_most_recent_preview_frame(camera, before=999.0))
|
||||
|
||||
def test_get_most_recent_preview_frame_other_camera(self):
|
||||
camera = "test_camera"
|
||||
other_camera = "other_camera"
|
||||
with open(
|
||||
os.path.join(
|
||||
PREVIEW_CACHE_DIR, f"preview_{other_camera}-3000.0.{PREVIEW_FRAME_TYPE}"
|
||||
),
|
||||
"w",
|
||||
) as f:
|
||||
f.write("test")
|
||||
|
||||
self.assertIsNone(get_most_recent_preview_frame(camera))
|
||||
|
||||
def test_get_most_recent_preview_frame_no_directory(self):
|
||||
shutil.rmtree(PREVIEW_CACHE_DIR)
|
||||
self.assertIsNone(get_most_recent_preview_frame("test_camera"))
|
||||
@@ -31,6 +31,21 @@ class TestProxyRoleResolution(unittest.TestCase):
|
||||
role = resolve_role(headers, self.proxy_config, self.config_roles)
|
||||
self.assertEqual(role, "admin")
|
||||
|
||||
def test_role_map_or_matching(self):
|
||||
config = self.proxy_config
|
||||
config.header_map.role_map = {
|
||||
"admin": ["group_admin", "group_privileged"],
|
||||
}
|
||||
|
||||
# OR semantics: a single matching group should map to the role
|
||||
headers = {"x-remote-role": "group_admin"}
|
||||
role = resolve_role(headers, config, self.config_roles)
|
||||
self.assertEqual(role, "admin")
|
||||
|
||||
headers = {"x-remote-role": "group_admin|group_privileged"}
|
||||
role = resolve_role(headers, config, self.config_roles)
|
||||
self.assertEqual(role, "admin")
|
||||
|
||||
def test_direct_role_header_with_separator(self):
|
||||
config = self.proxy_config
|
||||
config.header_map.role_map = None # disable role_map
|
||||
|
||||
@@ -185,7 +185,7 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
def snapshot(camera: str, obj: TrackedObject) -> bool:
|
||||
mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt
|
||||
if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
|
||||
jpg_bytes = obj.get_img_bytes(
|
||||
jpg_bytes, _ = obj.get_img_bytes(
|
||||
ext="jpg",
|
||||
timestamp=mqtt_config.timestamp,
|
||||
bounding_box=mqtt_config.bounding_box,
|
||||
@@ -515,6 +515,7 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
duration,
|
||||
source_type,
|
||||
draw,
|
||||
pre_capture,
|
||||
) = payload
|
||||
|
||||
# save the snapshot image
|
||||
@@ -522,6 +523,11 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
None, event_id, label, draw
|
||||
)
|
||||
end_time = frame_time + duration if duration is not None else None
|
||||
start_time = (
|
||||
frame_time - self.config.cameras[camera_name].record.event_pre_capture
|
||||
if pre_capture is None
|
||||
else frame_time - pre_capture
|
||||
)
|
||||
|
||||
# send event to event maintainer
|
||||
self.event_sender.publish(
|
||||
@@ -536,8 +542,7 @@ class TrackedObjectProcessor(threading.Thread):
|
||||
"sub_label": sub_label,
|
||||
"score": score,
|
||||
"camera": camera_name,
|
||||
"start_time": frame_time
|
||||
- self.config.cameras[camera_name].record.event_pre_capture,
|
||||
"start_time": start_time,
|
||||
"end_time": end_time,
|
||||
"has_clip": self.config.cameras[camera_name].record.enabled
|
||||
and include_recording,
|
||||
|
||||
@@ -422,7 +422,7 @@ class TrackedObject:
|
||||
return count > (self.camera_config.detect.stationary.threshold or 50)
|
||||
|
||||
def get_thumbnail(self, ext: str) -> bytes | None:
|
||||
img_bytes = self.get_img_bytes(
|
||||
img_bytes, _ = self.get_img_bytes(
|
||||
ext, timestamp=False, bounding_box=False, crop=True, height=175
|
||||
)
|
||||
|
||||
@@ -463,20 +463,21 @@ class TrackedObject:
|
||||
crop: bool = False,
|
||||
height: int | None = None,
|
||||
quality: int | None = None,
|
||||
) -> bytes | None:
|
||||
) -> tuple[bytes | None, float | None]:
|
||||
if self.thumbnail_data is None:
|
||||
return None
|
||||
return None, None
|
||||
|
||||
try:
|
||||
frame_time = self.thumbnail_data["frame_time"]
|
||||
best_frame = cv2.cvtColor(
|
||||
self.frame_cache[self.thumbnail_data["frame_time"]]["frame"],
|
||||
self.frame_cache[frame_time]["frame"],
|
||||
cv2.COLOR_YUV2BGR_I420,
|
||||
)
|
||||
except KeyError:
|
||||
logger.warning(
|
||||
f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache"
|
||||
f"Unable to create jpg because frame {frame_time} is not in the cache"
|
||||
)
|
||||
return None
|
||||
return None, None
|
||||
|
||||
if bounding_box:
|
||||
thickness = 2
|
||||
@@ -558,13 +559,13 @@ class TrackedObject:
|
||||
ret, jpg = cv2.imencode(f".{ext}", best_frame, quality_params)
|
||||
|
||||
if ret:
|
||||
return jpg.tobytes()
|
||||
return jpg.tobytes(), frame_time
|
||||
else:
|
||||
return None
|
||||
return None, None
|
||||
|
||||
def write_snapshot_to_disk(self) -> None:
|
||||
snapshot_config: SnapshotsConfig = self.camera_config.snapshots
|
||||
jpg_bytes = self.get_img_bytes(
|
||||
jpg_bytes, _ = self.get_img_bytes(
|
||||
ext="jpg",
|
||||
timestamp=snapshot_config.timestamp,
|
||||
bounding_box=snapshot_config.bounding_box,
|
||||
|
||||
@@ -26,6 +26,15 @@ class ModelStatusTypesEnum(str, Enum):
|
||||
failed = "failed"
|
||||
|
||||
|
||||
class JobStatusTypesEnum(str, Enum):
|
||||
pending = "pending"
|
||||
queued = "queued"
|
||||
running = "running"
|
||||
success = "success"
|
||||
failed = "failed"
|
||||
cancelled = "cancelled"
|
||||
|
||||
|
||||
class TrackedObjectUpdateTypesEnum(str, Enum):
|
||||
description = "description"
|
||||
face = "face"
|
||||
|
||||
@@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CURRENT_CONFIG_VERSION = "0.17-0"
|
||||
CURRENT_CONFIG_VERSION = "0.18-0"
|
||||
DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, "config.yml")
|
||||
|
||||
|
||||
@@ -98,6 +98,13 @@ def migrate_frigate_config(config_file: str):
|
||||
yaml.dump(new_config, f)
|
||||
previous_version = "0.17-0"
|
||||
|
||||
if previous_version < "0.18-0":
|
||||
logger.info(f"Migrating frigate config from {previous_version} to 0.18-0...")
|
||||
new_config = migrate_018_0(config)
|
||||
with open(config_file, "w") as f:
|
||||
yaml.dump(new_config, f)
|
||||
previous_version = "0.18-0"
|
||||
|
||||
logger.info("Finished frigate config migration...")
|
||||
|
||||
|
||||
@@ -427,6 +434,49 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
|
||||
return new_config
|
||||
|
||||
|
||||
def migrate_018_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]:
|
||||
"""Handle migrating frigate config to 0.18-0"""
|
||||
new_config = config.copy()
|
||||
|
||||
# Remove deprecated sync_recordings from global record config
|
||||
if new_config.get("record", {}).get("sync_recordings") is not None:
|
||||
del new_config["record"]["sync_recordings"]
|
||||
|
||||
# Remove deprecated timelapse_args from global record export config
|
||||
if new_config.get("record", {}).get("export", {}).get("timelapse_args") is not None:
|
||||
del new_config["record"]["export"]["timelapse_args"]
|
||||
# Remove export section if empty
|
||||
if not new_config.get("record", {}).get("export"):
|
||||
del new_config["record"]["export"]
|
||||
# Remove record section if empty
|
||||
if not new_config.get("record"):
|
||||
del new_config["record"]
|
||||
|
||||
# Remove deprecated sync_recordings and timelapse_args from camera-specific record configs
|
||||
for name, camera in config.get("cameras", {}).items():
|
||||
camera_config: dict[str, dict[str, Any]] = camera.copy()
|
||||
|
||||
if camera_config.get("record", {}).get("sync_recordings") is not None:
|
||||
del camera_config["record"]["sync_recordings"]
|
||||
|
||||
if (
|
||||
camera_config.get("record", {}).get("export", {}).get("timelapse_args")
|
||||
is not None
|
||||
):
|
||||
del camera_config["record"]["export"]["timelapse_args"]
|
||||
# Remove export section if empty
|
||||
if not camera_config.get("record", {}).get("export"):
|
||||
del camera_config["record"]["export"]
|
||||
# Remove record section if empty
|
||||
if not camera_config.get("record"):
|
||||
del camera_config["record"]
|
||||
|
||||
new_config["cameras"][name] = camera_config
|
||||
|
||||
new_config["version"] = "0.18-0"
|
||||
return new_config
|
||||
|
||||
|
||||
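A hedged before/after sketch of what migrate_018_0 does to a config dict: deprecated keys are dropped and sections left empty by the removal are pruned (the sample values are hypothetical):

before = {
    "version": "0.17-0",
    "record": {
        "sync_recordings": True,
        "export": {"timelapse_args": "-vf setpts=0.04*PTS -r 30"},
    },
    "cameras": {"front": {"record": {"sync_recordings": False}}},
}

# after migrate_018_0(before): deprecated keys removed, emptied sections pruned
after = {
    "version": "0.18-0",
    "cameras": {"front": {}},
}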
def get_relative_coordinates(
|
||||
mask: Optional[Union[str, list]], frame_shape: tuple[int, int]
|
||||
) -> Union[str, list]:
|
||||
|
||||
808 frigate/util/media.py Normal file
@@ -0,0 +1,808 @@
|
||||
"""Recordings Utilities."""
|
||||
|
||||
import datetime
|
||||
import errno
|
||||
import logging
|
||||
import os
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Iterable
|
||||
|
||||
from peewee import DatabaseError, chunked
|
||||
|
||||
from frigate.const import CLIPS_DIR, EXPORT_DIR, RECORD_DIR, THUMB_DIR
|
||||
from frigate.models import (
|
||||
Event,
|
||||
Export,
|
||||
Previews,
|
||||
Recordings,
|
||||
RecordingsToDelete,
|
||||
ReviewSegment,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Safety threshold - abort if more than 50% of files would be deleted
|
||||
SAFETY_THRESHOLD = 0.5
|
||||
|
||||
|
||||
@dataclass
|
||||
class SyncResult:
|
||||
"""Result of a sync operation."""
|
||||
|
||||
media_type: str
|
||||
files_checked: int = 0
|
||||
orphans_found: int = 0
|
||||
orphans_deleted: int = 0
|
||||
orphan_paths: list[str] = field(default_factory=list)
|
||||
aborted: bool = False
|
||||
error: str | None = None
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"media_type": self.media_type,
|
||||
"files_checked": self.files_checked,
|
||||
"orphans_found": self.orphans_found,
|
||||
"orphans_deleted": self.orphans_deleted,
|
||||
"aborted": self.aborted,
|
||||
"error": self.error,
|
||||
}
|
||||
|
||||
|
||||
def remove_empty_directories(root: Path, paths: Iterable[Path]) -> None:
|
||||
"""
|
||||
Remove directories if they exist and are empty.
|
||||
Silently ignores non-existent and non-empty directories.
|
||||
Attempts to remove parent directories as well, stopping at the given root.
|
||||
"""
|
||||
count = 0
|
||||
while True:
|
||||
parents = set()
|
||||
for path in paths:
|
||||
if path == root:
|
||||
continue
|
||||
|
||||
try:
|
||||
path.rmdir()
|
||||
count += 1
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
except OSError as e:
|
||||
if e.errno == errno.ENOTEMPTY:
|
||||
continue
|
||||
raise
|
||||
|
||||
parents.add(path.parent)
|
||||
|
||||
if not parents:
|
||||
break
|
||||
|
||||
paths = parents
|
||||
|
||||
logger.debug("Removed {count} empty directories")
|
||||
|
||||
|
||||
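A self-contained sketch of the bottom-up pruning this helper performs, using a throwaway temp directory rather than Frigate paths:

import tempfile
from pathlib import Path

# build root/a/b with no files, then prune: b is removed, then a, stopping at root
root = Path(tempfile.mkdtemp())
leaf = root / "a" / "b"
leaf.mkdir(parents=True)

paths = {leaf}
while paths:
    parents = set()
    for path in paths:
        if path == root:
            continue
        try:
            path.rmdir()  # non-empty or already-removed directories are simply skipped
        except OSError:
            continue
        parents.add(path.parent)
    paths = parents

assert not (root / "a").exists() and root.exists()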
def sync_recordings(
|
||||
limited: bool = False, dry_run: bool = False, force: bool = False
|
||||
) -> SyncResult:
|
||||
"""Sync recordings between the database and disk using the SyncResult format."""
|
||||
|
||||
result = SyncResult(media_type="recordings")
|
||||
|
||||
try:
|
||||
logger.debug("Start sync recordings.")
|
||||
|
||||
# start checking on the hour 36 hours ago
|
||||
check_point = datetime.datetime.now().replace(
|
||||
minute=0, second=0, microsecond=0
|
||||
).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36)
|
||||
|
||||
# Gather DB recordings to inspect
|
||||
if limited:
|
||||
recordings_query = Recordings.select(Recordings.id, Recordings.path).where(
|
||||
Recordings.start_time >= check_point.timestamp()
|
||||
)
|
||||
else:
|
||||
recordings_query = Recordings.select(Recordings.id, Recordings.path)
|
||||
|
||||
recordings_count = recordings_query.count()
|
||||
page_size = 1000
|
||||
num_pages = (recordings_count + page_size - 1) // page_size
|
||||
recordings_to_delete: list[dict] = []
|
||||
|
||||
for page in range(num_pages):
|
||||
for recording in recordings_query.paginate(page, page_size):
|
||||
if not os.path.exists(recording.path):
|
||||
recordings_to_delete.append(
|
||||
{"id": recording.id, "path": recording.path}
|
||||
)
|
||||
|
||||
result.orphans_found += len(recordings_to_delete)
|
||||
result.orphan_paths.extend(
|
||||
[
|
||||
recording["path"]
|
||||
for recording in recordings_to_delete
|
||||
if recording.get("path")
|
||||
]
|
||||
)
|
||||
|
||||
if (
|
||||
recordings_count
|
||||
and len(recordings_to_delete) / recordings_count > SAFETY_THRESHOLD
|
||||
):
|
||||
if force:
|
||||
logger.warning(
|
||||
f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries (force=True, bypassing safety threshold)"
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..."
|
||||
)
|
||||
result.aborted = True
|
||||
return result
|
||||
|
||||
if recordings_to_delete and not dry_run:
|
||||
logger.info(
|
||||
f"Deleting {len(recordings_to_delete)} recording DB entries with missing files"
|
||||
)
|
||||
|
||||
RecordingsToDelete.create_table(temporary=True)
|
||||
|
||||
max_inserts = 1000
|
||||
for batch in chunked(recordings_to_delete, max_inserts):
|
||||
RecordingsToDelete.insert_many(batch).execute()
|
||||
|
||||
try:
|
||||
deleted = (
|
||||
Recordings.delete()
|
||||
.where(
|
||||
Recordings.id.in_(
|
||||
RecordingsToDelete.select(RecordingsToDelete.id)
|
||||
)
|
||||
)
|
||||
.execute()
|
||||
)
|
||||
result.orphans_deleted += int(deleted)
|
||||
except DatabaseError as e:
|
||||
logger.error(f"Database error during recordings db cleanup: {e}")
|
||||
result.error = str(e)
|
||||
result.aborted = True
|
||||
return result
|
||||
|
||||
if result.aborted:
|
||||
logger.warning("Recording DB sync aborted; skipping file cleanup.")
|
||||
return result
|
||||
|
||||
# Only try to cleanup files if db cleanup was successful or dry_run
|
||||
if limited:
|
||||
# get recording files from last 36 hours
|
||||
hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}"
|
||||
files_on_disk = {
|
||||
os.path.join(root, file)
|
||||
for root, _, files in os.walk(RECORD_DIR)
|
||||
for file in files
|
||||
if root > hour_check
|
||||
}
|
||||
else:
|
||||
# get all recordings files on disk and put them in a set
|
||||
files_on_disk = {
|
||||
os.path.join(root, file)
|
||||
for root, _, files in os.walk(RECORD_DIR)
|
||||
for file in files
|
||||
}
|
||||
|
||||
result.files_checked = len(files_on_disk)
|
||||
|
||||
files_to_delete: list[str] = []
|
||||
for file in files_on_disk:
|
||||
if not Recordings.select().where(Recordings.path == file).exists():
|
||||
files_to_delete.append(file)
|
||||
|
||||
result.orphans_found += len(files_to_delete)
|
||||
result.orphan_paths.extend(files_to_delete)
|
||||
|
||||
if (
|
||||
files_on_disk
|
||||
and len(files_to_delete) / len(files_on_disk) > SAFETY_THRESHOLD
|
||||
):
|
||||
if force:
|
||||
logger.warning(
|
||||
f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files (force=True, bypassing safety threshold)"
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files, could be due to configuration error. Aborting..."
|
||||
)
|
||||
result.aborted = True
|
||||
return result
|
||||
|
||||
if dry_run:
|
||||
logger.info(
|
||||
f"Recordings sync (dry run): Found {len(files_to_delete)} orphaned files"
|
||||
)
|
||||
return result
|
||||
|
||||
# Delete orphans
|
||||
logger.info(f"Deleting {len(files_to_delete)} orphaned recordings files")
|
||||
for file in files_to_delete:
|
||||
try:
|
||||
os.unlink(file)
|
||||
result.orphans_deleted += 1
|
||||
except OSError as e:
|
||||
logger.error(f"Failed to delete {file}: {e}")
|
||||
|
||||
logger.debug("End sync recordings.")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error syncing recordings: {e}")
|
||||
result.error = str(e)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def sync_event_snapshots(dry_run: bool = False, force: bool = False) -> SyncResult:
|
||||
"""Sync event snapshots - delete files not referenced by any event.
|
||||
|
||||
Event snapshots are stored at: CLIPS_DIR/{camera}-{event_id}.jpg
|
||||
Also checks for clean variants: {camera}-{event_id}-clean.webp and -clean.png
|
||||
"""
|
||||
result = SyncResult(media_type="event_snapshots")
|
||||
|
||||
try:
|
||||
# Get all event IDs with snapshots from DB
|
||||
events_with_snapshots = set(
|
||||
f"{e.camera}-{e.id}"
|
||||
for e in Event.select(Event.id, Event.camera).where(
|
||||
Event.has_snapshot == True
|
||||
)
|
||||
)
|
||||
|
||||
# Find snapshot files on disk (directly in CLIPS_DIR, not subdirectories)
|
||||
snapshot_files: list[tuple[str, str]] = [] # (full_path, base_name)
|
||||
if os.path.isdir(CLIPS_DIR):
|
||||
for file in os.listdir(CLIPS_DIR):
|
||||
file_path = os.path.join(CLIPS_DIR, file)
|
||||
if os.path.isfile(file_path) and file.endswith(
|
||||
(".jpg", "-clean.webp", "-clean.png")
|
||||
):
|
||||
# Extract base name (camera-event_id) from filename
|
||||
base_name = file
|
||||
for suffix in ["-clean.webp", "-clean.png", ".jpg"]:
|
||||
if file.endswith(suffix):
|
||||
base_name = file[: -len(suffix)]
|
||||
break
|
||||
snapshot_files.append((file_path, base_name))
|
||||
|
||||
result.files_checked = len(snapshot_files)
|
||||
|
||||
# Find orphans
|
||||
orphans: list[str] = []
|
||||
for file_path, base_name in snapshot_files:
|
||||
if base_name not in events_with_snapshots:
|
||||
orphans.append(file_path)
|
||||
|
||||
result.orphans_found = len(orphans)
|
||||
result.orphan_paths = orphans
|
||||
|
||||
if len(orphans) == 0:
|
||||
return result
|
||||
|
||||
# Safety check
|
||||
if (
|
||||
result.files_checked > 0
|
||||
and len(orphans) / result.files_checked > SAFETY_THRESHOLD
|
||||
):
|
||||
if force:
|
||||
logger.warning(
|
||||
f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
|
||||
"Aborting due to safety threshold."
|
||||
)
|
||||
result.aborted = True
|
||||
return result
|
||||
|
||||
if dry_run:
|
||||
logger.info(
|
||||
f"Event snapshots sync (dry run): Found {len(orphans)} orphaned files"
|
||||
)
|
||||
return result
|
||||
|
||||
# Delete orphans
|
||||
logger.info(f"Deleting {len(orphans)} orphaned event snapshot files")
|
||||
for file_path in orphans:
|
||||
try:
|
||||
os.unlink(file_path)
|
||||
result.orphans_deleted += 1
|
||||
except OSError as e:
|
||||
logger.error(f"Failed to delete {file_path}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error syncing event snapshots: {e}")
|
||||
result.error = str(e)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def sync_event_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult:
|
||||
"""Sync event thumbnails - delete files not referenced by any event.
|
||||
|
||||
Event thumbnails are stored at: THUMB_DIR/{camera}/{event_id}.webp
|
||||
Only events without inline thumbnail (thumbnail field is None/empty) use files.
|
||||
"""
|
||||
result = SyncResult(media_type="event_thumbnails")
|
||||
|
||||
try:
|
||||
# Get all events that use file-based thumbnails
|
||||
# Events with thumbnail field populated don't need files
|
||||
events_with_file_thumbs = set(
|
||||
(e.camera, e.id)
|
||||
for e in Event.select(Event.id, Event.camera, Event.thumbnail).where(
|
||||
(Event.thumbnail.is_null(True)) | (Event.thumbnail == "")
|
||||
)
|
||||
)
|
||||
|
||||
# Find thumbnail files on disk
|
||||
thumbnail_files: list[
|
||||
tuple[str, str, str]
|
||||
] = [] # (full_path, camera, event_id)
|
||||
if os.path.isdir(THUMB_DIR):
|
||||
for camera_dir in os.listdir(THUMB_DIR):
|
||||
camera_path = os.path.join(THUMB_DIR, camera_dir)
|
||||
if not os.path.isdir(camera_path):
|
||||
continue
|
||||
for file in os.listdir(camera_path):
|
||||
if file.endswith(".webp"):
|
||||
event_id = file[:-5] # Remove .webp
|
||||
file_path = os.path.join(camera_path, file)
|
||||
thumbnail_files.append((file_path, camera_dir, event_id))
|
||||
|
||||
result.files_checked = len(thumbnail_files)
|
||||
|
||||
# Find orphans - files where event doesn't exist or event has inline thumbnail
|
||||
orphans: list[str] = []
|
||||
for file_path, camera, event_id in thumbnail_files:
|
||||
if (camera, event_id) not in events_with_file_thumbs:
|
||||
# Check if event exists with inline thumbnail
|
||||
event_exists = Event.select().where(Event.id == event_id).exists()
|
||||
if not event_exists:
|
||||
orphans.append(file_path)
|
||||
# If event exists with inline thumbnail, the file is also orphaned
|
||||
else:
|
||||
event = Event.get_or_none(Event.id == event_id)
|
||||
if event and event.thumbnail:
|
||||
orphans.append(file_path)
|
||||
|
||||
result.orphans_found = len(orphans)
|
||||
result.orphan_paths = orphans
|
||||
|
||||
if len(orphans) == 0:
|
||||
return result
|
||||
|
||||
# Safety check
|
||||
if (
|
||||
result.files_checked > 0
|
||||
and len(orphans) / result.files_checked > SAFETY_THRESHOLD
|
||||
):
|
||||
if force:
|
||||
logger.warning(
|
||||
f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
|
||||
"Aborting due to safety threshold."
|
||||
)
|
||||
result.aborted = True
|
||||
return result
|
||||
|
||||
if dry_run:
|
||||
logger.info(
|
||||
f"Event thumbnails sync (dry run): Found {len(orphans)} orphaned files"
|
||||
)
|
||||
return result
|
||||
|
||||
# Delete orphans
|
||||
logger.info(f"Deleting {len(orphans)} orphaned event thumbnail files")
|
||||
for file_path in orphans:
|
||||
try:
|
||||
os.unlink(file_path)
|
||||
result.orphans_deleted += 1
|
||||
except OSError as e:
|
||||
logger.error(f"Failed to delete {file_path}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error syncing event thumbnails: {e}")
|
||||
result.error = str(e)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def sync_review_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult:
|
||||
"""Sync review segment thumbnails - delete files not referenced by any review segment.
|
||||
|
||||
Review thumbnails are stored at: CLIPS_DIR/review/thumb-{camera}-{review_id}.webp
|
||||
The full path is stored in ReviewSegment.thumb_path
|
||||
"""
|
||||
result = SyncResult(media_type="review_thumbnails")
|
||||
|
||||
try:
|
||||
# Get all thumb paths from DB
|
||||
review_thumb_paths = set(
|
||||
r.thumb_path
|
||||
for r in ReviewSegment.select(ReviewSegment.thumb_path)
|
||||
if r.thumb_path
|
||||
)
|
||||
|
||||
# Find review thumbnail files on disk
|
||||
review_dir = os.path.join(CLIPS_DIR, "review")
|
||||
thumbnail_files: list[str] = []
|
||||
if os.path.isdir(review_dir):
|
||||
for file in os.listdir(review_dir):
|
||||
if file.startswith("thumb-") and file.endswith(".webp"):
|
||||
file_path = os.path.join(review_dir, file)
|
||||
thumbnail_files.append(file_path)
|
||||
|
||||
result.files_checked = len(thumbnail_files)
|
||||
|
||||
# Find orphans
|
||||
orphans: list[str] = []
|
||||
for file_path in thumbnail_files:
|
||||
if file_path not in review_thumb_paths:
|
||||
orphans.append(file_path)
|
||||
|
||||
result.orphans_found = len(orphans)
|
||||
result.orphan_paths = orphans
|
||||
|
||||
if len(orphans) == 0:
|
||||
return result
|
||||
|
||||
# Safety check
|
||||
if (
|
||||
result.files_checked > 0
|
||||
and len(orphans) / result.files_checked > SAFETY_THRESHOLD
|
||||
):
|
||||
if force:
|
||||
logger.warning(
|
||||
f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
|
||||
"Aborting due to safety threshold."
|
||||
)
|
||||
result.aborted = True
|
||||
return result
|
||||
|
||||
if dry_run:
|
||||
logger.info(
|
||||
f"Review thumbnails sync (dry run): Found {len(orphans)} orphaned files"
|
||||
)
|
||||
return result
|
||||
|
||||
# Delete orphans
|
||||
logger.info(f"Deleting {len(orphans)} orphaned review thumbnail files")
|
||||
for file_path in orphans:
|
||||
try:
|
||||
os.unlink(file_path)
|
||||
result.orphans_deleted += 1
|
||||
except OSError as e:
|
||||
logger.error(f"Failed to delete {file_path}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error syncing review thumbnails: {e}")
|
||||
result.error = str(e)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def sync_previews(dry_run: bool = False, force: bool = False) -> SyncResult:
|
||||
"""Sync preview files - delete files not referenced by any preview record.
|
||||
|
||||
Previews are stored at: CLIPS_DIR/previews/{camera}/*.mp4
|
||||
The full path is stored in Previews.path
|
||||
"""
|
||||
result = SyncResult(media_type="previews")
|
||||
|
||||
try:
|
||||
# Get all preview paths from DB
|
||||
preview_paths = set(p.path for p in Previews.select(Previews.path) if p.path)
|
||||
|
||||
# Find preview files on disk
|
||||
previews_dir = os.path.join(CLIPS_DIR, "previews")
|
||||
preview_files: list[str] = []
|
||||
if os.path.isdir(previews_dir):
|
||||
for camera_dir in os.listdir(previews_dir):
|
||||
camera_path = os.path.join(previews_dir, camera_dir)
|
||||
if not os.path.isdir(camera_path):
|
||||
continue
|
||||
for file in os.listdir(camera_path):
|
||||
if file.endswith(".mp4"):
|
||||
file_path = os.path.join(camera_path, file)
|
||||
preview_files.append(file_path)
|
||||
|
||||
result.files_checked = len(preview_files)
|
||||
|
||||
# Find orphans
|
||||
orphans: list[str] = []
|
||||
for file_path in preview_files:
|
||||
if file_path not in preview_paths:
|
||||
orphans.append(file_path)
|
||||
|
||||
result.orphans_found = len(orphans)
|
||||
result.orphan_paths = orphans
|
||||
|
||||
if len(orphans) == 0:
|
||||
return result
|
||||
|
||||
# Safety check
|
||||
if (
|
||||
result.files_checked > 0
|
||||
and len(orphans) / result.files_checked > SAFETY_THRESHOLD
|
||||
):
|
||||
if force:
|
||||
logger.warning(
|
||||
f"Previews sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Previews sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
|
||||
"Aborting due to safety threshold."
|
||||
)
|
||||
result.aborted = True
|
||||
return result
|
||||
|
||||
if dry_run:
|
||||
logger.info(f"Previews sync (dry run): Found {len(orphans)} orphaned files")
|
||||
return result
|
||||
|
||||
# Delete orphans
|
||||
logger.info(f"Deleting {len(orphans)} orphaned preview files")
|
||||
for file_path in orphans:
|
||||
try:
|
||||
os.unlink(file_path)
|
||||
result.orphans_deleted += 1
|
||||
except OSError as e:
|
||||
logger.error(f"Failed to delete {file_path}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error syncing previews: {e}")
|
||||
result.error = str(e)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def sync_exports(dry_run: bool = False, force: bool = False) -> SyncResult:
|
||||
"""Sync export files - delete files not referenced by any export record.
|
||||
|
||||
Export videos are stored at: EXPORT_DIR/*.mp4
|
||||
Export thumbnails are stored at: CLIPS_DIR/export/*.jpg
|
||||
The paths are stored in Export.video_path and Export.thumb_path
|
||||
"""
|
||||
result = SyncResult(media_type="exports")
|
||||
|
||||
try:
|
||||
# Get all export paths from DB
|
||||
export_video_paths = set()
|
||||
export_thumb_paths = set()
|
||||
for e in Export.select(Export.video_path, Export.thumb_path):
|
||||
if e.video_path:
|
||||
export_video_paths.add(e.video_path)
|
||||
if e.thumb_path:
|
||||
export_thumb_paths.add(e.thumb_path)
|
||||
|
||||
# Find export video files on disk
|
||||
export_files: list[str] = []
|
||||
if os.path.isdir(EXPORT_DIR):
|
||||
for file in os.listdir(EXPORT_DIR):
|
||||
if file.endswith(".mp4"):
|
||||
file_path = os.path.join(EXPORT_DIR, file)
|
||||
export_files.append(file_path)
|
||||
|
||||
# Find export thumbnail files on disk
|
||||
export_thumb_dir = os.path.join(CLIPS_DIR, "export")
|
||||
thumb_files: list[str] = []
|
||||
if os.path.isdir(export_thumb_dir):
|
||||
for file in os.listdir(export_thumb_dir):
|
||||
if file.endswith(".jpg"):
|
||||
file_path = os.path.join(export_thumb_dir, file)
|
||||
thumb_files.append(file_path)
|
||||
|
||||
result.files_checked = len(export_files) + len(thumb_files)
|
||||
|
||||
# Find orphans
|
||||
orphans: list[str] = []
|
||||
for file_path in export_files:
|
||||
if file_path not in export_video_paths:
|
||||
orphans.append(file_path)
|
||||
for file_path in thumb_files:
|
||||
if file_path not in export_thumb_paths:
|
||||
orphans.append(file_path)
|
||||
|
||||
result.orphans_found = len(orphans)
|
||||
result.orphan_paths = orphans
|
||||
|
||||
if len(orphans) == 0:
|
||||
return result
|
||||
|
||||
# Safety check
|
||||
if (
|
||||
result.files_checked > 0
|
||||
and len(orphans) / result.files_checked > SAFETY_THRESHOLD
|
||||
):
|
||||
if force:
|
||||
logger.warning(
|
||||
f"Exports sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"Exports sync: Would delete {len(orphans)}/{result.files_checked} "
|
||||
f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
|
||||
"Aborting due to safety threshold."
|
||||
)
|
||||
result.aborted = True
|
||||
return result
|
||||
|
||||
if dry_run:
|
||||
logger.info(f"Exports sync (dry run): Found {len(orphans)} orphaned files")
|
||||
return result
|
||||
|
||||
# Delete orphans
|
||||
logger.info(f"Deleting {len(orphans)} orphaned export files")
|
||||
for file_path in orphans:
|
||||
try:
|
||||
os.unlink(file_path)
|
||||
result.orphans_deleted += 1
|
||||
except OSError as e:
|
||||
logger.error(f"Failed to delete {file_path}: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error syncing exports: {e}")
|
||||
result.error = str(e)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@dataclass
|
||||
class MediaSyncResults:
|
||||
"""Combined results from all media sync operations."""
|
||||
|
||||
event_snapshots: SyncResult | None = None
|
||||
event_thumbnails: SyncResult | None = None
|
||||
review_thumbnails: SyncResult | None = None
|
||||
previews: SyncResult | None = None
|
||||
exports: SyncResult | None = None
|
||||
recordings: SyncResult | None = None
|
||||
|
||||
@property
|
||||
def total_files_checked(self) -> int:
|
||||
total = 0
|
||||
for result in [
|
||||
self.event_snapshots,
|
||||
self.event_thumbnails,
|
||||
self.review_thumbnails,
|
||||
self.previews,
|
||||
self.exports,
|
||||
self.recordings,
|
||||
]:
|
||||
if result:
|
||||
total += result.files_checked
|
||||
return total
|
||||
|
||||
@property
|
||||
def total_orphans_found(self) -> int:
|
||||
total = 0
|
||||
for result in [
|
||||
self.event_snapshots,
|
||||
self.event_thumbnails,
|
||||
self.review_thumbnails,
|
||||
self.previews,
|
||||
self.exports,
|
||||
self.recordings,
|
||||
]:
|
||||
if result:
|
||||
total += result.orphans_found
|
||||
return total
|
||||
|
||||
@property
|
||||
def total_orphans_deleted(self) -> int:
|
||||
total = 0
|
||||
for result in [
|
||||
self.event_snapshots,
|
||||
self.event_thumbnails,
|
||||
self.review_thumbnails,
|
||||
self.previews,
|
||||
self.exports,
|
||||
self.recordings,
|
||||
]:
|
||||
if result:
|
||||
total += result.orphans_deleted
|
||||
return total
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert results to dictionary for API response."""
|
||||
results = {}
|
||||
for name, result in [
|
||||
("event_snapshots", self.event_snapshots),
|
||||
("event_thumbnails", self.event_thumbnails),
|
||||
("review_thumbnails", self.review_thumbnails),
|
||||
("previews", self.previews),
|
||||
("exports", self.exports),
|
||||
("recordings", self.recordings),
|
||||
]:
|
||||
if result:
|
||||
results[name] = {
|
||||
"files_checked": result.files_checked,
|
||||
"orphans_found": result.orphans_found,
|
||||
"orphans_deleted": result.orphans_deleted,
|
||||
"aborted": result.aborted,
|
||||
"error": result.error,
|
||||
}
|
||||
results["totals"] = {
|
||||
"files_checked": self.total_files_checked,
|
||||
"orphans_found": self.total_orphans_found,
|
||||
"orphans_deleted": self.total_orphans_deleted,
|
||||
}
|
||||
return results
|
||||
|
||||
|
||||
def sync_all_media(
|
||||
dry_run: bool = False, media_types: list[str] = ["all"], force: bool = False
|
||||
) -> MediaSyncResults:
|
||||
"""Sync specified media types with the database.
|
||||
|
||||
Args:
|
||||
dry_run: If True, only report orphans without deleting them.
|
||||
media_types: List of media types to sync. Can include: 'all', 'event_snapshots',
|
||||
'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'
|
||||
force: If True, bypass safety threshold checks.
|
||||
|
||||
Returns:
|
||||
MediaSyncResults with details of each sync operation.
|
||||
"""
|
||||
logger.debug(
|
||||
f"Starting media sync (dry_run={dry_run}, media_types={media_types}, force={force})"
|
||||
)
|
||||
|
||||
results = MediaSyncResults()
|
||||
|
||||
# Determine which media types to sync
|
||||
sync_all = "all" in media_types
|
||||
|
||||
if sync_all or "event_snapshots" in media_types:
|
||||
results.event_snapshots = sync_event_snapshots(dry_run=dry_run, force=force)
|
||||
|
||||
if sync_all or "event_thumbnails" in media_types:
|
||||
results.event_thumbnails = sync_event_thumbnails(dry_run=dry_run, force=force)
|
||||
|
||||
if sync_all or "review_thumbnails" in media_types:
|
||||
results.review_thumbnails = sync_review_thumbnails(dry_run=dry_run, force=force)
|
||||
|
||||
if sync_all or "previews" in media_types:
|
||||
results.previews = sync_previews(dry_run=dry_run, force=force)
|
||||
|
||||
if sync_all or "exports" in media_types:
|
||||
results.exports = sync_exports(dry_run=dry_run, force=force)
|
||||
|
||||
if sync_all or "recordings" in media_types:
|
||||
results.recordings = sync_recordings(dry_run=dry_run, force=force)
|
||||
|
||||
logger.info(
|
||||
f"Media sync complete: checked {results.total_files_checked} files, "
|
||||
f"found {results.total_orphans_found} orphans, "
|
||||
f"deleted {results.total_orphans_deleted}"
|
||||
)
|
||||
|
||||
return results
|
||||
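# Example usage (illustrative sketch): report orphans across every media type without
# deleting anything, then inspect the combined totals from MediaSyncResults.to_dict().
#
#   results = sync_all_media(dry_run=True, media_types=["all"])
#   summary = results.to_dict()
#   print(summary["totals"])  # e.g. {"files_checked": 1523, "orphans_found": 4, "orphans_deleted": 0}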
@@ -417,12 +417,12 @@ def get_openvino_npu_stats() -> Optional[dict[str, str]]:
|
||||
else:
|
||||
usage = 0.0
|
||||
|
||||
return {"npu": f"{round(usage, 2)}", "mem": "-"}
|
||||
return {"npu": f"{round(usage, 2)}", "mem": "-%"}
|
||||
except (FileNotFoundError, PermissionError, ValueError):
|
||||
return None
|
||||
|
||||
|
||||
def get_rockchip_gpu_stats() -> Optional[dict[str, str]]:
|
||||
def get_rockchip_gpu_stats() -> Optional[dict[str, str | float]]:
|
||||
"""Get GPU stats using rk."""
|
||||
try:
|
||||
with open("/sys/kernel/debug/rkrga/load", "r") as f:
|
||||
@@ -440,7 +440,16 @@ def get_rockchip_gpu_stats() -> Optional[dict[str, str]]:
|
||||
return None
|
||||
|
||||
average_load = f"{round(sum(load_values) / len(load_values), 2)}%"
|
||||
return {"gpu": average_load, "mem": "-"}
|
||||
stats: dict[str, str | float] = {"gpu": average_load, "mem": "-%"}
|
||||
|
||||
try:
|
||||
with open("/sys/class/thermal/thermal_zone5/temp", "r") as f:
|
||||
line = f.readline().strip()
|
||||
stats["temp"] = round(int(line) / 1000, 1)
|
||||
except (FileNotFoundError, OSError, ValueError):
|
||||
pass
|
||||
|
||||
return stats
|
||||
|
||||
|
||||
def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]:
|
||||
@@ -463,13 +472,25 @@ def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]:
|
||||
|
||||
percentages = [int(load) for load in core_loads]
|
||||
mean = round(sum(percentages) / len(percentages), 2)
|
||||
return {"npu": mean, "mem": "-"}
|
||||
stats: dict[str, float | str] = {"npu": mean, "mem": "-%"}
|
||||
|
||||
try:
|
||||
with open("/sys/class/thermal/thermal_zone6/temp", "r") as f:
|
||||
line = f.readline().strip()
|
||||
stats["temp"] = round(int(line) / 1000, 1)
|
||||
except (FileNotFoundError, OSError, ValueError):
|
||||
pass
|
||||
|
||||
return stats
|
||||
|
||||
|
||||
def try_get_info(f, h, default="N/A"):
|
||||
def try_get_info(f, h, default="N/A", sensor=None):
|
||||
try:
|
||||
if h:
|
||||
v = f(h)
|
||||
if sensor is not None:
|
||||
v = f(h, sensor)
|
||||
else:
|
||||
v = f(h)
|
||||
else:
|
||||
v = f()
|
||||
except nvml.NVMLError_NotSupported:
|
||||
@@ -498,6 +519,9 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
|
||||
util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
|
||||
enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle)
|
||||
dec = try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle)
|
||||
temp = try_get_info(
|
||||
nvml.nvmlDeviceGetTemperature, handle, default=None, sensor=0
|
||||
)
|
||||
pstate = try_get_info(nvml.nvmlDeviceGetPowerState, handle, default=None)
|
||||
|
||||
if util != "N/A":
|
||||
@@ -510,6 +534,11 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
|
||||
else:
|
||||
gpu_mem_util = -1
|
||||
|
||||
if temp != "N/A" and temp is not None:
|
||||
temp = float(temp)
|
||||
else:
|
||||
temp = None
|
||||
|
||||
if enc != "N/A":
|
||||
enc_util = enc[0]
|
||||
else:
|
||||
@@ -527,6 +556,7 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
|
||||
"enc": enc_util,
|
||||
"dec": dec_util,
|
||||
"pstate": pstate or "unknown",
|
||||
"temp": temp,
|
||||
}
|
||||
except Exception:
|
||||
pass
|
||||
@@ -556,6 +586,53 @@ def get_jetson_stats() -> Optional[dict[int, dict]]:
|
||||
return results
|
||||
|
||||
|
||||
def get_hailo_temps() -> dict[str, float]:
|
||||
"""Get temperatures for Hailo devices."""
|
||||
try:
|
||||
from hailo_platform import Device
|
||||
except ModuleNotFoundError:
|
||||
return {}
|
||||
|
||||
temps = {}
|
||||
|
||||
try:
|
||||
device_ids = Device.scan()
|
||||
for i, device_id in enumerate(device_ids):
|
||||
try:
|
||||
with Device(device_id) as device:
|
||||
temp_info = device.control.get_chip_temperature()
|
||||
|
||||
# Get board name and normalise it
|
||||
identity = device.control.identify()
|
||||
board_name = None
|
||||
for line in str(identity).split("\n"):
|
||||
if line.startswith("Board Name:"):
|
||||
board_name = (
|
||||
line.split(":", 1)[1].strip().lower().replace("-", "")
|
||||
)
|
||||
break
|
||||
|
||||
if not board_name:
|
||||
board_name = f"hailo{i}"
|
||||
|
||||
# Use indexed name if multiple devices, otherwise just the board name
|
||||
device_name = (
|
||||
f"{board_name}-{i}" if len(device_ids) > 1 else board_name
|
||||
)
|
||||
|
||||
# ts1_temperature is also available, but appeared to be the same as ts0 in testing.
|
||||
temps[device_name] = round(temp_info.ts0_temperature, 1)
|
||||
except Exception as e:
|
||||
logger.debug(
|
||||
f"Failed to get temperature for Hailo device {device_id}: {e}"
|
||||
)
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to scan for Hailo devices: {e}")
|
||||
|
||||
return temps
|
||||
|
||||
|
||||
def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedProcess:
|
||||
"""Run ffprobe on stream."""
|
||||
clean_path = escape_special_characters(path)
|
||||
@@ -591,12 +668,17 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro
|
||||
|
||||
def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
|
||||
"""Run vainfo."""
|
||||
ffprobe_cmd = (
|
||||
["vainfo"]
|
||||
if not device_name
|
||||
else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
|
||||
)
|
||||
return sp.run(ffprobe_cmd, capture_output=True)
|
||||
if not device_name:
|
||||
cmd = ["vainfo"]
|
||||
else:
|
||||
if os.path.isabs(device_name) and device_name.startswith("/dev/dri/"):
|
||||
device_path = device_name
|
||||
else:
|
||||
device_path = f"/dev/dri/{device_name}"
|
||||
|
||||
cmd = ["vainfo", "--display", "drm", "--device", device_path]
|
||||
|
||||
return sp.run(cmd, capture_output=True)
|
||||
|
||||
|
||||
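# Illustrative effect of the device handling above (device names are just examples):
#   vainfo_hwaccel("renderD128")          -> vainfo --display drm --device /dev/dri/renderD128
#   vainfo_hwaccel("/dev/dri/renderD129") -> vainfo --display drm --device /dev/dri/renderD129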
def get_nvidia_driver_info() -> dict[str, Any]:
|
||||
|
||||
frigate/video.py
@@ -3,6 +3,7 @@ import queue
|
||||
import subprocess as sp
|
||||
import threading
|
||||
import time
|
||||
from collections import deque
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from multiprocessing import Queue, Value
|
||||
from multiprocessing.synchronize import Event as MpEvent
|
||||
@@ -117,6 +118,7 @@ def capture_frames(
|
||||
frame_rate.start()
|
||||
skipped_eps = EventsPerSecond()
|
||||
skipped_eps.start()
|
||||
|
||||
config_subscriber = CameraConfigUpdateSubscriber(
|
||||
None, {config.name: config}, [CameraConfigUpdateEnum.enabled]
|
||||
)
|
||||
@@ -181,6 +183,9 @@ class CameraWatchdog(threading.Thread):
|
||||
camera_fps,
|
||||
skipped_fps,
|
||||
ffmpeg_pid,
|
||||
stalls,
|
||||
reconnects,
|
||||
detection_frame,
|
||||
stop_event,
|
||||
):
|
||||
threading.Thread.__init__(self)
|
||||
@@ -201,6 +206,10 @@ class CameraWatchdog(threading.Thread):
|
||||
self.frame_index = 0
|
||||
self.stop_event = stop_event
|
||||
self.sleeptime = self.config.ffmpeg.retry_interval
|
||||
self.reconnect_timestamps = deque()
|
||||
self.stalls = stalls
|
||||
self.reconnects = reconnects
|
||||
self.detection_frame = detection_frame
|
||||
|
||||
self.config_subscriber = CameraConfigUpdateSubscriber(
|
||||
None,
|
||||
@@ -214,6 +223,36 @@ class CameraWatchdog(threading.Thread):
|
||||
self.latest_valid_segment_time: float = 0
|
||||
self.latest_invalid_segment_time: float = 0
|
||||
self.latest_cache_segment_time: float = 0
|
||||
self.record_enable_time: datetime | None = None
|
||||
|
||||
# Stall tracking (based on last processed frame)
|
||||
self._stall_timestamps: deque[float] = deque()
|
||||
self._stall_active: bool = False
|
||||
|
||||
# Status caching to reduce message volume
|
||||
self._last_detect_status: str | None = None
|
||||
self._last_record_status: str | None = None
|
||||
self._last_status_update_time: float = 0.0
|
||||
|
||||
def _send_detect_status(self, status: str, now: float) -> None:
|
||||
"""Send detect status only if changed or retry_interval has elapsed."""
|
||||
if (
|
||||
status != self._last_detect_status
|
||||
or (now - self._last_status_update_time) >= self.sleeptime
|
||||
):
|
||||
self.requestor.send_data(f"{self.config.name}/status/detect", status)
|
||||
self._last_detect_status = status
|
||||
self._last_status_update_time = now
|
||||
|
||||
def _send_record_status(self, status: str, now: float) -> None:
|
||||
"""Send record status only if changed or retry_interval has elapsed."""
|
||||
if (
|
||||
status != self._last_record_status
|
||||
or (now - self._last_status_update_time) >= self.sleeptime
|
||||
):
|
||||
self.requestor.send_data(f"{self.config.name}/status/record", status)
|
||||
self._last_record_status = status
|
||||
self._last_status_update_time = now
|
||||
|
||||
def _update_enabled_state(self) -> bool:
|
||||
"""Fetch the latest config and update enabled state."""
|
||||
@@ -241,6 +280,14 @@ class CameraWatchdog(threading.Thread):
|
||||
else:
|
||||
self.ffmpeg_detect_process.wait()
|
||||
|
||||
# Update reconnects
|
||||
now = datetime.now().timestamp()
|
||||
self.reconnect_timestamps.append(now)
|
||||
while self.reconnect_timestamps and self.reconnect_timestamps[0] < now - 3600:
|
||||
self.reconnect_timestamps.popleft()
|
||||
if self.reconnects:
|
||||
self.reconnects.value = len(self.reconnect_timestamps)
|
||||
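# Net effect: reconnect_timestamps only keeps restarts from the trailing 3600 seconds,
# so reconnects.value reports the camera's restart count over the last hour.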
|
||||
# Wait for old capture thread to fully exit before starting a new one
|
||||
if self.capture_thread is not None and self.capture_thread.is_alive():
|
||||
self.logger.info("Waiting for capture thread to exit...")
|
||||
@@ -261,30 +308,35 @@ class CameraWatchdog(threading.Thread):
|
||||
def run(self) -> None:
|
||||
if self._update_enabled_state():
|
||||
self.start_all_ffmpeg()
|
||||
# If recording is enabled at startup, set the grace period timer
|
||||
if self.config.record.enabled:
|
||||
self.record_enable_time = datetime.now().astimezone(timezone.utc)
|
||||
|
||||
time.sleep(self.sleeptime)
|
||||
while not self.stop_event.wait(self.sleeptime):
|
||||
last_restart_time = datetime.now().timestamp()
|
||||
|
||||
# 1 second watchdog loop
|
||||
while not self.stop_event.wait(1):
|
||||
enabled = self._update_enabled_state()
|
||||
if enabled != self.was_enabled:
|
||||
if enabled:
|
||||
self.logger.debug(f"Enabling camera {self.config.name}")
|
||||
self.start_all_ffmpeg()
|
||||
|
||||
# reset all timestamps
|
||||
# reset all timestamps and record the enable time for grace period
|
||||
self.latest_valid_segment_time = 0
|
||||
self.latest_invalid_segment_time = 0
|
||||
self.latest_cache_segment_time = 0
|
||||
self.record_enable_time = datetime.now().astimezone(timezone.utc)
|
||||
else:
|
||||
self.logger.debug(f"Disabling camera {self.config.name}")
|
||||
self.stop_all_ffmpeg()
|
||||
self.record_enable_time = None
|
||||
|
||||
# update camera status
|
||||
self.requestor.send_data(
|
||||
f"{self.config.name}/status/detect", "disabled"
|
||||
)
|
||||
self.requestor.send_data(
|
||||
f"{self.config.name}/status/record", "disabled"
|
||||
)
|
||||
now = datetime.now().timestamp()
|
||||
self._send_detect_status("disabled", now)
|
||||
self._send_record_status("disabled", now)
|
||||
self.was_enabled = enabled
|
||||
continue
|
||||
|
||||
@@ -323,36 +375,44 @@ class CameraWatchdog(threading.Thread):
|
||||
|
||||
now = datetime.now().timestamp()
|
||||
|
||||
# Check if enough time has passed to allow ffmpeg restart (backoff pacing)
|
||||
time_since_last_restart = now - last_restart_time
|
||||
can_restart = time_since_last_restart >= self.sleeptime
|
||||
|
||||
if not self.capture_thread.is_alive():
|
||||
self.requestor.send_data(f"{self.config.name}/status/detect", "offline")
|
||||
self._send_detect_status("offline", now)
|
||||
self.camera_fps.value = 0
|
||||
self.logger.error(
|
||||
f"Ffmpeg process crashed unexpectedly for {self.config.name}."
|
||||
)
|
||||
self.reset_capture_thread(terminate=False)
|
||||
if can_restart:
|
||||
self.reset_capture_thread(terminate=False)
|
||||
last_restart_time = now
|
||||
elif self.camera_fps.value >= (self.config.detect.fps + 10):
|
||||
self.fps_overflow_count += 1
|
||||
|
||||
if self.fps_overflow_count == 3:
|
||||
self.requestor.send_data(
|
||||
f"{self.config.name}/status/detect", "offline"
|
||||
)
|
||||
self._send_detect_status("offline", now)
|
||||
self.fps_overflow_count = 0
|
||||
self.camera_fps.value = 0
|
||||
self.logger.info(
|
||||
f"{self.config.name} exceeded fps limit. Exiting ffmpeg..."
|
||||
)
|
||||
self.reset_capture_thread(drain_output=False)
|
||||
if can_restart:
|
||||
self.reset_capture_thread(drain_output=False)
|
||||
last_restart_time = now
|
||||
elif now - self.capture_thread.current_frame.value > 20:
|
||||
self.requestor.send_data(f"{self.config.name}/status/detect", "offline")
|
||||
self._send_detect_status("offline", now)
|
||||
self.camera_fps.value = 0
|
||||
self.logger.info(
|
||||
f"No frames received from {self.config.name} in 20 seconds. Exiting ffmpeg..."
|
||||
)
|
||||
self.reset_capture_thread()
|
||||
if can_restart:
|
||||
self.reset_capture_thread()
|
||||
last_restart_time = now
|
||||
else:
|
||||
# process is running normally
|
||||
self.requestor.send_data(f"{self.config.name}/status/detect", "online")
|
||||
self._send_detect_status("online", now)
|
||||
self.fps_overflow_count = 0
|
||||
|
||||
for p in self.ffmpeg_other_processes:
|
||||
@@ -361,6 +421,12 @@ class CameraWatchdog(threading.Thread):
|
||||
if self.config.record.enabled and "record" in p["roles"]:
|
||||
now_utc = datetime.now().astimezone(timezone.utc)
|
||||
|
||||
# Check if we're within the grace period after enabling recording
|
||||
# Grace period: 90 seconds allows time for ffmpeg to start and create first segment
|
||||
in_grace_period = self.record_enable_time is not None and (
|
||||
now_utc - self.record_enable_time
|
||||
) < timedelta(seconds=90)
|
||||
|
||||
latest_cache_dt = (
|
||||
datetime.fromtimestamp(
|
||||
self.latest_cache_segment_time, tz=timezone.utc
|
||||
@@ -386,10 +452,16 @@ class CameraWatchdog(threading.Thread):
|
||||
)
|
||||
|
||||
# ensure segments are still being created and that they have valid video data
|
||||
cache_stale = now_utc > (latest_cache_dt + timedelta(seconds=120))
|
||||
valid_stale = now_utc > (latest_valid_dt + timedelta(seconds=120))
|
||||
# Skip checks during grace period to allow segments to start being created
|
||||
cache_stale = not in_grace_period and now_utc > (
|
||||
latest_cache_dt + timedelta(seconds=120)
|
||||
)
|
||||
valid_stale = not in_grace_period and now_utc > (
|
||||
latest_valid_dt + timedelta(seconds=120)
|
||||
)
|
||||
invalid_stale_condition = (
|
||||
self.latest_invalid_segment_time > 0
|
||||
and not in_grace_period
|
||||
and now_utc > (latest_invalid_dt + timedelta(seconds=120))
|
||||
and self.latest_valid_segment_time
|
||||
<= self.latest_invalid_segment_time
|
||||
@@ -423,9 +495,7 @@ class CameraWatchdog(threading.Thread):
|
||||
|
||||
continue
|
||||
else:
|
||||
self.requestor.send_data(
|
||||
f"{self.config.name}/status/record", "online"
|
||||
)
|
||||
self._send_record_status("online", now)
|
||||
p["latest_segment_time"] = self.latest_cache_segment_time
|
||||
|
||||
if poll is None:
|
||||
@@ -441,6 +511,34 @@ class CameraWatchdog(threading.Thread):
|
||||
p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]
|
||||
)
|
||||
|
||||
# Update stall metrics based on last processed frame timestamp
|
||||
now = datetime.now().timestamp()
|
||||
processed_ts = (
|
||||
float(self.detection_frame.value) if self.detection_frame else 0.0
|
||||
)
|
||||
if processed_ts > 0:
|
||||
delta = now - processed_ts
|
||||
observed_fps = (
|
||||
self.camera_fps.value
|
||||
if self.camera_fps.value > 0
|
||||
else self.config.detect.fps
|
||||
)
|
||||
interval = 1.0 / max(observed_fps, 0.1)
|
||||
stall_threshold = max(2.0 * interval, 2.0)
|
||||
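# Worked example (illustrative): at observed_fps = 5.0, interval = 0.2 s and
# stall_threshold = max(0.4, 2.0) = 2.0 s; at 0.5 fps, interval = 2.0 s and the
# threshold grows to max(4.0, 2.0) = 4.0 s, so low-fps cameras are not flagged early.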
|
||||
if delta > stall_threshold:
|
||||
if not self._stall_active:
|
||||
self._stall_timestamps.append(now)
|
||||
self._stall_active = True
|
||||
else:
|
||||
self._stall_active = False
|
||||
|
||||
while self._stall_timestamps and self._stall_timestamps[0] < now - 3600:
|
||||
self._stall_timestamps.popleft()
|
||||
|
||||
if self.stalls:
|
||||
self.stalls.value = len(self._stall_timestamps)
|
||||
|
||||
self.stop_all_ffmpeg()
|
||||
self.logpipe.close()
|
||||
self.config_subscriber.stop()
|
||||
@@ -578,6 +676,9 @@ class CameraCapture(FrigateProcess):
|
||||
self.camera_metrics.camera_fps,
|
||||
self.camera_metrics.skipped_fps,
|
||||
self.camera_metrics.ffmpeg_pid,
|
||||
self.camera_metrics.stalls_last_hour,
|
||||
self.camera_metrics.reconnects_last_hour,
|
||||
self.camera_metrics.detection_frame,
|
||||
self.stop_event,
|
||||
)
|
||||
camera_watchdog.start()
|
||||
|
||||
migrations/033_create_export_case_table.py (new file)
@@ -0,0 +1,50 @@
|
||||
"""Peewee migrations -- 033_create_export_case_table.py.
|
||||
|
||||
Some examples (model - class or model name)::
|
||||
|
||||
> Model = migrator.orm['model_name'] # Return model in current state by name
|
||||
|
||||
> migrator.sql(sql) # Run custom SQL
|
||||
> migrator.python(func, *args, **kwargs) # Run python code
|
||||
> migrator.create_model(Model) # Create a model (could be used as decorator)
|
||||
> migrator.remove_model(model, cascade=True) # Remove a model
|
||||
> migrator.add_fields(model, **fields) # Add fields to a model
|
||||
> migrator.change_fields(model, **fields) # Change fields
|
||||
> migrator.remove_fields(model, *field_names, cascade=True)
|
||||
> migrator.rename_field(model, old_field_name, new_field_name)
|
||||
> migrator.rename_table(model, new_table_name)
|
||||
> migrator.add_index(model, *col_names, unique=False)
|
||||
> migrator.drop_index(model, *col_names)
|
||||
> migrator.add_not_null(model, *field_names)
|
||||
> migrator.drop_not_null(model, *field_names)
|
||||
> migrator.add_default(model, field_name, default)
|
||||
|
||||
"""
|
||||
|
||||
import peewee as pw
|
||||
|
||||
SQL = pw.SQL
|
||||
|
||||
|
||||
def migrate(migrator, database, fake=False, **kwargs):
|
||||
migrator.sql(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS "exportcase" (
|
||||
"id" VARCHAR(30) NOT NULL PRIMARY KEY,
|
||||
"name" VARCHAR(100) NOT NULL,
|
||||
"description" TEXT NULL,
|
||||
"created_at" DATETIME NOT NULL,
|
||||
"updated_at" DATETIME NOT NULL
|
||||
)
|
||||
"""
|
||||
)
|
||||
migrator.sql(
|
||||
'CREATE INDEX IF NOT EXISTS "exportcase_name" ON "exportcase" ("name")'
|
||||
)
|
||||
migrator.sql(
|
||||
'CREATE INDEX IF NOT EXISTS "exportcase_created_at" ON "exportcase" ("created_at")'
|
||||
)
|
||||
|
||||
|
||||
def rollback(migrator, database, fake=False, **kwargs):
|
||||
pass
|
||||
migrations/034_add_export_case_to_exports.py (new file)
@@ -0,0 +1,40 @@
|
||||
"""Peewee migrations -- 034_add_export_case_to_exports.py.
|
||||
|
||||
Some examples (model - class or model name)::
|
||||
|
||||
> Model = migrator.orm['model_name'] # Return model in current state by name
|
||||
|
||||
> migrator.sql(sql) # Run custom SQL
|
||||
> migrator.python(func, *args, **kwargs) # Run python code
|
||||
> migrator.create_model(Model) # Create a model (could be used as decorator)
|
||||
> migrator.remove_model(model, cascade=True) # Remove a model
|
||||
> migrator.add_fields(model, **fields) # Add fields to a model
|
||||
> migrator.change_fields(model, **fields) # Change fields
|
||||
> migrator.remove_fields(model, *field_names, cascade=True)
|
||||
> migrator.rename_field(model, old_field_name, new_field_name)
|
||||
> migrator.rename_table(model, new_table_name)
|
||||
> migrator.add_index(model, *col_names, unique=False)
|
||||
> migrator.drop_index(model, *col_names)
|
||||
> migrator.add_not_null(model, *field_names)
|
||||
> migrator.drop_not_null(model, *field_names)
|
||||
> migrator.add_default(model, field_name, default)
|
||||
|
||||
"""
|
||||
|
||||
import peewee as pw
|
||||
|
||||
SQL = pw.SQL
|
||||
|
||||
|
||||
def migrate(migrator, database, fake=False, **kwargs):
|
||||
# Add nullable export_case_id column to export table
|
||||
migrator.sql('ALTER TABLE "export" ADD COLUMN "export_case_id" VARCHAR(30) NULL')
|
||||
|
||||
# Index for faster case-based queries
|
||||
migrator.sql(
|
||||
'CREATE INDEX IF NOT EXISTS "export_export_case_id" ON "export" ("export_case_id")'
|
||||
)
|
||||
|
||||
|
||||
def rollback(migrator, database, fake=False, **kwargs):
|
||||
pass
|
||||
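# Illustrative query against the new column (the case id below is hypothetical):
#   SELECT id, video_path FROM export WHERE export_case_id = 'abc123';
# The "export_export_case_id" index created above keeps case-based lookups cheap.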
web/package-lock.json (generated; diff suppressed because it is too large)
@@ -71,6 +71,7 @@
|
||||
"react-icons": "^5.5.0",
|
||||
"react-konva": "^18.2.10",
|
||||
"react-router-dom": "^6.30.3",
|
||||
"react-markdown": "^9.0.1",
|
||||
"react-swipeable": "^7.0.2",
|
||||
"react-tracked": "^2.0.1",
|
||||
"react-transition-group": "^4.4.5",
|
||||
|
||||
@@ -48,7 +48,8 @@
|
||||
"bg": "Български (Búlgar)",
|
||||
"gl": "Galego (Gallec)",
|
||||
"id": "Bahasa Indonesia (Indonesi)",
|
||||
"ur": "اردو (Urdú)"
|
||||
"ur": "اردو (Urdú)",
|
||||
"hr": "Hrvatski (croat)"
|
||||
},
|
||||
"system": "Sistema",
|
||||
"systemMetrics": "Mètriques del sistema",
|
||||
|
||||
@@ -19,7 +19,8 @@
|
||||
"description": {
|
||||
"addFace": "Afegiu una col·lecció nova a la biblioteca de cares pujant la vostra primera imatge.",
|
||||
"placeholder": "Introduïu un nom per a aquesta col·lecció",
|
||||
"invalidName": "Nom no vàlid. Els noms només poden incloure lletres, números, espais, apòstrofs, guions baixos i guions."
|
||||
"invalidName": "Nom no vàlid. Els noms només poden incloure lletres, números, espais, apòstrofs, guions baixos i guions.",
|
||||
"nameCannotContainHash": "El nom no pot contenir #."
|
||||
},
|
||||
"documentTitle": "Biblioteca de rostres - Frigate",
|
||||
"uploadFaceImage": {
|
||||
|
||||
@@ -532,7 +532,7 @@
|
||||
"hide": "Amaga contrasenya",
|
||||
"requirements": {
|
||||
"title": "Requisits contrasenya:",
|
||||
"length": "Com a mínim 8 carácters",
|
||||
"length": "Com a mínim 12 carácters",
|
||||
"uppercase": "Com a mínim una majúscula",
|
||||
"digit": "Com a mínim un digit",
|
||||
"special": "Com a mínim un carácter especial (!@#$%^&*(),.?\":{}|<>)"
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"harp": "Harpe",
|
||||
"bell": "Klokke",
|
||||
"harmonica": "Harmonika",
|
||||
"bagpipes": "Sækkepibe",
|
||||
"bagpipes": "Sækkepiber",
|
||||
"didgeridoo": "Didgeridoo",
|
||||
"jazz": "Jazz",
|
||||
"opera": "Opera",
|
||||
@@ -78,7 +78,7 @@
|
||||
"camera": "Kamera",
|
||||
"tools": "Værktøj",
|
||||
"hammer": "Hammer",
|
||||
"drill": "Bore",
|
||||
"drill": "Boremaskine",
|
||||
"explosion": "Eksplosion",
|
||||
"fireworks": "Nytårskrudt",
|
||||
"babbling": "Pludren",
|
||||
|
||||
@@ -193,7 +193,8 @@
|
||||
"bg": "Български (Bulgarsk)",
|
||||
"gl": "Galego (Galisisk)",
|
||||
"id": "Bahasa Indonesia (Indonesisk)",
|
||||
"ur": "اردو (Urdu)"
|
||||
"ur": "اردو (Urdu)",
|
||||
"hr": "Hrvatski (Kroatisk)"
|
||||
},
|
||||
"appearance": "Udseende",
|
||||
"darkMode": {
|
||||
@@ -221,7 +222,7 @@
|
||||
},
|
||||
"restart": "Genstart Frigate",
|
||||
"live": {
|
||||
"title": "Live",
|
||||
"title": "Direkte",
|
||||
"allCameras": "Alle kameraer",
|
||||
"cameras": {
|
||||
"title": "Kameraer",
|
||||
@@ -240,17 +241,17 @@
|
||||
"current": "Aktiv bruger: {{user}}",
|
||||
"anonymous": "anonym",
|
||||
"logout": "Log ud",
|
||||
"setPassword": "Set Password"
|
||||
"setPassword": "Vælg kodeord"
|
||||
},
|
||||
"classification": "Kategorisering"
|
||||
},
|
||||
"toast": {
|
||||
"copyUrlToClipboard": "Kopieret URL til klippebord.",
|
||||
"copyUrlToClipboard": "Kopieret URL til udklipsholder.",
|
||||
"save": {
|
||||
"title": "Gem",
|
||||
"error": {
|
||||
"title": "Ændringer kan ikke gemmes: {{errorMessage}}",
|
||||
"noMessage": "Kan ikke gemme konfigurationsændringer"
|
||||
"title": "Ændringer kunne ikke gemmes: {{errorMessage}}",
|
||||
"noMessage": "Kunne ikke gemme konfigurationsændringer"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -261,7 +262,7 @@
|
||||
"desc": "Admins har fuld adgang til Frigate UI. Viewers er begrænset til at se kameraer, gennemse items, og historik i UI."
|
||||
},
|
||||
"pagination": {
|
||||
"label": "paginering",
|
||||
"label": "sideinddeling",
|
||||
"previous": {
|
||||
"title": "Forrige",
|
||||
"label": "Gå til forrige side"
|
||||
@@ -273,9 +274,9 @@
|
||||
"more": "Flere sider"
|
||||
},
|
||||
"accessDenied": {
|
||||
"documentTitle": "Adgang forbudt - Frigate",
|
||||
"title": "Adgang forbudt",
|
||||
"desc": "Du har ikke tiiladelse til at se denne side."
|
||||
"documentTitle": "Adgang nægtet - Frigate",
|
||||
"title": "Adgang nægtet",
|
||||
"desc": "Du har ikke rettigheder til at se denne side."
|
||||
},
|
||||
"notFound": {
|
||||
"documentTitle": "Ikke fundet - Frigate",
|
||||
|
||||
@@ -4,13 +4,13 @@
|
||||
"cameraDisabled": "Kamera er deaktiveret",
|
||||
"noPreviewFoundFor": "Ingen forhåndsvisning fundet for {{cameraName}}",
|
||||
"submitFrigatePlus": {
|
||||
"title": "Indsend denne frame til Frigate+?",
|
||||
"title": "Indsend dette billede til Frigate+?",
|
||||
"submit": "Indsend"
|
||||
},
|
||||
"livePlayerRequiredIOSVersion": "iOS 17.1 eller nyere kræves for denne type livestream.",
|
||||
"streamOffline": {
|
||||
"title": "Stream offline",
|
||||
"desc": "Der er ikke modtaget nogen frames på {{cameraName}}-<code>detect</code>-streamen, tjek fejlloggene."
|
||||
"desc": "Der er ikke modtaget nogen billeder på {{cameraName}}-<code>detect</code>-streamen, tjek fejllogs."
|
||||
},
|
||||
"stats": {
|
||||
"streamType": {
|
||||
@@ -18,8 +18,8 @@
|
||||
"short": "Type"
|
||||
},
|
||||
"bandwidth": {
|
||||
"title": "Bandbredde:",
|
||||
"short": "Bandbredde"
|
||||
"title": "Båndbredde:",
|
||||
"short": "Båndbredde"
|
||||
},
|
||||
"latency": {
|
||||
"title": "Latenstid:",
|
||||
@@ -31,8 +31,21 @@
|
||||
},
|
||||
"droppedFrames": {
|
||||
"short": {
|
||||
"title": "Tabt"
|
||||
}
|
||||
"title": "Tabt",
|
||||
"value": "{{droppedFrames}} billeder"
|
||||
},
|
||||
"title": "Tabte billeder:"
|
||||
},
|
||||
"totalFrames": "Antal billeder i alt:",
|
||||
"decodedFrames": "Dekodede billeder:",
|
||||
"droppedFrameRate": "Rate for tabte billeder:"
|
||||
},
|
||||
"toast": {
|
||||
"success": {
|
||||
"submittedFrigatePlus": "Billede sendt til Frigate+"
|
||||
},
|
||||
"error": {
|
||||
"submitFrigatePlusFailed": "Kunne ikke sende billede til Frigate+"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,8 +14,104 @@
|
||||
"move": {
|
||||
"clickMove": {
|
||||
"label": "Klik i billedrammen for at centrere kameraet",
|
||||
"enable": "Aktivér klik for at flytte"
|
||||
"enable": "Aktivér klik for at flytte",
|
||||
"disable": "Deaktiver klik for at flytte"
|
||||
},
|
||||
"left": {
|
||||
"label": "Flyt PTZ-kameraet til venstre"
|
||||
},
|
||||
"up": {
|
||||
"label": "Flyt PTZ kamera op"
|
||||
},
|
||||
"down": {
|
||||
"label": "Flyt PTZ-kameraet ned"
|
||||
},
|
||||
"right": {
|
||||
"label": "Flyt PTZ-kameraet til højre"
|
||||
}
|
||||
}
|
||||
},
|
||||
"zoom": {
|
||||
"in": {
|
||||
"label": "Zoom PTZ-kamera ind"
|
||||
},
|
||||
"out": {
|
||||
"label": "Zoom PTZ kamera ud"
|
||||
}
|
||||
},
|
||||
"focus": {
|
||||
"in": {
|
||||
"label": "Focus PTZ kamera ind"
|
||||
},
|
||||
"out": {
|
||||
"label": "Focus PTZ kamera ud"
|
||||
}
|
||||
},
|
||||
"frame": {
|
||||
"center": {
|
||||
"label": "Klik på billedet for at centrere PTZ-kameraet"
|
||||
}
|
||||
},
|
||||
"presets": "PTZ kamera forudindstillinger"
|
||||
},
|
||||
"camera": {
|
||||
"enable": "Aktivér kamera",
|
||||
"disable": "Deaktivér kamera"
|
||||
},
|
||||
"muteCameras": {
|
||||
"enable": "Slå lyd på alle kameraer fra",
|
||||
"disable": "Slå lyd på alle kameraer til"
|
||||
},
|
||||
"detect": {
|
||||
"enable": "Aktiver detektering",
|
||||
"disable": "Deaktiver detektering"
|
||||
},
|
||||
"recording": {
|
||||
"enable": "Aktivér optagelse",
|
||||
"disable": "Deaktiver optagelse"
|
||||
},
|
||||
"snapshots": {
|
||||
"enable": "Aktivér Snapshots",
|
||||
"disable": "Deaktivér Snapshots"
|
||||
},
|
||||
"snapshot": {
|
||||
"takeSnapshot": "Hent instant snapshot",
|
||||
"noVideoSource": "Ingen videokilde til snapshot.",
|
||||
"captureFailed": "Kunne ikke tage snapshot.",
|
||||
"downloadStarted": "Hentning af snapshot startet."
|
||||
},
|
||||
"audioDetect": {
|
||||
"enable": "Aktiver lyddetektor",
|
||||
"disable": "Deaktiver lyddetektor"
|
||||
},
|
||||
"transcription": {
|
||||
"enable": "Aktiver Live Audio Transkription",
|
||||
"disable": "Deaktiver Live Audio Transkription"
|
||||
},
|
||||
"autotracking": {
|
||||
"enable": "Aktiver Autotracking",
|
||||
"disable": "Deaktiver Autotracking"
|
||||
},
|
||||
"streamStats": {
|
||||
"enable": "Vis Stream statistik",
|
||||
"disable": "Skjul Stream statistik"
|
||||
},
|
||||
"manualRecording": {
|
||||
"title": "Manuel optagelse",
|
||||
"tips": "Hent et øjebliksbillede eller start en manuel begivenhed baseret på dette kameras indstillinger for optagelse af opbevaring.",
|
||||
"playInBackground": {
|
||||
"label": "Afspil i baggrunden",
|
||||
"desc": "Aktiver denne mulighed for at fortsætte streaming, når afspilleren er skjult."
|
||||
},
|
||||
"showStats": {
|
||||
"label": "Vis statistik",
|
||||
"desc": "Aktiver denne mulighed for at vise streamstatistikker som en overlejring på kameraets feed."
|
||||
},
|
||||
"debugView": "Debug View",
|
||||
"start": "Start on-demand optagelse",
|
||||
"started": "Start manuel optagelse.",
|
||||
"failedToStart": "Manuel optagelse fejlede.",
|
||||
"recordDisabledTips": "Da optagelsen er deaktiveret eller begrænset i konfig for dette kamera, gemmes der kun et snapshot.",
|
||||
"end": "Afslut manuel optagelse",
|
||||
"ended": "Afsluttet manuel optagelse."
|
||||
}
|
||||
}
|
||||
|
||||
@@ -179,7 +179,8 @@
|
||||
"lt": "Lietuvių (Litauisch)",
|
||||
"bg": "Български (bulgarisch)",
|
||||
"gl": "Galego (Galicisch)",
|
||||
"id": "Bahasa Indonesia (Indonesisch)"
|
||||
"id": "Bahasa Indonesia (Indonesisch)",
|
||||
"hr": "Hrvatski (Kroatisch)"
|
||||
},
|
||||
"appearance": "Erscheinung",
|
||||
"theme": {
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
"description": {
|
||||
"placeholder": "Gib einen Name für diese Kollektion ein",
|
||||
"addFace": "Füge der Gesichtsbibliothek eine neue Sammlung hinzu, indem du ein Bild hochlädst.",
|
||||
"invalidName": "Ungültiger Name. Namen dürfen nur Buchstaben, Zahlen, Leerzeichen, Apostrophe, Unterstriche und Bindestriche enthalten."
|
||||
"invalidName": "Ungültiger Name. Namen dürfen nur Buchstaben, Zahlen, Leerzeichen, Apostrophe, Unterstriche und Bindestriche enthalten.",
|
||||
"nameCannotContainHash": "Der Name darf keine # enthalten."
|
||||
},
|
||||
"details": {
|
||||
"person": "Person",
|
||||
|
||||
@@ -544,7 +544,7 @@
|
||||
"placeholder": "Passwort eingeben",
|
||||
"requirements": {
|
||||
"title": "Passwort Anforderungen:",
|
||||
"length": "Mindestens 8 Zeichen",
|
||||
"length": "Mindestens 12 Zeichen",
|
||||
"uppercase": "Mindestens ein Großbuchstabe",
|
||||
"digit": "Mindestens eine Ziffer",
|
||||
"special": "Mindestens ein Sonderzeichen (!@#$%^&*(),.?\":{}|<>)"
|
||||
|
||||
@@ -245,6 +245,7 @@
|
||||
"uiPlayground": "UI Playground",
|
||||
"faceLibrary": "Face Library",
|
||||
"classification": "Classification",
|
||||
"chat": "Chat",
|
||||
"user": {
|
||||
"title": "User",
|
||||
"account": "Account",
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"restart": {
|
||||
"title": "Are you sure you want to restart Frigate?",
|
||||
"description": "This will briefly stop Frigate while it restarts.",
|
||||
"button": "Restart",
|
||||
"restarting": {
|
||||
"title": "Frigate is Restarting",
|
||||
@@ -48,6 +49,10 @@
|
||||
"name": {
|
||||
"placeholder": "Name the Export"
|
||||
},
|
||||
"case": {
|
||||
"label": "Case",
|
||||
"placeholder": "Select a case"
|
||||
},
|
||||
"select": "Select",
|
||||
"export": "Export",
|
||||
"selectOrExport": "Select or Export",
|
||||
|
||||
@@ -324,9 +324,6 @@
|
||||
"enabled": {
|
||||
"label": "Enable record on all cameras."
|
||||
},
|
||||
"sync_recordings": {
|
||||
"label": "Sync recordings with disk on startup and once a day."
|
||||
},
|
||||
"expire_interval": {
|
||||
"label": "Number of minutes to wait between cleanup runs."
|
||||
},
|
||||
@@ -758,4 +755,4 @@
|
||||
"label": "Keep track of original state of camera."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,12 +2,23 @@
|
||||
"label": "Networking configuration",
|
||||
"properties": {
|
||||
"ipv6": {
|
||||
"label": "Network configuration",
|
||||
"label": "IPv6 configuration",
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"label": "Enable IPv6 for port 5000 and/or 8971"
|
||||
}
|
||||
}
|
||||
},
|
||||
"listen": {
|
||||
"label": "Listening ports configuration",
|
||||
"properties": {
|
||||
"internal": {
|
||||
"label": "Internal listening port for Frigate"
|
||||
},
|
||||
"external": {
|
||||
"label": "External listening port for Frigate"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,9 +4,6 @@
|
||||
"enabled": {
|
||||
"label": "Enable record on all cameras."
|
||||
},
|
||||
"sync_recordings": {
|
||||
"label": "Sync recordings with disk on startup and once a day."
|
||||
},
|
||||
"expire_interval": {
|
||||
"label": "Number of minutes to wait between cleanup runs."
|
||||
},
|
||||
@@ -90,4 +87,4 @@
|
||||
"label": "Keep track of original state of recording."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
10
web/public/locales/en/views/chat.json
Normal file
10
web/public/locales/en/views/chat.json
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"placeholder": "Ask anything...",
|
||||
"error": "Something went wrong. Please try again.",
|
||||
"processing": "Processing...",
|
||||
"toolsUsed": "Used: {{tools}}",
|
||||
"showTools": "Show tools ({{count}})",
|
||||
"hideTools": "Hide tools",
|
||||
"call": "Call",
|
||||
"result": "Result"
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.