Compare commits

..

1 Commits

Author SHA1 Message Date
Blake Blackshear
cb943022f9 updates for yolov9 coral support 2026-02-27 01:35:35 +00:00
318 changed files with 5508 additions and 31940 deletions

View File

@@ -229,7 +229,6 @@ Reolink
restream restream
restreamed restreamed
restreaming restreaming
RJSF
rkmpp rkmpp
rknn rknn
rkrga rkrga

View File

@@ -1,7 +1,7 @@
default_target: local default_target: local
COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.18.0 VERSION = 0.17.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
BOARDS= #Initialized empty BOARDS= #Initialized empty
@@ -49,8 +49,7 @@ push: push-boards
--push --push
run: local run: local
docker run --rm --publish=5000:5000 --publish=8971:8971 \ docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest
--volume=${PWD}/config:/config frigate:latest
run_tests: local run_tests: local
docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \ docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \

View File

@@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
FROM scratch AS go2rtc FROM scratch AS go2rtc
ARG TARGETARCH ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin WORKDIR /rootfs/usr/local/go2rtc/bin
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc
FROM wget AS tempio FROM wget AS tempio
ARG TARGETARCH ARG TARGETARCH

View File

@@ -10,8 +10,7 @@ echo "[INFO] Starting certsync..."
lefile="/etc/letsencrypt/live/frigate/fullchain.pem" lefile="/etc/letsencrypt/live/frigate/fullchain.pem"
tls_enabled=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .tls.enabled` tls_enabled=`python3 /usr/local/nginx/get_listen_settings.py | jq -r .tls.enabled`
listen_external_port=`python3 /usr/local/nginx/get_nginx_settings.py | jq -r .listen.external_port`
while true while true
do do
@@ -35,7 +34,7 @@ do
;; ;;
esac esac
liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:$listen_external_port 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'` liveprint=`echo | openssl s_client -showcerts -connect 127.0.0.1:8971 2>&1 | openssl x509 -fingerprint 2>&1 | grep -i fingerprint || echo 'failed'`
case "$liveprint" in case "$liveprint" in
*Fingerprint*) *Fingerprint*)

View File

@@ -80,12 +80,12 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain.
fi fi
# build templates for optional FRIGATE_BASE_PATH environment variable # build templates for optional FRIGATE_BASE_PATH environment variable
python3 /usr/local/nginx/get_nginx_settings.py | \ python3 /usr/local/nginx/get_base_path.py | \
tempio -template /usr/local/nginx/templates/base_path.gotmpl \ tempio -template /usr/local/nginx/templates/base_path.gotmpl \
-out /usr/local/nginx/conf/base_path.conf -out /usr/local/nginx/conf/base_path.conf
# build templates for additional network settings # build templates for optional TLS support
python3 /usr/local/nginx/get_nginx_settings.py | \ python3 /usr/local/nginx/get_listen_settings.py | \
tempio -template /usr/local/nginx/templates/listen.gotmpl \ tempio -template /usr/local/nginx/templates/listen.gotmpl \
-out /usr/local/nginx/conf/listen.conf -out /usr/local/nginx/conf/listen.conf

View File

@@ -0,0 +1,11 @@
"""Prints the base path as json to stdout."""
import json
import os
from typing import Any

# FRIGATE_BASE_PATH is optional; an unset variable means "no base path".
base_path = os.environ.get("FRIGATE_BASE_PATH", "")

# Emit a single JSON object so the consumer (tempio template) can read it.
result: dict[str, Any] = dict(base_path=base_path)
print(json.dumps(result))

View File

@@ -0,0 +1,35 @@
"""Prints the tls config as json to stdout."""
import json
import sys
from typing import Any

from ruamel.yaml import YAML

# find_config_file lives in the frigate package installed under /opt/frigate.
sys.path.insert(0, "/opt/frigate")
from frigate.util.config import find_config_file
sys.path.remove("/opt/frigate")

yaml = YAML()
config_file = find_config_file()

# Pre-bind config so a missing, empty, or unrecognized config file degrades
# to the defaults below instead of raising NameError/AttributeError later.
config: dict[str, Any] = {}

try:
    with open(config_file) as f:
        raw_config = f.read()

    if config_file.endswith((".yaml", ".yml")):
        # yaml.load returns None for an empty document; keep the {} default.
        config = yaml.load(raw_config) or {}
    elif config_file.endswith(".json"):
        config = json.loads(raw_config)
except FileNotFoundError:
    pass

# tls defaults to enabled; ipv6 defaults to disabled.
tls_config: dict[str, Any] = config.get("tls", {"enabled": True})
networking_config = config.get("networking", {})
ipv6_config = networking_config.get("ipv6", {"enabled": False})

output = {"tls": tls_config, "ipv6": ipv6_config}
print(json.dumps(output))

View File

@@ -1,62 +0,0 @@
"""Prints the nginx settings as json to stdout."""
import json
import os
import sys
from typing import Any

from ruamel.yaml import YAML

# find_config_file lives in the frigate package installed under /opt/frigate.
sys.path.insert(0, "/opt/frigate")
from frigate.util.config import find_config_file
sys.path.remove("/opt/frigate")

yaml = YAML()
config_file = find_config_file()

# Pre-bind config so a missing, empty, or unrecognized config file degrades
# to the defaults below instead of raising NameError/AttributeError later.
config: dict[str, Any] = {}

try:
    with open(config_file) as f:
        raw_config = f.read()

    if config_file.endswith((".yaml", ".yml")):
        # yaml.load returns None for an empty document; keep the {} default.
        config = yaml.load(raw_config) or {}
    elif config_file.endswith(".json"):
        config = json.loads(raw_config)
except FileNotFoundError:
    pass

tls_config: dict[str, Any] = config.get("tls", {})
tls_config.setdefault("enabled", True)

networking_config: dict[str, Any] = config.get("networking", {})
ipv6_config: dict[str, Any] = networking_config.get("ipv6", {})
ipv6_config.setdefault("enabled", False)

listen_config: dict[str, Any] = networking_config.get("listen", {})
listen_config.setdefault("internal", 5000)
listen_config.setdefault("external", 8971)


def _port_of(value: Any) -> int:
    """Return the numeric port from either an int or an "ip:port" string."""
    if isinstance(value, str):
        return int(value.split(":")[-1])
    return value


# Derive plain numeric ports, handling the optional "ip:port" bind form.
listen_config["internal_port"] = _port_of(listen_config["internal"])
listen_config["external_port"] = _port_of(listen_config["external"])

base_path = os.environ.get("FRIGATE_BASE_PATH", "")

result: dict[str, Any] = {
    "tls": tls_config,
    "ipv6": ipv6_config,
    "listen": listen_config,
    "base_path": base_path,
}
print(json.dumps(result))

View File

@@ -7,7 +7,7 @@ location ^~ {{ .base_path }}/ {
# remove base_url from the path before passing upstream # remove base_url from the path before passing upstream
rewrite ^{{ .base_path }}/(.*) /$1 break; rewrite ^{{ .base_path }}/(.*) /$1 break;
proxy_pass $scheme://127.0.0.1:{{ .listen.external_port }}; proxy_pass $scheme://127.0.0.1:8971;
proxy_http_version 1.1; proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade; proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade"; proxy_set_header Connection "upgrade";

View File

@@ -1,12 +1,15 @@
# Internal (IPv4 always; IPv6 optional) # Internal (IPv4 always; IPv6 optional)
listen {{ .listen.internal }}; listen 5000;
{{ if .ipv6.enabled }}listen [::]:{{ .listen.internal_port }};{{ end }} {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:5000;{{ end }}{{ end }}
# intended for external traffic, protected by auth # intended for external traffic, protected by auth
{{ if .tls.enabled }} {{ if .tls }}
{{ if .tls.enabled }}
# external HTTPS (IPv4 always; IPv6 optional) # external HTTPS (IPv4 always; IPv6 optional)
listen {{ .listen.external }} ssl; listen 8971 ssl;
{{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }} ssl;{{ end }} {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971 ssl;{{ end }}{{ end }}
ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem; ssl_certificate /etc/letsencrypt/live/frigate/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem; ssl_certificate_key /etc/letsencrypt/live/frigate/privkey.pem;
@@ -29,8 +32,14 @@ listen {{ .listen.internal }};
default_type "text/plain"; default_type "text/plain";
root /etc/letsencrypt/www; root /etc/letsencrypt/www;
} }
{{ else }}
# external HTTP (IPv4 always; IPv6 optional)
listen 8971;
{{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
{{ end }}
{{ else }} {{ else }}
# (No tls) default to HTTP (IPv4 always; IPv6 optional) # (No tls section) default to HTTP (IPv4 always; IPv6 optional)
listen {{ .listen.external }}; listen 8971;
{{ if .ipv6.enabled }}listen [::]:{{ .listen.external_port }};{{ end }} {{ if .ipv6 }}{{ if .ipv6.enabled }}listen [::]:8971;{{ end }}{{ end }}
{{ end }} {{ end }}

View File

@@ -13,7 +13,7 @@ ARG ROCM
RUN apt update -qq && \ RUN apt update -qq && \
apt install -y wget gpg && \ apt install -y wget gpg && \
wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.2/ubuntu/jammy/amdgpu-install_7.2.70200-1_all.deb && \ wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \
apt install -y ./rocm.deb && \ apt install -y ./rocm.deb && \
apt update && \ apt update && \
apt install -qq -y rocm apt install -qq -y rocm
@@ -56,8 +56,6 @@ FROM scratch AS rocm-dist
ARG ROCM ARG ROCM
# Copy HIP headers required for MIOpen JIT (BuildHip) / HIPRTC at runtime
COPY --from=rocm /opt/rocm-${ROCM}/include/ /opt/rocm-${ROCM}/include/
COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/ COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
# Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3) # Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3)
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/ COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/

View File

@@ -1 +1 @@
onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.2.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl

View File

@@ -1,5 +1,5 @@
variable "ROCM" { variable "ROCM" {
default = "7.2.0" default = "7.1.1"
} }
variable "HSA_OVERRIDE_GFX_VERSION" { variable "HSA_OVERRIDE_GFX_VERSION" {
default = "" default = ""

View File

@@ -155,32 +155,33 @@ services:
### Enabling IPv6 ### Enabling IPv6
IPv6 is disabled by default, to enable IPv6 modify your Frigate configuration as follows: IPv6 is disabled by default, to enable IPv6 listen.gotmpl needs to be bind mounted with IPv6 enabled. For example:
```yaml ```
networking: {{ if not .enabled }}
ipv6: # intended for external traffic, protected by auth
enabled: True listen 8971;
{{ else }}
# intended for external traffic, protected by auth
listen 8971 ssl;
# intended for internal traffic, not protected by auth
listen 5000;
``` ```
### Listen on different ports becomes
You can change the ports Nginx uses for listening using Frigate's configuration file. The internal port (unauthenticated) and external port (authenticated) can be changed independently. You can also specify an IP address using the format `ip:port` if you wish to bind the port to a specific interface. This may be useful for example to prevent exposing the internal port outside the container.
For example:
```yaml
networking:
listen:
internal: 127.0.0.1:5000
external: 8971
``` ```
{{ if not .enabled }}
# intended for external traffic, protected by auth
listen [::]:8971 ipv6only=off;
{{ else }}
# intended for external traffic, protected by auth
listen [::]:8971 ipv6only=off ssl;
:::warning # intended for internal traffic, not protected by auth
listen [::]:5000 ipv6only=off;
This setting is for advanced users. For the majority of use cases it's recommended to change the `ports` section of your Docker compose file or use the Docker `run` `--publish` option instead, e.g. `-p 443:8971`. Changing Frigate's ports may break some integrations. ```
:::
## Base path ## Base path
@@ -233,7 +234,7 @@ To do this:
### Custom go2rtc version ### Custom go2rtc version
Frigate currently includes go2rtc v1.9.13, there may be certain cases where you want to run a different version of go2rtc. Frigate currently includes go2rtc v1.9.10, there may be certain cases where you want to run a different version of go2rtc.
To do this: To do this:

View File

@@ -244,7 +244,7 @@ go2rtc:
- rtspx://192.168.1.1:7441/abcdefghijk - rtspx://192.168.1.1:7441/abcdefghijk
``` ```
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-rtsp) [See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp)
In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect. In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect.

View File

@@ -5,7 +5,7 @@ title: Configuring Generative AI
## Configuration ## Configuration
A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`. To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
@@ -77,46 +77,8 @@ genai:
provider: ollama provider: ollama
base_url: http://localhost:11434 base_url: http://localhost:11434
model: qwen3-vl:4b model: qwen3-vl:4b
provider_options: # other Ollama client options can be defined
keep_alive: -1
options:
num_ctx: 8192 # make sure the context matches other services that are using ollama
``` ```
## llama.cpp
[llama.cpp](https://github.com/ggml-org/llama.cpp) is a C++ implementation of LLaMA that provides a high-performance inference server. Using llama.cpp directly gives you access to all native llama.cpp options and parameters.
:::warning
Using llama.cpp on CPU is not recommended, high inference times make using Generative AI impractical.
:::
It is highly recommended to host the llama.cpp server on a machine with a discrete graphics card, or on an Apple silicon Mac for best performance.
### Supported Models
You must use a vision capable model with Frigate. The llama.cpp server supports various vision models in GGUF format.
### Configuration
```yaml
genai:
provider: llamacpp
base_url: http://localhost:8080
model: your-model-name
provider_options:
temperature: 0.7
repeat_penalty: 1.05
top_p: 0.8
top_k: 40
min_p: 0.05
seed: -1
```
All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggml-org/llama.cpp/blob/master/tools/server/README.md) for a complete list of available parameters.
## Google Gemini ## Google Gemini
Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation. Google Gemini has a [free tier](https://ai.google.dev/pricing) for the API, however the limits may not be sufficient for standard Frigate usage. Choose a plan appropriate for your installation.

View File

@@ -11,7 +11,7 @@ By default, descriptions will be generated for all tracked objects and all zones
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction. Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt#frigatecamera_nameobject_descriptionsset). Generative AI object descriptions can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).
## Usage and Best Practices ## Usage and Best Practices

View File

@@ -7,7 +7,7 @@ Generative AI can be used to automatically generate structured summaries of revi
Requests for a summary are requested automatically to your AI provider for alert review items when the activity has ended, they can also be optionally enabled for detections as well. Requests for a summary are requested automatically to your AI provider for alert review items when the activity has ended, they can also be optionally enabled for detections as well.
Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt#frigatecamera_namereview_descriptionsset). Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset).
## Review Summary Usage and Best Practices ## Review Summary Usage and Best Practices

View File

@@ -139,13 +139,7 @@ record:
:::tip :::tip
When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras.<camera>.record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264). When using `hwaccel_args` globally hardware encoding is used for time lapse generation. The encoder determines its own behavior so the resulting file size may be undesirably large.
:::
:::tip
The encoder determines its own behavior so the resulting file size may be undesirably large.
To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario. To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario.
::: :::
@@ -154,16 +148,19 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe
Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices. Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices.
## Syncing Media Files With Disk ## Syncing Recordings With Disk
Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk. In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist.
Normal operation may leave small numbers of orphaned files until Frigate's scheduled cleanup, but crashes, configuration changes, or upgrades may cause more orphaned files that Frigate does not clean up. This feature checks the file system for media files and removes any that are not referenced in the database. ```yaml
record:
sync_recordings: True
```
The Maintenance pane in the Frigate UI or an API endpoint `POST /api/media/sync` can be used to trigger a media sync. When using the API, a job ID is returned and the operation continues on the server. Status can be checked with the `/api/media/sync/status/{job_id}` endpoint. This feature is meant to fix variations in files, not completely delete entries in the database. If you delete all of your media, don't use `sync_recordings`, just stop Frigate, delete the `frigate.db` database, and restart.
:::warning :::warning
This operation uses considerable CPU resources and includes a safety threshold that aborts if more than 50% of files would be deleted. Only run when necessary. If you set `force: true` the safety threshold will be bypassed; do not use `force` unless you are certain the deletions are intended. The sync operation uses considerable CPU resources and in most cases is not needed, only enable when necessary.
::: :::

View File

@@ -73,19 +73,11 @@ tls:
# Optional: Enable TLS for port 8971 (default: shown below) # Optional: Enable TLS for port 8971 (default: shown below)
enabled: True enabled: True
# Optional: Networking configuration # Optional: IPv6 configuration
networking: networking:
# Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below) # Optional: Enable IPv6 on 5000, and 8971 if tls is configured (default: shown below)
ipv6: ipv6:
enabled: False enabled: False
# Optional: Override ports Frigate uses for listening (defaults: shown below)
# An IP address may also be provided to bind to a specific interface, e.g. ip:port
# NOTE: This setting is for advanced users and may break some integrations. The majority
# of users should change ports in the docker compose file
# or use the docker run `--publish` option to select a different port.
listen:
internal: 5000
external: 8971
# Optional: Proxy configuration # Optional: Proxy configuration
proxy: proxy:
@@ -518,6 +510,8 @@ record:
# Optional: Number of minutes to wait between cleanup runs (default: shown below) # Optional: Number of minutes to wait between cleanup runs (default: shown below)
# This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
expire_interval: 60 expire_interval: 60
# Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below).
sync_recordings: False
# Optional: Continuous retention settings # Optional: Continuous retention settings
continuous: continuous:
# Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below) # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below)
@@ -540,8 +534,6 @@ record:
# The -r (framerate) dictates how smooth the output video is. # The -r (framerate) dictates how smooth the output video is.
# So the args would be -vf setpts=0.02*PTS -r 30 in that case. # So the args would be -vf setpts=0.02*PTS -r 30 in that case.
timelapse_args: "-vf setpts=0.04*PTS -r 30" timelapse_args: "-vf setpts=0.04*PTS -r 30"
# Optional: Global hardware acceleration settings for timelapse exports. (default: inherit)
hwaccel_args: auto
# Optional: Recording Preview Settings # Optional: Recording Preview Settings
preview: preview:
# Optional: Quality of recording preview (default: shown below). # Optional: Quality of recording preview (default: shown below).
@@ -760,7 +752,7 @@ classification:
interval: None interval: None
# Optional: Restream configuration # Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.13) # Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
# NOTE: The default go2rtc API port (1984) must be used, # NOTE: The default go2rtc API port (1984) must be used,
# changing this port for the integrated go2rtc instance is not supported. # changing this port for the integrated go2rtc instance is not supported.
go2rtc: go2rtc:
@@ -846,11 +838,6 @@ cameras:
# Optional: camera specific output args (default: inherit) # Optional: camera specific output args (default: inherit)
# output_args: # output_args:
# Optional: camera specific hwaccel args for timelapse export (default: inherit)
# record:
# export:
# hwaccel_args:
# Optional: timeout for highest scoring image before allowing it # Optional: timeout for highest scoring image before allowing it
# to be replaced by a newer image. (default: shown below) # to be replaced by a newer image. (default: shown below)
best_image_timeout: 60 best_image_timeout: 60

View File

@@ -7,7 +7,7 @@ title: Restream
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.13) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration) for more advanced configurations and features. Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features.
:::note :::note
@@ -206,7 +206,7 @@ Enabling arbitrary exec sources allows execution of arbitrary commands through g
## Advanced Restream Configurations ## Advanced Restream Configurations
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
:::warning :::warning

View File

@@ -11,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
## Setup a go2rtc stream ## Setup a go2rtc stream
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#module-streams), not just rtsp. First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp.
:::tip :::tip
@@ -47,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
- Check Video Codec: - Check Video Codec:
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported. - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#codecs-madness) in go2rtc documentation. - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
```yaml ```yaml
go2rtc: go2rtc:
streams: streams:

View File

@@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = {
{ {
type: "link", type: "link",
label: "Go2RTC Configuration Reference", label: "Go2RTC Configuration Reference",
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration", href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration",
} as PropSidebarItemLink, } as PropSidebarItemLink,
], ],
Detectors: [ Detectors: [

View File

@@ -331,59 +331,6 @@ paths:
application/json: application/json:
schema: schema:
$ref: "#/components/schemas/HTTPValidationError" $ref: "#/components/schemas/HTTPValidationError"
/media/sync:
post:
tags:
- App
summary: Start media sync job
description: |-
Start an asynchronous media sync job to find and (optionally) remove orphaned media files.
Returns 202 with job details when queued, or 409 if a job is already running.
operationId: sync_media_media_sync_post
requestBody:
required: true
content:
application/json:
responses:
"202":
description: Accepted - Job queued
"409":
description: Conflict - Job already running
"422":
description: Validation Error
/media/sync/current:
get:
tags:
- App
summary: Get current media sync job
description: |-
Retrieve the current running media sync job, if any. Returns the job details or null when no job is active.
operationId: get_media_sync_current_media_sync_current_get
responses:
"200":
description: Successful Response
"422":
description: Validation Error
/media/sync/status/{job_id}:
get:
tags:
- App
summary: Get media sync job status
description: |-
Get status and results for the specified media sync job id. Returns 200 with job details including results, or 404 if the job is not found.
operationId: get_media_sync_status_media_sync_status__job_id__get
parameters:
- name: job_id
in: path
responses:
"200":
description: Successful Response
"404":
description: Not Found - Job not found
"422":
description: Validation Error
/faces/train/{name}/classify: /faces/train/{name}/classify:
post: post:
tags: tags:
@@ -3200,7 +3147,6 @@ paths:
duration: 30 duration: 30
include_recording: true include_recording: true
draw: {} draw: {}
pre_capture: null
responses: responses:
"200": "200":
description: Successful Response description: Successful Response
@@ -5003,12 +4949,6 @@ components:
- type: "null" - type: "null"
title: Draw title: Draw
default: {} default: {}
pre_capture:
anyOf:
- type: integer
- type: "null"
title: Pre Capture Seconds
default: null
type: object type: object
title: EventsCreateBody title: EventsCreateBody
EventsDeleteBody: EventsDeleteBody:

View File

@@ -30,31 +30,22 @@ from frigate.api.auth import (
require_role, require_role,
) )
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody, MediaSyncBody from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.config.camera.updater import ( from frigate.config.camera.updater import (
CameraConfigUpdateEnum, CameraConfigUpdateEnum,
CameraConfigUpdateTopic, CameraConfigUpdateTopic,
) )
from frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector
from frigate.jobs.media_sync import (
get_current_media_sync_job,
get_media_sync_job_by_id,
start_media_sync_job,
)
from frigate.models import Event, Timeline from frigate.models import Event, Timeline
from frigate.stats.prometheus import get_metrics, update_metrics from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.types import JobStatusTypesEnum
from frigate.util.builtin import ( from frigate.util.builtin import (
clean_camera_user_pass, clean_camera_user_pass,
flatten_config_data, flatten_config_data,
load_labels,
process_config_query_string, process_config_query_string,
update_yaml_file_bulk, update_yaml_file_bulk,
) )
from frigate.util.config import find_config_file from frigate.util.config import find_config_file
from frigate.util.schema import get_config_schema
from frigate.util.services import ( from frigate.util.services import (
get_nvidia_driver_info, get_nvidia_driver_info,
process_logs, process_logs,
@@ -79,7 +70,9 @@ def is_healthy():
@router.get("/config/schema.json", dependencies=[Depends(allow_public())]) @router.get("/config/schema.json", dependencies=[Depends(allow_public())])
def config_schema(request: Request): def config_schema(request: Request):
return JSONResponse(content=get_config_schema(FrigateConfig)) return Response(
content=request.app.frigate_config.schema_json(), media_type="application/json"
)
@router.get( @router.get(
@@ -125,10 +118,6 @@ def config(request: Request):
config: dict[str, dict[str, Any]] = config_obj.model_dump( config: dict[str, dict[str, Any]] = config_obj.model_dump(
mode="json", warnings="none", exclude_none=True mode="json", warnings="none", exclude_none=True
) )
config["detectors"] = {
name: detector.model_dump(mode="json", warnings="none", exclude_none=True)
for name, detector in config_obj.detectors.items()
}
# remove the mqtt password # remove the mqtt password
config["mqtt"].pop("password", None) config["mqtt"].pop("password", None)
@@ -199,54 +188,6 @@ def config(request: Request):
return JSONResponse(content=config) return JSONResponse(content=config)
@router.get("/ffmpeg/presets", dependencies=[Depends(allow_any_authenticated())])
def ffmpeg_presets():
"""Return available ffmpeg preset keys for config UI usage."""
# Whitelist based on documented presets in ffmpeg_presets.md
hwaccel_presets = [
"preset-rpi-64-h264",
"preset-rpi-64-h265",
"preset-vaapi",
"preset-intel-qsv-h264",
"preset-intel-qsv-h265",
"preset-nvidia",
"preset-jetson-h264",
"preset-jetson-h265",
"preset-rkmpp",
]
input_presets = [
"preset-http-jpeg-generic",
"preset-http-mjpeg-generic",
"preset-http-reolink",
"preset-rtmp-generic",
"preset-rtsp-generic",
"preset-rtsp-restream",
"preset-rtsp-restream-low-latency",
"preset-rtsp-udp",
"preset-rtsp-blue-iris",
]
record_output_presets = [
"preset-record-generic",
"preset-record-generic-audio-copy",
"preset-record-generic-audio-aac",
"preset-record-mjpeg",
"preset-record-jpeg",
"preset-record-ubiquiti",
]
return JSONResponse(
content={
"hwaccel_args": hwaccel_presets,
"input_args": input_presets,
"output_args": {
"record": record_output_presets,
"detect": [],
},
}
)
@router.get("/config/raw_paths", dependencies=[Depends(require_role(["admin"]))]) @router.get("/config/raw_paths", dependencies=[Depends(require_role(["admin"]))])
def config_raw_paths(request: Request): def config_raw_paths(request: Request):
"""Admin-only endpoint that returns camera paths and go2rtc streams without credential masking.""" """Admin-only endpoint that returns camera paths and go2rtc streams without credential masking."""
@@ -484,7 +425,6 @@ def config_set(request: Request, body: AppConfigSetBody):
if body.requires_restart == 0 or body.update_topic: if body.requires_restart == 0 or body.update_topic:
old_config: FrigateConfig = request.app.frigate_config old_config: FrigateConfig = request.app.frigate_config
request.app.frigate_config = config request.app.frigate_config = config
request.app.genai_manager.update_config(config)
if body.update_topic: if body.update_topic:
if body.update_topic.startswith("config/cameras/"): if body.update_topic.startswith("config/cameras/"):
@@ -523,15 +463,7 @@ def config_set(request: Request, body: AppConfigSetBody):
@router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())]) @router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())])
def vainfo(): def vainfo():
# Use LibvaGpuSelector to pick an appropriate libva device (if available) vainfo = vainfo_hwaccel()
selected_gpu = ""
try:
selected_gpu = _gpu_selector.get_gpu_arg(FFMPEG_HWACCEL_VAAPI, 0) or ""
except Exception:
selected_gpu = ""
# If selected_gpu is empty, pass None to vainfo_hwaccel to run plain `vainfo`.
vainfo = vainfo_hwaccel(device_name=selected_gpu or None)
return JSONResponse( return JSONResponse(
content={ content={
"return_code": vainfo.returncode, "return_code": vainfo.returncode,
@@ -666,98 +598,6 @@ def restart():
) )
@router.post(
"/media/sync",
dependencies=[Depends(require_role(["admin"]))],
summary="Start media sync job",
description="""Start an asynchronous media sync job to find and (optionally) remove orphaned media files.
Returns 202 with job details when queued, or 409 if a job is already running.""",
)
def sync_media(body: MediaSyncBody = Body(...)):
"""Start async media sync job - remove orphaned files.
Syncs specified media types: event snapshots, event thumbnails, review thumbnails,
previews, exports, and/or recordings. Job runs in background; use /media/sync/current
or /media/sync/status/{job_id} to check status.
Args:
body: MediaSyncBody with dry_run flag and media_types list.
media_types can include: 'all', 'event_snapshots', 'event_thumbnails',
'review_thumbnails', 'previews', 'exports', 'recordings'
Returns:
202 Accepted with job_id, or 409 Conflict if job already running.
"""
job_id = start_media_sync_job(
dry_run=body.dry_run, media_types=body.media_types, force=body.force
)
if job_id is None:
# A job is already running
current = get_current_media_sync_job()
return JSONResponse(
content={
"error": "A media sync job is already running",
"current_job_id": current.id if current else None,
},
status_code=409,
)
return JSONResponse(
content={
"job": {
"job_type": "media_sync",
"status": JobStatusTypesEnum.queued,
"id": job_id,
}
},
status_code=202,
)
@router.get(
"/media/sync/current",
dependencies=[Depends(require_role(["admin"]))],
summary="Get current media sync job",
description="""Retrieve the current running media sync job, if any. Returns the job details
or null when no job is active.""",
)
def get_media_sync_current():
"""Get the current running media sync job, if any."""
job = get_current_media_sync_job()
if job is None:
return JSONResponse(content={"job": None}, status_code=200)
return JSONResponse(
content={"job": job.to_dict()},
status_code=200,
)
@router.get(
"/media/sync/status/{job_id}",
dependencies=[Depends(require_role(["admin"]))],
summary="Get media sync job status",
description="""Get status and results for the specified media sync job id. Returns 200 with
job details including results, or 404 if the job is not found.""",
)
def get_media_sync_status(job_id: str):
"""Get the status of a specific media sync job."""
job = get_media_sync_job_by_id(job_id)
if job is None:
return JSONResponse(
content={"error": "Job not found"},
status_code=404,
)
return JSONResponse(
content={"job": job.to_dict()},
status_code=200,
)
@router.get("/labels", dependencies=[Depends(allow_any_authenticated())]) @router.get("/labels", dependencies=[Depends(allow_any_authenticated())])
def get_labels(camera: str = ""): def get_labels(camera: str = ""):
try: try:
@@ -807,12 +647,6 @@ def get_sub_labels(split_joined: Optional[int] = None):
return JSONResponse(content=sub_labels) return JSONResponse(content=sub_labels)
@router.get("/audio_labels", dependencies=[Depends(allow_any_authenticated())])
def get_audio_labels():
labels = load_labels("/audio-labelmap.txt", prefill=521)
return JSONResponse(content=labels)
@router.get("/plus/models", dependencies=[Depends(allow_any_authenticated())]) @router.get("/plus/models", dependencies=[Depends(allow_any_authenticated())])
def plusModels(request: Request, filterByCurrentModelDetector: bool = False): def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
if not request.app.frigate_config.plus_api.is_active(): if not request.app.frigate_config.plus_api.is_active():

View File

@@ -26,7 +26,7 @@ from frigate.api.defs.request.app_body import (
AppPutRoleBody, AppPutRoleBody,
) )
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.config import AuthConfig, NetworkingConfig, ProxyConfig from frigate.config import AuthConfig, ProxyConfig
from frigate.const import CONFIG_DIR, JWT_SECRET_ENV_VAR, PASSWORD_HASH_ALGORITHM from frigate.const import CONFIG_DIR, JWT_SECRET_ENV_VAR, PASSWORD_HASH_ALGORITHM
from frigate.models import User from frigate.models import User
@@ -41,7 +41,7 @@ def require_admin_by_default():
endpoints require admin access unless explicitly overridden with endpoints require admin access unless explicitly overridden with
allow_public(), allow_any_authenticated(), or require_role(). allow_public(), allow_any_authenticated(), or require_role().
Internal port always has admin role set by the /auth endpoint, Port 5000 (internal) always has admin role set by the /auth endpoint,
so this check passes automatically for internal requests. so this check passes automatically for internal requests.
Certain paths are exempted from the global admin check because they must Certain paths are exempted from the global admin check because they must
@@ -130,7 +130,7 @@ def require_admin_by_default():
pass pass
# For all other paths, require admin role # For all other paths, require admin role
# Internal port requests have admin role set automatically # Port 5000 (internal) requests have admin role set automatically
role = request.headers.get("remote-role") role = request.headers.get("remote-role")
if role == "admin": if role == "admin":
return return
@@ -143,17 +143,6 @@ def require_admin_by_default():
return admin_checker return admin_checker
def _is_authenticated(request: Request) -> bool:
"""
Helper to determine if a request is from an authenticated user.
Returns True if the request has a valid authenticated user (not anonymous).
Internal port requests are considered anonymous despite having admin role.
"""
username = request.headers.get("remote-user")
return username is not None and username != "anonymous"
def allow_public(): def allow_public():
""" """
Override dependency to allow unauthenticated access to an endpoint. Override dependency to allow unauthenticated access to an endpoint.
@@ -182,7 +171,6 @@ def allow_any_authenticated():
Rejects: Rejects:
- Requests with no remote-user header (did not pass through /auth endpoint) - Requests with no remote-user header (did not pass through /auth endpoint)
- External port requests with anonymous user (auth disabled, no proxy auth)
Example: Example:
@router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())]) @router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())])
@@ -191,14 +179,8 @@ def allow_any_authenticated():
async def auth_checker(request: Request): async def auth_checker(request: Request):
# Ensure a remote-user has been set by the /auth endpoint # Ensure a remote-user has been set by the /auth endpoint
username = request.headers.get("remote-user") username = request.headers.get("remote-user")
if username is None:
# Internal port requests have admin role and should be allowed
role = request.headers.get("remote-role")
if role != "admin":
if username is None or not _is_authenticated(request):
raise HTTPException(status_code=401, detail="Authentication required") raise HTTPException(status_code=401, detail="Authentication required")
return return
return auth_checker return auth_checker
@@ -588,18 +570,12 @@ def resolve_role(
def auth(request: Request): def auth(request: Request):
auth_config: AuthConfig = request.app.frigate_config.auth auth_config: AuthConfig = request.app.frigate_config.auth
proxy_config: ProxyConfig = request.app.frigate_config.proxy proxy_config: ProxyConfig = request.app.frigate_config.proxy
networking_config: NetworkingConfig = request.app.frigate_config.networking
success_response = Response("", status_code=202) success_response = Response("", status_code=202)
# handle case where internal port is a string with ip:port
internal_port = networking_config.listen.internal
if type(internal_port) is str:
internal_port = int(internal_port.split(":")[-1])
# dont require auth if the request is on the internal port # dont require auth if the request is on the internal port
# this header is set by Frigate's nginx proxy, so it cant be spoofed # this header is set by Frigate's nginx proxy, so it cant be spoofed
if int(request.headers.get("x-server-port", default=0)) == internal_port: if int(request.headers.get("x-server-port", default=0)) == 5000:
success_response.headers["remote-user"] = "anonymous" success_response.headers["remote-user"] = "anonymous"
success_response.headers["remote-role"] = "admin" success_response.headers["remote-role"] = "admin"
return success_response return success_response

View File

@@ -1,821 +0,0 @@
"""Chat and LLM tool calling APIs."""
import base64
import json
import logging
import time
from datetime import datetime
from typing import Any, Dict, Generator, List, Optional
import cv2
from fastapi import APIRouter, Body, Depends, Request
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
)
from frigate.api.defs.query.events_query_parameters import EventsQueryParams
from frigate.api.defs.request.chat_body import ChatCompletionRequest
from frigate.api.defs.response.chat_response import (
ChatCompletionResponse,
ChatMessageResponse,
ToolCall,
)
from frigate.api.defs.tags import Tags
from frigate.api.event import events
from frigate.genai.utils import build_assistant_message_for_conversation
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.chat])
def _chunk_content(content: str, chunk_size: int = 80) -> Generator[str, None, None]:
"""Yield content in word-aware chunks for streaming."""
if not content:
return
words = content.split(" ")
current: List[str] = []
current_len = 0
for w in words:
current.append(w)
current_len += len(w) + 1
if current_len >= chunk_size:
yield " ".join(current) + " "
current = []
current_len = 0
if current:
yield " ".join(current)
def _format_events_with_local_time(
events_list: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
"""Add human-readable local start/end times to each event for the LLM."""
result = []
for evt in events_list:
if not isinstance(evt, dict):
result.append(evt)
continue
copy_evt = dict(evt)
try:
start_ts = evt.get("start_time")
end_ts = evt.get("end_time")
if start_ts is not None:
dt_start = datetime.fromtimestamp(start_ts)
copy_evt["start_time_local"] = dt_start.strftime("%Y-%m-%d %I:%M:%S %p")
if end_ts is not None:
dt_end = datetime.fromtimestamp(end_ts)
copy_evt["end_time_local"] = dt_end.strftime("%Y-%m-%d %I:%M:%S %p")
except (TypeError, ValueError, OSError):
pass
result.append(copy_evt)
return result
class ToolExecuteRequest(BaseModel):
    """Request model for tool execution."""

    # Name of the tool to run; execute_tool currently accepts "search_objects"
    # and rejects anything else with a 400 response.
    tool_name: str
    # Keyword arguments forwarded verbatim to the tool implementation.
    arguments: Dict[str, Any]
def get_tool_definitions() -> List[Dict[str, Any]]:
    """
    Get OpenAI-compatible tool definitions for Frigate.

    Returns a list of tool definitions that can be used with OpenAI-compatible
    function calling APIs.

    Fix: the ``"required"`` list of ``search_objects`` previously sat on the
    ``function`` object; per the OpenAI function-calling schema it belongs
    inside ``parameters`` (as it already does for ``get_live_context``).
    """
    return [
        {
            "type": "function",
            "function": {
                "name": "search_objects",
                "description": (
                    "Search for detected objects in Frigate by camera, object label, time range, "
                    "zones, and other filters. Use this to answer questions about when "
                    "objects were detected, what objects appeared, or to find specific object detections. "
                    "An 'object' in Frigate represents a tracked detection (e.g., a person, package, car). "
                    "When the user asks about a specific name (person, delivery company, animal, etc.), "
                    "filter by sub_label only and do not set label."
                ),
                "parameters": {
                    "type": "object",
                    "properties": {
                        "camera": {
                            "type": "string",
                            "description": "Camera name to filter by (optional).",
                        },
                        "label": {
                            "type": "string",
                            "description": "Object label to filter by (e.g., 'person', 'package', 'car').",
                        },
                        "sub_label": {
                            "type": "string",
                            "description": "Name of a person, delivery company, animal, etc. When filtering by a specific name, use only sub_label; do not set label.",
                        },
                        "after": {
                            "type": "string",
                            "description": "Start time in ISO 8601 format (e.g., '2024-01-01T00:00:00Z').",
                        },
                        "before": {
                            "type": "string",
                            "description": "End time in ISO 8601 format (e.g., '2024-01-01T23:59:59Z').",
                        },
                        "zones": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "List of zone names to filter by.",
                        },
                        "limit": {
                            "type": "integer",
                            "description": "Maximum number of objects to return (default: 25).",
                            "default": 25,
                        },
                    },
                    # Every filter is optional for search_objects.
                    "required": [],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "get_live_context",
                "description": (
                    "Get the current detection information for a camera: objects being tracked, "
                    "zones, timestamps. Use this to understand what is visible in the live view. "
                    "Call this when the user has included a live image (via include_live_image) or "
                    "when answering questions about what is happening right now on a specific camera."
                ),
                "parameters": {
                    "type": "object",
                    "properties": {
                        "camera": {
                            "type": "string",
                            "description": "Camera name to get live context for.",
                        },
                    },
                    "required": ["camera"],
                },
            },
        },
    ]
@router.get(
    "/chat/tools",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get available tools",
    description="Returns OpenAI-compatible tool definitions for function calling.",
)
def get_tools() -> JSONResponse:
    """Return the OpenAI-compatible tool definitions as ``{"tools": [...]}``."""
    return JSONResponse(content={"tools": get_tool_definitions()})
async def _execute_search_objects(
    arguments: Dict[str, Any],
    allowed_cameras: List[str],
) -> JSONResponse:
    """
    Run the ``search_objects`` tool.

    Translates the LLM-supplied arguments into ``EventsQueryParams`` and
    delegates to the events API endpoint, restricted to *allowed_cameras*.
    ISO 8601 ``after``/``before`` values are interpreted as server local time
    and converted to Unix timestamps; unparseable values are dropped with a
    warning.
    """

    def _to_local_unix_ts(raw: str) -> float:
        # Strip a trailing Z and any sub-second suffix, then read the first
        # 19 chars (YYYY-MM-DDTHH:MM:SS) as server local time.
        trimmed = raw.replace("Z", "").strip()[:19]
        parsed = datetime.strptime(trimmed, "%Y-%m-%dT%H:%M:%S")
        return time.mktime(parsed.timetuple())

    def _coerce_bound(raw, name: str):
        # Falsy values (None, "") pass through untouched; bad formats become
        # None so the query simply drops that bound.
        if not raw:
            return raw
        try:
            return _to_local_unix_ts(raw)
        except (ValueError, AttributeError, TypeError):
            logger.warning(f"Invalid '{name}' timestamp format: {raw}")
            return None

    after = _coerce_bound(arguments.get("after"), "after")
    before = _coerce_bound(arguments.get("before"), "before")

    # Zones arrive as a list from the LLM but the events API wants a
    # comma-separated string ("all" when absent).
    zones = arguments.get("zones")
    if zones is None:
        zones = "all"
    elif isinstance(zones, list):
        zones = ",".join(zones)

    query_params = EventsQueryParams(
        cameras=arguments.get("camera", "all"),
        labels=arguments.get("label", "all"),
        sub_labels=arguments.get("sub_label", "all").lower(),
        zones=zones,
        zone=zones,
        after=after,
        before=before,
        limit=arguments.get("limit", 25),
    )

    try:
        # events() is synchronous and already returns a JSONResponse the
        # LLM can consume directly.
        return events(query_params, allowed_cameras)
    except Exception as e:
        logger.error(f"Error executing search_objects: {e}", exc_info=True)
        return JSONResponse(
            content={
                "success": False,
                "message": "Error searching objects",
            },
            status_code=500,
        )
@router.post(
    "/chat/execute",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Execute a tool",
    description="Execute a tool function call from an LLM.",
)
async def execute_tool(
    body: ToolExecuteRequest = Body(...),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
) -> JSONResponse:
    """
    Dispatch a tool function call coming from an LLM.

    Only ``search_objects`` is exposed through this endpoint; any other tool
    name yields a 400 response describing the unknown tool.
    """
    logger.debug(f"Executing tool: {body.tool_name} with arguments: {body.arguments}")

    if body.tool_name != "search_objects":
        return JSONResponse(
            content={
                "success": False,
                "message": f"Unknown tool: {body.tool_name}",
                "tool": body.tool_name,
            },
            status_code=400,
        )

    return await _execute_search_objects(body.arguments, allowed_cameras)
async def _execute_get_live_context(
    request: Request,
    camera: str,
    allowed_cameras: List[str],
) -> Dict[str, Any]:
    """Collect the current tracked-object snapshot for *camera*.

    Returns a dict with ``camera``, ``timestamp`` (the current frame time)
    and ``detections`` (label/zones/sub_label/stationary per object), or a
    dict with a single ``error`` key when the camera is unknown, not in
    *allowed_cameras*, or its state is unavailable.
    """
    # Access control first: the requester may only see permitted cameras.
    if camera not in allowed_cameras:
        return {
            "error": f"Camera '{camera}' not found or access denied",
        }
    if camera not in request.app.frigate_config.cameras:
        return {
            "error": f"Camera '{camera}' not found",
        }
    try:
        frame_processor = request.app.detected_frames_processor
        camera_state = frame_processor.camera_states.get(camera)
        if camera_state is None:
            return {
                "error": f"Camera '{camera}' state not available",
            }
        tracked_objects_dict = {}
        # Snapshot the tracked objects and frame time under the frame lock,
        # then do the per-object work outside the lock.
        with camera_state.current_frame_lock:
            tracked_objects = camera_state.tracked_objects.copy()
            frame_time = camera_state.current_frame_time
        for obj_id, tracked_obj in tracked_objects.items():
            obj_dict = tracked_obj.to_dict()
            # Only include objects observed in the current frame; stale
            # entries from earlier frames are skipped.
            if obj_dict.get("frame_time") == frame_time:
                tracked_objects_dict[obj_id] = {
                    "label": obj_dict.get("label"),
                    "zones": obj_dict.get("current_zones", []),
                    "sub_label": obj_dict.get("sub_label"),
                    "stationary": obj_dict.get("stationary", False),
                }
        return {
            "camera": camera,
            "timestamp": frame_time,
            "detections": list(tracked_objects_dict.values()),
        }
    except Exception as e:
        logger.error(f"Error executing get_live_context: {e}", exc_info=True)
        return {
            "error": "Error getting live context",
        }
async def _get_live_frame_image_url(
    request: Request,
    camera: str,
    allowed_cameras: List[str],
) -> Optional[str]:
    """
    Fetch the current live frame for a camera as a base64 data URL.

    Returns None if the frame cannot be retrieved. Used when include_live_image
    is set to attach the image to the first user message.

    Args:
        request: FastAPI request carrying app state (frame processor, config).
        camera: Camera name to snapshot.
        allowed_cameras: Cameras the requester may access.

    Returns:
        A ``data:image/jpeg;base64,...`` string, or None when the camera is
        unknown/forbidden or no frame is available.
    """
    # Reject cameras the requester may not access or that do not exist.
    if (
        camera not in allowed_cameras
        or camera not in request.app.frigate_config.cameras
    ):
        return None
    try:
        frame_processor = request.app.detected_frames_processor
        if camera not in frame_processor.camera_states:
            return None
        frame = frame_processor.get_current_frame(camera, {})
        if frame is None:
            return None
        # Downscale so the longest side is at most 1024 px, bounding the
        # size of the base64 payload attached to the chat message.
        height, width = frame.shape[:2]
        max_dimension = 1024
        if height > max_dimension or width > max_dimension:
            scale = max_dimension / max(height, width)
            frame = cv2.resize(
                frame,
                (int(width * scale), int(height * scale)),
                interpolation=cv2.INTER_AREA,
            )
        # Encode as JPEG (quality 85) and wrap in a data URL.
        _, img_encoded = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 85])
        b64 = base64.b64encode(img_encoded.tobytes()).decode("utf-8")
        return f"data:image/jpeg;base64,{b64}"
    except Exception as e:
        # Best-effort: a missing frame is logged at debug level, not raised.
        logger.debug("Failed to get live frame for %s: %s", camera, e)
        return None
async def _execute_tool_internal(
    tool_name: str,
    arguments: Dict[str, Any],
    request: Request,
    allowed_cameras: List[str],
) -> Dict[str, Any]:
    """
    Run a single tool call on behalf of the chat completion endpoint.

    Normalizes whatever the tool implementation returns (a JSONResponse body,
    a plain content attribute, or nothing) into a dict the LLM conversation
    can carry. Unknown tools and missing required arguments come back as
    ``{"error": ...}`` dicts.
    """
    if tool_name == "search_objects":
        response = await _execute_search_objects(arguments, allowed_cameras)
        try:
            # JSONResponse carries serialized bytes in .body; fall back to
            # .content for plain objects, else an empty dict.
            if hasattr(response, "body"):
                return json.loads(response.body.decode("utf-8"))
            if hasattr(response, "content"):
                return response.content
            return {}
        except (json.JSONDecodeError, AttributeError) as e:
            logger.warning(f"Failed to extract tool result: {e}")
            return {"error": "Failed to parse tool result"}

    if tool_name == "get_live_context":
        camera = arguments.get("camera")
        if camera:
            return await _execute_get_live_context(request, camera, allowed_cameras)
        logger.error(
            "Tool get_live_context failed: camera parameter is required. "
            "Arguments: %s",
            json.dumps(arguments),
        )
        return {"error": "Camera parameter is required"}

    logger.error(
        "Tool call failed: unknown tool %r. Expected one of: search_objects, get_live_context. "
        "Arguments received: %s",
        tool_name,
        json.dumps(arguments),
    )
    return {"error": f"Unknown tool: {tool_name}"}
async def _execute_pending_tools(
    pending_tool_calls: List[Dict[str, Any]],
    request: Request,
    allowed_cameras: List[str],
) -> tuple[List[ToolCall], List[Dict[str, Any]]]:
    """
    Execute a list of tool calls; return (ToolCall list for API response, tool result dicts for conversation).

    Each entry in *pending_tool_calls* is expected to carry ``name``, ``id``
    and an optional ``arguments`` dict. A failing tool never aborts the
    batch: the exception is converted into a JSON error payload attached to
    that call, and processing continues with the next one.
    """
    tool_calls_out: List[ToolCall] = []
    tool_results: List[Dict[str, Any]] = []
    for tool_call in pending_tool_calls:
        tool_name = tool_call["name"]
        # LLMs may send null arguments; normalize to an empty dict.
        tool_args = tool_call.get("arguments") or {}
        tool_call_id = tool_call["id"]
        logger.debug(
            f"Executing tool: {tool_name} (id: {tool_call_id}) with arguments: {json.dumps(tool_args, indent=2)}"
        )
        try:
            tool_result = await _execute_tool_internal(
                tool_name, tool_args, request, allowed_cameras
            )
            # Log tool-level errors, but still forward them so the LLM can
            # react to the failure in its reply.
            if isinstance(tool_result, dict) and tool_result.get("error"):
                logger.error(
                    "Tool call %s (id: %s) returned error: %s. Arguments: %s",
                    tool_name,
                    tool_call_id,
                    tool_result.get("error"),
                    json.dumps(tool_args),
                )
            # For object searches, add local-time fields and trim each event
            # down to the keys useful to the LLM (keeps the prompt small).
            if tool_name == "search_objects" and isinstance(tool_result, list):
                tool_result = _format_events_with_local_time(tool_result)
                _keys = {
                    "id",
                    "camera",
                    "label",
                    "zones",
                    "start_time_local",
                    "end_time_local",
                    "sub_label",
                    "event_count",
                }
                tool_result = [
                    {k: evt[k] for k in _keys if k in evt}
                    for evt in tool_result
                    if isinstance(evt, dict)
                ]
            # Conversation content must be a string: JSON-encode structured
            # results, pass strings through, stringify anything else.
            result_content = (
                json.dumps(tool_result)
                if isinstance(tool_result, (dict, list))
                else (tool_result if isinstance(tool_result, str) else str(tool_result))
            )
            tool_calls_out.append(
                ToolCall(name=tool_name, arguments=tool_args, response=result_content)
            )
            tool_results.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call_id,
                    "content": result_content,
                }
            )
        except Exception as e:
            logger.error(
                "Error executing tool %s (id: %s): %s. Arguments: %s",
                tool_name,
                tool_call_id,
                e,
                json.dumps(tool_args),
                exc_info=True,
            )
            # Surface the failure to the LLM as a structured error payload.
            error_content = json.dumps({"error": f"Tool execution failed: {str(e)}"})
            tool_calls_out.append(
                ToolCall(name=tool_name, arguments=tool_args, response=error_content)
            )
            tool_results.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call_id,
                    "content": error_content,
                }
            )
    return (tool_calls_out, tool_results)
@router.post(
"/chat/completion",
dependencies=[Depends(allow_any_authenticated())],
summary="Chat completion with tool calling",
description=(
"Send a chat message to the configured GenAI provider with tool calling support. "
"The LLM can call Frigate tools to answer questions about your cameras and events."
),
)
async def chat_completion(
request: Request,
body: ChatCompletionRequest = Body(...),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""
Chat completion endpoint with tool calling support.
This endpoint:
1. Gets the configured GenAI client
2. Gets tool definitions
3. Sends messages + tools to LLM
4. Handles tool_calls if present
5. Executes tools and sends results back to LLM
6. Repeats until final answer
7. Returns response to user
"""
genai_client = request.app.genai_manager.tool_client
if not genai_client:
return JSONResponse(
content={
"error": "GenAI is not configured. Please configure a GenAI provider in your Frigate config.",
},
status_code=400,
)
tools = get_tool_definitions()
conversation = []
current_datetime = datetime.now()
current_date_str = current_datetime.strftime("%Y-%m-%d")
current_time_str = current_datetime.strftime("%I:%M:%S %p")
cameras_info = []
config = request.app.frigate_config
for camera_id in allowed_cameras:
if camera_id not in config.cameras:
continue
camera_config = config.cameras[camera_id]
friendly_name = (
camera_config.friendly_name
if camera_config.friendly_name
else camera_id.replace("_", " ").title()
)
cameras_info.append(f" - {friendly_name} (ID: {camera_id})")
cameras_section = ""
if cameras_info:
cameras_section = (
"\n\nAvailable cameras:\n"
+ "\n".join(cameras_info)
+ "\n\nWhen users refer to cameras by their friendly name (e.g., 'Back Deck Camera'), use the corresponding camera ID (e.g., 'back_deck_cam') in tool calls."
)
live_image_note = ""
if body.include_live_image:
live_image_note = (
f"\n\nThe first user message includes a live image from camera "
f"'{body.include_live_image}'. Use get_live_context for that camera to get "
"current detection details (objects, zones) to aid in understanding the image."
)
system_prompt = f"""You are a helpful assistant for Frigate, a security camera NVR system. You help users answer questions about their cameras, detected objects, and events.
Current server local date and time: {current_date_str} at {current_time_str}
Do not start your response with phrases like "I will check...", "Let me see...", or "Let me look...". Answer directly.
Always present times to the user in the server's local timezone. When tool results include start_time_local and end_time_local, use those exact strings when listing or describing detection times—do not convert or invent timestamps. Do not use UTC or ISO format with Z for the user-facing answer unless the tool result only provides Unix timestamps without local time fields.
When users ask about "today", "yesterday", "this week", etc., use the current date above as reference.
When searching for objects or events, use ISO 8601 format for dates (e.g., {current_date_str}T00:00:00Z for the start of today).
Always be accurate with time calculations based on the current date provided.{cameras_section}{live_image_note}"""
conversation.append(
{
"role": "system",
"content": system_prompt,
}
)
first_user_message_seen = False
for msg in body.messages:
msg_dict = {
"role": msg.role,
"content": msg.content,
}
if msg.tool_call_id:
msg_dict["tool_call_id"] = msg.tool_call_id
if msg.name:
msg_dict["name"] = msg.name
if (
msg.role == "user"
and not first_user_message_seen
and body.include_live_image
):
first_user_message_seen = True
image_url = await _get_live_frame_image_url(
request, body.include_live_image, allowed_cameras
)
if image_url:
msg_dict["content"] = [
{"type": "text", "text": msg.content},
{"type": "image_url", "image_url": {"url": image_url}},
]
conversation.append(msg_dict)
tool_iterations = 0
tool_calls: List[ToolCall] = []
max_iterations = body.max_tool_iterations
logger.debug(
f"Starting chat completion with {len(conversation)} message(s), "
f"{len(tools)} tool(s) available, max_iterations={max_iterations}"
)
# True LLM streaming when client supports it and stream requested
if body.stream and hasattr(genai_client, "chat_with_tools_stream"):
stream_tool_calls: List[ToolCall] = []
stream_iterations = 0
async def stream_body_llm():
nonlocal conversation, stream_tool_calls, stream_iterations
while stream_iterations < max_iterations:
logger.debug(
f"Streaming LLM (iteration {stream_iterations + 1}/{max_iterations}) "
f"with {len(conversation)} message(s)"
)
async for event in genai_client.chat_with_tools_stream(
messages=conversation,
tools=tools if tools else None,
tool_choice="auto",
):
kind, value = event
if kind == "content_delta":
yield (
json.dumps({"type": "content", "delta": value}).encode(
"utf-8"
)
+ b"\n"
)
elif kind == "message":
msg = value
if msg.get("finish_reason") == "error":
yield (
json.dumps(
{
"type": "error",
"error": "An error occurred while processing your request.",
}
).encode("utf-8")
+ b"\n"
)
return
pending = msg.get("tool_calls")
if pending:
stream_iterations += 1
conversation.append(
build_assistant_message_for_conversation(
msg.get("content"), pending
)
)
executed_calls, tool_results = await _execute_pending_tools(
pending, request, allowed_cameras
)
stream_tool_calls.extend(executed_calls)
conversation.extend(tool_results)
yield (
json.dumps(
{
"type": "tool_calls",
"tool_calls": [
tc.model_dump() for tc in stream_tool_calls
],
}
).encode("utf-8")
+ b"\n"
)
break
else:
yield (json.dumps({"type": "done"}).encode("utf-8") + b"\n")
return
else:
yield json.dumps({"type": "done"}).encode("utf-8") + b"\n"
return StreamingResponse(
stream_body_llm(),
media_type="application/x-ndjson",
headers={"X-Accel-Buffering": "no"},
)
try:
while tool_iterations < max_iterations:
logger.debug(
f"Calling LLM (iteration {tool_iterations + 1}/{max_iterations}) "
f"with {len(conversation)} message(s) in conversation"
)
response = genai_client.chat_with_tools(
messages=conversation,
tools=tools if tools else None,
tool_choice="auto",
)
if response.get("finish_reason") == "error":
logger.error("GenAI client returned an error")
return JSONResponse(
content={
"error": "An error occurred while processing your request.",
},
status_code=500,
)
conversation.append(
build_assistant_message_for_conversation(
response.get("content"), response.get("tool_calls")
)
)
pending_tool_calls = response.get("tool_calls")
if not pending_tool_calls:
logger.debug(
f"Chat completion finished with final answer (iterations: {tool_iterations})"
)
final_content = response.get("content") or ""
if body.stream:
async def stream_body() -> Any:
if tool_calls:
yield (
json.dumps(
{
"type": "tool_calls",
"tool_calls": [
tc.model_dump() for tc in tool_calls
],
}
).encode("utf-8")
+ b"\n"
)
# Stream content in word-sized chunks for smooth UX
for part in _chunk_content(final_content):
yield (
json.dumps({"type": "content", "delta": part}).encode(
"utf-8"
)
+ b"\n"
)
yield json.dumps({"type": "done"}).encode("utf-8") + b"\n"
return StreamingResponse(
stream_body(),
media_type="application/x-ndjson",
)
return JSONResponse(
content=ChatCompletionResponse(
message=ChatMessageResponse(
role="assistant",
content=final_content,
tool_calls=None,
),
finish_reason=response.get("finish_reason", "stop"),
tool_iterations=tool_iterations,
tool_calls=tool_calls,
).model_dump(),
)
tool_iterations += 1
logger.debug(
f"Tool calls detected (iteration {tool_iterations}/{max_iterations}): "
f"{len(pending_tool_calls)} tool(s) to execute"
)
executed_calls, tool_results = await _execute_pending_tools(
pending_tool_calls, request, allowed_cameras
)
tool_calls.extend(executed_calls)
conversation.extend(tool_results)
logger.debug(
f"Added {len(tool_results)} tool result(s) to conversation. "
f"Continuing with next LLM call..."
)
logger.warning(
f"Max tool iterations ({max_iterations}) reached. Returning partial response."
)
return JSONResponse(
content=ChatCompletionResponse(
message=ChatMessageResponse(
role="assistant",
content="I reached the maximum number of tool call iterations. Please try rephrasing your question.",
tool_calls=None,
),
finish_reason="length",
tool_iterations=tool_iterations,
tool_calls=tool_calls,
).model_dump(),
)
except Exception as e:
logger.error(f"Error in chat completion: {e}", exc_info=True)
return JSONResponse(
content={
"error": "An error occurred while processing your request.",
},
status_code=500,
)

View File

@@ -1,7 +1,8 @@
from enum import Enum from enum import Enum
from typing import Optional from typing import Optional, Union
from pydantic import BaseModel from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema
class Extension(str, Enum): class Extension(str, Enum):
@@ -47,3 +48,15 @@ class MediaMjpegFeedQueryParams(BaseModel):
mask: Optional[int] = None mask: Optional[int] = None
motion: Optional[int] = None motion: Optional[int] = None
regions: Optional[int] = None regions: Optional[int] = None
class MediaRecordingsSummaryQueryParams(BaseModel):
timezone: str = "utc"
cameras: Optional[str] = "all"
class MediaRecordingsAvailabilityQueryParams(BaseModel):
cameras: str = "all"
before: Union[float, SkipJsonSchema[None]] = None
after: Union[float, SkipJsonSchema[None]] = None
scale: int = 30

View File

@@ -1,21 +0,0 @@
from typing import Optional, Union
from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema
class MediaRecordingsSummaryQueryParams(BaseModel):
timezone: str = "utc"
cameras: Optional[str] = "all"
class MediaRecordingsAvailabilityQueryParams(BaseModel):
cameras: str = "all"
before: Union[float, SkipJsonSchema[None]] = None
after: Union[float, SkipJsonSchema[None]] = None
scale: int = 30
class RecordingsDeleteQueryParams(BaseModel):
keep: Optional[str] = None
cameras: Optional[str] = "all"

View File

@@ -1,6 +1,6 @@
from typing import Any, Dict, List, Optional from typing import Any, Dict, Optional
from pydantic import BaseModel, Field from pydantic import BaseModel
class AppConfigSetBody(BaseModel): class AppConfigSetBody(BaseModel):
@@ -27,16 +27,3 @@ class AppPostLoginBody(BaseModel):
class AppPutRoleBody(BaseModel): class AppPutRoleBody(BaseModel):
role: str role: str
class MediaSyncBody(BaseModel):
dry_run: bool = Field(
default=True, description="If True, only report orphans without deleting them"
)
media_types: List[str] = Field(
default=["all"],
description="Types of media to sync: 'all', 'event_snapshots', 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'",
)
force: bool = Field(
default=False, description="If True, bypass safety threshold checks"
)

View File

@@ -1,45 +0,0 @@
"""Chat API request models."""
from typing import Optional
from pydantic import BaseModel, Field
class ChatMessage(BaseModel):
"""A single message in a chat conversation."""
role: str = Field(
description="Message role: 'user', 'assistant', 'system', or 'tool'"
)
content: str = Field(description="Message content")
tool_call_id: Optional[str] = Field(
default=None, description="For tool messages, the ID of the tool call"
)
name: Optional[str] = Field(
default=None, description="For tool messages, the tool name"
)
class ChatCompletionRequest(BaseModel):
"""Request for chat completion with tool calling."""
messages: list[ChatMessage] = Field(
description="List of messages in the conversation"
)
max_tool_iterations: int = Field(
default=5,
ge=1,
le=10,
description="Maximum number of tool call iterations (default: 5)",
)
include_live_image: Optional[str] = Field(
default=None,
description=(
"If set, the current live frame from this camera is attached to the first "
"user message as multimodal content. Use with get_live_context for detection info."
),
)
stream: bool = Field(
default=False,
description="If true, stream the final assistant response in the body as newline-delimited JSON.",
)

View File

@@ -41,7 +41,6 @@ class EventsCreateBody(BaseModel):
duration: Optional[int] = 30 duration: Optional[int] = 30
include_recording: Optional[bool] = True include_recording: Optional[bool] = True
draw: Optional[dict] = {} draw: Optional[dict] = {}
pre_capture: Optional[int] = None
class EventsEndBody(BaseModel): class EventsEndBody(BaseModel):

View File

@@ -1,35 +0,0 @@
from typing import Optional
from pydantic import BaseModel, Field
class ExportCaseCreateBody(BaseModel):
"""Request body for creating a new export case."""
name: str = Field(max_length=100, description="Friendly name of the export case")
description: Optional[str] = Field(
default=None, description="Optional description of the export case"
)
class ExportCaseUpdateBody(BaseModel):
"""Request body for updating an existing export case."""
name: Optional[str] = Field(
default=None,
max_length=100,
description="Updated friendly name of the export case",
)
description: Optional[str] = Field(
default=None, description="Updated description of the export case"
)
class ExportCaseAssignBody(BaseModel):
"""Request body for assigning or unassigning an export to a case."""
export_case_id: Optional[str] = Field(
default=None,
max_length=30,
description="Case ID to assign to the export, or null to unassign",
)

View File

@@ -3,47 +3,18 @@ from typing import Optional, Union
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from pydantic.json_schema import SkipJsonSchema from pydantic.json_schema import SkipJsonSchema
from frigate.record.export import PlaybackSourceEnum from frigate.record.export import (
PlaybackFactorEnum,
PlaybackSourceEnum,
)
class ExportRecordingsBody(BaseModel): class ExportRecordingsBody(BaseModel):
playback: PlaybackFactorEnum = Field(
default=PlaybackFactorEnum.realtime, title="Playback factor"
)
source: PlaybackSourceEnum = Field( source: PlaybackSourceEnum = Field(
default=PlaybackSourceEnum.recordings, title="Playback source" default=PlaybackSourceEnum.recordings, title="Playback source"
) )
name: Optional[str] = Field(title="Friendly name", default=None, max_length=256) name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)
image_path: Union[str, SkipJsonSchema[None]] = None image_path: Union[str, SkipJsonSchema[None]] = None
export_case_id: Optional[str] = Field(
default=None,
title="Export case ID",
max_length=30,
description="ID of the export case to assign this export to",
)
class ExportRecordingsCustomBody(BaseModel):
source: PlaybackSourceEnum = Field(
default=PlaybackSourceEnum.recordings, title="Playback source"
)
name: str = Field(title="Friendly name", default=None, max_length=256)
image_path: Union[str, SkipJsonSchema[None]] = None
export_case_id: Optional[str] = Field(
default=None,
title="Export case ID",
max_length=30,
description="ID of the export case to assign this export to",
)
ffmpeg_input_args: Optional[str] = Field(
default=None,
title="FFmpeg input arguments",
description="Custom FFmpeg input arguments. If not provided, defaults to timelapse input args.",
)
ffmpeg_output_args: Optional[str] = Field(
default=None,
title="FFmpeg output arguments",
description="Custom FFmpeg output arguments. If not provided, defaults to timelapse output args.",
)
cpu_fallback: bool = Field(
default=False,
title="CPU Fallback",
description="If true, retry export without hardware acceleration if the initial export fails.",
)

View File

@@ -1,54 +0,0 @@
"""Chat API response models."""
from typing import Any, Optional
from pydantic import BaseModel, Field
class ToolCallInvocation(BaseModel):
"""A tool call requested by the LLM (before execution)."""
id: str = Field(description="Unique identifier for this tool call")
name: str = Field(description="Tool name to call")
arguments: dict[str, Any] = Field(description="Arguments for the tool call")
class ChatMessageResponse(BaseModel):
"""A message in the chat response."""
role: str = Field(description="Message role")
content: Optional[str] = Field(
default=None, description="Message content (None if tool calls present)"
)
tool_calls: Optional[list[ToolCallInvocation]] = Field(
default=None, description="Tool calls if LLM wants to call tools"
)
class ToolCall(BaseModel):
"""A tool that was executed during the completion, with its response."""
name: str = Field(description="Tool name that was called")
arguments: dict[str, Any] = Field(
default_factory=dict, description="Arguments passed to the tool"
)
response: str = Field(
default="",
description="The response or result returned from the tool execution",
)
class ChatCompletionResponse(BaseModel):
"""Response from chat completion."""
message: ChatMessageResponse = Field(description="The assistant's message")
finish_reason: str = Field(
description="Reason generation stopped: 'stop', 'tool_calls', 'length', 'error'"
)
tool_iterations: int = Field(
default=0, description="Number of tool call iterations performed"
)
tool_calls: list[ToolCall] = Field(
default_factory=list,
description="List of tool calls that were executed during this completion",
)

View File

@@ -1,22 +0,0 @@
from typing import List, Optional
from pydantic import BaseModel, Field
class ExportCaseModel(BaseModel):
"""Model representing a single export case."""
id: str = Field(description="Unique identifier for the export case")
name: str = Field(description="Friendly name of the export case")
description: Optional[str] = Field(
default=None, description="Optional description of the export case"
)
created_at: float = Field(
description="Unix timestamp when the export case was created"
)
updated_at: float = Field(
description="Unix timestamp when the export case was last updated"
)
ExportCasesResponse = List[ExportCaseModel]

View File

@@ -15,9 +15,6 @@ class ExportModel(BaseModel):
in_progress: bool = Field( in_progress: bool = Field(
description="Whether the export is currently being processed" description="Whether the export is currently being processed"
) )
export_case_id: Optional[str] = Field(
default=None, description="ID of the export case this export belongs to"
)
class StartExportResponse(BaseModel): class StartExportResponse(BaseModel):

View File

@@ -3,15 +3,13 @@ from enum import Enum
class Tags(Enum): class Tags(Enum):
app = "App" app = "App"
auth = "Auth"
camera = "Camera" camera = "Camera"
chat = "Chat" preview = "Preview"
events = "Events"
export = "Export"
classification = "Classification"
logs = "Logs" logs = "Logs"
media = "Media" media = "Media"
notifications = "Notifications" notifications = "Notifications"
preview = "Preview"
recordings = "Recordings"
review = "Review" review = "Review"
export = "Export"
events = "Events"
classification = "Classification"
auth = "Auth"

View File

@@ -1782,7 +1782,6 @@ def create_event(
body.duration, body.duration,
"api", "api",
body.draw, body.draw,
body.pre_capture,
), ),
EventMetadataTypeEnum.manual_event_create.value, EventMetadataTypeEnum.manual_event_create.value,
) )

View File

@@ -4,10 +4,10 @@ import logging
import random import random
import string import string
from pathlib import Path from pathlib import Path
from typing import List, Optional from typing import List
import psutil import psutil
from fastapi import APIRouter, Depends, Query, Request from fastapi import APIRouter, Depends, Request
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filepath from pathvalidate import sanitize_filepath
from peewee import DoesNotExist from peewee import DoesNotExist
@@ -19,20 +19,8 @@ from frigate.api.auth import (
require_camera_access, require_camera_access,
require_role, require_role,
) )
from frigate.api.defs.request.export_case_body import ( from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
ExportCaseAssignBody,
ExportCaseCreateBody,
ExportCaseUpdateBody,
)
from frigate.api.defs.request.export_recordings_body import (
ExportRecordingsBody,
ExportRecordingsCustomBody,
)
from frigate.api.defs.request.export_rename_body import ExportRenameBody from frigate.api.defs.request.export_rename_body import ExportRenameBody
from frigate.api.defs.response.export_case_response import (
ExportCaseModel,
ExportCasesResponse,
)
from frigate.api.defs.response.export_response import ( from frigate.api.defs.response.export_response import (
ExportModel, ExportModel,
ExportsResponse, ExportsResponse,
@@ -41,9 +29,9 @@ from frigate.api.defs.response.export_response import (
from frigate.api.defs.response.generic_response import GenericResponse from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.const import CLIPS_DIR, EXPORT_DIR from frigate.const import CLIPS_DIR, EXPORT_DIR
from frigate.models import Export, ExportCase, Previews, Recordings from frigate.models import Export, Previews, Recordings
from frigate.record.export import ( from frigate.record.export import (
DEFAULT_TIME_LAPSE_FFMPEG_ARGS, PlaybackFactorEnum,
PlaybackSourceEnum, PlaybackSourceEnum,
RecordingExporter, RecordingExporter,
) )
@@ -64,182 +52,17 @@ router = APIRouter(tags=[Tags.export])
) )
def get_exports( def get_exports(
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter), allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
export_case_id: Optional[str] = None,
cameras: Optional[str] = Query(default="all"),
start_date: Optional[float] = None,
end_date: Optional[float] = None,
): ):
query = Export.select().where(Export.camera << allowed_cameras) exports = (
Export.select()
if export_case_id is not None: .where(Export.camera << allowed_cameras)
if export_case_id == "unassigned": .order_by(Export.date.desc())
query = query.where(Export.export_case.is_null(True)) .dicts()
else: .iterator()
query = query.where(Export.export_case == export_case_id) )
if cameras and cameras != "all":
requested = set(cameras.split(","))
filtered_cameras = list(requested.intersection(allowed_cameras))
if not filtered_cameras:
return JSONResponse(content=[])
query = query.where(Export.camera << filtered_cameras)
if start_date is not None:
query = query.where(Export.date >= start_date)
if end_date is not None:
query = query.where(Export.date <= end_date)
exports = query.order_by(Export.date.desc()).dicts().iterator()
return JSONResponse(content=[e for e in exports]) return JSONResponse(content=[e for e in exports])
@router.get(
"/cases",
response_model=ExportCasesResponse,
dependencies=[Depends(allow_any_authenticated())],
summary="Get export cases",
description="Gets all export cases from the database.",
)
def get_export_cases():
cases = (
ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator()
)
return JSONResponse(content=[c for c in cases])
@router.post(
"/cases",
response_model=ExportCaseModel,
dependencies=[Depends(require_role(["admin"]))],
summary="Create export case",
description="Creates a new export case.",
)
def create_export_case(body: ExportCaseCreateBody):
case = ExportCase.create(
id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)),
name=body.name,
description=body.description,
created_at=Path().stat().st_mtime,
updated_at=Path().stat().st_mtime,
)
return JSONResponse(content=model_to_dict(case))
@router.get(
"/cases/{case_id}",
response_model=ExportCaseModel,
dependencies=[Depends(allow_any_authenticated())],
summary="Get a single export case",
description="Gets a specific export case by ID.",
)
def get_export_case(case_id: str):
try:
case = ExportCase.get(ExportCase.id == case_id)
return JSONResponse(content=model_to_dict(case))
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
@router.patch(
"/cases/{case_id}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Update export case",
description="Updates an existing export case.",
)
def update_export_case(case_id: str, body: ExportCaseUpdateBody):
try:
case = ExportCase.get(ExportCase.id == case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
if body.name is not None:
case.name = body.name
if body.description is not None:
case.description = body.description
case.save()
return JSONResponse(
content={"success": True, "message": "Successfully updated export case."}
)
@router.delete(
"/cases/{case_id}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete export case",
description="""Deletes an export case.\n Exports that reference this case will have their export_case set to null.\n """,
)
def delete_export_case(case_id: str):
try:
case = ExportCase.get(ExportCase.id == case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
# Unassign exports from this case but keep the exports themselves
Export.update(export_case=None).where(Export.export_case == case).execute()
case.delete_instance()
return JSONResponse(
content={"success": True, "message": "Successfully deleted export case."}
)
@router.patch(
"/export/{export_id}/case",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Assign export to case",
description=(
"Assigns an export to a case, or unassigns it if export_case_id is null."
),
)
async def assign_export_case(
export_id: str,
body: ExportCaseAssignBody,
request: Request,
):
try:
export: Export = Export.get(Export.id == export_id)
await require_camera_access(export.camera, request=request)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export not found."},
status_code=404,
)
if body.export_case_id is not None:
try:
ExportCase.get(ExportCase.id == body.export_case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found."},
status_code=404,
)
export.export_case = body.export_case_id
else:
export.export_case = None
export.save()
return JSONResponse(
content={"success": True, "message": "Successfully updated export case."}
)
@router.post( @router.post(
"/export/{camera_name}/start/{start_time}/end/{end_time}", "/export/{camera_name}/start/{start_time}/end/{end_time}",
response_model=StartExportResponse, response_model=StartExportResponse,
@@ -265,20 +88,11 @@ def export_recording(
status_code=404, status_code=404,
) )
playback_factor = body.playback
playback_source = body.source playback_source = body.source
friendly_name = body.name friendly_name = body.name
existing_image = sanitize_filepath(body.image_path) if body.image_path else None existing_image = sanitize_filepath(body.image_path) if body.image_path else None
export_case_id = body.export_case_id
if export_case_id is not None:
try:
ExportCase.get(ExportCase.id == export_case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
# Ensure that existing_image is a valid path # Ensure that existing_image is a valid path
if existing_image and not existing_image.startswith(CLIPS_DIR): if existing_image and not existing_image.startswith(CLIPS_DIR):
return JSONResponse( return JSONResponse(
@@ -337,12 +151,16 @@ def export_recording(
existing_image, existing_image,
int(start_time), int(start_time),
int(end_time), int(end_time),
(
PlaybackFactorEnum[playback_factor]
if playback_factor in PlaybackFactorEnum.__members__.values()
else PlaybackFactorEnum.realtime
),
( (
PlaybackSourceEnum[playback_source] PlaybackSourceEnum[playback_source]
if playback_source in PlaybackSourceEnum.__members__.values() if playback_source in PlaybackSourceEnum.__members__.values()
else PlaybackSourceEnum.recordings else PlaybackSourceEnum.recordings
), ),
export_case_id,
) )
exporter.start() exporter.start()
return JSONResponse( return JSONResponse(
@@ -453,138 +271,6 @@ async def export_delete(event_id: str, request: Request):
) )
@router.post(
"/export/custom/{camera_name}/start/{start_time}/end/{end_time}",
response_model=StartExportResponse,
dependencies=[Depends(require_camera_access)],
summary="Start custom recording export",
description="""Starts an export of a recording for the specified time range using custom FFmpeg arguments.
The export can be from recordings or preview footage. Returns the export ID if
successful, or an error message if the camera is invalid or no recordings/previews
are found for the time range. If ffmpeg_input_args and ffmpeg_output_args are not provided,
defaults to timelapse export settings.""",
)
def export_recording_custom(
request: Request,
camera_name: str,
start_time: float,
end_time: float,
body: ExportRecordingsCustomBody,
):
if not camera_name or not request.app.frigate_config.cameras.get(camera_name):
return JSONResponse(
content=(
{"success": False, "message": f"{camera_name} is not a valid camera."}
),
status_code=404,
)
playback_source = body.source
friendly_name = body.name
existing_image = sanitize_filepath(body.image_path) if body.image_path else None
ffmpeg_input_args = body.ffmpeg_input_args
ffmpeg_output_args = body.ffmpeg_output_args
cpu_fallback = body.cpu_fallback
export_case_id = body.export_case_id
if export_case_id is not None:
try:
ExportCase.get(ExportCase.id == export_case_id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Export case not found"},
status_code=404,
)
# Ensure that existing_image is a valid path
if existing_image and not existing_image.startswith(CLIPS_DIR):
return JSONResponse(
content=({"success": False, "message": "Invalid image path"}),
status_code=400,
)
if playback_source == "recordings":
recordings_count = (
Recordings.select()
.where(
Recordings.start_time.between(start_time, end_time)
| Recordings.end_time.between(start_time, end_time)
| (
(start_time > Recordings.start_time)
& (end_time < Recordings.end_time)
)
)
.where(Recordings.camera == camera_name)
.count()
)
if recordings_count <= 0:
return JSONResponse(
content=(
{"success": False, "message": "No recordings found for time range"}
),
status_code=400,
)
else:
previews_count = (
Previews.select()
.where(
Previews.start_time.between(start_time, end_time)
| Previews.end_time.between(start_time, end_time)
| ((start_time > Previews.start_time) & (end_time < Previews.end_time))
)
.where(Previews.camera == camera_name)
.count()
)
if not is_current_hour(start_time) and previews_count <= 0:
return JSONResponse(
content=(
{"success": False, "message": "No previews found for time range"}
),
status_code=400,
)
export_id = f"{camera_name}_{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
# Set default values if not provided (timelapse defaults)
if ffmpeg_input_args is None:
ffmpeg_input_args = ""
if ffmpeg_output_args is None:
ffmpeg_output_args = DEFAULT_TIME_LAPSE_FFMPEG_ARGS
exporter = RecordingExporter(
request.app.frigate_config,
export_id,
camera_name,
friendly_name,
existing_image,
int(start_time),
int(end_time),
(
PlaybackSourceEnum[playback_source]
if playback_source in PlaybackSourceEnum.__members__.values()
else PlaybackSourceEnum.recordings
),
export_case_id,
ffmpeg_input_args,
ffmpeg_output_args,
cpu_fallback,
)
exporter.start()
return JSONResponse(
content=(
{
"success": True,
"message": "Starting export of recording.",
"export_id": export_id,
}
),
status_code=200,
)
@router.get( @router.get(
"/exports/{export_id}", "/exports/{export_id}",
response_model=ExportModel, response_model=ExportModel,

View File

@@ -16,14 +16,12 @@ from frigate.api import app as main_app
from frigate.api import ( from frigate.api import (
auth, auth,
camera, camera,
chat,
classification, classification,
event, event,
export, export,
media, media,
notification, notification,
preview, preview,
record,
review, review,
) )
from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default
@@ -33,7 +31,6 @@ from frigate.comms.event_metadata_updater import (
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.config.camera.updater import CameraConfigUpdatePublisher from frigate.config.camera.updater import CameraConfigUpdatePublisher
from frigate.embeddings import EmbeddingsContext from frigate.embeddings import EmbeddingsContext
from frigate.genai import GenAIClientManager
from frigate.ptz.onvif import OnvifController from frigate.ptz.onvif import OnvifController
from frigate.stats.emitter import StatsEmitter from frigate.stats.emitter import StatsEmitter
from frigate.storage import StorageMaintainer from frigate.storage import StorageMaintainer
@@ -123,7 +120,6 @@ def create_fastapi_app(
# Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters # Order of include_router matters: https://fastapi.tiangolo.com/tutorial/path-params/#order-matters
app.include_router(auth.router) app.include_router(auth.router)
app.include_router(camera.router) app.include_router(camera.router)
app.include_router(chat.router)
app.include_router(classification.router) app.include_router(classification.router)
app.include_router(review.router) app.include_router(review.router)
app.include_router(main_app.router) app.include_router(main_app.router)
@@ -132,10 +128,8 @@ def create_fastapi_app(
app.include_router(export.router) app.include_router(export.router)
app.include_router(event.router) app.include_router(event.router)
app.include_router(media.router) app.include_router(media.router)
app.include_router(record.router)
# App Properties # App Properties
app.frigate_config = frigate_config app.frigate_config = frigate_config
app.genai_manager = GenAIClientManager(frigate_config)
app.embeddings = embeddings app.embeddings = embeddings
app.detected_frames_processor = detected_frames_processor app.detected_frames_processor = detected_frames_processor
app.storage_maintainer = storage_maintainer app.storage_maintainer = storage_maintainer

View File

@@ -8,8 +8,9 @@ import os
import subprocess as sp import subprocess as sp
import time import time
from datetime import datetime, timedelta, timezone from datetime import datetime, timedelta, timezone
from functools import reduce
from pathlib import Path as FilePath from pathlib import Path as FilePath
from typing import Any from typing import Any, List
from urllib.parse import unquote from urllib.parse import unquote
import cv2 import cv2
@@ -18,11 +19,12 @@ import pytz
from fastapi import APIRouter, Depends, Path, Query, Request, Response from fastapi import APIRouter, Depends, Path, Query, Request, Response
from fastapi.responses import FileResponse, JSONResponse, StreamingResponse from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
from pathvalidate import sanitize_filename from pathvalidate import sanitize_filename
from peewee import DoesNotExist, fn from peewee import DoesNotExist, fn, operator
from tzlocal import get_localzone_name from tzlocal import get_localzone_name
from frigate.api.auth import ( from frigate.api.auth import (
allow_any_authenticated, allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access, require_camera_access,
) )
from frigate.api.defs.query.media_query_parameters import ( from frigate.api.defs.query.media_query_parameters import (
@@ -30,6 +32,8 @@ from frigate.api.defs.query.media_query_parameters import (
MediaEventsSnapshotQueryParams, MediaEventsSnapshotQueryParams,
MediaLatestFrameQueryParams, MediaLatestFrameQueryParams,
MediaMjpegFeedQueryParams, MediaMjpegFeedQueryParams,
MediaRecordingsAvailabilityQueryParams,
MediaRecordingsSummaryQueryParams,
) )
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.camera.state import CameraState from frigate.camera.state import CameraState
@@ -40,12 +44,13 @@ from frigate.const import (
INSTALL_DIR, INSTALL_DIR,
MAX_SEGMENT_DURATION, MAX_SEGMENT_DURATION,
PREVIEW_FRAME_TYPE, PREVIEW_FRAME_TYPE,
RECORD_DIR,
) )
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.output.preview import get_most_recent_preview_frame
from frigate.track.object_processing import TrackedObjectProcessor from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.file import get_event_thumbnail_bytes from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording from frigate.util.image import get_image_from_recording
from frigate.util.time import get_dst_transitions
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -126,9 +131,7 @@ async def camera_ptz_info(request: Request, camera_name: str):
@router.get( @router.get(
"/{camera_name}/latest.{extension}", "/{camera_name}/latest.{extension}", dependencies=[Depends(require_camera_access)]
dependencies=[Depends(require_camera_access)],
description="Returns the latest frame from the specified camera in the requested format (jpg, png, webp). Falls back to preview frames if the camera is offline.",
) )
async def latest_frame( async def latest_frame(
request: Request, request: Request,
@@ -162,26 +165,9 @@ async def latest_frame(
or 10 or 10
) )
is_offline = False
if frame is None or datetime.now().timestamp() > ( if frame is None or datetime.now().timestamp() > (
frame_processor.get_current_frame_time(camera_name) + retry_interval frame_processor.get_current_frame_time(camera_name) + retry_interval
): ):
last_frame_time = frame_processor.get_current_frame_time(camera_name)
preview_path = get_most_recent_preview_frame(
camera_name, before=last_frame_time
)
if preview_path:
logger.debug(f"Using most recent preview frame for {camera_name}")
frame = cv2.imread(preview_path, cv2.IMREAD_UNCHANGED)
if frame is not None:
is_offline = True
if frame is None or not is_offline:
logger.debug(
f"No live or preview frame available for {camera_name}. Using error image."
)
if request.app.camera_error_image is None: if request.app.camera_error_image is None:
error_image = glob.glob( error_image = glob.glob(
os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg") os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
@@ -214,18 +200,14 @@ async def latest_frame(
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA) frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
_, img = cv2.imencode(f".{extension.value}", frame, quality_params) _, img = cv2.imencode(f".{extension.value}", frame, quality_params)
headers = {
"Cache-Control": "no-store" if not params.store else "private, max-age=60",
}
if is_offline:
headers["X-Frigate-Offline"] = "true"
return Response( return Response(
content=img.tobytes(), content=img.tobytes(),
media_type=extension.get_mime_type(), media_type=extension.get_mime_type(),
headers=headers, headers={
"Cache-Control": "no-store"
if not params.store
else "private, max-age=60",
},
) )
elif ( elif (
camera_name == "birdseye" camera_name == "birdseye"
@@ -415,6 +397,333 @@ async def submit_recording_snapshot_to_plus(
) )
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
"storage"
][RECORD_DIR]
if not recording_stats:
return JSONResponse({})
total_mb = recording_stats["total"]
camera_usages: dict[str, dict] = (
request.app.storage_maintainer.calculate_camera_usages()
)
for camera_name in camera_usages.keys():
if camera_usages.get(camera_name, {}).get("usage"):
camera_usages[camera_name]["usage_percent"] = (
camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
) * 100
return JSONResponse(content=camera_usages)
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
request: Request,
params: MediaRecordingsSummaryQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Returns true/false by day indicating if recordings exist"""
cameras = params.cameras
if cameras != "all":
requested = set(unquote(cameras).split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
camera_list = list(filtered)
else:
camera_list = allowed_cameras
time_range_query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.where(Recordings.camera << camera_list)
.dicts()
.get()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
if min_time is None or max_time is None:
return JSONResponse(content={})
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
days: dict[str, bool] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_query = (
Recordings.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day")
)
.where(
(Recordings.camera << camera_list)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
for g in period_query:
days[g.day] = True
return JSONResponse(content=dict(sorted(days.items())))
@router.get(
"/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
"""Returns hourly summary for recordings of given camera"""
time_range_query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.where(Recordings.camera == camera_name)
.dicts()
.get()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
days: dict[str, dict] = {}
if min_time is None or max_time is None:
return JSONResponse(content=list(days.values()))
dst_periods = get_dst_transitions(timezone, min_time, max_time)
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
recording_groups = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(
(Recordings.camera == camera_name)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.where(
(Event.start_time >= period_start) & (Event.start_time <= period_end)
)
.group_by((Event.start_time + period_offset).cast("int") / 3600)
.namedtuples()
)
event_map = {g.hour: g.count for g in event_groups}
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day in days:
# merge counts if already present (edge-case at DST boundary)
days[day]["events"] += events_count or 0
days[day]["hours"].append(hour_data)
else:
days[day] = {
"events": events_count or 0,
"hours": [hour_data],
"day": day,
}
return JSONResponse(content=list(days.values()))
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
async def recordings(
camera_name: str,
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
before: float = datetime.now().timestamp(),
):
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
recordings = (
Recordings.select(
Recordings.id,
Recordings.start_time,
Recordings.end_time,
Recordings.segment_size,
Recordings.motion,
Recordings.objects,
Recordings.duration,
)
.where(
Recordings.camera == camera_name,
Recordings.end_time >= after,
Recordings.start_time <= before,
)
.order_by(Recordings.start_time)
.dicts()
.iterator()
)
return JSONResponse(content=list(recordings))
@router.get(
"/recordings/unavailable",
response_model=list[dict],
dependencies=[Depends(allow_any_authenticated())],
)
async def no_recordings(
request: Request,
params: MediaRecordingsAvailabilityQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Get time ranges with no recordings."""
cameras = params.cameras
if cameras != "all":
requested = set(unquote(cameras).split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content=[])
cameras = ",".join(filtered)
else:
cameras = allowed_cameras
before = params.before or datetime.datetime.now().timestamp()
after = (
params.after
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
)
scale = params.scale
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((Recordings.camera << camera_list))
else:
camera_list = allowed_cameras
# Get recording start times
data: list[Recordings] = (
Recordings.select(Recordings.start_time, Recordings.end_time)
.where(reduce(operator.and_, clauses))
.order_by(Recordings.start_time.asc())
.dicts()
.iterator()
)
# Convert recordings to list of (start, end) tuples
recordings = [(r["start_time"], r["end_time"]) for r in data]
# Iterate through time segments and check if each has any recording
no_recording_segments = []
current = after
current_gap_start = None
while current < before:
segment_end = min(current + scale, before)
# Check if this segment overlaps with any recording
has_recording = any(
rec_start < segment_end and rec_end > current
for rec_start, rec_end in recordings
)
if not has_recording:
# This segment has no recordings
if current_gap_start is None:
current_gap_start = current # Start a new gap
else:
# This segment has recordings
if current_gap_start is not None:
# End the current gap and append it
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(current)}
)
current_gap_start = None
current = segment_end
# Append the last gap if it exists
if current_gap_start is not None:
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(before)}
)
return JSONResponse(content=no_recording_segments)
@router.get( @router.get(
"/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4", "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
dependencies=[Depends(require_camera_access)], dependencies=[Depends(require_camera_access)],
@@ -737,7 +1046,6 @@ async def event_snapshot(
): ):
event_complete = False event_complete = False
jpg_bytes = None jpg_bytes = None
frame_time = 0
try: try:
event = Event.get(Event.id == event_id, Event.end_time != None) event = Event.get(Event.id == event_id, Event.end_time != None)
event_complete = True event_complete = True
@@ -762,7 +1070,7 @@ async def event_snapshot(
if event_id in camera_state.tracked_objects: if event_id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(event_id) tracked_obj = camera_state.tracked_objects.get(event_id)
if tracked_obj is not None: if tracked_obj is not None:
jpg_bytes, frame_time = tracked_obj.get_img_bytes( jpg_bytes = tracked_obj.get_img_bytes(
ext="jpg", ext="jpg",
timestamp=params.timestamp, timestamp=params.timestamp,
bounding_box=params.bbox, bounding_box=params.bbox,
@@ -791,7 +1099,6 @@ async def event_snapshot(
headers = { headers = {
"Content-Type": "image/jpeg", "Content-Type": "image/jpeg",
"Cache-Control": "private, max-age=31536000" if event_complete else "no-store", "Cache-Control": "private, max-age=31536000" if event_complete else "no-store",
"X-Frame-Time": str(frame_time),
} }
if params.download: if params.download:

View File

@@ -1,479 +0,0 @@
"""Recording APIs."""
import logging
from datetime import datetime, timedelta
from functools import reduce
from pathlib import Path
from typing import List
from urllib.parse import unquote
from fastapi import APIRouter, Depends, Request
from fastapi import Path as PathParam
from fastapi.responses import JSONResponse
from peewee import fn, operator
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access,
require_role,
)
from frigate.api.defs.query.recordings_query_parameters import (
MediaRecordingsAvailabilityQueryParams,
MediaRecordingsSummaryQueryParams,
RecordingsDeleteQueryParams,
)
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings
from frigate.util.time import get_dst_transitions
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.recordings])
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
"storage"
][RECORD_DIR]
if not recording_stats:
return JSONResponse({})
total_mb = recording_stats["total"]
camera_usages: dict[str, dict] = (
request.app.storage_maintainer.calculate_camera_usages()
)
for camera_name in camera_usages.keys():
if camera_usages.get(camera_name, {}).get("usage"):
camera_usages[camera_name]["usage_percent"] = (
camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
) * 100
return JSONResponse(content=camera_usages)
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
request: Request,
params: MediaRecordingsSummaryQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Returns true/false by day indicating if recordings exist"""
cameras = params.cameras
if cameras != "all":
requested = set(unquote(cameras).split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
camera_list = list(filtered)
else:
camera_list = allowed_cameras
time_range_query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.where(Recordings.camera << camera_list)
.dicts()
.get()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
if min_time is None or max_time is None:
return JSONResponse(content={})
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
days: dict[str, bool] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_query = (
Recordings.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day")
)
.where(
(Recordings.camera << camera_list)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
for g in period_query:
days[g.day] = True
return JSONResponse(content=dict(sorted(days.items())))
@router.get(
"/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
"""Returns hourly summary for recordings of given camera"""
time_range_query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
)
.where(Recordings.camera == camera_name)
.dicts()
.get()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
days: dict[str, dict] = {}
if min_time is None or max_time is None:
return JSONResponse(content=list(days.values()))
dst_periods = get_dst_transitions(timezone, min_time, max_time)
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
recording_groups = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(
(Recordings.camera == camera_name)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.where(
(Event.start_time >= period_start) & (Event.start_time <= period_end)
)
.group_by((Event.start_time + period_offset).cast("int") / 3600)
.namedtuples()
)
event_map = {g.hour: g.count for g in event_groups}
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day in days:
# merge counts if already present (edge-case at DST boundary)
days[day]["events"] += events_count or 0
days[day]["hours"].append(hour_data)
else:
days[day] = {
"events": events_count or 0,
"hours": [hour_data],
"day": day,
}
return JSONResponse(content=list(days.values()))
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
async def recordings(
camera_name: str,
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
before: float = datetime.now().timestamp(),
):
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
recordings = (
Recordings.select(
Recordings.id,
Recordings.start_time,
Recordings.end_time,
Recordings.segment_size,
Recordings.motion,
Recordings.objects,
Recordings.duration,
)
.where(
Recordings.camera == camera_name,
Recordings.end_time >= after,
Recordings.start_time <= before,
)
.order_by(Recordings.start_time)
.dicts()
.iterator()
)
return JSONResponse(content=list(recordings))
@router.get(
"/recordings/unavailable",
response_model=list[dict],
dependencies=[Depends(allow_any_authenticated())],
)
async def no_recordings(
request: Request,
params: MediaRecordingsAvailabilityQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Get time ranges with no recordings."""
cameras = params.cameras
if cameras != "all":
requested = set(unquote(cameras).split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content=[])
cameras = ",".join(filtered)
else:
cameras = allowed_cameras
before = params.before or datetime.datetime.now().timestamp()
after = (
params.after
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
)
scale = params.scale
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((Recordings.camera << camera_list))
else:
camera_list = allowed_cameras
# Get recording start times
data: list[Recordings] = (
Recordings.select(Recordings.start_time, Recordings.end_time)
.where(reduce(operator.and_, clauses))
.order_by(Recordings.start_time.asc())
.dicts()
.iterator()
)
# Convert recordings to list of (start, end) tuples
recordings = [(r["start_time"], r["end_time"]) for r in data]
# Iterate through time segments and check if each has any recording
no_recording_segments = []
current = after
current_gap_start = None
while current < before:
segment_end = min(current + scale, before)
# Check if this segment overlaps with any recording
has_recording = any(
rec_start < segment_end and rec_end > current
for rec_start, rec_end in recordings
)
if not has_recording:
# This segment has no recordings
if current_gap_start is None:
current_gap_start = current # Start a new gap
else:
# This segment has recordings
if current_gap_start is not None:
# End the current gap and append it
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(current)}
)
current_gap_start = None
current = segment_end
# Append the last gap if it exists
if current_gap_start is not None:
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(before)}
)
return JSONResponse(content=no_recording_segments)
@router.delete(
"/recordings/start/{start}/end/{end}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete recordings",
description="""Deletes recordings within the specified time range.
Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
""",
)
async def delete_recordings(
start: float = PathParam(..., description="Start timestamp (unix)"),
end: float = PathParam(..., description="End timestamp (unix)"),
params: RecordingsDeleteQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Delete recordings in the specified time range."""
if start >= end:
return JSONResponse(
content={
"success": False,
"message": "Start time must be less than end time.",
},
status_code=400,
)
cameras = params.cameras
if cameras != "all":
requested = set(cameras.split(","))
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(
content={
"success": False,
"message": "No valid cameras found in the request.",
},
status_code=400,
)
camera_list = list(filtered)
else:
camera_list = allowed_cameras
# Parse keep parameter
keep_set = set()
if params.keep:
keep_set = set(params.keep.split(","))
# Build query to find overlapping recordings
clauses = [
(
Recordings.start_time.between(start, end)
| Recordings.end_time.between(start, end)
| ((start > Recordings.start_time) & (end < Recordings.end_time))
),
(Recordings.camera << camera_list),
]
keep_clauses = []
if "motion" in keep_set:
keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
if "object" in keep_set:
keep_clauses.append(
Recordings.objects.is_null(False) & (Recordings.objects > 0)
)
if "audio" in keep_set:
keep_clauses.append(Recordings.dBFS.is_null(False))
if keep_clauses:
keep_condition = reduce(operator.or_, keep_clauses)
clauses.append(~keep_condition)
recordings_to_delete = (
Recordings.select(Recordings.id, Recordings.path)
.where(reduce(operator.and_, clauses))
.dicts()
.iterator()
)
recording_ids = []
deleted_count = 0
error_count = 0
for recording in recordings_to_delete:
recording_ids.append(recording["id"])
try:
Path(recording["path"]).unlink(missing_ok=True)
deleted_count += 1
except Exception as e:
logger.error(f"Failed to delete recording file {recording['path']}: {e}")
error_count += 1
if recording_ids:
max_deletes = 100000
recording_ids_list = list(recording_ids)
for i in range(0, len(recording_ids_list), max_deletes):
Recordings.delete().where(
Recordings.id << recording_ids_list[i : i + max_deletes]
).execute()
message = f"Successfully deleted {deleted_count} recording(s)."
if error_count > 0:
message += f" {error_count} file deletion error(s) occurred."
return JSONResponse(
content={"success": True, "message": message},
status_code=200,
)

View File

@@ -33,6 +33,7 @@ from frigate.api.defs.response.review_response import (
ReviewSummaryResponse, ReviewSummaryResponse,
) )
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.embeddings import EmbeddingsContext from frigate.embeddings import EmbeddingsContext
from frigate.models import Recordings, ReviewSegment, UserReviewStatus from frigate.models import Recordings, ReviewSegment, UserReviewStatus
from frigate.review.types import SeverityEnum from frigate.review.types import SeverityEnum
@@ -746,7 +747,9 @@ async def set_not_reviewed(
description="Use GenAI to summarize review items over a period of time.", description="Use GenAI to summarize review items over a period of time.",
) )
def generate_review_summary(request: Request, start_ts: float, end_ts: float): def generate_review_summary(request: Request, start_ts: float, end_ts: float):
if not request.app.genai_manager.vision_client: config: FrigateConfig = request.app.frigate_config
if not config.genai.provider:
return JSONResponse( return JSONResponse(
content=( content=(
{ {

View File

@@ -19,8 +19,6 @@ class CameraMetrics:
process_pid: Synchronized process_pid: Synchronized
capture_process_pid: Synchronized capture_process_pid: Synchronized
ffmpeg_pid: Synchronized ffmpeg_pid: Synchronized
reconnects_last_hour: Synchronized
stalls_last_hour: Synchronized
def __init__(self, manager: SyncManager): def __init__(self, manager: SyncManager):
self.camera_fps = manager.Value("d", 0) self.camera_fps = manager.Value("d", 0)
@@ -37,8 +35,6 @@ class CameraMetrics:
self.process_pid = manager.Value("i", 0) self.process_pid = manager.Value("i", 0)
self.capture_process_pid = manager.Value("i", 0) self.capture_process_pid = manager.Value("i", 0)
self.ffmpeg_pid = manager.Value("i", 0) self.ffmpeg_pid = manager.Value("i", 0)
self.reconnects_last_hour = manager.Value("i", 0)
self.stalls_last_hour = manager.Value("i", 0)
class PTZMetrics: class PTZMetrics:

View File

@@ -28,7 +28,6 @@ from frigate.const import (
UPDATE_CAMERA_ACTIVITY, UPDATE_CAMERA_ACTIVITY,
UPDATE_EMBEDDINGS_REINDEX_PROGRESS, UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
UPDATE_EVENT_DESCRIPTION, UPDATE_EVENT_DESCRIPTION,
UPDATE_JOB_STATE,
UPDATE_MODEL_STATE, UPDATE_MODEL_STATE,
UPDATE_REVIEW_DESCRIPTION, UPDATE_REVIEW_DESCRIPTION,
UPSERT_REVIEW_SEGMENT, UPSERT_REVIEW_SEGMENT,
@@ -61,7 +60,6 @@ class Dispatcher:
self.camera_activity = CameraActivityManager(config, self.publish) self.camera_activity = CameraActivityManager(config, self.publish)
self.audio_activity = AudioActivityManager(config, self.publish) self.audio_activity = AudioActivityManager(config, self.publish)
self.model_state: dict[str, ModelStatusTypesEnum] = {} self.model_state: dict[str, ModelStatusTypesEnum] = {}
self.job_state: dict[str, dict[str, Any]] = {} # {job_type: job_data}
self.embeddings_reindex: dict[str, Any] = {} self.embeddings_reindex: dict[str, Any] = {}
self.birdseye_layout: dict[str, Any] = {} self.birdseye_layout: dict[str, Any] = {}
self.audio_transcription_state: str = "idle" self.audio_transcription_state: str = "idle"
@@ -182,19 +180,6 @@ class Dispatcher:
def handle_model_state() -> None: def handle_model_state() -> None:
self.publish("model_state", json.dumps(self.model_state.copy())) self.publish("model_state", json.dumps(self.model_state.copy()))
def handle_update_job_state() -> None:
if payload and isinstance(payload, dict):
job_type = payload.get("job_type")
if job_type:
self.job_state[job_type] = payload
self.publish(
"job_state",
json.dumps(self.job_state),
)
def handle_job_state() -> None:
self.publish("job_state", json.dumps(self.job_state.copy()))
def handle_update_audio_transcription_state() -> None: def handle_update_audio_transcription_state() -> None:
if payload: if payload:
self.audio_transcription_state = payload self.audio_transcription_state = payload
@@ -292,7 +277,6 @@ class Dispatcher:
UPDATE_EVENT_DESCRIPTION: handle_update_event_description, UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
UPDATE_REVIEW_DESCRIPTION: handle_update_review_description, UPDATE_REVIEW_DESCRIPTION: handle_update_review_description,
UPDATE_MODEL_STATE: handle_update_model_state, UPDATE_MODEL_STATE: handle_update_model_state,
UPDATE_JOB_STATE: handle_update_job_state,
UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress, UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout, UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout,
UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state, UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state,
@@ -300,7 +284,6 @@ class Dispatcher:
"restart": handle_restart, "restart": handle_restart,
"embeddingsReindexProgress": handle_embeddings_reindex_progress, "embeddingsReindexProgress": handle_embeddings_reindex_progress,
"modelState": handle_model_state, "modelState": handle_model_state,
"jobState": handle_job_state,
"audioTranscriptionState": handle_audio_transcription_state, "audioTranscriptionState": handle_audio_transcription_state,
"birdseyeLayout": handle_birdseye_layout, "birdseyeLayout": handle_birdseye_layout,
"onConnect": handle_on_connect, "onConnect": handle_on_connect,

View File

@@ -8,7 +8,6 @@ from .config import * # noqa: F403
from .database import * # noqa: F403 from .database import * # noqa: F403
from .logger import * # noqa: F403 from .logger import * # noqa: F403
from .mqtt import * # noqa: F403 from .mqtt import * # noqa: F403
from .network import * # noqa: F403
from .proxy import * # noqa: F403 from .proxy import * # noqa: F403
from .telemetry import * # noqa: F403 from .telemetry import * # noqa: F403
from .tls import * # noqa: F403 from .tls import * # noqa: F403

View File

@@ -8,63 +8,39 @@ __all__ = ["AuthConfig"]
class AuthConfig(FrigateBaseModel): class AuthConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=True, title="Enable authentication")
default=True,
title="Enable authentication",
description="Enable native authentication for the Frigate UI.",
)
reset_admin_password: bool = Field( reset_admin_password: bool = Field(
default=False, default=False, title="Reset the admin password on startup"
title="Reset admin password",
description="If true, reset the admin user's password on startup and print the new password in logs.",
) )
cookie_name: str = Field( cookie_name: str = Field(
default="frigate_token", default="frigate_token", title="Name for jwt token cookie", pattern=r"^[a-z_]+$"
title="JWT cookie name",
description="Name of the cookie used to store the JWT token for native authentication.",
pattern=r"^[a-z_]+$",
)
cookie_secure: bool = Field(
default=False,
title="Secure cookie flag",
description="Set the secure flag on the auth cookie; should be true when using TLS.",
) )
cookie_secure: bool = Field(default=False, title="Set secure flag on cookie")
session_length: int = Field( session_length: int = Field(
default=86400, default=86400, title="Session length for jwt session tokens", ge=60
title="Session length",
description="Session duration in seconds for JWT-based sessions.",
ge=60,
) )
refresh_time: int = Field( refresh_time: int = Field(
default=1800, default=1800,
title="Session refresh window", title="Refresh the session if it is going to expire in this many seconds",
description="When a session is within this many seconds of expiring, refresh it back to full length.",
ge=30, ge=30,
) )
failed_login_rate_limit: Optional[str] = Field( failed_login_rate_limit: Optional[str] = Field(
default=None, default=None,
title="Failed login limits", title="Rate limits for failed login attempts.",
description="Rate limiting rules for failed login attempts to reduce brute-force attacks.",
) )
trusted_proxies: list[str] = Field( trusted_proxies: list[str] = Field(
default=[], default=[],
title="Trusted proxies", title="Trusted proxies for determining IP address to rate limit",
description="List of trusted proxy IPs used when determining client IP for rate limiting.",
) )
# As of Feb 2023, OWASP recommends 600000 iterations for PBKDF2-SHA256 # As of Feb 2023, OWASP recommends 600000 iterations for PBKDF2-SHA256
hash_iterations: int = Field( hash_iterations: int = Field(default=600000, title="Password hash iterations")
default=600000,
title="Hash iterations",
description="Number of PBKDF2-SHA256 iterations to use when hashing user passwords.",
)
roles: Dict[str, List[str]] = Field( roles: Dict[str, List[str]] = Field(
default_factory=dict, default_factory=dict,
title="Role mappings", title="Role to camera mappings. Empty list grants access to all cameras.",
description="Map roles to camera lists. An empty list grants access to all cameras for the role.",
) )
admin_first_time_login: Optional[bool] = Field( admin_first_time_login: Optional[bool] = Field(
default=False, default=False,
title="First-time admin flag", title="Internal field to expose first-time admin login flag to the UI",
description=( description=(
"When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. " "When true the UI may show a help link on the login page informing users how to sign in after an admin password reset. "
), ),

View File

@@ -17,45 +17,25 @@ class AudioFilterConfig(FrigateBaseModel):
default=0.8, default=0.8,
ge=AUDIO_MIN_CONFIDENCE, ge=AUDIO_MIN_CONFIDENCE,
lt=1.0, lt=1.0,
title="Minimum audio confidence", title="Minimum detection confidence threshold for audio to be counted.",
description="Minimum confidence threshold for the audio event to be counted.",
) )
class AudioConfig(FrigateBaseModel): class AudioConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable audio events.")
default=False,
title="Enable audio detection",
description="Enable or disable audio event detection for all cameras; can be overridden per-camera.",
)
max_not_heard: int = Field( max_not_heard: int = Field(
default=30, default=30, title="Seconds of not hearing the type of audio to end the event."
title="End timeout",
description="Amount of seconds without the configured audio type before the audio event is ended.",
) )
min_volume: int = Field( min_volume: int = Field(
default=500, default=500, title="Min volume required to run audio detection."
title="Minimum volume",
description="Minimum RMS volume threshold required to run audio detection; lower values increase sensitivity (e.g., 200 high, 500 medium, 1000 low).",
) )
listen: list[str] = Field( listen: list[str] = Field(
default=DEFAULT_LISTEN_AUDIO, default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
title="Listen types",
description="List of audio event types to detect (for example: bark, fire_alarm, scream, speech, yell).",
) )
filters: Optional[dict[str, AudioFilterConfig]] = Field( filters: Optional[dict[str, AudioFilterConfig]] = Field(
None, None, title="Audio filters."
title="Audio filters",
description="Per-audio-type filter settings such as confidence thresholds used to reduce false positives.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
None, None, title="Keep track of original state of audio detection."
title="Original audio state",
description="Indicates whether audio detection was originally enabled in the static config file.",
)
num_threads: int = Field(
default=2,
title="Detection threads",
description="Number of threads to use for audio detection processing.",
ge=1,
) )
num_threads: int = Field(default=2, title="Number of detection threads", ge=1)

View File

@@ -29,88 +29,45 @@ class BirdseyeModeEnum(str, Enum):
class BirdseyeLayoutConfig(FrigateBaseModel): class BirdseyeLayoutConfig(FrigateBaseModel):
scaling_factor: float = Field( scaling_factor: float = Field(
default=2.0, default=2.0, title="Birdseye Scaling Factor", ge=1.0, le=5.0
title="Scaling factor",
description="Scaling factor used by the layout calculator (range 1.0 to 5.0).",
ge=1.0,
le=5.0,
)
max_cameras: Optional[int] = Field(
default=None,
title="Max cameras",
description="Maximum number of cameras to display at once in Birdseye; shows the most recent cameras.",
) )
max_cameras: Optional[int] = Field(default=None, title="Max cameras")
class BirdseyeConfig(FrigateBaseModel): class BirdseyeConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=True, title="Enable birdseye view.")
default=True,
title="Enable Birdseye",
description="Enable or disable the Birdseye view feature.",
)
mode: BirdseyeModeEnum = Field( mode: BirdseyeModeEnum = Field(
default=BirdseyeModeEnum.objects, default=BirdseyeModeEnum.objects, title="Tracking mode."
title="Tracking mode",
description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.",
) )
restream: bool = Field( restream: bool = Field(default=False, title="Restream birdseye via RTSP.")
default=False, width: int = Field(default=1280, title="Birdseye width.")
title="Restream RTSP", height: int = Field(default=720, title="Birdseye height.")
description="Re-stream the Birdseye output as an RTSP feed; enabling this will keep Birdseye running continuously.",
)
width: int = Field(
default=1280,
title="Width",
description="Output width (pixels) of the composed Birdseye frame.",
)
height: int = Field(
default=720,
title="Height",
description="Output height (pixels) of the composed Birdseye frame.",
)
quality: int = Field( quality: int = Field(
default=8, default=8,
title="Encoding quality", title="Encoding quality.",
description="Encoding quality for the Birdseye mpeg1 feed (1 highest quality, 31 lowest).",
ge=1, ge=1,
le=31, le=31,
) )
inactivity_threshold: int = Field( inactivity_threshold: int = Field(
default=30, default=30, title="Birdseye Inactivity Threshold", gt=0
title="Inactivity threshold",
description="Seconds of inactivity after which a camera will stop being shown in Birdseye.",
gt=0,
) )
layout: BirdseyeLayoutConfig = Field( layout: BirdseyeLayoutConfig = Field(
default_factory=BirdseyeLayoutConfig, default_factory=BirdseyeLayoutConfig, title="Birdseye Layout Config"
title="Layout",
description="Layout options for the Birdseye composition.",
) )
idle_heartbeat_fps: float = Field( idle_heartbeat_fps: float = Field(
default=0.0, default=0.0,
ge=0.0, ge=0.0,
le=10.0, le=10.0,
title="Idle heartbeat FPS", title="Idle heartbeat FPS (0 disables, max 10)",
description="Frames-per-second to resend the last composed Birdseye frame when idle; set to 0 to disable.",
) )
# uses BaseModel because some global attributes are not available at the camera level # uses BaseModel because some global attributes are not available at the camera level
class BirdseyeCameraConfig(BaseModel): class BirdseyeCameraConfig(BaseModel):
enabled: bool = Field( enabled: bool = Field(default=True, title="Enable birdseye view for camera.")
default=True,
title="Enable Birdseye",
description="Enable or disable the Birdseye view feature.",
)
mode: BirdseyeModeEnum = Field( mode: BirdseyeModeEnum = Field(
default=BirdseyeModeEnum.objects, default=BirdseyeModeEnum.objects, title="Tracking mode for camera."
title="Tracking mode",
description="Mode for including cameras in Birdseye: 'objects', 'motion', or 'continuous'.",
) )
order: int = Field( order: int = Field(default=0, title="Position of the camera in the birdseye view.")
default=0,
title="Position",
description="Numeric position controlling the camera's ordering in the Birdseye layout.",
)

View File

@@ -50,17 +50,10 @@ class CameraTypeEnum(str, Enum):
class CameraConfig(FrigateBaseModel): class CameraConfig(FrigateBaseModel):
name: Optional[str] = Field( name: Optional[str] = Field(None, title="Camera name.", pattern=REGEX_CAMERA_NAME)
None,
title="Camera name",
description="Camera name is required",
pattern=REGEX_CAMERA_NAME,
)
friendly_name: Optional[str] = Field( friendly_name: Optional[str] = Field(
None, None, title="Camera friendly name used in the Frigate UI."
title="Friendly name",
description="Camera friendly name used in the Frigate UI",
) )
@model_validator(mode="before") @model_validator(mode="before")
@@ -70,129 +63,80 @@ class CameraConfig(FrigateBaseModel):
pass pass
return values return values
enabled: bool = Field(default=True, title="Enabled", description="Enabled") enabled: bool = Field(default=True, title="Enable camera.")
# Options with global fallback # Options with global fallback
audio: AudioConfig = Field( audio: AudioConfig = Field(
default_factory=AudioConfig, default_factory=AudioConfig, title="Audio events configuration."
title="Audio events",
description="Settings for audio-based event detection for this camera.",
) )
audio_transcription: CameraAudioTranscriptionConfig = Field( audio_transcription: CameraAudioTranscriptionConfig = Field(
default_factory=CameraAudioTranscriptionConfig, default_factory=CameraAudioTranscriptionConfig,
title="Audio transcription", title="Audio transcription config.",
description="Settings for live and speech audio transcription used for events and live captions.",
) )
birdseye: BirdseyeCameraConfig = Field( birdseye: BirdseyeCameraConfig = Field(
default_factory=BirdseyeCameraConfig, default_factory=BirdseyeCameraConfig, title="Birdseye camera configuration."
title="Birdseye",
description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
) )
detect: DetectConfig = Field( detect: DetectConfig = Field(
default_factory=DetectConfig, default_factory=DetectConfig, title="Object detection configuration."
title="Object Detection",
description="Settings for the detection/detect role used to run object detection and initialize trackers.",
) )
face_recognition: CameraFaceRecognitionConfig = Field( face_recognition: CameraFaceRecognitionConfig = Field(
default_factory=CameraFaceRecognitionConfig, default_factory=CameraFaceRecognitionConfig, title="Face recognition config."
title="Face recognition",
description="Settings for face detection and recognition for this camera.",
)
ffmpeg: CameraFfmpegConfig = Field(
title="FFmpeg",
description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
) )
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
live: CameraLiveConfig = Field( live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, default_factory=CameraLiveConfig, title="Live playback settings."
title="Live playback",
description="Settings used by the Web UI to control live stream selection, resolution and quality.",
) )
lpr: CameraLicensePlateRecognitionConfig = Field( lpr: CameraLicensePlateRecognitionConfig = Field(
default_factory=CameraLicensePlateRecognitionConfig, default_factory=CameraLicensePlateRecognitionConfig, title="LPR config."
title="License Plate Recognition",
description="License plate recognition settings including detection thresholds, formatting, and known plates.",
)
motion: MotionConfig = Field(
None,
title="Motion detection",
description="Default motion detection settings for this camera.",
) )
motion: MotionConfig = Field(None, title="Motion detection configuration.")
objects: ObjectConfig = Field( objects: ObjectConfig = Field(
default_factory=ObjectConfig, default_factory=ObjectConfig, title="Object configuration."
title="Objects",
description="Object tracking defaults including which labels to track and per-object filters.",
) )
record: RecordConfig = Field( record: RecordConfig = Field(
default_factory=RecordConfig, default_factory=RecordConfig, title="Record configuration."
title="Recording",
description="Recording and retention settings for this camera.",
) )
review: ReviewConfig = Field( review: ReviewConfig = Field(
default_factory=ReviewConfig, default_factory=ReviewConfig, title="Review configuration."
title="Review",
description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage for this camera.",
) )
semantic_search: CameraSemanticSearchConfig = Field( semantic_search: CameraSemanticSearchConfig = Field(
default_factory=CameraSemanticSearchConfig, default_factory=CameraSemanticSearchConfig,
title="Semantic Search", title="Semantic search configuration.",
description="Settings for semantic search which builds and queries object embeddings to find similar items.",
) )
snapshots: SnapshotsConfig = Field( snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig, default_factory=SnapshotsConfig, title="Snapshot configuration."
title="Snapshots",
description="Settings for saved JPEG snapshots of tracked objects for this camera.",
) )
timestamp_style: TimestampStyleConfig = Field( timestamp_style: TimestampStyleConfig = Field(
default_factory=TimestampStyleConfig, default_factory=TimestampStyleConfig, title="Timestamp style configuration."
title="Timestamp style",
description="Styling options for in-feed timestamps applied to recordings and snapshots.",
) )
# Options without global fallback # Options without global fallback
best_image_timeout: int = Field( best_image_timeout: int = Field(
default=60, default=60,
title="Best image timeout", title="How long to wait for the image with the highest confidence score.",
description="How long to wait for the image with the highest confidence score.",
) )
mqtt: CameraMqttConfig = Field( mqtt: CameraMqttConfig = Field(
default_factory=CameraMqttConfig, default_factory=CameraMqttConfig, title="MQTT configuration."
title="MQTT",
description="MQTT image publishing settings.",
) )
notifications: NotificationConfig = Field( notifications: NotificationConfig = Field(
default_factory=NotificationConfig, default_factory=NotificationConfig, title="Notifications configuration."
title="Notifications",
description="Settings to enable and control notifications for this camera.",
) )
onvif: OnvifConfig = Field( onvif: OnvifConfig = Field(
default_factory=OnvifConfig, default_factory=OnvifConfig, title="Camera Onvif Configuration."
title="ONVIF",
description="ONVIF connection and PTZ autotracking settings for this camera.",
)
type: CameraTypeEnum = Field(
default=CameraTypeEnum.generic,
title="Camera type",
description="Camera Type",
) )
type: CameraTypeEnum = Field(default=CameraTypeEnum.generic, title="Camera Type")
ui: CameraUiConfig = Field( ui: CameraUiConfig = Field(
default_factory=CameraUiConfig, default_factory=CameraUiConfig, title="Camera UI Modifications."
title="Camera UI",
description="Display ordering and visibility for this camera in the UI. Ordering affects the default dashboard. For more granular control, use camera groups.",
) )
webui_url: Optional[str] = Field( webui_url: Optional[str] = Field(
None, None,
title="Camera URL", title="URL to visit the camera directly from system page",
description="URL to visit the camera directly from system page",
) )
zones: dict[str, ZoneConfig] = Field( zones: dict[str, ZoneConfig] = Field(
default_factory=dict, default_factory=dict, title="Zone configuration."
title="Zones",
description="Zones allow you to define a specific area of the frame so you can determine whether or not an object is within a particular area.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, default=None, title="Keep track of original state of camera."
title="Original camera state",
description="Keep track of original state of camera.",
) )
_ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr() _ffmpeg_cmds: list[dict[str, list[str]]] = PrivateAttr()

View File

@@ -8,82 +8,56 @@ __all__ = ["DetectConfig", "StationaryConfig", "StationaryMaxFramesConfig"]
class StationaryMaxFramesConfig(FrigateBaseModel): class StationaryMaxFramesConfig(FrigateBaseModel):
default: Optional[int] = Field( default: Optional[int] = Field(default=None, title="Default max frames.", ge=1)
default=None,
title="Default max frames",
description="Default maximum frames to track a stationary object before stopping.",
ge=1,
)
objects: dict[str, int] = Field( objects: dict[str, int] = Field(
default_factory=dict, default_factory=dict, title="Object specific max frames."
title="Object max frames",
description="Per-object overrides for maximum frames to track stationary objects.",
) )
class StationaryConfig(FrigateBaseModel): class StationaryConfig(FrigateBaseModel):
interval: Optional[int] = Field( interval: Optional[int] = Field(
default=None, default=None,
title="Stationary interval", title="Frame interval for checking stationary objects.",
description="How often (in frames) to run a detection check to confirm a stationary object.",
gt=0, gt=0,
) )
threshold: Optional[int] = Field( threshold: Optional[int] = Field(
default=None, default=None,
title="Stationary threshold", title="Number of frames without a position change for an object to be considered stationary",
description="Number of frames with no position change required to mark an object as stationary.",
ge=1, ge=1,
) )
max_frames: StationaryMaxFramesConfig = Field( max_frames: StationaryMaxFramesConfig = Field(
default_factory=StationaryMaxFramesConfig, default_factory=StationaryMaxFramesConfig,
title="Max frames", title="Max frames for stationary objects.",
description="Limits how long stationary objects are tracked before being discarded.",
) )
classifier: bool = Field( classifier: bool = Field(
default=True, default=True,
title="Enable visual classifier", title="Enable visual classifier for determing if objects with jittery bounding boxes are stationary.",
description="Use a visual classifier to detect truly stationary objects even when bounding boxes jitter.",
) )
class DetectConfig(FrigateBaseModel): class DetectConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Detection Enabled.")
default=False,
title="Detection enabled",
description="Enable or disable object detection for all cameras; can be overridden per-camera. Detection must be enabled for object tracking to run.",
)
height: Optional[int] = Field( height: Optional[int] = Field(
default=None, default=None, title="Height of the stream for the detect role."
title="Detect height",
description="Height (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.",
) )
width: Optional[int] = Field( width: Optional[int] = Field(
default=None, default=None, title="Width of the stream for the detect role."
title="Detect width",
description="Width (pixels) of frames used for the detect stream; leave empty to use the native stream resolution.",
) )
fps: int = Field( fps: int = Field(
default=5, default=5, title="Number of frames per second to process through detection."
title="Detect FPS",
description="Desired frames per second to run detection on; lower values reduce CPU usage (recommended value is 5, only set higher - at most 10 - if tracking extremely fast moving objects).",
) )
min_initialized: Optional[int] = Field( min_initialized: Optional[int] = Field(
default=None, default=None,
title="Minimum initialization frames", title="Minimum number of consecutive hits for an object to be initialized by the tracker.",
description="Number of consecutive detection hits required before creating a tracked object. Increase to reduce false initializations. Default value is fps divided by 2.",
) )
max_disappeared: Optional[int] = Field( max_disappeared: Optional[int] = Field(
default=None, default=None,
title="Maximum disappeared frames", title="Maximum number of frames the object can disappear before detection ends.",
description="Number of frames without a detection before a tracked object is considered gone.",
) )
stationary: StationaryConfig = Field( stationary: StationaryConfig = Field(
default_factory=StationaryConfig, default_factory=StationaryConfig,
title="Stationary objects config", title="Stationary objects config.",
description="Settings to detect and manage objects that remain stationary for a period of time.",
) )
annotation_offset: int = Field( annotation_offset: int = Field(
default=0, default=0, title="Milliseconds to offset detect annotations by."
title="Annotation offset",
description="Milliseconds to shift detect annotations to better align timeline bounding boxes with recordings; can be positive or negative.",
) )

View File

@@ -35,58 +35,39 @@ DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
class FfmpegOutputArgsConfig(FrigateBaseModel): class FfmpegOutputArgsConfig(FrigateBaseModel):
detect: Union[str, list[str]] = Field( detect: Union[str, list[str]] = Field(
default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT, default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Detect output arguments", title="Detect role FFmpeg output arguments.",
description="Default output arguments for detect role streams.",
) )
record: Union[str, list[str]] = Field( record: Union[str, list[str]] = Field(
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT, default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Record output arguments", title="Record role FFmpeg output arguments.",
description="Default output arguments for record role streams.",
) )
class FfmpegConfig(FrigateBaseModel): class FfmpegConfig(FrigateBaseModel):
path: str = Field( path: str = Field(default="default", title="FFmpeg path")
default="default",
title="FFmpeg path",
description='Path to the FFmpeg binary to use or a version alias ("5.0" or "7.0").',
)
global_args: Union[str, list[str]] = Field( global_args: Union[str, list[str]] = Field(
default=FFMPEG_GLOBAL_ARGS_DEFAULT, default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments."
title="FFmpeg global arguments",
description="Global arguments passed to FFmpeg processes.",
) )
hwaccel_args: Union[str, list[str]] = Field( hwaccel_args: Union[str, list[str]] = Field(
default="auto", default="auto", title="FFmpeg hardware acceleration arguments."
title="Hardware acceleration arguments",
description="Hardware acceleration arguments for FFmpeg. Provider-specific presets are recommended.",
) )
input_args: Union[str, list[str]] = Field( input_args: Union[str, list[str]] = Field(
default=FFMPEG_INPUT_ARGS_DEFAULT, default=FFMPEG_INPUT_ARGS_DEFAULT, title="FFmpeg input arguments."
title="Input arguments",
description="Input arguments applied to FFmpeg input streams.",
) )
output_args: FfmpegOutputArgsConfig = Field( output_args: FfmpegOutputArgsConfig = Field(
default_factory=FfmpegOutputArgsConfig, default_factory=FfmpegOutputArgsConfig,
title="Output arguments", title="FFmpeg output arguments per role.",
description="Default output arguments used for different FFmpeg roles such as detect and record.",
) )
retry_interval: float = Field( retry_interval: float = Field(
default=10.0, default=10.0,
title="FFmpeg retry time", title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
description="Seconds to wait before attempting to reconnect a camera stream after failure. Default is 10.",
gt=0.0, gt=0.0,
) )
apple_compatibility: bool = Field( apple_compatibility: bool = Field(
default=False, default=False,
title="Apple compatibility", title="Set tag on HEVC (H.265) recording stream to improve compatibility with Apple players.",
description="Enable HEVC tagging for better Apple player compatibility when recording H.265.",
)
gpu: int = Field(
default=0,
title="GPU index",
description="Default GPU index used for hardware acceleration if available.",
) )
gpu: int = Field(default=0, title="GPU index to use for hardware acceleration.")
@property @property
def ffmpeg_path(self) -> str: def ffmpeg_path(self) -> str:
@@ -114,36 +95,21 @@ class CameraRoleEnum(str, Enum):
class CameraInput(FrigateBaseModel): class CameraInput(FrigateBaseModel):
path: EnvString = Field( path: EnvString = Field(title="Camera input path.")
title="Input path", roles: list[CameraRoleEnum] = Field(title="Roles assigned to this input.")
description="Camera input stream URL or path.",
)
roles: list[CameraRoleEnum] = Field(
title="Input roles",
description="Roles for this input stream.",
)
global_args: Union[str, list[str]] = Field( global_args: Union[str, list[str]] = Field(
default_factory=list, default_factory=list, title="FFmpeg global arguments."
title="FFmpeg global arguments",
description="FFmpeg global arguments for this input stream.",
) )
hwaccel_args: Union[str, list[str]] = Field( hwaccel_args: Union[str, list[str]] = Field(
default_factory=list, default_factory=list, title="FFmpeg hardware acceleration arguments."
title="Hardware acceleration arguments",
description="Hardware acceleration arguments for this input stream.",
) )
input_args: Union[str, list[str]] = Field( input_args: Union[str, list[str]] = Field(
default_factory=list, default_factory=list, title="FFmpeg input arguments."
title="Input arguments",
description="Input arguments specific to this stream.",
) )
class CameraFfmpegConfig(FfmpegConfig): class CameraFfmpegConfig(FfmpegConfig):
inputs: list[CameraInput] = Field( inputs: list[CameraInput] = Field(title="Camera inputs.")
title="Camera inputs",
description="List of input stream definitions (paths and roles) for this camera.",
)
@field_validator("inputs") @field_validator("inputs")
@classmethod @classmethod

View File

@@ -6,7 +6,7 @@ from pydantic import Field
from ..base import FrigateBaseModel from ..base import FrigateBaseModel
from ..env import EnvString from ..env import EnvString
__all__ = ["GenAIConfig", "GenAIProviderEnum", "GenAIRoleEnum"] __all__ = ["GenAIConfig", "GenAIProviderEnum"]
class GenAIProviderEnum(str, Enum): class GenAIProviderEnum(str, Enum):
@@ -14,56 +14,18 @@ class GenAIProviderEnum(str, Enum):
azure_openai = "azure_openai" azure_openai = "azure_openai"
gemini = "gemini" gemini = "gemini"
ollama = "ollama" ollama = "ollama"
llamacpp = "llamacpp"
class GenAIRoleEnum(str, Enum):
tools = "tools"
vision = "vision"
embeddings = "embeddings"
class GenAIConfig(FrigateBaseModel): class GenAIConfig(FrigateBaseModel):
"""Primary GenAI Config to define GenAI Provider.""" """Primary GenAI Config to define GenAI Provider."""
api_key: Optional[EnvString] = Field( api_key: Optional[EnvString] = Field(default=None, title="Provider API key.")
default=None, base_url: Optional[str] = Field(default=None, title="Provider base url.")
title="API key", model: str = Field(default="gpt-4o", title="GenAI model.")
description="API key required by some providers (can also be set via environment variables).", provider: GenAIProviderEnum | None = Field(default=None, title="GenAI provider.")
)
base_url: Optional[str] = Field(
default=None,
title="Base URL",
description="Base URL for self-hosted or compatible providers (for example an Ollama instance).",
)
model: str = Field(
default="gpt-4o",
title="Model",
description="The model to use from the provider for generating descriptions or summaries.",
)
provider: GenAIProviderEnum | None = Field(
default=None,
title="Provider",
description="The GenAI provider to use (for example: ollama, gemini, openai).",
)
roles: list[GenAIRoleEnum] = Field(
default_factory=lambda: [
GenAIRoleEnum.embeddings,
GenAIRoleEnum.vision,
GenAIRoleEnum.tools,
],
title="Roles",
description="GenAI roles (tools, vision, embeddings); one provider per role.",
)
provider_options: dict[str, Any] = Field( provider_options: dict[str, Any] = Field(
default={}, default={}, title="GenAI Provider extra options."
title="Provider options",
description="Additional provider-specific options to pass to the GenAI client.",
json_schema_extra={"additionalProperties": {"type": "string"}},
) )
runtime_options: dict[str, Any] = Field( runtime_options: dict[str, Any] = Field(
default={}, default={}, title="Options to pass during inference calls."
title="Runtime options",
description="Runtime options passed to the provider for each inference call.",
json_schema_extra={"additionalProperties": {"type": "string"}},
) )

View File

@@ -10,18 +10,7 @@ __all__ = ["CameraLiveConfig"]
class CameraLiveConfig(FrigateBaseModel): class CameraLiveConfig(FrigateBaseModel):
streams: Dict[str, str] = Field( streams: Dict[str, str] = Field(
default_factory=list, default_factory=list,
title="Live stream names", title="Friendly names and restream names to use for live view.",
description="Mapping of configured stream names to restream/go2rtc names used for live playback.",
)
height: int = Field(
default=720,
title="Live height",
description="Height (pixels) to render the jsmpeg live stream in the Web UI; must be <= detect stream height.",
)
quality: int = Field(
default=8,
ge=1,
le=31,
title="Live quality",
description="Encoding quality for the jsmpeg stream (1 highest, 31 lowest).",
) )
height: int = Field(default=720, title="Live camera view height")
quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")

View File

@@ -8,64 +8,30 @@ __all__ = ["MotionConfig"]
class MotionConfig(FrigateBaseModel): class MotionConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=True, title="Enable motion on all cameras.")
default=True,
title="Enable motion detection",
description="Enable or disable motion detection for all cameras; can be overridden per-camera.",
)
threshold: int = Field( threshold: int = Field(
default=30, default=30,
title="Motion threshold", title="Motion detection threshold (1-255).",
description="Pixel difference threshold used by the motion detector; higher values reduce sensitivity (range 1-255).",
ge=1, ge=1,
le=255, le=255,
) )
lightning_threshold: float = Field( lightning_threshold: float = Field(
default=0.8, default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0
title="Lightning threshold",
description="Threshold to detect and ignore brief lighting spikes (lower is more sensitive, values between 0.3 and 1.0).",
ge=0.3,
le=1.0,
)
improve_contrast: bool = Field(
default=True,
title="Improve contrast",
description="Apply contrast improvement to frames before motion analysis to help detection.",
)
contour_area: Optional[int] = Field(
default=10,
title="Contour area",
description="Minimum contour area in pixels required for a motion contour to be counted.",
)
delta_alpha: float = Field(
default=0.2,
title="Delta alpha",
description="Alpha blending factor used in frame differencing for motion calculation.",
)
frame_alpha: float = Field(
default=0.01,
title="Frame alpha",
description="Alpha value used when blending frames for motion preprocessing.",
)
frame_height: Optional[int] = Field(
default=100,
title="Frame height",
description="Height in pixels to scale frames to when computing motion.",
) )
improve_contrast: bool = Field(default=True, title="Improve Contrast")
contour_area: Optional[int] = Field(default=10, title="Contour Area")
delta_alpha: float = Field(default=0.2, title="Delta Alpha")
frame_alpha: float = Field(default=0.01, title="Frame Alpha")
frame_height: Optional[int] = Field(default=100, title="Frame Height")
mask: Union[str, list[str]] = Field( mask: Union[str, list[str]] = Field(
default="", default="", title="Coordinates polygon for the motion mask."
title="Mask coordinates",
description="Ordered x,y coordinates defining the motion mask polygon used to include/exclude areas.",
) )
mqtt_off_delay: int = Field( mqtt_off_delay: int = Field(
default=30, default=30,
title="MQTT off delay", title="Delay for updating MQTT with no motion detected.",
description="Seconds to wait after last motion before publishing an MQTT 'off' state.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, default=None, title="Keep track of original state of motion detection."
title="Original motion state",
description="Indicates whether motion detection was enabled in the original static configuration.",
) )
raw_mask: Union[str, list[str]] = "" raw_mask: Union[str, list[str]] = ""

View File

@@ -6,40 +6,18 @@ __all__ = ["CameraMqttConfig"]
class CameraMqttConfig(FrigateBaseModel): class CameraMqttConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=True, title="Send image over MQTT.")
default=True, timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
title="Send image", bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
description="Enable publishing image snapshots for objects to MQTT topics for this camera.", crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
) height: int = Field(default=270, title="MQTT image height.")
timestamp: bool = Field(
default=True,
title="Add timestamp",
description="Overlay a timestamp on images published to MQTT.",
)
bounding_box: bool = Field(
default=True,
title="Add bounding box",
description="Draw bounding boxes on images published over MQTT.",
)
crop: bool = Field(
default=True,
title="Crop image",
description="Crop images published to MQTT to the detected object's bounding box.",
)
height: int = Field(
default=270,
title="Image height",
description="Height (pixels) to resize images published over MQTT.",
)
required_zones: list[str] = Field( required_zones: list[str] = Field(
default_factory=list, default_factory=list,
title="Required zones", title="List of required zones to be entered in order to send the image.",
description="Zones that an object must enter for an MQTT image to be published.",
) )
quality: int = Field( quality: int = Field(
default=70, default=70,
title="JPEG quality", title="Quality of the encoded jpeg (0-100).",
description="JPEG quality for images published to MQTT (0-100).",
ge=0, ge=0,
le=100, le=100,
) )

View File

@@ -8,24 +8,11 @@ __all__ = ["NotificationConfig"]
class NotificationConfig(FrigateBaseModel): class NotificationConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable notifications")
default=False, email: Optional[str] = Field(default=None, title="Email required for push.")
title="Enable notifications",
description="Enable or disable notifications for all cameras; can be overridden per-camera.",
)
email: Optional[str] = Field(
default=None,
title="Notification email",
description="Email address used for push notifications or required by certain notification providers.",
)
cooldown: int = Field( cooldown: int = Field(
default=0, default=0, ge=0, title="Cooldown period for notifications (time in seconds)."
ge=0,
title="Cooldown period",
description="Cooldown (seconds) between notifications to avoid spamming recipients.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, default=None, title="Keep track of original state of notifications."
title="Original notifications state",
description="Indicates whether notifications were enabled in the original static configuration.",
) )

View File

@@ -13,38 +13,30 @@ DEFAULT_TRACKED_OBJECTS = ["person"]
class FilterConfig(FrigateBaseModel): class FilterConfig(FrigateBaseModel):
min_area: Union[int, float] = Field( min_area: Union[int, float] = Field(
default=0, default=0,
title="Minimum object area", title="Minimum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
description="Minimum bounding box area (pixels or percentage) required for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
) )
max_area: Union[int, float] = Field( max_area: Union[int, float] = Field(
default=24000000, default=24000000,
title="Maximum object area", title="Maximum area of bounding box for object to be counted. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
description="Maximum bounding box area (pixels or percentage) allowed for this object type. Can be pixels (int) or percentage (float between 0.000001 and 0.99).",
) )
min_ratio: float = Field( min_ratio: float = Field(
default=0, default=0,
title="Minimum aspect ratio", title="Minimum ratio of bounding box's width/height for object to be counted.",
description="Minimum width/height ratio required for the bounding box to qualify.",
) )
max_ratio: float = Field( max_ratio: float = Field(
default=24000000, default=24000000,
title="Maximum aspect ratio", title="Maximum ratio of bounding box's width/height for object to be counted.",
description="Maximum width/height ratio allowed for the bounding box to qualify.",
) )
threshold: float = Field( threshold: float = Field(
default=0.7, default=0.7,
title="Confidence threshold", title="Average detection confidence threshold for object to be counted.",
description="Average detection confidence threshold required for the object to be considered a true positive.",
) )
min_score: float = Field( min_score: float = Field(
default=0.5, default=0.5, title="Minimum detection confidence for object to be counted."
title="Minimum confidence",
description="Minimum single-frame detection confidence required for the object to be counted.",
) )
mask: Optional[Union[str, list[str]]] = Field( mask: Optional[Union[str, list[str]]] = Field(
default=None, default=None,
title="Filter mask", title="Detection area polygon mask for this filter configuration.",
description="Polygon coordinates defining where this filter applies within the frame.",
) )
raw_mask: Union[str, list[str]] = "" raw_mask: Union[str, list[str]] = ""
@@ -59,64 +51,46 @@ class FilterConfig(FrigateBaseModel):
class GenAIObjectTriggerConfig(FrigateBaseModel): class GenAIObjectTriggerConfig(FrigateBaseModel):
tracked_object_end: bool = Field( tracked_object_end: bool = Field(
default=True, default=True, title="Send once the object is no longer tracked."
title="Send on end",
description="Send a request to GenAI when the tracked object ends.",
) )
after_significant_updates: Optional[int] = Field( after_significant_updates: Optional[int] = Field(
default=None, default=None,
title="Early GenAI trigger", title="Send an early request to generative AI when X frames accumulated.",
description="Send a request to GenAI after a specified number of significant updates for the tracked object.",
ge=1, ge=1,
) )
class GenAIObjectConfig(FrigateBaseModel): class GenAIObjectConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable GenAI for camera.")
default=False,
title="Enable GenAI",
description="Enable GenAI generation of descriptions for tracked objects by default.",
)
use_snapshot: bool = Field( use_snapshot: bool = Field(
default=False, default=False, title="Use snapshots for generating descriptions."
title="Use snapshots",
description="Use object snapshots instead of thumbnails for GenAI description generation.",
) )
prompt: str = Field( prompt: str = Field(
default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.", default="Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.",
title="Caption prompt", title="Default caption prompt.",
description="Default prompt template used when generating descriptions with GenAI.",
) )
object_prompts: dict[str, str] = Field( object_prompts: dict[str, str] = Field(
default_factory=dict, default_factory=dict, title="Object specific prompts."
title="Object prompts",
description="Per-object prompts to customize GenAI outputs for specific labels.",
) )
objects: Union[str, list[str]] = Field( objects: Union[str, list[str]] = Field(
default_factory=list, default_factory=list,
title="GenAI objects", title="List of objects to run generative AI for.",
description="List of object labels to send to GenAI by default.",
) )
required_zones: Union[str, list[str]] = Field( required_zones: Union[str, list[str]] = Field(
default_factory=list, default_factory=list,
title="Required zones", title="List of required zones to be entered in order to run generative AI.",
description="Zones that must be entered for objects to qualify for GenAI description generation.",
) )
debug_save_thumbnails: bool = Field( debug_save_thumbnails: bool = Field(
default=False, default=False,
title="Save thumbnails", title="Save thumbnails sent to generative AI for debugging purposes.",
description="Save thumbnails sent to GenAI for debugging and review.",
) )
send_triggers: GenAIObjectTriggerConfig = Field( send_triggers: GenAIObjectTriggerConfig = Field(
default_factory=GenAIObjectTriggerConfig, default_factory=GenAIObjectTriggerConfig,
title="GenAI triggers", title="What triggers to use to send frames to generative AI for a tracked object.",
description="Defines when frames should be sent to GenAI (on end, after updates, etc.).",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, default=None, title="Keep track of original state of generative AI."
title="Original GenAI state",
description="Indicates whether GenAI was enabled in the original static config.",
) )
@field_validator("required_zones", mode="before") @field_validator("required_zones", mode="before")
@@ -129,25 +103,14 @@ class GenAIObjectConfig(FrigateBaseModel):
class ObjectConfig(FrigateBaseModel): class ObjectConfig(FrigateBaseModel):
track: list[str] = Field( track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
default=DEFAULT_TRACKED_OBJECTS,
title="Objects to track",
description="List of object labels to track for all cameras; can be overridden per-camera.",
)
filters: dict[str, FilterConfig] = Field( filters: dict[str, FilterConfig] = Field(
default_factory=dict, default_factory=dict, title="Object filters."
title="Object filters",
description="Filters applied to detected objects to reduce false positives (area, ratio, confidence).",
)
mask: Union[str, list[str]] = Field(
default="",
title="Object mask",
description="Mask polygon used to prevent object detection in specified areas.",
) )
mask: Union[str, list[str]] = Field(default="", title="Object mask.")
genai: GenAIObjectConfig = Field( genai: GenAIObjectConfig = Field(
default_factory=GenAIObjectConfig, default_factory=GenAIObjectConfig,
title="GenAI object config", title="Config for using genai to analyze objects.",
description="GenAI options for describing tracked objects and sending frames for generation.",
) )
_all_objects: list[str] = PrivateAttr() _all_objects: list[str] = PrivateAttr()

View File

@@ -17,57 +17,37 @@ class ZoomingModeEnum(str, Enum):
class PtzAutotrackConfig(FrigateBaseModel): class PtzAutotrackConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable PTZ object autotracking.")
default=False,
title="Enable Autotracking",
description="Enable or disable automatic PTZ camera tracking of detected objects.",
)
calibrate_on_startup: bool = Field( calibrate_on_startup: bool = Field(
default=False, default=False, title="Perform a camera calibration when Frigate starts."
title="Calibrate on start",
description="Measure PTZ motor speeds on startup to improve tracking accuracy. Frigate will update config with movement_weights after calibration.",
) )
zooming: ZoomingModeEnum = Field( zooming: ZoomingModeEnum = Field(
default=ZoomingModeEnum.disabled, default=ZoomingModeEnum.disabled, title="Autotracker zooming mode."
title="Zoom mode",
description="Control zoom behavior: disabled (pan/tilt only), absolute (most compatible), or relative (concurrent pan/tilt/zoom).",
) )
zoom_factor: float = Field( zoom_factor: float = Field(
default=0.3, default=0.3,
title="Zoom factor", title="Zooming factor (0.1-0.75).",
description="Control zoom level on tracked objects. Lower values keep more scene in view; higher values zoom in closer but may lose tracking. Values between 0.1 and 0.75.",
ge=0.1, ge=0.1,
le=0.75, le=0.75,
) )
track: list[str] = Field( track: list[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
default=DEFAULT_TRACKED_OBJECTS,
title="Tracked objects",
description="List of object types that should trigger autotracking.",
)
required_zones: list[str] = Field( required_zones: list[str] = Field(
default_factory=list, default_factory=list,
title="Required zones", title="List of required zones to be entered in order to begin autotracking.",
description="Objects must enter one of these zones before autotracking begins.",
) )
return_preset: str = Field( return_preset: str = Field(
default="home", default="home",
title="Return preset", title="Name of camera preset to return to when object tracking is over.",
description="ONVIF preset name configured in camera firmware to return to after tracking ends.",
) )
timeout: int = Field( timeout: int = Field(
default=10, default=10, title="Seconds to delay before returning to preset."
title="Return timeout",
description="Wait this many seconds after losing tracking before returning camera to preset position.",
) )
movement_weights: Optional[Union[str, list[str]]] = Field( movement_weights: Optional[Union[str, list[str]]] = Field(
default_factory=list, default_factory=list,
title="Movement weights", title="Internal value used for PTZ movements based on the speed of your camera's motor.",
description="Calibration values automatically generated by camera calibration. Do not modify manually.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, default=None, title="Keep track of original state of autotracking."
title="Original autotrack state",
description="Internal field to track whether autotracking was enabled in configuration.",
) )
@field_validator("movement_weights", mode="before") @field_validator("movement_weights", mode="before")
@@ -92,38 +72,16 @@ class PtzAutotrackConfig(FrigateBaseModel):
class OnvifConfig(FrigateBaseModel): class OnvifConfig(FrigateBaseModel):
host: str = Field( host: str = Field(default="", title="Onvif Host")
default="", port: int = Field(default=8000, title="Onvif Port")
title="ONVIF host", user: Optional[EnvString] = Field(default=None, title="Onvif Username")
description="Host (and optional scheme) for the ONVIF service for this camera.", password: Optional[EnvString] = Field(default=None, title="Onvif Password")
) tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification")
port: int = Field(
default=8000,
title="ONVIF port",
description="Port number for the ONVIF service.",
)
user: Optional[EnvString] = Field(
default=None,
title="ONVIF username",
description="Username for ONVIF authentication; some devices require admin user for ONVIF.",
)
password: Optional[EnvString] = Field(
default=None,
title="ONVIF password",
description="Password for ONVIF authentication.",
)
tls_insecure: bool = Field(
default=False,
title="Disable TLS verify",
description="Skip TLS verification and disable digest auth for ONVIF (unsafe; use in safe networks only).",
)
autotracking: PtzAutotrackConfig = Field( autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig, default_factory=PtzAutotrackConfig,
title="Autotracking", title="PTZ auto tracking config.",
description="Automatically track moving objects and keep them centered in the frame using PTZ camera movements.",
) )
ignore_time_mismatch: bool = Field( ignore_time_mismatch: bool = Field(
default=False, default=False,
title="Ignore time mismatch", title="Onvif Ignore Time Synchronization Mismatch Between Camera and Server",
description="Ignore time synchronization differences between camera and Frigate server for ONVIF communication.",
) )

View File

@@ -1,5 +1,5 @@
from enum import Enum from enum import Enum
from typing import Optional, Union from typing import Optional
from pydantic import Field from pydantic import Field
@@ -19,14 +19,11 @@ __all__ = [
"RetainModeEnum", "RetainModeEnum",
] ]
DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30"
class RecordRetainConfig(FrigateBaseModel): class RecordRetainConfig(FrigateBaseModel):
days: float = Field( days: float = Field(default=0, ge=0, title="Default retention period.")
default=0,
ge=0,
title="Retention days",
description="Days to retain recordings.",
)
class RetainModeEnum(str, Enum): class RetainModeEnum(str, Enum):
@@ -36,37 +33,22 @@ class RetainModeEnum(str, Enum):
class ReviewRetainConfig(FrigateBaseModel): class ReviewRetainConfig(FrigateBaseModel):
days: float = Field( days: float = Field(default=10, ge=0, title="Default retention period.")
default=10, mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
ge=0,
title="Retention days",
description="Number of days to retain recordings of detection events.",
)
mode: RetainModeEnum = Field(
default=RetainModeEnum.motion,
title="Retention mode",
description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).",
)
class EventsConfig(FrigateBaseModel): class EventsConfig(FrigateBaseModel):
pre_capture: int = Field( pre_capture: int = Field(
default=5, default=5,
title="Pre-capture seconds", title="Seconds to retain before event starts.",
description="Number of seconds before the detection event to include in the recording.",
le=MAX_PRE_CAPTURE, le=MAX_PRE_CAPTURE,
ge=0, ge=0,
) )
post_capture: int = Field( post_capture: int = Field(
default=5, default=5, ge=0, title="Seconds to retain after event ends."
ge=0,
title="Post-capture seconds",
description="Number of seconds after the detection event to include in the recording.",
) )
retain: ReviewRetainConfig = Field( retain: ReviewRetainConfig = Field(
default_factory=ReviewRetainConfig, default_factory=ReviewRetainConfig, title="Event retention settings."
title="Event retention",
description="Retention settings for recordings of detection events.",
) )
@@ -80,65 +62,46 @@ class RecordQualityEnum(str, Enum):
class RecordPreviewConfig(FrigateBaseModel): class RecordPreviewConfig(FrigateBaseModel):
quality: RecordQualityEnum = Field( quality: RecordQualityEnum = Field(
default=RecordQualityEnum.medium, default=RecordQualityEnum.medium, title="Quality of recording preview."
title="Preview quality",
description="Preview quality level (very_low, low, medium, high, very_high).",
) )
class RecordExportConfig(FrigateBaseModel): class RecordExportConfig(FrigateBaseModel):
hwaccel_args: Union[str, list[str]] = Field( timelapse_args: str = Field(
default="auto", default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args"
title="Export hwaccel args",
description="Hardware acceleration args to use for export/transcode operations.",
) )
class RecordConfig(FrigateBaseModel): class RecordConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable record on all cameras.")
default=False, sync_recordings: bool = Field(
title="Enable recording", default=False, title="Sync recordings with disk on startup and once a day."
description="Enable or disable recording for all cameras; can be overridden per-camera.",
) )
expire_interval: int = Field( expire_interval: int = Field(
default=60, default=60,
title="Record cleanup interval", title="Number of minutes to wait between cleanup runs.",
description="Minutes between cleanup passes that remove expired recording segments.",
) )
continuous: RecordRetainConfig = Field( continuous: RecordRetainConfig = Field(
default_factory=RecordRetainConfig, default_factory=RecordRetainConfig,
title="Continuous retention", title="Continuous recording retention settings.",
description="Number of days to retain recordings regardless of tracked objects or motion. Set to 0 if you only want to retain recordings of alerts and detections.",
) )
motion: RecordRetainConfig = Field( motion: RecordRetainConfig = Field(
default_factory=RecordRetainConfig, default_factory=RecordRetainConfig, title="Motion recording retention settings."
title="Motion retention",
description="Number of days to retain recordings triggered by motion regardless of tracked objects. Set to 0 if you only want to retain recordings of alerts and detections.",
) )
detections: EventsConfig = Field( detections: EventsConfig = Field(
default_factory=EventsConfig, default_factory=EventsConfig, title="Detection specific retention settings."
title="Detection retention",
description="Recording retention settings for detection events including pre/post capture durations.",
) )
alerts: EventsConfig = Field( alerts: EventsConfig = Field(
default_factory=EventsConfig, default_factory=EventsConfig, title="Alert specific retention settings."
title="Alert retention",
description="Recording retention settings for alert events including pre/post capture durations.",
) )
export: RecordExportConfig = Field( export: RecordExportConfig = Field(
default_factory=RecordExportConfig, default_factory=RecordExportConfig, title="Recording Export Config"
title="Export config",
description="Settings used when exporting recordings such as timelapse and hardware acceleration.",
) )
preview: RecordPreviewConfig = Field( preview: RecordPreviewConfig = Field(
default_factory=RecordPreviewConfig, default_factory=RecordPreviewConfig, title="Recording Preview Config"
title="Preview config",
description="Settings controlling the quality of recording previews shown in the UI.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, default=None, title="Keep track of original state of recording."
title="Original recording state",
description="Indicates whether recording was enabled in the original static configuration.",
) )
@property @property

View File

@@ -21,32 +21,22 @@ DEFAULT_ALERT_OBJECTS = ["person", "car"]
class AlertsConfig(FrigateBaseModel): class AlertsConfig(FrigateBaseModel):
"""Configure alerts""" """Configure alerts"""
enabled: bool = Field( enabled: bool = Field(default=True, title="Enable alerts.")
default=True,
title="Enable alerts",
description="Enable or disable alert generation for all cameras; can be overridden per-camera.",
)
labels: list[str] = Field( labels: list[str] = Field(
default=DEFAULT_ALERT_OBJECTS, default=DEFAULT_ALERT_OBJECTS, title="Labels to create alerts for."
title="Alert labels",
description="List of object labels that qualify as alerts (for example: car, person).",
) )
required_zones: Union[str, list[str]] = Field( required_zones: Union[str, list[str]] = Field(
default_factory=list, default_factory=list,
title="Required zones", title="List of required zones to be entered in order to save the event as an alert.",
description="Zones that an object must enter to be considered an alert; leave empty to allow any zone.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, default=None, title="Keep track of original state of alerts."
title="Original alerts state",
description="Tracks whether alerts were originally enabled in the static configuration.",
) )
cutoff_time: int = Field( cutoff_time: int = Field(
default=40, default=40,
title="Alerts cutoff time", title="Time to cutoff alerts after no alert-causing activity has occurred.",
description="Seconds to wait after no alert-causing activity before cutting off an alert.",
) )
@field_validator("required_zones", mode="before") @field_validator("required_zones", mode="before")
@@ -61,32 +51,22 @@ class AlertsConfig(FrigateBaseModel):
class DetectionsConfig(FrigateBaseModel): class DetectionsConfig(FrigateBaseModel):
"""Configure detections""" """Configure detections"""
enabled: bool = Field( enabled: bool = Field(default=True, title="Enable detections.")
default=True,
title="Enable detections",
description="Enable or disable detection events for all cameras; can be overridden per-camera.",
)
labels: Optional[list[str]] = Field( labels: Optional[list[str]] = Field(
default=None, default=None, title="Labels to create detections for."
title="Detection labels",
description="List of object labels that qualify as detection events.",
) )
required_zones: Union[str, list[str]] = Field( required_zones: Union[str, list[str]] = Field(
default_factory=list, default_factory=list,
title="Required zones", title="List of required zones to be entered in order to save the event as a detection.",
description="Zones that an object must enter to be considered a detection; leave empty to allow any zone.",
) )
cutoff_time: int = Field( cutoff_time: int = Field(
default=30, default=30,
title="Detections cutoff time", title="Time to cutoff detection after no detection-causing activity has occurred.",
description="Seconds to wait after no detection-causing activity before cutting off a detection.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, default=None, title="Keep track of original state of detections."
title="Original detections state",
description="Tracks whether detections were originally enabled in the static configuration.",
) )
@field_validator("required_zones", mode="before") @field_validator("required_zones", mode="before")
@@ -101,42 +81,27 @@ class DetectionsConfig(FrigateBaseModel):
class GenAIReviewConfig(FrigateBaseModel): class GenAIReviewConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(
default=False, default=False,
title="Enable GenAI descriptions", title="Enable GenAI descriptions for review items.",
description="Enable or disable GenAI-generated descriptions and summaries for review items.",
)
alerts: bool = Field(
default=True,
title="Enable GenAI for alerts",
description="Use GenAI to generate descriptions for alert items.",
)
detections: bool = Field(
default=False,
title="Enable GenAI for detections",
description="Use GenAI to generate descriptions for detection items.",
) )
alerts: bool = Field(default=True, title="Enable GenAI for alerts.")
detections: bool = Field(default=False, title="Enable GenAI for detections.")
image_source: ImageSourceEnum = Field( image_source: ImageSourceEnum = Field(
default=ImageSourceEnum.preview, default=ImageSourceEnum.preview,
title="Review image source", title="Image source for review descriptions.",
description="Source of images sent to GenAI ('preview' or 'recordings'); 'recordings' uses higher quality frames but more tokens.",
) )
additional_concerns: list[str] = Field( additional_concerns: list[str] = Field(
default=[], default=[],
title="Additional concerns", title="Additional concerns that GenAI should make note of on this camera.",
description="A list of additional concerns or notes the GenAI should consider when evaluating activity on this camera.",
) )
debug_save_thumbnails: bool = Field( debug_save_thumbnails: bool = Field(
default=False, default=False,
title="Save thumbnails", title="Save thumbnails sent to generative AI for debugging purposes.",
description="Save thumbnails that are sent to the GenAI provider for debugging and review.",
) )
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, default=None, title="Keep track of original state of generative AI."
title="Original GenAI state",
description="Tracks whether GenAI review was originally enabled in the static configuration.",
) )
preferred_language: str | None = Field( preferred_language: str | None = Field(
title="Preferred language", title="Preferred language for GenAI Response",
description="Preferred language to request from the GenAI provider for generated responses.",
default=None, default=None,
) )
activity_context_prompt: str = Field( activity_context_prompt: str = Field(
@@ -174,24 +139,19 @@ Evaluate in this order:
3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1) 3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1)
The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.""", The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.""",
title="Activity context prompt", title="Custom activity context prompt defining normal and suspicious activity patterns for this property.",
description="Custom prompt describing what is and is not suspicious activity to provide context for GenAI summaries.",
) )
class ReviewConfig(FrigateBaseModel): class ReviewConfig(FrigateBaseModel):
"""Configure reviews"""
alerts: AlertsConfig = Field( alerts: AlertsConfig = Field(
default_factory=AlertsConfig, default_factory=AlertsConfig, title="Review alerts config."
title="Alerts config",
description="Settings for which tracked objects generate alerts and how alerts are retained.",
) )
detections: DetectionsConfig = Field( detections: DetectionsConfig = Field(
default_factory=DetectionsConfig, default_factory=DetectionsConfig, title="Review detections config."
title="Detections config",
description="Settings for creating detection events (non-alert) and how long to keep them.",
) )
genai: GenAIReviewConfig = Field( genai: GenAIReviewConfig = Field(
default_factory=GenAIReviewConfig, default_factory=GenAIReviewConfig, title="Review description genai config."
title="GenAI config",
description="Controls use of generative AI for producing descriptions and summaries of review items.",
) )

View File

@@ -9,68 +9,36 @@ __all__ = ["SnapshotsConfig", "RetainConfig"]
class RetainConfig(FrigateBaseModel): class RetainConfig(FrigateBaseModel):
default: float = Field( default: float = Field(default=10, title="Default retention period.")
default=10, mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
title="Default retention",
description="Default number of days to retain snapshots.",
)
mode: RetainModeEnum = Field(
default=RetainModeEnum.motion,
title="Retention mode",
description="Mode for retention: all (save all segments), motion (save segments with motion), or active_objects (save segments with active objects).",
)
objects: dict[str, float] = Field( objects: dict[str, float] = Field(
default_factory=dict, default_factory=dict, title="Object retention period."
title="Object retention",
description="Per-object overrides for snapshot retention days.",
) )
class SnapshotsConfig(FrigateBaseModel): class SnapshotsConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Snapshots enabled.")
default=False,
title="Snapshots enabled",
description="Enable or disable saving snapshots for all cameras; can be overridden per-camera.",
)
clean_copy: bool = Field( clean_copy: bool = Field(
default=True, default=True, title="Create a clean copy of the snapshot image."
title="Save clean copy",
description="Save an unannotated clean copy of snapshots in addition to annotated ones.",
) )
timestamp: bool = Field( timestamp: bool = Field(
default=False, default=False, title="Add a timestamp overlay on the snapshot."
title="Timestamp overlay",
description="Overlay a timestamp on saved snapshots.",
) )
bounding_box: bool = Field( bounding_box: bool = Field(
default=True, default=True, title="Add a bounding box overlay on the snapshot."
title="Bounding box overlay",
description="Draw bounding boxes for tracked objects on saved snapshots.",
)
crop: bool = Field(
default=False,
title="Crop snapshot",
description="Crop saved snapshots to the detected object's bounding box.",
) )
crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
required_zones: list[str] = Field( required_zones: list[str] = Field(
default_factory=list, default_factory=list,
title="Required zones", title="List of required zones to be entered in order to save a snapshot.",
description="Zones an object must enter for a snapshot to be saved.",
)
height: Optional[int] = Field(
default=None,
title="Snapshot height",
description="Height (pixels) to resize saved snapshots to; leave empty to preserve original size.",
) )
height: Optional[int] = Field(default=None, title="Snapshot image height.")
retain: RetainConfig = Field( retain: RetainConfig = Field(
default_factory=RetainConfig, default_factory=RetainConfig, title="Snapshot retention."
title="Snapshot retention",
description="Retention settings for saved snapshots including default days and per-object overrides.",
) )
quality: int = Field( quality: int = Field(
default=70, default=70,
title="JPEG quality", title="Quality of the encoded jpeg (0-100).",
description="JPEG encode quality for saved snapshots (0-100).",
ge=0, ge=0,
le=100, le=100,
) )

View File

@@ -27,27 +27,9 @@ class TimestampPositionEnum(str, Enum):
class ColorConfig(FrigateBaseModel): class ColorConfig(FrigateBaseModel):
red: int = Field( red: int = Field(default=255, ge=0, le=255, title="Red")
default=255, green: int = Field(default=255, ge=0, le=255, title="Green")
ge=0, blue: int = Field(default=255, ge=0, le=255, title="Blue")
le=255,
title="Red",
description="Red component (0-255) for timestamp color.",
)
green: int = Field(
default=255,
ge=0,
le=255,
title="Green",
description="Green component (0-255) for timestamp color.",
)
blue: int = Field(
default=255,
ge=0,
le=255,
title="Blue",
description="Blue component (0-255) for timestamp color.",
)
class TimestampEffectEnum(str, Enum): class TimestampEffectEnum(str, Enum):
@@ -57,27 +39,11 @@ class TimestampEffectEnum(str, Enum):
class TimestampStyleConfig(FrigateBaseModel): class TimestampStyleConfig(FrigateBaseModel):
position: TimestampPositionEnum = Field( position: TimestampPositionEnum = Field(
default=TimestampPositionEnum.tl, default=TimestampPositionEnum.tl, title="Timestamp position."
title="Timestamp position",
description="Position of the timestamp on the image (tl/tr/bl/br).",
)
format: str = Field(
default=DEFAULT_TIME_FORMAT,
title="Timestamp format",
description="Datetime format string used for timestamps (Python datetime format codes).",
)
color: ColorConfig = Field(
default_factory=ColorConfig,
title="Timestamp color",
description="RGB color values for the timestamp text (all values 0-255).",
)
thickness: int = Field(
default=2,
title="Timestamp thickness",
description="Line thickness of the timestamp text.",
) )
format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
thickness: int = Field(default=2, title="Timestamp thickness.")
effect: Optional[TimestampEffectEnum] = Field( effect: Optional[TimestampEffectEnum] = Field(
default=None, default=None, title="Timestamp effect."
title="Timestamp effect",
description="Visual effect for the timestamp text (none, solid, shadow).",
) )

View File

@@ -6,13 +6,7 @@ __all__ = ["CameraUiConfig"]
class CameraUiConfig(FrigateBaseModel): class CameraUiConfig(FrigateBaseModel):
order: int = Field( order: int = Field(default=0, title="Order of camera in UI.")
default=0,
title="UI order",
description="Numeric order used to sort the camera in the UI (default dashboard and lists); larger numbers appear later.",
)
dashboard: bool = Field( dashboard: bool = Field(
default=True, default=True, title="Show this camera in Frigate dashboard UI."
title="Show in UI",
description="Toggle whether this camera is visible everywhere in the Frigate UI. Disabling this will require manually editing the config to view this camera in the UI again.",
) )

View File

@@ -14,46 +14,36 @@ logger = logging.getLogger(__name__)
class ZoneConfig(BaseModel): class ZoneConfig(BaseModel):
friendly_name: Optional[str] = Field( friendly_name: Optional[str] = Field(
None, None, title="Zone friendly name used in the Frigate UI."
title="Zone name",
description="A user-friendly name for the zone, displayed in the Frigate UI. If not set, a formatted version of the zone name will be used.",
) )
filters: dict[str, FilterConfig] = Field( filters: dict[str, FilterConfig] = Field(
default_factory=dict, default_factory=dict, title="Zone filters."
title="Zone filters",
description="Filters to apply to objects within this zone. Used to reduce false positives or restrict which objects are considered present in the zone.",
) )
coordinates: Union[str, list[str]] = Field( coordinates: Union[str, list[str]] = Field(
title="Coordinates", title="Coordinates polygon for the defined zone."
description="Polygon coordinates that define the zone area. Can be a comma-separated string or a list of coordinate strings. Coordinates should be relative (0-1) or absolute (legacy).",
) )
distances: Optional[Union[str, list[str]]] = Field( distances: Optional[Union[str, list[str]]] = Field(
default_factory=list, default_factory=list,
title="Real-world distances", title="Real-world distances for the sides of quadrilateral for the defined zone.",
description="Optional real-world distances for each side of the zone quadrilateral, used for speed or distance calculations. Must have exactly 4 values if set.",
) )
inertia: int = Field( inertia: int = Field(
default=3, default=3,
title="Inertia frames", title="Number of consecutive frames required for object to be considered present in the zone.",
gt=0, gt=0,
description="Number of consecutive frames an object must be detected in the zone before it is considered present. Helps filter out transient detections.",
) )
loitering_time: int = Field( loitering_time: int = Field(
default=0, default=0,
ge=0, ge=0,
title="Loitering seconds", title="Number of seconds that an object must loiter to be considered in the zone.",
description="Number of seconds an object must remain in the zone to be considered as loitering. Set to 0 to disable loitering detection.",
) )
speed_threshold: Optional[float] = Field( speed_threshold: Optional[float] = Field(
default=None, default=None,
ge=0.1, ge=0.1,
title="Minimum speed", title="Minimum speed value for an object to be considered in the zone.",
description="Minimum speed (in real-world units if distances are set) required for an object to be considered present in the zone. Used for speed-based zone triggers.",
) )
objects: Union[str, list[str]] = Field( objects: Union[str, list[str]] = Field(
default_factory=list, default_factory=list,
title="Trigger objects", title="List of objects that can trigger the zone.",
description="List of object types (from labelmap) that can trigger this zone. Can be a string or a list of strings. If empty, all objects are considered.",
) )
_color: Optional[tuple[int, int, int]] = PrivateAttr() _color: Optional[tuple[int, int, int]] = PrivateAttr()
_contour: np.ndarray = PrivateAttr() _contour: np.ndarray = PrivateAttr()

View File

@@ -8,21 +8,13 @@ __all__ = ["CameraGroupConfig"]
class CameraGroupConfig(FrigateBaseModel): class CameraGroupConfig(FrigateBaseModel):
"""Represents a group of cameras."""
cameras: Union[str, list[str]] = Field( cameras: Union[str, list[str]] = Field(
default_factory=list, default_factory=list, title="List of cameras in this group."
title="Camera list",
description="Array of camera names included in this group.",
)
icon: str = Field(
default="generic",
title="Group icon",
description="Icon used to represent the camera group in the UI.",
)
order: int = Field(
default=0,
title="Sort order",
description="Numeric order used to sort camera groups in the UI; larger numbers appear later.",
) )
icon: str = Field(default="generic", title="Icon that represents camera group.")
order: int = Field(default=0, title="Sort order for group.")
@field_validator("cameras", mode="before") @field_validator("cameras", mode="before")
@classmethod @classmethod

View File

@@ -43,43 +43,28 @@ class ObjectClassificationType(str, Enum):
class AudioTranscriptionConfig(FrigateBaseModel): class AudioTranscriptionConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable audio transcription.")
default=False,
title="Enable audio transcription",
description="Enable or disable automatic audio transcription for all cameras; can be overridden per-camera.",
)
language: str = Field( language: str = Field(
default="en", default="en",
title="Transcription language", title="Language abbreviation to use for audio event transcription/translation.",
description="Language code used for transcription/translation (for example 'en' for English). See https://whisper-api.com/docs/languages/ for supported language codes.",
) )
device: Optional[EnrichmentsDeviceEnum] = Field( device: Optional[EnrichmentsDeviceEnum] = Field(
default=EnrichmentsDeviceEnum.CPU, default=EnrichmentsDeviceEnum.CPU,
title="Transcription device", title="The device used for audio transcription.",
description="Device key (CPU/GPU) to run the transcription model on. Only NVIDIA CUDA GPUs are currently supported for transcription.",
) )
model_size: str = Field( model_size: str = Field(
default="small", default="small", title="The size of the embeddings model used."
title="Model size",
description="Model size to use for offline audio event transcription.",
) )
live_enabled: Optional[bool] = Field( live_enabled: Optional[bool] = Field(
default=False, default=False, title="Enable live transcriptions."
title="Live transcription",
description="Enable streaming live transcription for audio as it is received.",
) )
class BirdClassificationConfig(FrigateBaseModel): class BirdClassificationConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable bird classification.")
default=False,
title="Bird classification",
description="Enable or disable bird classification.",
)
threshold: float = Field( threshold: float = Field(
default=0.9, default=0.9,
title="Minimum score", title="Minimum classification score required to be considered a match.",
description="Minimum classification score required to accept a bird classification.",
gt=0.0, gt=0.0,
le=1.0, le=1.0,
) )
@@ -87,62 +72,42 @@ class BirdClassificationConfig(FrigateBaseModel):
class CustomClassificationStateCameraConfig(FrigateBaseModel): class CustomClassificationStateCameraConfig(FrigateBaseModel):
crop: list[float, float, float, float] = Field( crop: list[float, float, float, float] = Field(
title="Classification crop", title="Crop of image frame on this camera to run classification on."
description="Crop coordinates to use for running classification on this camera.",
) )
class CustomClassificationStateConfig(FrigateBaseModel): class CustomClassificationStateConfig(FrigateBaseModel):
cameras: Dict[str, CustomClassificationStateCameraConfig] = Field( cameras: Dict[str, CustomClassificationStateCameraConfig] = Field(
title="Classification cameras", title="Cameras to run classification on."
description="Per-camera crop and settings for running state classification.",
) )
motion: bool = Field( motion: bool = Field(
default=False, default=False,
title="Run on motion", title="If classification should be run when motion is detected in the crop.",
description="If true, run classification when motion is detected within the specified crop.",
) )
interval: int | None = Field( interval: int | None = Field(
default=None, default=None,
title="Classification interval", title="Interval to run classification on in seconds.",
description="Interval (seconds) between periodic classification runs for state classification.",
gt=0, gt=0,
) )
class CustomClassificationObjectConfig(FrigateBaseModel): class CustomClassificationObjectConfig(FrigateBaseModel):
objects: list[str] = Field( objects: list[str] = Field(title="Object types to classify.")
default_factory=list,
title="Classify objects",
description="List of object types to run object classification on.",
)
classification_type: ObjectClassificationType = Field( classification_type: ObjectClassificationType = Field(
default=ObjectClassificationType.sub_label, default=ObjectClassificationType.sub_label,
title="Classification type", title="Type of classification that is applied.",
description="Classification type applied: 'sub_label' (adds sub_label) or other supported types.",
) )
class CustomClassificationConfig(FrigateBaseModel): class CustomClassificationConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=True, title="Enable running the model.")
default=True, name: str | None = Field(default=None, title="Name of classification model.")
title="Enable model",
description="Enable or disable the custom classification model.",
)
name: str | None = Field(
default=None,
title="Model name",
description="Identifier for the custom classification model to use.",
)
threshold: float = Field( threshold: float = Field(
default=0.8, default=0.8, title="Classification score threshold to change the state."
title="Score threshold",
description="Score threshold used to change the classification state.",
) )
save_attempts: int | None = Field( save_attempts: int | None = Field(
default=None, default=None,
title="Save attempts", title="Number of classification attempts to save in the recent classifications tab. If not specified, defaults to 200 for object classification and 100 for state classification.",
description="How many classification attempts to save for recent classifications UI.",
ge=0, ge=0,
) )
object_config: CustomClassificationObjectConfig | None = Field(default=None) object_config: CustomClassificationObjectConfig | None = Field(default=None)
@@ -151,76 +116,47 @@ class CustomClassificationConfig(FrigateBaseModel):
class ClassificationConfig(FrigateBaseModel): class ClassificationConfig(FrigateBaseModel):
bird: BirdClassificationConfig = Field( bird: BirdClassificationConfig = Field(
default_factory=BirdClassificationConfig, default_factory=BirdClassificationConfig, title="Bird classification config."
title="Bird classification config",
description="Settings specific to bird classification models.",
) )
custom: Dict[str, CustomClassificationConfig] = Field( custom: Dict[str, CustomClassificationConfig] = Field(
default={}, default={}, title="Custom Classification Model Configs."
title="Custom Classification Models",
description="Configuration for custom classification models used for objects or state detection.",
) )
class SemanticSearchConfig(FrigateBaseModel): class SemanticSearchConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable semantic search.")
default=False,
title="Enable semantic search",
description="Enable or disable the semantic search feature.",
)
reindex: Optional[bool] = Field( reindex: Optional[bool] = Field(
default=False, default=False, title="Reindex all tracked objects on startup."
title="Reindex on startup",
description="Trigger a full reindex of historical tracked objects into the embeddings database.",
) )
model: Optional[SemanticSearchModelEnum] = Field( model: Optional[SemanticSearchModelEnum] = Field(
default=SemanticSearchModelEnum.jinav1, default=SemanticSearchModelEnum.jinav1,
title="Semantic search model", title="The CLIP model to use for semantic search.",
description="The embeddings model to use for semantic search (for example 'jinav1').",
) )
model_size: str = Field( model_size: str = Field(
default="small", default="small", title="The size of the embeddings model used."
title="Model size",
description="Select model size; 'small' runs on CPU and 'large' typically requires GPU.",
) )
device: Optional[str] = Field( device: Optional[str] = Field(
default=None, default=None,
title="Device", title="The device key to use for semantic search.",
description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information", description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
) )
class TriggerConfig(FrigateBaseModel): class TriggerConfig(FrigateBaseModel):
friendly_name: Optional[str] = Field( friendly_name: Optional[str] = Field(
None, None, title="Trigger friendly name used in the Frigate UI."
title="Friendly name",
description="Optional friendly name displayed in the UI for this trigger.",
)
enabled: bool = Field(
default=True,
title="Enable this trigger",
description="Enable or disable this semantic search trigger.",
)
type: TriggerType = Field(
default=TriggerType.DESCRIPTION,
title="Trigger type",
description="Type of trigger: 'thumbnail' (match against image) or 'description' (match against text).",
)
data: str = Field(
title="Trigger content",
description="Text phrase or thumbnail ID to match against tracked objects.",
) )
enabled: bool = Field(default=True, title="Enable this trigger")
type: TriggerType = Field(default=TriggerType.DESCRIPTION, title="Type of trigger")
data: str = Field(title="Trigger content (text phrase or image ID)")
threshold: float = Field( threshold: float = Field(
title="Trigger threshold", title="Confidence score required to run the trigger",
description="Minimum similarity score (0-1) required to activate this trigger.",
default=0.8, default=0.8,
gt=0.0, gt=0.0,
le=1.0, le=1.0,
) )
actions: List[TriggerAction] = Field( actions: List[TriggerAction] = Field(
default=[], default=[], title="Actions to perform when trigger is matched"
title="Trigger actions",
description="List of actions to execute when trigger matches (notification, sub_label, attribute).",
) )
model_config = ConfigDict(extra="forbid", protected_namespaces=()) model_config = ConfigDict(extra="forbid", protected_namespaces=())
@@ -229,191 +165,147 @@ class TriggerConfig(FrigateBaseModel):
class CameraSemanticSearchConfig(FrigateBaseModel): class CameraSemanticSearchConfig(FrigateBaseModel):
triggers: Dict[str, TriggerConfig] = Field( triggers: Dict[str, TriggerConfig] = Field(
default={}, default={},
title="Triggers", title="Trigger actions on tracked objects that match existing thumbnails or descriptions",
description="Actions and matching criteria for camera-specific semantic search triggers.",
) )
model_config = ConfigDict(extra="forbid", protected_namespaces=()) model_config = ConfigDict(extra="forbid", protected_namespaces=())
class FaceRecognitionConfig(FrigateBaseModel): class FaceRecognitionConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable face recognition.")
default=False,
title="Enable face recognition",
description="Enable or disable face recognition for all cameras; can be overridden per-camera.",
)
model_size: str = Field( model_size: str = Field(
default="small", default="small", title="The size of the embeddings model used."
title="Model size",
description="Model size to use for face embeddings (small/large); larger may require GPU.",
) )
unknown_score: float = Field( unknown_score: float = Field(
title="Unknown score threshold", title="Minimum face distance score required to be marked as a potential match.",
description="Distance threshold below which a face is considered a potential match (higher = stricter).",
default=0.8, default=0.8,
gt=0.0, gt=0.0,
le=1.0, le=1.0,
) )
detection_threshold: float = Field( detection_threshold: float = Field(
default=0.7, default=0.7,
title="Detection threshold", title="Minimum face detection score required to be considered a face.",
description="Minimum detection confidence required to consider a face detection valid.",
gt=0.0, gt=0.0,
le=1.0, le=1.0,
) )
recognition_threshold: float = Field( recognition_threshold: float = Field(
default=0.9, default=0.9,
title="Recognition threshold", title="Minimum face distance score required to be considered a match.",
description="Face embedding distance threshold to consider two faces a match.",
gt=0.0, gt=0.0,
le=1.0, le=1.0,
) )
min_area: int = Field( min_area: int = Field(
default=750, default=750, title="Min area of face box to consider running face recognition."
title="Minimum face area",
description="Minimum area (pixels) of a detected face box required to attempt recognition.",
) )
min_faces: int = Field( min_faces: int = Field(
default=1, default=1,
gt=0, gt=0,
le=6, le=6,
title="Minimum faces", title="Min face recognitions for the sub label to be applied to the person object.",
description="Minimum number of face recognitions required before applying a recognized sub-label to a person.",
) )
save_attempts: int = Field( save_attempts: int = Field(
default=200, default=200,
ge=0, ge=0,
title="Save attempts", title="Number of face attempts to save in the recent recognitions tab.",
description="Number of face recognition attempts to retain for recent recognition UI.",
) )
blur_confidence_filter: bool = Field( blur_confidence_filter: bool = Field(
default=True, default=True, title="Apply blur quality filter to face confidence."
title="Blur confidence filter",
description="Adjust confidence scores based on image blur to reduce false positives for poor quality faces.",
) )
device: Optional[str] = Field( device: Optional[str] = Field(
default=None, default=None,
title="Device", title="The device key to use for face recognition.",
description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information", description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
) )
class CameraFaceRecognitionConfig(FrigateBaseModel): class CameraFaceRecognitionConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable face recognition.")
default=False,
title="Enable face recognition",
description="Enable or disable face recognition.",
)
min_area: int = Field( min_area: int = Field(
default=750, default=750, title="Min area of face box to consider running face recognition."
title="Minimum face area",
description="Minimum area (pixels) of a detected face box required to attempt recognition.",
) )
model_config = ConfigDict(extra="forbid", protected_namespaces=()) model_config = ConfigDict(extra="forbid", protected_namespaces=())
class ReplaceRule(FrigateBaseModel): class ReplaceRule(FrigateBaseModel):
pattern: str = Field(..., title="Regex pattern") pattern: str = Field(..., title="Regex pattern to match.")
replacement: str = Field(..., title="Replacement string") replacement: str = Field(
..., title="Replacement string (supports backrefs like '\\1')."
)
class LicensePlateRecognitionConfig(FrigateBaseModel): class LicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable license plate recognition.")
default=False,
title="Enable LPR",
description="Enable or disable license plate recognition for all cameras; can be overridden per-camera.",
)
model_size: str = Field( model_size: str = Field(
default="small", default="small", title="The size of the embeddings model used."
title="Model size",
description="Model size used for text detection/recognition. Most users should use 'small'.",
) )
detection_threshold: float = Field( detection_threshold: float = Field(
default=0.7, default=0.7,
title="Detection threshold", title="License plate object confidence score required to begin running recognition.",
description="Detection confidence threshold to begin running OCR on a suspected plate.",
gt=0.0, gt=0.0,
le=1.0, le=1.0,
) )
min_area: int = Field( min_area: int = Field(
default=1000, default=1000,
title="Minimum plate area", title="Minimum area of license plate to begin running recognition.",
description="Minimum plate area (pixels) required to attempt recognition.",
) )
recognition_threshold: float = Field( recognition_threshold: float = Field(
default=0.9, default=0.9,
title="Recognition threshold", title="Recognition confidence score required to add the plate to the object as a sub label.",
description="Confidence threshold required for recognized plate text to be attached as a sub-label.",
gt=0.0, gt=0.0,
le=1.0, le=1.0,
) )
min_plate_length: int = Field( min_plate_length: int = Field(
default=4, default=4,
title="Min plate length", title="Minimum number of characters a license plate must have to be added to the object as a sub label.",
description="Minimum number of characters a recognized plate must contain to be considered valid.",
) )
format: Optional[str] = Field( format: Optional[str] = Field(
default=None, default=None,
title="Plate format regex", title="Regular expression for the expected format of license plate.",
description="Optional regex to validate recognized plate strings against an expected format.",
) )
match_distance: int = Field( match_distance: int = Field(
default=1, default=1,
title="Match distance", title="Allow this number of missing/incorrect characters to still cause a detected plate to match a known plate.",
description="Number of character mismatches allowed when comparing detected plates to known plates.",
ge=0, ge=0,
) )
known_plates: Optional[Dict[str, List[str]]] = Field( known_plates: Optional[Dict[str, List[str]]] = Field(
default={}, default={}, title="Known plates to track (strings or regular expressions)."
title="Known plates",
description="List of plates or regexes to specially track or alert on.",
) )
enhancement: int = Field( enhancement: int = Field(
default=0, default=0,
title="Enhancement level", title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.",
description="Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution.",
ge=0, ge=0,
le=10, le=10,
) )
debug_save_plates: bool = Field( debug_save_plates: bool = Field(
default=False, default=False,
title="Save debug plates", title="Save plates captured for LPR for debugging purposes.",
description="Save plate crop images for debugging LPR performance.",
) )
device: Optional[str] = Field( device: Optional[str] = Field(
default=None, default=None,
title="Device", title="The device key to use for LPR.",
description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information", description="This is an override, to target a specific device. See https://onnxruntime.ai/docs/execution-providers/ for more information",
) )
replace_rules: List[ReplaceRule] = Field( replace_rules: List[ReplaceRule] = Field(
default_factory=list, default_factory=list,
title="Replacement rules", title="List of regex replacement rules for normalizing detected plates. Each rule has 'pattern' and 'replacement'.",
description="Regex replacement rules used to normalize detected plate strings before matching.",
) )
class CameraLicensePlateRecognitionConfig(FrigateBaseModel): class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable license plate recognition.")
default=False,
title="Enable LPR",
description="Enable or disable LPR on this camera.",
)
expire_time: int = Field( expire_time: int = Field(
default=3, default=3,
title="Expire seconds", title="Expire plates not seen after number of seconds (for dedicated LPR cameras only).",
description="Time in seconds after which an unseen plate is expired from the tracker (for dedicated LPR cameras only).",
gt=0, gt=0,
) )
min_area: int = Field( min_area: int = Field(
default=1000, default=1000,
title="Minimum plate area", title="Minimum area of license plate to begin running recognition.",
description="Minimum plate area (pixels) required to attempt recognition.",
) )
enhancement: int = Field( enhancement: int = Field(
default=0, default=0,
title="Enhancement level", title="Amount of contrast adjustment and denoising to apply to license plate images before recognition.",
description="Enhancement level (0-10) to apply to plate crops prior to OCR; higher values may not always improve results, levels above 5 may only work with night time plates and should be used with caution.",
ge=0, ge=0,
le=10, le=10,
) )
@@ -422,18 +314,12 @@ class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
class CameraAudioTranscriptionConfig(FrigateBaseModel): class CameraAudioTranscriptionConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable audio transcription.")
default=False,
title="Enable transcription",
description="Enable or disable manually triggered audio event transcription.",
)
enabled_in_config: Optional[bool] = Field( enabled_in_config: Optional[bool] = Field(
default=None, title="Original transcription state" default=None, title="Keep track of original state of audio transcription."
) )
live_enabled: Optional[bool] = Field( live_enabled: Optional[bool] = Field(
default=False, default=False, title="Enable live transcriptions."
title="Live transcription",
description="Enable streaming live transcription for audio as it is received.",
) )
model_config = ConfigDict(extra="forbid", protected_namespaces=()) model_config = ConfigDict(extra="forbid", protected_namespaces=())

View File

@@ -45,7 +45,7 @@ from .camera.audio import AudioConfig
from .camera.birdseye import BirdseyeConfig from .camera.birdseye import BirdseyeConfig
from .camera.detect import DetectConfig from .camera.detect import DetectConfig
from .camera.ffmpeg import FfmpegConfig from .camera.ffmpeg import FfmpegConfig
from .camera.genai import GenAIConfig, GenAIRoleEnum from .camera.genai import GenAIConfig
from .camera.motion import MotionConfig from .camera.motion import MotionConfig
from .camera.notification import NotificationConfig from .camera.notification import NotificationConfig
from .camera.objects import FilterConfig, ObjectConfig from .camera.objects import FilterConfig, ObjectConfig
@@ -299,189 +299,116 @@ def verify_lpr_and_face(
class FrigateConfig(FrigateBaseModel): class FrigateConfig(FrigateBaseModel):
version: Optional[str] = Field( version: Optional[str] = Field(default=None, title="Current config version.")
default=None,
title="Current config version",
description="Numeric or string version of the active configuration to help detect migrations or format changes.",
)
safe_mode: bool = Field( safe_mode: bool = Field(
default=False, default=False, title="If Frigate should be started in safe mode."
title="Safe mode",
description="When enabled, start Frigate in safe mode with reduced features for troubleshooting.",
) )
# Fields that install global state should be defined first, so that their validators run first. # Fields that install global state should be defined first, so that their validators run first.
environment_vars: EnvVars = Field( environment_vars: EnvVars = Field(
default_factory=dict, default_factory=dict, title="Frigate environment variables."
title="Environment variables",
description="Key/value pairs of environment variables to set for the Frigate process in Home Assistant OS. Non-HAOS users must use Docker environment variable configuration instead.",
) )
logger: LoggerConfig = Field( logger: LoggerConfig = Field(
default_factory=LoggerConfig, default_factory=LoggerConfig,
title="Logging", title="Logging configuration.",
description="Controls default log verbosity and per-component log level overrides.",
validate_default=True, validate_default=True,
) )
# Global config # Global config
auth: AuthConfig = Field( auth: AuthConfig = Field(default_factory=AuthConfig, title="Auth configuration.")
default_factory=AuthConfig,
title="Authentication",
description="Authentication and session-related settings including cookie and rate limit options.",
)
database: DatabaseConfig = Field( database: DatabaseConfig = Field(
default_factory=DatabaseConfig, default_factory=DatabaseConfig, title="Database configuration."
title="Database",
description="Settings for the SQLite database used by Frigate to store tracked object and recording metadata.",
) )
go2rtc: RestreamConfig = Field( go2rtc: RestreamConfig = Field(
default_factory=RestreamConfig, default_factory=RestreamConfig, title="Global restream configuration."
title="go2rtc",
description="Settings for the integrated go2rtc restreaming service used for live stream relaying and translation.",
)
mqtt: MqttConfig = Field(
title="MQTT",
description="Settings for connecting and publishing telemetry, snapshots, and event details to an MQTT broker.",
) )
mqtt: MqttConfig = Field(title="MQTT configuration.")
notifications: NotificationConfig = Field( notifications: NotificationConfig = Field(
default_factory=NotificationConfig, default_factory=NotificationConfig, title="Global notification configuration."
title="Notifications",
description="Settings to enable and control notifications for all cameras; can be overridden per-camera.",
) )
networking: NetworkingConfig = Field( networking: NetworkingConfig = Field(
default_factory=NetworkingConfig, default_factory=NetworkingConfig, title="Networking configuration"
title="Networking",
description="Network-related settings such as IPv6 enablement for Frigate endpoints.",
) )
proxy: ProxyConfig = Field( proxy: ProxyConfig = Field(
default_factory=ProxyConfig, default_factory=ProxyConfig, title="Proxy configuration."
title="Proxy",
description="Settings for integrating Frigate behind a reverse proxy that passes authenticated user headers.",
) )
telemetry: TelemetryConfig = Field( telemetry: TelemetryConfig = Field(
default_factory=TelemetryConfig, default_factory=TelemetryConfig, title="Telemetry configuration."
title="Telemetry",
description="System telemetry and stats options including GPU and network bandwidth monitoring.",
)
tls: TlsConfig = Field(
default_factory=TlsConfig,
title="TLS",
description="TLS settings for Frigate's web endpoints (port 8971).",
)
ui: UIConfig = Field(
default_factory=UIConfig,
title="UI",
description="User interface preferences such as timezone, time/date formatting, and units.",
) )
tls: TlsConfig = Field(default_factory=TlsConfig, title="TLS configuration.")
ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.")
# Detector config # Detector config
detectors: Dict[str, BaseDetectorConfig] = Field( detectors: Dict[str, BaseDetectorConfig] = Field(
default=DEFAULT_DETECTORS, default=DEFAULT_DETECTORS,
title="Detector hardware", title="Detector hardware configuration.",
description="Configuration for object detectors (CPU, GPU, ONNX backends) and any detector-specific model settings.",
) )
model: ModelConfig = Field( model: ModelConfig = Field(
default_factory=ModelConfig, default_factory=ModelConfig, title="Detection model configuration."
title="Detection model",
description="Settings to configure a custom object detection model and its input shape.",
) )
# GenAI config (named provider configs: name -> GenAIConfig) # GenAI config
genai: Dict[str, GenAIConfig] = Field( genai: GenAIConfig = Field(
default_factory=dict, default_factory=GenAIConfig, title="Generative AI configuration."
title="Generative AI configuration (named providers).",
description="Settings for integrated generative AI providers used to generate object descriptions and review summaries.",
) )
# Camera config # Camera config
cameras: Dict[str, CameraConfig] = Field(title="Cameras", description="Cameras") cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.")
audio: AudioConfig = Field( audio: AudioConfig = Field(
default_factory=AudioConfig, default_factory=AudioConfig, title="Global Audio events configuration."
title="Audio events",
description="Settings for audio-based event detection for all cameras; can be overridden per-camera.",
) )
birdseye: BirdseyeConfig = Field( birdseye: BirdseyeConfig = Field(
default_factory=BirdseyeConfig, default_factory=BirdseyeConfig, title="Birdseye configuration."
title="Birdseye",
description="Settings for the Birdseye composite view that composes multiple camera feeds into a single layout.",
) )
detect: DetectConfig = Field( detect: DetectConfig = Field(
default_factory=DetectConfig, default_factory=DetectConfig, title="Global object tracking configuration."
title="Object Detection",
description="Settings for the detection/detect role used to run object detection and initialize trackers.",
) )
ffmpeg: FfmpegConfig = Field( ffmpeg: FfmpegConfig = Field(
default_factory=FfmpegConfig, default_factory=FfmpegConfig, title="Global FFmpeg configuration."
title="FFmpeg",
description="FFmpeg settings including binary path, args, hwaccel options, and per-role output args.",
) )
live: CameraLiveConfig = Field( live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, default_factory=CameraLiveConfig, title="Live playback settings."
title="Live playback",
description="Settings used by the Web UI to control live stream resolution and quality.",
) )
motion: Optional[MotionConfig] = Field( motion: Optional[MotionConfig] = Field(
default=None, default=None, title="Global motion detection configuration."
title="Motion detection",
description="Default motion detection settings applied to cameras unless overridden per-camera.",
) )
objects: ObjectConfig = Field( objects: ObjectConfig = Field(
default_factory=ObjectConfig, default_factory=ObjectConfig, title="Global object configuration."
title="Objects",
description="Object tracking defaults including which labels to track and per-object filters.",
) )
record: RecordConfig = Field( record: RecordConfig = Field(
default_factory=RecordConfig, default_factory=RecordConfig, title="Global record configuration."
title="Recording",
description="Recording and retention settings applied to cameras unless overridden per-camera.",
) )
review: ReviewConfig = Field( review: ReviewConfig = Field(
default_factory=ReviewConfig, default_factory=ReviewConfig, title="Review configuration."
title="Review",
description="Settings that control alerts, detections, and GenAI review summaries used by the UI and storage.",
) )
snapshots: SnapshotsConfig = Field( snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig, default_factory=SnapshotsConfig, title="Global snapshots configuration."
title="Snapshots",
description="Settings for saved JPEG snapshots of tracked objects for all cameras; can be overridden per-camera.",
) )
timestamp_style: TimestampStyleConfig = Field( timestamp_style: TimestampStyleConfig = Field(
default_factory=TimestampStyleConfig, default_factory=TimestampStyleConfig,
title="Timestamp style", title="Global timestamp style configuration.",
description="Styling options for in-feed timestamps applied to debug view and snapshots.",
) )
# Classification Config # Classification Config
audio_transcription: AudioTranscriptionConfig = Field( audio_transcription: AudioTranscriptionConfig = Field(
default_factory=AudioTranscriptionConfig, default_factory=AudioTranscriptionConfig, title="Audio transcription config."
title="Audio transcription",
description="Settings for live and speech audio transcription used for events and live captions.",
) )
classification: ClassificationConfig = Field( classification: ClassificationConfig = Field(
default_factory=ClassificationConfig, default_factory=ClassificationConfig, title="Object classification config."
title="Object classification",
description="Settings for classification models used to refine object labels or state classification.",
) )
semantic_search: SemanticSearchConfig = Field( semantic_search: SemanticSearchConfig = Field(
default_factory=SemanticSearchConfig, default_factory=SemanticSearchConfig, title="Semantic search configuration."
title="Semantic Search",
description="Settings for Semantic Search which builds and queries object embeddings to find similar items.",
) )
face_recognition: FaceRecognitionConfig = Field( face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, default_factory=FaceRecognitionConfig, title="Face recognition config."
title="Face recognition",
description="Settings for face detection and recognition for all cameras; can be overridden per-camera.",
) )
lpr: LicensePlateRecognitionConfig = Field( lpr: LicensePlateRecognitionConfig = Field(
default_factory=LicensePlateRecognitionConfig, default_factory=LicensePlateRecognitionConfig,
title="License Plate Recognition", title="License Plate recognition config.",
description="License plate recognition settings including detection thresholds, formatting, and known plates.",
) )
camera_groups: Dict[str, CameraGroupConfig] = Field( camera_groups: Dict[str, CameraGroupConfig] = Field(
default_factory=dict, default_factory=dict, title="Camera group configuration"
title="Camera groups",
description="Configuration for named camera groups used to organize cameras in the UI.",
) )
_plus_api: PlusApi _plus_api: PlusApi
@@ -504,18 +431,6 @@ class FrigateConfig(FrigateBaseModel):
# set notifications state # set notifications state
self.notifications.enabled_in_config = self.notifications.enabled self.notifications.enabled_in_config = self.notifications.enabled
# validate genai: each role (tools, vision, embeddings) at most once
role_to_name: dict[GenAIRoleEnum, str] = {}
for name, genai_cfg in self.genai.items():
for role in genai_cfg.roles:
if role in role_to_name:
raise ValueError(
f"GenAI role '{role.value}' is assigned to both "
f"'{role_to_name[role]}' and '{name}'; each role must have "
"exactly one provider."
)
role_to_name[role] = name
# set default min_score for object attributes # set default min_score for object attributes
for attribute in self.model.all_attributes: for attribute in self.model.all_attributes:
if not self.objects.filters.get(attribute): if not self.objects.filters.get(attribute):
@@ -560,9 +475,6 @@ class FrigateConfig(FrigateBaseModel):
# users should not set model themselves # users should not set model themselves
if detector_config.model: if detector_config.model:
logger.warning(
"The model key should be specified at the root level of the config, not under detectors. The nested model key will be ignored."
)
detector_config.model = None detector_config.model = None
model_config = self.model.model_dump(exclude_unset=True, warnings="none") model_config = self.model.model_dump(exclude_unset=True, warnings="none")
@@ -613,14 +525,6 @@ class FrigateConfig(FrigateBaseModel):
if camera_config.ffmpeg.hwaccel_args == "auto": if camera_config.ffmpeg.hwaccel_args == "auto":
camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args
# Resolve export hwaccel_args: camera export -> camera ffmpeg -> global ffmpeg
# This allows per-camera override for exports (e.g., when camera resolution
# exceeds hardware encoder limits)
if camera_config.record.export.hwaccel_args == "auto":
camera_config.record.export.hwaccel_args = (
camera_config.ffmpeg.hwaccel_args
)
for input in camera_config.ffmpeg.inputs: for input in camera_config.ffmpeg.inputs:
need_detect_dimensions = "detect" in input.roles and ( need_detect_dimensions = "detect" in input.roles and (
camera_config.detect.height is None camera_config.detect.height is None

View File

@@ -8,8 +8,4 @@ __all__ = ["DatabaseConfig"]
class DatabaseConfig(FrigateBaseModel): class DatabaseConfig(FrigateBaseModel):
path: str = Field( path: str = Field(default=DEFAULT_DB_PATH, title="Database path.") # noqa: F821
default=DEFAULT_DB_PATH,
title="Database path",
description="Filesystem path where the Frigate SQLite database file will be stored.",
) # noqa: F821

View File

@@ -9,15 +9,9 @@ __all__ = ["LoggerConfig"]
class LoggerConfig(FrigateBaseModel): class LoggerConfig(FrigateBaseModel):
default: LogLevel = Field( default: LogLevel = Field(default=LogLevel.info, title="Default logging level.")
default=LogLevel.info,
title="Logging level",
description="Default global log verbosity (debug, info, warning, error).",
)
logs: dict[str, LogLevel] = Field( logs: dict[str, LogLevel] = Field(
default_factory=dict, default_factory=dict, title="Log level for specified processes."
title="Per-process log level",
description="Per-component log level overrides to increase or decrease verbosity for specific modules.",
) )
@model_validator(mode="after") @model_validator(mode="after")

View File

@@ -12,73 +12,25 @@ __all__ = ["MqttConfig"]
class MqttConfig(FrigateBaseModel): class MqttConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=True, title="Enable MQTT Communication.")
default=True, host: str = Field(default="", title="MQTT Host")
title="Enable MQTT", port: int = Field(default=1883, title="MQTT Port")
description="Enable or disable MQTT integration for state, events, and snapshots.", topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix")
) client_id: str = Field(default="frigate", title="MQTT Client ID")
host: str = Field(
default="",
title="MQTT host",
description="Hostname or IP address of the MQTT broker.",
)
port: int = Field(
default=1883,
title="MQTT port",
description="Port of the MQTT broker (usually 1883 for plain MQTT).",
)
topic_prefix: str = Field(
default="frigate",
title="Topic prefix",
description="MQTT topic prefix for all Frigate topics; must be unique if running multiple instances.",
)
client_id: str = Field(
default="frigate",
title="Client ID",
description="Client identifier used when connecting to the MQTT broker; should be unique per instance.",
)
stats_interval: int = Field( stats_interval: int = Field(
default=60, default=60, ge=FREQUENCY_STATS_POINTS, title="MQTT Camera Stats Interval"
ge=FREQUENCY_STATS_POINTS,
title="Stats interval",
description="Interval in seconds for publishing system and camera stats to MQTT.",
)
user: Optional[EnvString] = Field(
default=None,
title="MQTT username",
description="Optional MQTT username; can be provided via environment variables or secrets.",
) )
user: Optional[EnvString] = Field(default=None, title="MQTT Username")
password: Optional[EnvString] = Field( password: Optional[EnvString] = Field(
default=None, default=None, title="MQTT Password", validate_default=True
title="MQTT password",
description="Optional MQTT password; can be provided via environment variables or secrets.",
validate_default=True,
)
tls_ca_certs: Optional[str] = Field(
default=None,
title="TLS CA certs",
description="Path to CA certificate for TLS connections to the broker (for self-signed certs).",
) )
tls_ca_certs: Optional[str] = Field(default=None, title="MQTT TLS CA Certificates")
tls_client_cert: Optional[str] = Field( tls_client_cert: Optional[str] = Field(
default=None, default=None, title="MQTT TLS Client Certificate"
title="Client cert",
description="Client certificate path for TLS mutual authentication; do not set user/password when using client certs.",
)
tls_client_key: Optional[str] = Field(
default=None,
title="Client key",
description="Private key path for the client certificate.",
)
tls_insecure: Optional[bool] = Field(
default=None,
title="TLS insecure",
description="Allow insecure TLS connections by skipping hostname verification (not recommended).",
)
qos: int = Field(
default=0,
title="MQTT QoS",
description="Quality of Service level for MQTT publishes/subscriptions (0, 1, or 2).",
) )
tls_client_key: Optional[str] = Field(default=None, title="MQTT TLS Client Key")
tls_insecure: Optional[bool] = Field(default=None, title="MQTT TLS Insecure")
qos: int = Field(default=0, title="MQTT QoS")
@model_validator(mode="after") @model_validator(mode="after")
def user_requires_pass(self, info: ValidationInfo) -> Self: def user_requires_pass(self, info: ValidationInfo) -> Self:

View File

@@ -1,41 +1,13 @@
from typing import Union
from pydantic import Field from pydantic import Field
from .base import FrigateBaseModel from .base import FrigateBaseModel
__all__ = ["IPv6Config", "ListenConfig", "NetworkingConfig"] __all__ = ["IPv6Config", "NetworkingConfig"]
class IPv6Config(FrigateBaseModel): class IPv6Config(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=False, title="Enable IPv6 for port 5000 and/or 8971")
default=False,
title="Enable IPv6",
description="Enable IPv6 support for Frigate services (API and UI) where applicable.",
)
class ListenConfig(FrigateBaseModel):
internal: Union[int, str] = Field(
default=5000,
title="Internal port",
description="Internal listening port for Frigate (default 5000).",
)
external: Union[int, str] = Field(
default=8971,
title="External port",
description="External listening port for Frigate (default 8971).",
)
class NetworkingConfig(FrigateBaseModel): class NetworkingConfig(FrigateBaseModel):
ipv6: IPv6Config = Field( ipv6: IPv6Config = Field(default_factory=IPv6Config, title="Network configuration")
default_factory=IPv6Config,
title="IPv6 configuration",
description="IPv6-specific settings for Frigate network services.",
)
listen: ListenConfig = Field(
default_factory=ListenConfig,
title="Listening ports configuration",
description="Configuration for internal and external listening ports. This is for advanced users. For the majority of use cases it's recommended to change the ports section of your Docker compose file.",
)

View File

@@ -10,47 +10,36 @@ __all__ = ["ProxyConfig", "HeaderMappingConfig"]
class HeaderMappingConfig(FrigateBaseModel): class HeaderMappingConfig(FrigateBaseModel):
user: str = Field( user: str = Field(
default=None, default=None, title="Header name from upstream proxy to identify user."
title="User header",
description="Header containing the authenticated username provided by the upstream proxy.",
) )
role: str = Field( role: str = Field(
default=None, default=None,
title="Role header", title="Header name from upstream proxy to identify user role.",
description="Header containing the authenticated user's role or groups from the upstream proxy.",
) )
role_map: Optional[dict[str, list[str]]] = Field( role_map: Optional[dict[str, list[str]]] = Field(
default_factory=dict, default_factory=dict,
title=("Role mapping"), title=("Mapping of Frigate roles to upstream group values. "),
description="Map upstream group values to Frigate roles (for example map admin groups to the admin role).",
) )
class ProxyConfig(FrigateBaseModel): class ProxyConfig(FrigateBaseModel):
header_map: HeaderMappingConfig = Field( header_map: HeaderMappingConfig = Field(
default_factory=HeaderMappingConfig, default_factory=HeaderMappingConfig,
title="Header mapping", title="Header mapping definitions for proxy user passing.",
description="Map incoming proxy headers to Frigate user and role fields for proxy-based auth.",
) )
logout_url: Optional[str] = Field( logout_url: Optional[str] = Field(
default=None, default=None, title="Redirect url for logging out with proxy."
title="Logout URL",
description="URL to redirect users to when logging out via the proxy.",
) )
auth_secret: Optional[EnvString] = Field( auth_secret: Optional[EnvString] = Field(
default=None, default=None,
title="Proxy secret", title="Secret value for proxy authentication.",
description="Optional secret checked against the X-Proxy-Secret header to verify trusted proxies.",
) )
default_role: Optional[str] = Field( default_role: Optional[str] = Field(
default="viewer", default="viewer", title="Default role for proxy users."
title="Default role",
description="Default role assigned to proxy-authenticated users when no role mapping applies (admin or viewer).",
) )
separator: Optional[str] = Field( separator: Optional[str] = Field(
default=",", default=",",
title="Separator character", title="The character used to separate values in a mapped header.",
description="Character used to split multiple values provided in proxy headers.",
) )
@field_validator("separator", mode="before") @field_validator("separator", mode="before")

View File

@@ -8,41 +8,22 @@ __all__ = ["TelemetryConfig", "StatsConfig"]
class StatsConfig(FrigateBaseModel): class StatsConfig(FrigateBaseModel):
amd_gpu_stats: bool = Field( amd_gpu_stats: bool = Field(default=True, title="Enable AMD GPU stats.")
default=True, intel_gpu_stats: bool = Field(default=True, title="Enable Intel GPU stats.")
title="AMD GPU stats",
description="Enable collection of AMD GPU statistics if an AMD GPU is present.",
)
intel_gpu_stats: bool = Field(
default=True,
title="Intel GPU stats",
description="Enable collection of Intel GPU statistics if an Intel GPU is present.",
)
network_bandwidth: bool = Field( network_bandwidth: bool = Field(
default=False, default=False, title="Enable network bandwidth for ffmpeg processes."
title="Network bandwidth",
description="Enable per-process network bandwidth monitoring for camera ffmpeg processes and detectors (requires capabilities).",
) )
intel_gpu_device: Optional[str] = Field( intel_gpu_device: Optional[str] = Field(
default=None, default=None, title="Define the device to use when gathering SR-IOV stats."
title="SR-IOV device",
description="Device identifier used when treating Intel GPUs as SR-IOV to fix GPU stats.",
) )
class TelemetryConfig(FrigateBaseModel): class TelemetryConfig(FrigateBaseModel):
network_interfaces: list[str] = Field( network_interfaces: list[str] = Field(
default=[], default=[],
title="Network interfaces", title="Enabled network interfaces for bandwidth calculation.",
description="List of network interface name prefixes to monitor for bandwidth statistics.",
) )
stats: StatsConfig = Field( stats: StatsConfig = Field(
default_factory=StatsConfig, default_factory=StatsConfig, title="System Stats Configuration"
title="System stats",
description="Options to enable/disable collection of various system and GPU statistics.",
)
version_check: bool = Field(
default=True,
title="Version check",
description="Enable an outbound check to detect if a newer Frigate version is available.",
) )
version_check: bool = Field(default=True, title="Enable latest version check.")

View File

@@ -6,8 +6,4 @@ __all__ = ["TlsConfig"]
class TlsConfig(FrigateBaseModel): class TlsConfig(FrigateBaseModel):
enabled: bool = Field( enabled: bool = Field(default=True, title="Enable TLS for port 8971")
default=True,
title="Enable TLS",
description="Enable TLS for Frigate's web UI and API on the configured TLS port.",
)

View File

@@ -27,28 +27,16 @@ class UnitSystemEnum(str, Enum):
class UIConfig(FrigateBaseModel): class UIConfig(FrigateBaseModel):
timezone: Optional[str] = Field( timezone: Optional[str] = Field(default=None, title="Override UI timezone.")
default=None,
title="Timezone",
description="Optional timezone to display across the UI (defaults to browser local time if unset).",
)
time_format: TimeFormatEnum = Field( time_format: TimeFormatEnum = Field(
default=TimeFormatEnum.browser, default=TimeFormatEnum.browser, title="Override UI time format."
title="Time format",
description="Time format to use in the UI (browser, 12hour, or 24hour).",
) )
date_style: DateTimeStyleEnum = Field( date_style: DateTimeStyleEnum = Field(
default=DateTimeStyleEnum.short, default=DateTimeStyleEnum.short, title="Override UI dateStyle."
title="Date style",
description="Date style to use in the UI (full, long, medium, short).",
) )
time_style: DateTimeStyleEnum = Field( time_style: DateTimeStyleEnum = Field(
default=DateTimeStyleEnum.medium, default=DateTimeStyleEnum.medium, title="Override UI timeStyle."
title="Time style",
description="Time style to use in the UI (full, long, medium, short).",
) )
unit_system: UnitSystemEnum = Field( unit_system: UnitSystemEnum = Field(
default=UnitSystemEnum.metric, default=UnitSystemEnum.metric, title="The unit system to use for measurements."
title="Unit system",
description="Unit system for display (metric or imperial) used in the UI and MQTT.",
) )

View File

@@ -14,6 +14,7 @@ RECORD_DIR = f"{BASE_DIR}/recordings"
TRIGGER_DIR = f"{CLIPS_DIR}/triggers" TRIGGER_DIR = f"{CLIPS_DIR}/triggers"
BIRDSEYE_PIPE = "/tmp/cache/birdseye" BIRDSEYE_PIPE = "/tmp/cache/birdseye"
CACHE_DIR = "/tmp/cache" CACHE_DIR = "/tmp/cache"
FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
PLUS_ENV_VAR = "PLUS_API_KEY" PLUS_ENV_VAR = "PLUS_API_KEY"
PLUS_API_HOST = "https://api.frigate.video" PLUS_API_HOST = "https://api.frigate.video"
@@ -121,7 +122,6 @@ UPDATE_REVIEW_DESCRIPTION = "update_review_description"
UPDATE_MODEL_STATE = "update_model_state" UPDATE_MODEL_STATE = "update_model_state"
UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress" UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress"
UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout" UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout"
UPDATE_JOB_STATE = "update_job_state"
NOTIFICATION_TEST = "notification_test" NOTIFICATION_TEST = "notification_test"
# IO Nice Values # IO Nice Values

View File

@@ -22,7 +22,7 @@ from .api import RealTimeProcessorApi
try: try:
from tflite_runtime.interpreter import Interpreter from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError: except ModuleNotFoundError:
from ai_edge_litert.interpreter import Interpreter from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@@ -32,7 +32,7 @@ from .api import RealTimeProcessorApi
try: try:
from tflite_runtime.interpreter import Interpreter from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError: except ModuleNotFoundError:
from ai_edge_litert.interpreter import Interpreter from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -73,6 +73,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
self.__build_detector() self.__build_detector()
def __build_detector(self) -> None: def __build_detector(self) -> None:
try:
from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError:
from tensorflow.lite.python.interpreter import Interpreter
model_path = os.path.join(self.model_dir, "model.tflite") model_path = os.path.join(self.model_dir, "model.tflite")
labelmap_path = os.path.join(self.model_dir, "labelmap.txt") labelmap_path = os.path.join(self.model_dir, "labelmap.txt")

View File

@@ -131,8 +131,10 @@ class ONNXModelRunner(BaseModelRunner):
return model_type in [ return model_type in [
EnrichmentModelTypeEnum.paddleocr.value, EnrichmentModelTypeEnum.paddleocr.value,
EnrichmentModelTypeEnum.yolov9_license_plate.value,
EnrichmentModelTypeEnum.jina_v1.value,
EnrichmentModelTypeEnum.jina_v2.value, EnrichmentModelTypeEnum.jina_v2.value,
EnrichmentModelTypeEnum.arcface.value, EnrichmentModelTypeEnum.facenet.value,
ModelTypeEnum.rfdetr.value, ModelTypeEnum.rfdetr.value,
ModelTypeEnum.dfine.value, ModelTypeEnum.dfine.value,
] ]

View File

@@ -45,55 +45,30 @@ class ModelTypeEnum(str, Enum):
class ModelConfig(BaseModel): class ModelConfig(BaseModel):
path: Optional[str] = Field( path: Optional[str] = Field(None, title="Custom Object detection model path.")
None,
title="Custom Object detection model path",
description="Path to a custom detection model file (or plus://<model_id> for Frigate+ models).",
)
labelmap_path: Optional[str] = Field( labelmap_path: Optional[str] = Field(
None, None, title="Label map for custom object detector."
title="Label map for custom object detector",
description="Path to a labelmap file that maps numeric classes to string labels for the detector.",
)
width: int = Field(
default=320,
title="Object detection model input width",
description="Width of the model input tensor in pixels.",
)
height: int = Field(
default=320,
title="Object detection model input height",
description="Height of the model input tensor in pixels.",
) )
width: int = Field(default=320, title="Object detection model input width.")
height: int = Field(default=320, title="Object detection model input height.")
labelmap: Dict[int, str] = Field( labelmap: Dict[int, str] = Field(
default_factory=dict, default_factory=dict, title="Labelmap customization."
title="Labelmap customization",
description="Overrides or remapping entries to merge into the standard labelmap.",
) )
attributes_map: Dict[str, list[str]] = Field( attributes_map: Dict[str, list[str]] = Field(
default=DEFAULT_ATTRIBUTE_LABEL_MAP, default=DEFAULT_ATTRIBUTE_LABEL_MAP,
title="Map of object labels to their attribute labels", title="Map of object labels to their attribute labels.",
description="Mapping from object labels to attribute labels used to attach metadata (for example 'car' -> ['license_plate']).",
) )
input_tensor: InputTensorEnum = Field( input_tensor: InputTensorEnum = Field(
default=InputTensorEnum.nhwc, default=InputTensorEnum.nhwc, title="Model Input Tensor Shape"
title="Model Input Tensor Shape",
description="Tensor format expected by the model: 'nhwc' or 'nchw'.",
) )
input_pixel_format: PixelFormatEnum = Field( input_pixel_format: PixelFormatEnum = Field(
default=PixelFormatEnum.rgb, default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
title="Model Input Pixel Color Format",
description="Pixel colorspace expected by the model: 'rgb', 'bgr', or 'yuv'.",
) )
input_dtype: InputDTypeEnum = Field( input_dtype: InputDTypeEnum = Field(
default=InputDTypeEnum.int, default=InputDTypeEnum.int, title="Model Input D Type"
title="Model Input D Type",
description="Data type of the model input tensor (for example 'float32').",
) )
model_type: ModelTypeEnum = Field( model_type: ModelTypeEnum = Field(
default=ModelTypeEnum.ssd, default=ModelTypeEnum.ssd, title="Object Detection Model Type"
title="Object Detection Model Type",
description="Detector model architecture type (ssd, yolox, yolonas) used by some detectors for optimization.",
) )
_merged_labelmap: Optional[Dict[int, str]] = PrivateAttr() _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
_colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr() _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()
@@ -235,20 +210,12 @@ class ModelConfig(BaseModel):
class BaseDetectorConfig(BaseModel): class BaseDetectorConfig(BaseModel):
# the type field must be defined in all subclasses # the type field must be defined in all subclasses
type: str = Field( type: str = Field(default="cpu", title="Detector Type")
default="cpu",
title="Detector Type",
description="Type of detector to use for object detection (for example 'cpu', 'edgetpu', 'openvino').",
)
model: Optional[ModelConfig] = Field( model: Optional[ModelConfig] = Field(
default=None, default=None, title="Detector specific model configuration."
title="Detector specific model configuration",
description="Detector-specific model configuration options (path, input size, etc.).",
) )
model_path: Optional[str] = Field( model_path: Optional[str] = Field(
default=None, default=None, title="Detector specific model path."
title="Detector specific model path",
description="File path to the detector model binary if required by the chosen detector.",
) )
model_config = ConfigDict( model_config = ConfigDict(
extra="allow", arbitrary_types_allowed=True, protected_namespaces=() extra="allow", arbitrary_types_allowed=True, protected_namespaces=()

View File

@@ -6,7 +6,7 @@ import numpy as np
try: try:
from tflite_runtime.interpreter import Interpreter, load_delegate from tflite_runtime.interpreter import Interpreter, load_delegate
except ModuleNotFoundError: except ModuleNotFoundError:
from ai_edge_litert.interpreter import Interpreter, load_delegate from tensorflow.lite.python.interpreter import Interpreter, load_delegate
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@@ -1,6 +1,6 @@
import logging import logging
from pydantic import ConfigDict, Field from pydantic import Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -12,7 +12,7 @@ from ..detector_utils import tflite_detect_raw, tflite_init
try: try:
from tflite_runtime.interpreter import Interpreter from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError: except ModuleNotFoundError:
from ai_edge_litert.interpreter import Interpreter from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -21,18 +21,8 @@ DETECTOR_KEY = "cpu"
class CpuDetectorConfig(BaseDetectorConfig): class CpuDetectorConfig(BaseDetectorConfig):
"""CPU TFLite detector that runs TensorFlow Lite models on the host CPU without hardware acceleration. Not recommended."""
model_config = ConfigDict(
title="CPU",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
num_threads: int = Field( num_threads: int = Field(default=3, title="Number of detection threads")
default=3,
title="Number of detection threads",
description="The number of threads used for CPU-based inference.",
)
class CpuTfl(DetectionApi): class CpuTfl(DetectionApi):

View File

@@ -4,7 +4,7 @@ import logging
import numpy as np import numpy as np
import requests import requests
from PIL import Image from PIL import Image
from pydantic import ConfigDict, Field from pydantic import Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -16,28 +16,12 @@ DETECTOR_KEY = "deepstack"
class DeepstackDetectorConfig(BaseDetectorConfig): class DeepstackDetectorConfig(BaseDetectorConfig):
"""DeepStack/CodeProject.AI detector that sends images to a remote DeepStack HTTP API for inference. Not recommended."""
model_config = ConfigDict(
title="DeepStack",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
api_url: str = Field( api_url: str = Field(
default="http://localhost:80/v1/vision/detection", default="http://localhost:80/v1/vision/detection", title="DeepStack API URL"
title="DeepStack API URL",
description="The URL of the DeepStack API.",
)
api_timeout: float = Field(
default=0.1,
title="DeepStack API timeout (in seconds)",
description="Maximum time allowed for a DeepStack API request.",
)
api_key: str = Field(
default="",
title="DeepStack API key (if required)",
description="Optional API key for authenticated DeepStack services.",
) )
api_timeout: float = Field(default=0.1, title="DeepStack API timeout (in seconds)")
api_key: str = Field(default="", title="DeepStack API key (if required)")
class DeepStack(DetectionApi): class DeepStack(DetectionApi):

View File

@@ -2,7 +2,7 @@ import logging
import queue import queue
import numpy as np import numpy as np
from pydantic import ConfigDict, Field from pydantic import Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -14,28 +14,10 @@ DETECTOR_KEY = "degirum"
### DETECTOR CONFIG ### ### DETECTOR CONFIG ###
class DGDetectorConfig(BaseDetectorConfig): class DGDetectorConfig(BaseDetectorConfig):
"""DeGirum detector for running models via DeGirum cloud or local inference services."""
model_config = ConfigDict(
title="DeGirum",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
location: str = Field( location: str = Field(default=None, title="Inference Location")
default=None, zoo: str = Field(default=None, title="Model Zoo")
title="Inference Location", token: str = Field(default=None, title="DeGirum Cloud Token")
description="Location of the DeGirim inference engine (e.g. '@cloud', '127.0.0.1').",
)
zoo: str = Field(
default=None,
title="Model Zoo",
description="Path or URL to the DeGirum model zoo.",
)
token: str = Field(
default=None,
title="DeGirum Cloud Token",
description="Token for DeGirum Cloud access.",
)
### ACTUAL DETECTOR ### ### ACTUAL DETECTOR ###

View File

@@ -4,7 +4,7 @@ import os
import cv2 import cv2
import numpy as np import numpy as np
from pydantic import ConfigDict, Field from pydantic import Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -13,7 +13,7 @@ from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
try: try:
from tflite_runtime.interpreter import Interpreter, load_delegate from tflite_runtime.interpreter import Interpreter, load_delegate
except ModuleNotFoundError: except ModuleNotFoundError:
from ai_edge_litert.interpreter import Interpreter, load_delegate from tensorflow.lite.python.interpreter import Interpreter, load_delegate
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -21,18 +21,8 @@ DETECTOR_KEY = "edgetpu"
class EdgeTpuDetectorConfig(BaseDetectorConfig): class EdgeTpuDetectorConfig(BaseDetectorConfig):
"""EdgeTPU detector that runs TensorFlow Lite models compiled for Coral EdgeTPU using the EdgeTPU delegate."""
model_config = ConfigDict(
title="EdgeTPU",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
device: str = Field( device: str = Field(default=None, title="Device Type")
default=None,
title="Device Type",
description="The device to use for EdgeTPU inference (e.g. 'usb', 'pci').",
)
class EdgeTpuTfl(DetectionApi): class EdgeTpuTfl(DetectionApi):

View File

@@ -8,7 +8,7 @@ from typing import Dict, List, Optional, Tuple
import cv2 import cv2
import numpy as np import numpy as np
from pydantic import ConfigDict, Field from pydantic import Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.const import MODEL_CACHE_DIR from frigate.const import MODEL_CACHE_DIR
@@ -410,15 +410,5 @@ class HailoDetector(DetectionApi):
# ----------------- HailoDetectorConfig Class ----------------- # # ----------------- HailoDetectorConfig Class ----------------- #
class HailoDetectorConfig(BaseDetectorConfig): class HailoDetectorConfig(BaseDetectorConfig):
"""Hailo-8/Hailo-8L detector using HEF models and the HailoRT SDK for inference on Hailo hardware."""
model_config = ConfigDict(
title="Hailo-8/Hailo-8L",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
device: str = Field( device: str = Field(default="PCIe", title="Device Type")
default="PCIe",
title="Device Type",
description="The device to use for Hailo inference (e.g. 'PCIe', 'M.2').",
)

View File

@@ -8,7 +8,7 @@ from queue import Queue
import cv2 import cv2
import numpy as np import numpy as np
from pydantic import BaseModel, ConfigDict, Field from pydantic import BaseModel, Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -30,18 +30,8 @@ class ModelConfig(BaseModel):
class MemryXDetectorConfig(BaseDetectorConfig): class MemryXDetectorConfig(BaseDetectorConfig):
"""MemryX MX3 detector that runs compiled DFP models on MemryX accelerators."""
model_config = ConfigDict(
title="MemryX",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
device: str = Field( device: str = Field(default="PCIe", title="Device Path")
default="PCIe",
title="Device Path",
description="The device to use for MemryX inference (e.g. 'PCIe').",
)
class MemryXDetector(DetectionApi): class MemryXDetector(DetectionApi):

View File

@@ -1,7 +1,7 @@
import logging import logging
import numpy as np import numpy as np
from pydantic import ConfigDict, Field from pydantic import Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -23,18 +23,8 @@ DETECTOR_KEY = "onnx"
class ONNXDetectorConfig(BaseDetectorConfig): class ONNXDetectorConfig(BaseDetectorConfig):
"""ONNX detector for running ONNX models; will use available acceleration backends (CUDA/ROCm/OpenVINO) when available."""
model_config = ConfigDict(
title="ONNX",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
device: str = Field( device: str = Field(default="AUTO", title="Device Type")
default="AUTO",
title="Device Type",
description="The device to use for ONNX inference (e.g. 'AUTO', 'CPU', 'GPU').",
)
class ONNXDetector(DetectionApi): class ONNXDetector(DetectionApi):

View File

@@ -2,7 +2,7 @@ import logging
import numpy as np import numpy as np
import openvino as ov import openvino as ov
from pydantic import ConfigDict, Field from pydantic import Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -20,18 +20,8 @@ DETECTOR_KEY = "openvino"
class OvDetectorConfig(BaseDetectorConfig): class OvDetectorConfig(BaseDetectorConfig):
"""OpenVINO detector for AMD and Intel CPUs, Intel GPUs and Intel VPU hardware."""
model_config = ConfigDict(
title="OpenVINO",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
device: str = Field( device: str = Field(default=None, title="Device Type")
default=None,
title="Device Type",
description="The device to use for OpenVINO inference (e.g. 'CPU', 'GPU', 'NPU').",
)
class OvDetector(DetectionApi): class OvDetector(DetectionApi):

View File

@@ -6,7 +6,7 @@ from typing import Literal
import cv2 import cv2
import numpy as np import numpy as np
from pydantic import ConfigDict, Field from pydantic import Field
from frigate.const import MODEL_CACHE_DIR, SUPPORTED_RK_SOCS from frigate.const import MODEL_CACHE_DIR, SUPPORTED_RK_SOCS
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -29,20 +29,8 @@ model_cache_dir = os.path.join(MODEL_CACHE_DIR, "rknn_cache/")
class RknnDetectorConfig(BaseDetectorConfig): class RknnDetectorConfig(BaseDetectorConfig):
"""RKNN detector for Rockchip NPUs; runs compiled RKNN models on Rockchip hardware."""
model_config = ConfigDict(
title="RKNN",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
num_cores: int = Field( num_cores: int = Field(default=0, ge=0, le=3, title="Number of NPU cores to use.")
default=0,
ge=0,
le=3,
title="Number of NPU cores to use.",
description="The number of NPU cores to use (0 for auto).",
)
class Rknn(DetectionApi): class Rknn(DetectionApi):

View File

@@ -2,7 +2,6 @@ import logging
import os import os
import numpy as np import numpy as np
from pydantic import ConfigDict
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -28,12 +27,6 @@ DETECTOR_KEY = "synaptics"
class SynapDetectorConfig(BaseDetectorConfig): class SynapDetectorConfig(BaseDetectorConfig):
"""Synaptics NPU detector for models in .synap format using the Synap SDK on Synaptics hardware."""
model_config = ConfigDict(
title="Synaptics",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]

View File

@@ -1,6 +1,5 @@
import logging import logging
from pydantic import ConfigDict
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -19,12 +18,6 @@ DETECTOR_KEY = "teflon_tfl"
class TeflonDetectorConfig(BaseDetectorConfig): class TeflonDetectorConfig(BaseDetectorConfig):
"""Teflon delegate detector for TFLite using Mesa Teflon delegate library to accelerate inference on supported GPUs."""
model_config = ConfigDict(
title="Teflon",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]

View File

@@ -14,7 +14,7 @@ try:
except ModuleNotFoundError: except ModuleNotFoundError:
TRT_SUPPORT = False TRT_SUPPORT = False
from pydantic import ConfigDict, Field from pydantic import Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -46,16 +46,8 @@ if TRT_SUPPORT:
class TensorRTDetectorConfig(BaseDetectorConfig): class TensorRTDetectorConfig(BaseDetectorConfig):
"""TensorRT detector for Nvidia Jetson devices using serialized TensorRT engines for accelerated inference."""
model_config = ConfigDict(
title="TensorRT",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
device: int = Field( device: int = Field(default=0, title="GPU Device Index")
default=0, title="GPU Device Index", description="The GPU device index to use."
)
class HostDeviceMem(object): class HostDeviceMem(object):

View File

@@ -5,7 +5,7 @@ from typing import Any, List
import numpy as np import numpy as np
import zmq import zmq
from pydantic import ConfigDict, Field from pydantic import Field
from typing_extensions import Literal from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi from frigate.detectors.detection_api import DetectionApi
@@ -17,28 +17,14 @@ DETECTOR_KEY = "zmq"
class ZmqDetectorConfig(BaseDetectorConfig): class ZmqDetectorConfig(BaseDetectorConfig):
"""ZMQ IPC detector that offloads inference to an external process via a ZeroMQ IPC endpoint."""
model_config = ConfigDict(
title="ZMQ IPC",
)
type: Literal[DETECTOR_KEY] type: Literal[DETECTOR_KEY]
endpoint: str = Field( endpoint: str = Field(
default="ipc:///tmp/cache/zmq_detector", default="ipc:///tmp/cache/zmq_detector", title="ZMQ IPC endpoint"
title="ZMQ IPC endpoint",
description="The ZMQ endpoint to connect to.",
) )
request_timeout_ms: int = Field( request_timeout_ms: int = Field(
default=200, default=200, title="ZMQ request timeout in milliseconds"
title="ZMQ request timeout in milliseconds",
description="Timeout for ZMQ requests in milliseconds.",
)
linger_ms: int = Field(
default=0,
title="ZMQ socket linger in milliseconds",
description="Socket linger period in milliseconds.",
) )
linger_ms: int = Field(default=0, title="ZMQ socket linger in milliseconds")
class ZmqIpcDetector(DetectionApi): class ZmqIpcDetector(DetectionApi):

View File

@@ -59,7 +59,7 @@ from frigate.data_processing.real_time.license_plate import (
from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum
from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
from frigate.genai import GenAIClientManager from frigate.genai import get_genai_client
from frigate.models import Event, Recordings, ReviewSegment, Trigger from frigate.models import Event, Recordings, ReviewSegment, Trigger
from frigate.util.builtin import serialize from frigate.util.builtin import serialize
from frigate.util.file import get_event_thumbnail_bytes from frigate.util.file import get_event_thumbnail_bytes
@@ -144,7 +144,7 @@ class EmbeddingMaintainer(threading.Thread):
self.frame_manager = SharedMemoryFrameManager() self.frame_manager = SharedMemoryFrameManager()
self.detected_license_plates: dict[str, dict[str, Any]] = {} self.detected_license_plates: dict[str, dict[str, Any]] = {}
self.genai_manager = GenAIClientManager(config) self.genai_client = get_genai_client(config)
# model runners to share between realtime and post processors # model runners to share between realtime and post processors
if self.config.lpr.enabled: if self.config.lpr.enabled:
@@ -203,15 +203,12 @@ class EmbeddingMaintainer(threading.Thread):
# post processors # post processors
self.post_processors: list[PostProcessorApi] = [] self.post_processors: list[PostProcessorApi] = []
if self.genai_manager.vision_client is not None and any( if self.genai_client is not None and any(
c.review.genai.enabled_in_config for c in self.config.cameras.values() c.review.genai.enabled_in_config for c in self.config.cameras.values()
): ):
self.post_processors.append( self.post_processors.append(
ReviewDescriptionProcessor( ReviewDescriptionProcessor(
self.config, self.config, self.requestor, self.metrics, self.genai_client
self.requestor,
self.metrics,
self.genai_manager.vision_client,
) )
) )
@@ -249,7 +246,7 @@ class EmbeddingMaintainer(threading.Thread):
) )
self.post_processors.append(semantic_trigger_processor) self.post_processors.append(semantic_trigger_processor)
if self.genai_manager.vision_client is not None and any( if self.genai_client is not None and any(
c.objects.genai.enabled_in_config for c in self.config.cameras.values() c.objects.genai.enabled_in_config for c in self.config.cameras.values()
): ):
self.post_processors.append( self.post_processors.append(
@@ -258,7 +255,7 @@ class EmbeddingMaintainer(threading.Thread):
self.embeddings, self.embeddings,
self.requestor, self.requestor,
self.metrics, self.metrics,
self.genai_manager.vision_client, self.genai_client,
semantic_trigger_processor, semantic_trigger_processor,
) )
) )

View File

@@ -17,7 +17,7 @@ from .base_embedding import BaseEmbedding
try: try:
from tflite_runtime.interpreter import Interpreter from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError: except ModuleNotFoundError:
from ai_edge_litert.interpreter import Interpreter from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@@ -43,7 +43,7 @@ from frigate.video import start_or_restart_ffmpeg, stop_ffmpeg
try: try:
from tflite_runtime.interpreter import Interpreter from tflite_runtime.interpreter import Interpreter
except ModuleNotFoundError: except ModuleNotFoundError:
from ai_edge_litert.interpreter import Interpreter from tensorflow.lite.python.interpreter import Interpreter
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@@ -9,24 +9,13 @@ from typing import Any, Optional
from playhouse.shortcuts import model_to_dict from playhouse.shortcuts import model_to_dict
from frigate.config import CameraConfig, GenAIConfig, GenAIProviderEnum from frigate.config import CameraConfig, FrigateConfig, GenAIConfig, GenAIProviderEnum
from frigate.const import CLIPS_DIR from frigate.const import CLIPS_DIR
from frigate.data_processing.post.types import ReviewMetadata from frigate.data_processing.post.types import ReviewMetadata
from frigate.genai.manager import GenAIClientManager
from frigate.models import Event from frigate.models import Event
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
__all__ = [
"GenAIClient",
"GenAIClientManager",
"GenAIConfig",
"GenAIProviderEnum",
"PROVIDERS",
"load_providers",
"register_genai_provider",
]
PROVIDERS = {} PROVIDERS = {}
@@ -80,7 +69,7 @@ class GenAIClient:
return "\n- (No objects detected)" return "\n- (No objects detected)"
context_prompt = f""" context_prompt = f"""
Your task is to analyze a sequence of images taken in chronological order from a security camera. Your task is to analyze the sequence of images ({len(thumbnails)} total) taken in chronological order from the perspective of the {review_data["camera"]} security camera.
## Normal Activity Patterns for This Property ## Normal Activity Patterns for This Property
@@ -119,8 +108,7 @@ Your response MUST be a flat JSON object with:
## Sequence Details ## Sequence Details
- Camera: {review_data["camera"]} - Frame 1 = earliest, Frame {len(thumbnails)} = latest
- Total frames: {len(thumbnails)} (Frame 1 = earliest, Frame {len(thumbnails)} = latest)
- Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds - Activity started at {review_data["start"]} and lasted {review_data["duration"]} seconds
- Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"} - Zones involved: {", ".join(review_data["zones"]) if review_data["zones"] else "None"}
@@ -304,63 +292,18 @@ Guidelines:
"""Get the context window size for this provider in tokens.""" """Get the context window size for this provider in tokens."""
return 4096 return 4096
def chat_with_tools(
self,
messages: list[dict[str, Any]],
tools: Optional[list[dict[str, Any]]] = None,
tool_choice: Optional[str] = "auto",
) -> dict[str, Any]:
"""
Send chat messages to LLM with optional tool definitions.
This method handles conversation-style interactions with the LLM, def get_genai_client(config: FrigateConfig) -> Optional[GenAIClient]:
including function calling/tool usage capabilities. """Get the GenAI client."""
if not config.genai.provider:
return None
Args: load_providers()
messages: List of message dictionaries. Each message should have: provider = PROVIDERS.get(config.genai.provider)
- 'role': str - One of 'user', 'assistant', 'system', or 'tool' if provider:
- 'content': str - The message content return provider(config.genai)
- 'tool_call_id': Optional[str] - For tool responses, the ID of the tool call
- 'name': Optional[str] - For tool messages, the tool name
tools: Optional list of tool definitions in OpenAI-compatible format.
Each tool should have 'type': 'function' and 'function' with:
- 'name': str - Tool name
- 'description': str - Tool description
- 'parameters': dict - JSON schema for parameters
tool_choice: How the model should handle tools:
- 'auto': Model decides whether to call tools
- 'none': Model must not call tools
- 'required': Model must call at least one tool
- Or a dict specifying a specific tool to call
**kwargs: Additional provider-specific parameters.
Returns: return None
Dictionary with:
- 'content': Optional[str] - The text response from the LLM, None if tool calls
- 'tool_calls': Optional[List[Dict]] - List of tool calls if LLM wants to call tools.
Each tool call dict has:
- 'id': str - Unique identifier for this tool call
- 'name': str - Tool name to call
- 'arguments': dict - Arguments for the tool call (parsed JSON)
- 'finish_reason': str - Reason generation stopped:
- 'stop': Normal completion
- 'tool_calls': LLM wants to call tools
- 'length': Hit token limit
- 'error': An error occurred
Raises:
NotImplementedError: If the provider doesn't implement this method.
"""
# Base implementation - each provider should override this
logger.warning(
f"{self.__class__.__name__} does not support chat_with_tools. "
"This method should be overridden by the provider implementation."
)
return {
"content": None,
"tool_calls": None,
"finish_reason": "error",
}
def load_providers(): def load_providers():

Some files were not shown because too many files have changed in this diff Show More