Mirror of https://github.com/blakeblackshear/frigate.git (synced 2026-01-10 22:38:29 -05:00)

Compare commits: media-sync...dev (78 commits)
| SHA1 |
| --- |
| d952a97bda |
| 93016c662f |
| c08fa15724 |
| f3543cfee2 |
| 74d14cb8ca |
| 99d48ecbc3 |
| c8f55ac41f |
| 047ae19191 |
| d1f28eb8e1 |
| b5d2f86a9b |
| 15c223d0e5 |
| e0d6365f62 |
| fb9604fbcc |
| e2a1208c90 |
| 3655b9269d |
| 3c5eb1aee5 |
| e20b324e0a |
| ca0e53f671 |
| a2e98dc89b |
| b54cb219f8 |
| edeb47a08e |
| f34e2200b5 |
| 57d344a441 |
| 225c5f0d71 |
| 5d960aa282 |
| bfc2859c8e |
| d2aa2a0558 |
| bd2382dc45 |
| aa9dbbb48d |
| a1094615e1 |
| 59780203a3 |
| 525cc5b663 |
| 29bcb7f47a |
| 2522a10afb |
| f94aa0ff2c |
| 32429688ff |
| 4ae3c97865 |
| 1be7c561d7 |
| 50a5e40410 |
| d7e10dffc6 |
| 8fb413ce7c |
| bb3991f62b |
| a4ece9dae3 |
| f862ef5d0c |
| f74df040bb |
| 54f4af3c6a |
| 8a4d5f34da |
| 60052e5f9f |
| e636449d56 |
| 6a0e31dcf9 |
| 074b060e9c |
| ae009b9861 |
| 13957fec00 |
| 3edfd905de |
| 78eace258e |
| c292cd207d |
| e7d047715d |
| 818cccb2e3 |
| f543d0ab31 |
| 39af85625e |
| fa16539429 |
| 1d5c2466a8 |
| 0a293aebab |
| 1de7519d1a |
| c3f596327e |
| 90344540b3 |
| 7167cf57c5 |
| e47e82f4be |
| a43d294bd1 |
| 9f95a5f31f |
| 592c245dcd |
| 914ff4f1e5 |
| 9589c5fc24 |
| 3620ef27db |
| 5cf2ae0121 |
| 17d2bc240a |
| 6fd7f862f5 |
| 5d038b5c75 |
.github/workflows/pull_request.yml (vendored, 8 changes)

@@ -19,9 +19,9 @@ jobs:
       - uses: actions/checkout@v6
         with:
           persist-credentials: false
-      - uses: actions/setup-node@master
+      - uses: actions/setup-node@v6
         with:
-          node-version: 16.x
+          node-version: 20.x
       - run: npm install
         working-directory: ./web
       - name: Lint
@@ -35,7 +35,7 @@ jobs:
       - uses: actions/checkout@v6
         with:
           persist-credentials: false
-      - uses: actions/setup-node@master
+      - uses: actions/setup-node@v6
         with:
           node-version: 20.x
       - run: npm install
@@ -78,7 +78,7 @@ jobs:
         uses: actions/checkout@v6
         with:
           persist-credentials: false
-      - uses: actions/setup-node@master
+      - uses: actions/setup-node@v6
         with:
           node-version: 20.x
       - name: Install devcontainer cli
.github/workflows/release.yml (vendored, 4 changes)

@@ -39,14 +39,14 @@ jobs:
           STABLE_TAG=${BASE}:stable
           PULL_TAG=${BASE}:${BUILD_TAG}
           docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG}
-          for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm; do
+          for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm synaptics; do
            docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant}
           done

           # stable tag
           if [[ "${BUILD_TYPE}" == "stable" ]]; then
             docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG}
-            for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm; do
+            for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm synaptics; do
               docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant}
             done
           fi
LICENSE (2 changes)

@@ -1,6 +1,6 @@
 The MIT License

-Copyright (c) 2025 Frigate LLC (Frigate™)
+Copyright (c) 2026 Frigate, Inc. (Frigate™)

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
README.md

@@ -40,7 +40,7 @@ If you would like to make a donation to support development, please use [Github
 This project is licensed under the **MIT License**.

 - **Code:** The source code, configuration files, and documentation in this repository are available under the [MIT License](LICENSE). You are free to use, modify, and distribute the code as long as you include the original copyright notice.
-- **Trademarks:** The "Frigate" name, the "Frigate NVR" brand, and the Frigate logo are **trademarks of Frigate LLC** and are **not** covered by the MIT License.
+- **Trademarks:** The "Frigate" name, the "Frigate NVR" brand, and the Frigate logo are **trademarks of Frigate, Inc.** and are **not** covered by the MIT License.

 Please see our [Trademark Policy](TRADEMARK.md) for details on acceptable use of our brand assets.
@@ -67,7 +67,7 @@ Please see our [Trademark Policy](TRADEMARK.md) for details on acceptable use of
 ### Built-in mask and zone editor

 <div>
-<img width="800" alt="Multi-camera scrubbing" src="https://github.com/blakeblackshear/frigate/assets/569905/d7885fc3-bfe6-452f-b7d0-d957cb3e31f5">
+<img width="800" alt="Built-in mask and zone editor" src="https://github.com/blakeblackshear/frigate/assets/569905/d7885fc3-bfe6-452f-b7d0-d957cb3e31f5">
 </div>

 ## Translations
@@ -80,4 +80,4 @@ We use [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) to support la

 ---

-**Copyright © 2025 Frigate LLC.**
+**Copyright © 2026 Frigate, Inc.**
README_CN.md (13 changes)

@@ -4,14 +4,14 @@

 # Frigate NVR™ - 一个具有实时目标检测的本地 NVR

-[English](https://github.com/blakeblackshear/frigate) | \[简体中文\]
-
-[](https://opensource.org/licenses/MIT)
-
 <a href="https://hosted.weblate.org/engage/frigate-nvr/-/zh_Hans/">
   <img src="https://hosted.weblate.org/widget/frigate-nvr/-/zh_Hans/svg-badge.svg" alt="翻译状态" />
 </a>

+[English](https://github.com/blakeblackshear/frigate) | \[简体中文\]
+
+[](https://opensource.org/licenses/MIT)
+
 一个完整的本地网络视频录像机(NVR),专为[Home Assistant](https://www.home-assistant.io)设计,具备 AI 目标/物体检测功能。使用 OpenCV 和 TensorFlow 在本地为 IP 摄像头执行实时物体检测。

 强烈推荐使用 GPU 或者 AI 加速器(例如[Google Coral 加速器](https://coral.ai/products/) 或者 [Hailo](https://hailo.ai/)等)。它们的运行效率远远高于现在的顶级 CPU,并且功耗也极低。
@@ -38,9 +38,10 @@
 ## 协议

 本项目采用 **MIT 许可证**授权。

 **代码部分**:本代码库中的源代码、配置文件和文档均遵循 [MIT 许可证](LICENSE)。您可以自由使用、修改和分发这些代码,但必须保留原始版权声明。

-**商标部分**:“Frigate”名称、“Frigate NVR”品牌以及 Frigate 的 Logo 为 **Frigate LLC 的商标**,**不在** MIT 许可证覆盖范围内。
+**商标部分**:“Frigate”名称、“Frigate NVR”品牌以及 Frigate 的 Logo 为 **Frigate, Inc. 的商标**,**不在** MIT 许可证覆盖范围内。
+
 有关品牌资产的规范使用详情,请参阅我们的[《商标政策》](TRADEMARK.md)。

 ## 截图
@@ -86,4 +87,4 @@ Bilibili:https://space.bilibili.com/3546894915602564

 ---

-**Copyright © 2025 Frigate LLC.**
+**Copyright © 2026 Frigate, Inc.**
TRADEMARK.md

@@ -6,7 +6,7 @@ This document outlines the policy regarding the use of the trademarks associated

 ## 1. Our Trademarks

-The following terms and visual assets are trademarks (the "Marks") of **Frigate LLC**:
+The following terms and visual assets are trademarks (the "Marks") of **Frigate, Inc.**:

 - **Frigate™**
 - **Frigate NVR™**
@@ -14,7 +14,7 @@ The following terms and visual assets are trademarks (the "Marks") of **Frigate
 - **The Frigate Logo**

 **Note on Common Law Rights:**
-Frigate LLC asserts all common law rights in these Marks. The absence of a federal registration symbol (®) does not constitute a waiver of our intellectual property rights.
+Frigate, Inc. asserts all common law rights in these Marks. The absence of a federal registration symbol (®) does not constitute a waiver of our intellectual property rights.

 ## 2. Interaction with the MIT License

@@ -25,7 +25,7 @@ The software in this repository is licensed under the [MIT License](LICENSE).
 - The **Code** is free to use, modify, and distribute under the MIT terms.
 - The **Brand (Trademarks)** is **NOT** licensed under MIT.

-You may not use the Marks in any way that is not explicitly permitted by this policy or by written agreement with Frigate LLC.
+You may not use the Marks in any way that is not explicitly permitted by this policy or by written agreement with Frigate, Inc.

 ## 3. Acceptable Use

@@ -40,7 +40,7 @@ You may use the Marks without prior written permission in the following specific
 You may **NOT** use the Marks in the following ways:

 - **Commercial Products:** You may not use "Frigate" in the name of a commercial product, service, or app (e.g., selling an app named _"Frigate Viewer"_ is prohibited).
-- **Implying Affiliation:** You may not use the Marks in a way that suggests your project is official, sponsored by, or endorsed by Frigate LLC.
+- **Implying Affiliation:** You may not use the Marks in a way that suggests your project is official, sponsored by, or endorsed by Frigate, Inc.
 - **Confusing Forks:** If you fork this repository to create a derivative work, you **must** remove the Frigate logo and rename your project to avoid user confusion. You cannot distribute a modified version of the software under the name "Frigate".
 - **Domain Names:** You may not register domain names containing "Frigate" that are likely to confuse users (e.g., `frigate-official-support.com`).
Dockerfile

@@ -237,8 +237,18 @@ ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
 # Set HailoRT to disable logging
 ENV HAILORT_LOGGER_PATH=NONE

-# TensorFlow error only
+# TensorFlow C++ logging suppression (must be set before import)
+# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
 ENV TF_CPP_MIN_LOG_LEVEL=3
+# Suppress verbose logging from TensorFlow C++ code
+ENV TF_CPP_MIN_VLOG_LEVEL=3
+# Disable oneDNN optimization messages ("optimized with oneDNN...")
+ENV TF_ENABLE_ONEDNN_OPTS=0
+# Suppress AutoGraph verbosity during conversion
+ENV AUTOGRAPH_VERBOSITY=0
+# Google Logging (GLOG) suppression for TensorFlow components
+ENV GLOG_minloglevel=3
+ENV GLOG_logtostderr=0

 ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
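The scale documented in the comments above (0 = all messages, up through 3 = errors only) also makes it straightforward to re-enable TensorFlow output temporarily when troubleshooting. A minimal docker-compose sketch, assuming the standard `frigate` service name:

```yaml
services:
  frigate:
    environment:
      TF_CPP_MIN_LOG_LEVEL: "0"  # 0 = show all TensorFlow C++ logs again
      GLOG_minloglevel: "0"      # re-enable GLOG output for TensorFlow components
```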
@@ -48,7 +48,7 @@ onnxruntime == 1.22.*
 transformers == 4.45.*
 # Generative AI
 google-generativeai == 0.8.*
-ollama == 0.5.*
+ollama == 0.6.*
 openai == 1.65.*
 # push notifications
 py-vapid == 1.9.*
@@ -55,7 +55,7 @@ function setup_homekit_config() {

   if [[ ! -f "${config_path}" ]]; then
     echo "[INFO] Creating empty HomeKit config file..."
-    echo '{}' > "${config_path}"
+    echo 'homekit: {}' > "${config_path}"
   fi

   # Convert YAML to JSON for jq processing
@@ -70,12 +70,14 @@ function setup_homekit_config() {
   jq '
     # Keep only the homekit section if it exists, otherwise empty object
    if has("homekit") then {homekit: .homekit} else {homekit: {}} end
-  ' "${temp_json}" > "${cleaned_json}" 2>/dev/null || echo '{"homekit": {}}' > "${cleaned_json}"
+  ' "${temp_json}" > "${cleaned_json}" 2>/dev/null || {
+    echo '{"homekit": {}}' > "${cleaned_json}"
+  }

   # Convert back to YAML and write to the config file
   yq eval -P "${cleaned_json}" > "${config_path}" 2>/dev/null || {
     echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
-    echo '{"homekit": {}}' > "${config_path}"
+    echo 'homekit: {}' > "${config_path}"
   }

   # Clean up temp files
@@ -22,6 +22,11 @@ sys.path.remove("/opt/frigate")

 yaml = YAML()

+# Check if arbitrary exec sources are allowed (defaults to False for security)
+ALLOW_ARBITRARY_EXEC = os.environ.get(
+    "GO2RTC_ALLOW_ARBITRARY_EXEC", "false"
+).lower() in ("true", "1", "yes")
+
 FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
 # read docker secret files as env vars too
 if os.path.isdir("/run/secrets"):
@@ -109,14 +114,26 @@ if LIBAVFORMAT_VERSION_MAJOR < 59:
 elif go2rtc_config["ffmpeg"].get("rtsp") is None:
     go2rtc_config["ffmpeg"]["rtsp"] = rtsp_args

-for name in go2rtc_config.get("streams", {}):
+
+def is_restricted_source(stream_source: str) -> bool:
+    """Check if a stream source is restricted (echo, expr, or exec)."""
+    return stream_source.strip().startswith(("echo:", "expr:", "exec:"))
+
+
+for name in list(go2rtc_config.get("streams", {})):
     stream = go2rtc_config["streams"][name]

     if isinstance(stream, str):
         try:
-            go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format(
-                **FRIGATE_ENV_VARS
-            )
+            formatted_stream = stream.format(**FRIGATE_ENV_VARS)
+            if not ALLOW_ARBITRARY_EXEC and is_restricted_source(formatted_stream):
+                print(
+                    f"[ERROR] Stream '{name}' uses a restricted source (echo/expr/exec) which is disabled by default for security. "
+                    f"Set GO2RTC_ALLOW_ARBITRARY_EXEC=true to enable arbitrary exec sources."
+                )
+                del go2rtc_config["streams"][name]
+                continue
+            go2rtc_config["streams"][name] = formatted_stream
         except KeyError as e:
             print(
                 "[ERROR] Invalid substitution found, see https://docs.frigate.video/configuration/restream#advanced-restream-configurations for more info."
@@ -124,15 +141,33 @@ for name in go2rtc_config.get("streams", {}):
             )
             sys.exit(e)

     elif isinstance(stream, list):
-        for i, stream in enumerate(stream):
+        filtered_streams = []
+        for i, stream_item in enumerate(stream):
             try:
-                go2rtc_config["streams"][name][i] = stream.format(**FRIGATE_ENV_VARS)
+                formatted_stream = stream_item.format(**FRIGATE_ENV_VARS)
+                if not ALLOW_ARBITRARY_EXEC and is_restricted_source(formatted_stream):
+                    print(
+                        f"[ERROR] Stream '{name}' item {i + 1} uses a restricted source (echo/expr/exec) which is disabled by default for security. "
+                        f"Set GO2RTC_ALLOW_ARBITRARY_EXEC=true to enable arbitrary exec sources."
+                    )
+                    continue
+
+                filtered_streams.append(formatted_stream)
             except KeyError as e:
                 print(
                     "[ERROR] Invalid substitution found, see https://docs.frigate.video/configuration/restream#advanced-restream-configurations for more info."
                 )
                 sys.exit(e)

+        if filtered_streams:
+            go2rtc_config["streams"][name] = filtered_streams
+        else:
+            print(
+                f"[ERROR] Stream '{name}' was removed because all sources were restricted (echo/expr/exec). "
+                f"Set GO2RTC_ALLOW_ARBITRARY_EXEC=true to enable arbitrary exec sources."
+            )
+            del go2rtc_config["streams"][name]
+
 # add birdseye restream stream if enabled
 if config.get("birdseye", {}).get("restream", False):
     birdseye: dict[str, Any] = config.get("birdseye")
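The new `GO2RTC_ALLOW_ARBITRARY_EXEC` flag is read from the container environment, so users who intentionally rely on `echo:`, `expr:`, or `exec:` stream sources can opt back in. A minimal docker-compose sketch, assuming the standard `frigate` service name:

```yaml
services:
  frigate:
    environment:
      # opt in to echo:/expr:/exec: stream sources (disabled by default for security)
      GO2RTC_ALLOW_ARBITRARY_EXEC: "true"
```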
@@ -18,6 +18,10 @@ proxy_set_header X-Forwarded-User $http_x_forwarded_user;
 proxy_set_header X-Forwarded-Groups $http_x_forwarded_groups;
 proxy_set_header X-Forwarded-Email $http_x_forwarded_email;
 proxy_set_header X-Forwarded-Preferred-Username $http_x_forwarded_preferred_username;
+proxy_set_header X-Auth-Request-User $http_x_auth_request_user;
+proxy_set_header X-Auth-Request-Groups $http_x_auth_request_groups;
+proxy_set_header X-Auth-Request-Email $http_x_auth_request_email;
+proxy_set_header X-Auth-Request-Preferred-Username $http_x_auth_request_preferred_username;
 proxy_set_header X-authentik-username $http_x_authentik_username;
 proxy_set_header X-authentik-groups $http_x_authentik_groups;
 proxy_set_header X-authentik-email $http_x_authentik_email;
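Forwarding these headers only has an effect if Frigate is told which one identifies the authenticated user. A hedged sketch of the corresponding Frigate config; the `proxy.header_map` option names are an assumption based on Frigate's proxy authentication settings and should be checked against the reference config:

```yaml
proxy:
  header_map:
    user: x-auth-request-user  # assumed option names; use the header your proxy actually sends
```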
@@ -50,7 +50,7 @@ cameras:

 ### Configuring Minimum Volume

-The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that frigate will not run audio detection unless the audio volume is above the configured level in order to reduce resource usage. Audio levels can vary widely between camera models so it is important to run tests to see what volume levels are. The Debug view in the Frigate UI has an Audio tab for cameras that have the `audio` role assigned where a graph and the current levels are is displayed. The `min_volume` parameter should be set to the minimum the `RMS` level required to run audio detection.
+The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that Frigate will not run audio detection unless the audio volume is above the configured level, in order to reduce resource usage. Audio levels can vary widely between camera models, so it is important to run tests to see what typical volume levels are. The Debug view in the Frigate UI has an Audio tab for cameras that have the `audio` role assigned, where a graph and the current levels are displayed. The `min_volume` parameter should be set to the minimum `RMS` level required to run audio detection.

 :::tip
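As a concrete illustration of the paragraph above, a minimal sketch of setting `min_volume` for one camera; the camera name and value are hypothetical, and the right value depends on the RMS levels observed in the Audio debug tab:

```yaml
cameras:
  back_yard:           # hypothetical camera name
    audio:
      min_volume: 500  # minimum RMS level required before audio detection runs
```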
@@ -188,10 +188,10 @@ go2rtc:
   # example for connecting to a Reolink camera that supports two way talk
   your_reolink_camera_twt:
     - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
-    - "rtsp://username:password@reolink_ip/Preview_01_sub
+    - "rtsp://username:password@reolink_ip/Preview_01_sub"
   your_reolink_camera_twt_sub:
     - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
-    - "rtsp://username:password@reolink_ip/Preview_01_sub
+    - "rtsp://username:password@reolink_ip/Preview_01_sub"
   # example for connecting to a Reolink NVR
   your_reolink_camera_via_nvr:
     - "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_main.bcs&user=username&password=password" # channel numbers are 0-15
@@ -227,6 +227,12 @@ cameras:

 ### Unifi Protect Cameras

+:::note
+
+Unifi G5s cameras and newer need a Unifi Protect server to enable the rtsps stream; it's not possible to enable it in standalone mode.
+
+:::
+
 Unifi protect cameras require the rtspx stream to be used with go2rtc.
 To utilize a Unifi protect camera, modify the rtsps link to begin with rtspx.
 Additionally, remove the "?enableSrtp" from the end of the Unifi link.
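A hedged before/after sketch of that URL rewrite; the host, port, and stream ID are hypothetical placeholders:

```yaml
go2rtc:
  streams:
    front_door:
      # Unifi Protect advertises: rtsps://192.168.1.10:7441/abcd1234?enableSrtp
      # change the scheme to rtspx and drop the query string:
      - "rtspx://192.168.1.10:7441/abcd1234"
```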
@@ -252,6 +258,10 @@ ffmpeg:

 TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded footage. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`.

+### Wyze Wireless Cameras
+
+Some community members have found better performance on Wyze cameras by using an alternative firmware known as [Thingino](https://thingino.com/).
+
 ## USB Cameras (aka Webcams)

 To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's [FFmpeg Device](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg-device) support:
@@ -94,18 +94,19 @@ This list of working and non-working PTZ cameras is based on user feedback. If y
 The FeatureList on the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/) can provide a starting point to determine a camera's compatibility with Frigate's autotracking. Look to see if a camera lists `PTZRelative`, `PTZRelativePanTilt` and/or `PTZRelativeZoom`. These features are required for autotracking, but some cameras still fail to respond even if they claim support. If they are missing, autotracking will not work (though basic PTZ in the WebUI might). Avoid cameras with no database entry unless they are confirmed as working below.

 | Brand or specific camera | PTZ Controls | Autotracking | Notes |
-| ---------------------------- | :----------: | :----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- |
+| ---------------------------- | :----------: | :----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 | Amcrest | ✅ | ✅ | ⛔️ Generally, Amcrest should work, but some older models (like the common IP2M-841) don't support autotracking |
 | Amcrest ASH21 | ✅ | ❌ | ONVIF service port: 80 |
 | Amcrest IP4M-S2112EW-AI | ✅ | ❌ | FOV relative movement not supported. |
 | Amcrest IP5M-1190EW | ✅ | ❌ | ONVIF Port: 80. FOV relative movement not supported. |
 | Annke CZ504 | ✅ | ✅ | Annke support provide specific firmware ([V5.7.1 build 250227](https://github.com/pierrepinon/annke_cz504/raw/refs/heads/main/digicap_V5-7-1_build_250227.dav)) to fix issue with ONVIF "TranslationSpaceFov" |
 | Axis Q-6155E | ✅ | ❌ | ONVIF service port: 80; Camera does not support MoveStatus. |
 | Ctronics PTZ | ✅ | ❌ | |
 | Dahua | ✅ | ✅ | Some low-end Dahuas (lite series, picoo series (commonly), among others) have been reported to not support autotracking. These models usually don't have a four digit model number with chassis prefix and options postfix (e.g. DH-P5AE-PV vs DH-SD49825GB-HNR). |
 | Dahua DH-SD2A500HB | ✅ | ❌ | |
 | Dahua DH-SD49825GB-HNR | ✅ | ✅ | |
 | Dahua DH-P5AE-PV | ❌ | ❌ | |
-| Foscam | ✅ | ❌ | In general support PTZ, but not relative move. There are no official ONVIF certifications and tests available on the ONVIF Conformant Products Database | |
+| Foscam | ✅ | ❌ | In general support PTZ, but not relative move. There are no official ONVIF certifications and tests available on the ONVIF Conformant Products Database |
 | Foscam R5 | ✅ | ❌ | |
 | Foscam SD4 | ✅ | ❌ | |
 | Hanwha XNP-6550RH | ✅ | ❌ | |
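For a camera that does list the required `PTZRelative*` features, autotracking is enabled per camera under its ONVIF settings. A hedged sketch; the camera name, address, and credentials are hypothetical, and the option names should be checked against Frigate's reference config:

```yaml
cameras:
  ptz_cam:                # hypothetical camera name
    onvif:
      host: 192.168.1.20  # hypothetical address
      port: 8000
      user: admin
      password: password
      autotracking:
        enabled: true
```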
@@ -3,7 +3,7 @@ id: object_classification
 title: Object Classification
 ---

-Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object.
+Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API.

 ## Minimum System Requirements

@@ -11,6 +11,8 @@ Object classification models are lightweight and run very fast on CPU. Inference
 Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.

+A CPU with AVX instructions is required for training and inference.
+
 ## Classes

 Classes are the categories your model will learn to distinguish between. Each class represents a distinct visual category that the model will predict.
@@ -31,9 +33,15 @@ For object classification:
   - Example: `cat` → `Leo`, `Charlie`, `None`.

 - **Attribute**:
-  - Added as metadata to the object (visible in /events): `<model_name>: <predicted_value>`.
+  - Added as metadata to the object, visible in the Tracked Object Details pane in Explore, `frigate/events` MQTT messages, and the HTTP API response as `<model_name>: <predicted_value>`.
   - Ideal when multiple attributes can coexist independently.
-  - Example: Detecting if a `person` in a construction yard is wearing a helmet or not.
+  - Example: Detecting if a `person` in a construction yard is wearing a helmet or not, and if they are wearing a yellow vest or not.

+:::note
+
+A tracked object can only have a single sub label. If you are using Triggers or Face Recognition and you configure an object classification model for `person` using the sub label type, your sub label may not be assigned correctly as it depends on which enrichment completes its analysis first. This could also occur with `car` objects that are assigned a sub label for a delivery carrier. Consider using the `attribute` type instead.
+
+:::
+
 ## Assignment Requirements

@@ -73,13 +81,17 @@ classification:
       classification_type: sub_label # or: attribute
 ```

+An optional config, `save_attempts`, can be set as a key under the model name. This defines the number of classification attempts to save in the Recent Classifications tab. For object classification models, the default is 200.
+
 ## Training the model

 Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of two steps:

 ### Step 1: Name and Define

-Enter a name for your model, select the object label to classify (e.g., `person`, `dog`, `car`), choose the classification type (sub label or attribute), and define your classes. Include a `none` class for objects that don't fit any specific category.
+Enter a name for your model, select the object label to classify (e.g., `person`, `dog`, `car`), choose the classification type (sub label or attribute), and define your classes. Frigate will automatically include a `none` class for objects that don't fit any specific category.

 For example: To classify your two cats, create a model named "Our Cats" and create two classes, "Charlie" and "Leo". A third class, "none", will be created automatically for other neighborhood cats that are not your own.

 ### Step 2: Assign Training Examples

@@ -87,6 +99,8 @@ The system will automatically generate example images from detected objects matc
 When choosing which objects to classify, start with a small number of visually distinct classes and ensure your training samples match camera viewpoints and distances typical for those objects.

+If examples for some of your classes do not appear in the grid, you can continue configuring the model without them. New images will begin to appear in the Recent Classifications view. When your missing classes are seen, classify them from this view and retrain your model.
+
 ### Improving the Model

 - **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
@@ -94,3 +108,23 @@ When choosing which objects to classify, start with a small number of visually d
 - **Preprocessing**: Ensure examples reflect object crops similar to Frigate’s boxes; keep the subject centered.
 - **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels.
 - **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation.

+## Debugging Classification Models
+
+To troubleshoot issues with object classification models, enable debug logging to see detailed information about classification attempts, scores, and consensus calculations.
+
+Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change.
+
+```yaml
+logger:
+  default: info
+  logs:
+    frigate.data_processing.real_time.custom_classification: debug
+```
+
+The debug logs will show:
+
+- Classification probabilities for each attempt
+- Whether scores meet the threshold requirement
+- Consensus calculations and when assignments are made
+- Object classification history and weighted scores
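As an illustration of the `save_attempts` option added above, a minimal sketch; the `custom` nesting level is an assumption about the config layout, and the model name follows the "Our Cats" example from this page:

```yaml
classification:
  custom:
    our_cats:             # hypothetical model name
      threshold: 0.8
      save_attempts: 300  # keep more than the default of 200 attempts
```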
@@ -3,7 +3,7 @@ id: state_classification
 title: State Classification
 ---

-State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region.
+State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate/<camera_name>/classification/<model_name>` MQTT topic and in Home Assistant sensors via the official Frigate integration.

 ## Minimum System Requirements

@@ -11,6 +11,8 @@ State classification models are lightweight and run very fast on CPU. Inference
 Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.

+A CPU with AVX instructions is required for training and inference.
+
 ## Classes

 Classes are the different states an area on your camera can be in. Each class represents a distinct visual state that the model will learn to recognize.
@@ -46,6 +48,8 @@ classification:
       crop: [0, 180, 220, 400]
 ```

+An optional config, `save_attempts`, can be set as a key under the model name. This defines the number of classification attempts to save in the Recent Classifications tab. For state classification models, the default is 100.
+
 ## Training the model

 Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of three steps:
@@ -70,3 +74,34 @@ Once some images are assigned, training will begin automatically.
 - **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather.
 - **When to train**: Focus on cases where the model is entirely incorrect or flips between states when it should not. There's no need to train additional images when the model is already working consistently.
 - **Selecting training images**: Images scoring below 100% due to new conditions (e.g., first snow of the year, seasonal changes) or variations (e.g., objects temporarily in view, insects at night) are good candidates for training, as they represent scenarios different from the default state. Training these lower-scoring images that differ from existing training data helps prevent overfitting. Avoid training large quantities of images that look very similar, especially if they already score 100% as this can lead to overfitting.

+## Debugging Classification Models
+
+To troubleshoot issues with state classification models, enable debug logging to see detailed information about classification attempts, scores, and state verification.
+
+Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change.
+
+```yaml
+logger:
+  default: info
+  logs:
+    frigate.data_processing.real_time.custom_classification: debug
+```
+
+The debug logs will show:
+
+- Classification probabilities for each attempt
+- Whether scores meet the threshold requirement
+- State verification progress (consecutive detections needed)
+- When state changes are published
+
+### Recent Classifications
+
+For state classification, images are only added to recent classifications under specific circumstances:
+
+- **First detection**: The first classification attempt for a camera is always saved
+- **State changes**: Images are saved when the detected state differs from the current verified state
+- **Pending verification**: Images are saved when there's a pending state change being verified (requires 3 consecutive identical states)
+- **Low confidence**: Images with scores below 100% are saved even if the state matches the current state (useful for training)
+
+Images are **not** saved when the state is stable (detected state matches current state) **and** the score is 100%. This prevents unnecessary storage of redundant high-confidence classifications.
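Because state results are published to `frigate/<camera_name>/classification/<model_name>`, they can also be consumed outside of the official integration. A hedged Home Assistant sketch; the camera and model names are hypothetical:

```yaml
mqtt:
  sensor:
    - name: "Garage Door State"
      # hypothetical camera (garage_cam) and model (garage_door) names
      state_topic: "frigate/garage_cam/classification/garage_door"
```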
@@ -48,15 +48,29 @@ Using Ollama on CPU is not recommended, high inference times make using Generati

 :::

-[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance.
+[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.

 Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.

-Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose a `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
+Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://docs.ollama.com/faq#how-does-ollama-handle-concurrent-requests).
+
+### Model Types: Instruct vs Thinking
+
+Most vision-language models are available as **instruct** models, which are fine-tuned to follow instructions and respond concisely to prompts. However, some models (such as certain Qwen-VL or minigpt variants) offer both **instruct** and **thinking** versions.
+
+- **Instruct models** are always recommended for use with Frigate. These models generate direct, relevant, actionable descriptions that best fit Frigate's object and event summary use case.
+- **Thinking models** are fine-tuned for more free-form, open-ended, and speculative outputs, which are typically not concise and may not provide the practical summaries Frigate expects. For this reason, Frigate does **not** recommend or support using thinking models.
+
+Some models are labeled as **hybrid** (capable of both thinking and instruct tasks). In these cases, Frigate will always use instruct-style prompts and specifically disables thinking-mode behaviors to ensure concise, useful responses.
+
+**Recommendation:**
+Always select the `-instruct` or documented instruct/tagged variant of any model you use in your Frigate configuration. If in doubt, refer to your model provider’s documentation or model library for guidance on the correct model variant to use.

 ### Supported Models

-You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`. Note that Frigate will not automatically download the model you specify in your config, you must download the model to your local instance of Ollama first i.e. by running `ollama pull llava:7b` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
+You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/search?c=vision). Note that Frigate will not automatically download the model you specify in your config; you must download the model to your local instance of Ollama first, i.e. by running `ollama pull qwen3-vl:2b-instruct` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.

 :::note
@@ -64,6 +78,10 @@ You should have at least 8 GB of RAM available (or VRAM if running on GPU) to ru

 :::

+#### Ollama Cloud models
+
+Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).
+
 ### Configuration

 ```yaml
@@ -193,7 +211,7 @@ You are also able to define custom prompts in your configuration.
 genai:
   provider: ollama
   base_url: http://localhost:11434
-  model: llava
+  model: qwen3-vl:8b-instruct

 objects:
   prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
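A hedged sketch of the parallel-request settings mentioned above, applied to the official Ollama Docker image; the queue and loaded-model values are illustrative only and should be sized to your hardware:

```yaml
services:
  ollama:
    image: ollama/ollama
    environment:
      OLLAMA_NUM_PARALLEL: "1"       # required, per the note above
      OLLAMA_MAX_QUEUE: "64"         # illustrative value
      OLLAMA_MAX_LOADED_MODELS: "1"  # illustrative value
```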
@@ -39,9 +39,10 @@ You are also able to define custom prompts in your configuration.
 genai:
   provider: ollama
   base_url: http://localhost:11434
-  model: llava
+  model: qwen3-vl:8b-instruct

 objects:
+  genai:
     prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
     object_prompts:
       person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
@@ -16,12 +16,13 @@ Review summaries provide structured JSON responses that are saved for each revie
 ```
 - `title` (string): A concise, direct title that describes the purpose or overall action (e.g., "Person taking out trash", "Joe walking dog").
 - `scene` (string): A narrative description of what happens across the sequence from start to finish, including setting, detected objects, and their observable actions.
+- `shortSummary` (string): A brief 2-sentence summary of the scene, suitable for notifications. This is a condensed version of the scene description.
 - `confidence` (float): 0-1 confidence in the analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous.
 - `other_concerns` (list): List of user-defined concerns that may need additional investigation.
 - `potential_threat_level` (integer): 0, 1, or 2 as defined below.
 ```

-This will show in multiple places in the UI to give additional context about each activity, and allow viewing more details when extra attention is required. Frigate's built in notifications will also automatically show the title and description when the data is available.
+This will show in multiple places in the UI to give additional context about each activity, and allow viewing more details when extra attention is required. Frigate's built in notifications will automatically show the title and `shortSummary` when the data is available, while the full `scene` description is available in the UI for detailed review.

 ### Defining Typical Activity
@@ -30,40 +31,43 @@ Each installation and even camera can have different parameters for what is cons
 <details>
 <summary>Default Activity Context Prompt</summary>

-```
-### Normal Activity Indicators (Level 0)
-- Known/verified people in any zone at any time
-- People with pets in residential areas
-- Deliveries or services during daytime/evening (6 AM - 10 PM): carrying packages to doors/porches, placing items, leaving
-- Services/maintenance workers with visible tools, uniforms, or service vehicles during daytime
-- Activity confined to public areas only (sidewalks, streets) without entering property at any time
+```yaml
+review:
+  genai:
+    activity_context_prompt: |
+      ### Normal Activity Indicators (Level 0)
+      - Known/verified people in any zone at any time
+      - People with pets in residential areas
+      - Deliveries or services during daytime/evening (6 AM - 10 PM): carrying packages to doors/porches, placing items, leaving
+      - Services/maintenance workers with visible tools, uniforms, or service vehicles during daytime
+      - Activity confined to public areas only (sidewalks, streets) without entering property at any time

-### Suspicious Activity Indicators (Level 1)
-- **Testing or attempting to open doors/windows/handles on vehicles or buildings** — ALWAYS Level 1 regardless of time or duration
-- **Unidentified person in private areas (driveways, near vehicles/buildings) during late night/early morning (11 PM - 5 AM)** — ALWAYS Level 1 regardless of activity or duration
-- Taking items that don't belong to them (packages, objects from porches/driveways)
-- Climbing or jumping fences/barriers to access property
-- Attempting to conceal actions or items from view
-- Prolonged loitering: remaining in same area without visible purpose throughout most of the sequence
+      ### Suspicious Activity Indicators (Level 1)
+      - **Testing or attempting to open doors/windows/handles on vehicles or buildings** — ALWAYS Level 1 regardless of time or duration
+      - **Unidentified person in private areas (driveways, near vehicles/buildings) during late night/early morning (11 PM - 5 AM)** — ALWAYS Level 1 regardless of activity or duration
+      - Taking items that don't belong to them (packages, objects from porches/driveways)
+      - Climbing or jumping fences/barriers to access property
+      - Attempting to conceal actions or items from view
+      - Prolonged loitering: remaining in same area without visible purpose throughout most of the sequence

-### Critical Threat Indicators (Level 2)
-- Holding break-in tools (crowbars, pry bars, bolt cutters)
-- Weapons visible (guns, knives, bats used aggressively)
-- Forced entry in progress
-- Physical aggression or violence
-- Active property damage or theft in progress
+      ### Critical Threat Indicators (Level 2)
+      - Holding break-in tools (crowbars, pry bars, bolt cutters)
+      - Weapons visible (guns, knives, bats used aggressively)
+      - Forced entry in progress
+      - Physical aggression or violence
+      - Active property damage or theft in progress

-### Assessment Guidance
-Evaluate in this order:
+      ### Assessment Guidance
+      Evaluate in this order:

-1. **If person is verified/known** → Level 0 regardless of time or activity
-2. **If person is unidentified:**
-   - Check time: If late night/early morning (11 PM - 5 AM) AND in private areas (driveways, near vehicles/buildings) → Level 1
-   - Check actions: If testing doors/handles, taking items, climbing → Level 1
-   - Otherwise, if daytime/evening (6 AM - 10 PM) with clear legitimate purpose (delivery, service worker) → Level 0
-3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1)
+      1. **If person is verified/known** → Level 0 regardless of time or activity
+      2. **If person is unidentified:**
+         - Check time: If late night/early morning (11 PM - 5 AM) AND in private areas (driveways, near vehicles/buildings) → Level 1
+         - Check actions: If testing doors/handles, taking items, climbing → Level 1
+         - Otherwise, if daytime/evening (6 AM - 10 PM) with clear legitimate purpose (delivery, service worker) → Level 0
+      3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1)

-The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.
+      The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.
 ```

 </details>
@@ -108,6 +112,17 @@ review:
     - animals in the garden
 ```

+### Preferred Language
+
+By default, review summaries are generated in English. You can configure Frigate to generate summaries in your preferred language by setting the `preferred_language` option:
+
+```yaml
+review:
+  genai:
+    enabled: true
+    preferred_language: Spanish
+```
+
 ## Review Reports

 Along with individual review item summaries, Generative AI provides the ability to request a report of a given time period. For example, you can get a daily report while on vacation of any suspicious activity or other concerns that may require review.
@@ -13,7 +13,7 @@ Object detection and enrichments (like Semantic Search, Face Recognition, and Li

 - **AMD**

-  - ROCm will automatically be detected and used for enrichments in the `-rocm` Frigate image.
+  - ROCm support in the `-rocm` Frigate image is automatically detected for enrichments, but only some enrichment models are available due to ROCm's focus on LLMs and limited stability with certain neural network models. Frigate disables models that perform poorly or are unstable to ensure reliable operation, so only compatible enrichments may be active.

 - **Intel**
@@ -3,78 +3,65 @@ id: hardware_acceleration_video
 title: Video Decoding
 ---

+import CommunityBadge from '@site/src/components/CommunityBadge';
+
 # Video Decoding

-It is highly recommended to use a GPU for hardware acceleration video decoding in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg.
+It is highly recommended to use an integrated or discrete GPU for hardware acceleration video decoding in Frigate.

-Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro
+Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg. To verify that hardware acceleration is working:
+
+- Check the logs: A message will either say that hardware acceleration was automatically detected, or there will be a warning that no hardware acceleration was automatically detected
+- If hardware acceleration is specified in the config, verification can be done by ensuring the logs are free from errors. There is no CPU fallback for hardware acceleration.

 :::info

-## Raspberry Pi 3/4
+Frigate supports presets for optimal hardware accelerated video decoding:

-Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory).
-If you are using the HA Add-on, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration.
+**AMD**

-```yaml
-# if you want to decode a h264 stream
-ffmpeg:
-  hwaccel_args: preset-rpi-64-h264
+- [AMD](#amd-based-cpus): Frigate can utilize modern AMD integrated GPUs and AMD discrete GPUs to accelerate video decoding.

-# if you want to decode a h265 (hevc) stream
-ffmpeg:
-  hwaccel_args: preset-rpi-64-h265
-```
+**Intel**

-:::note
+- [Intel](#intel-based-cpus): Frigate can utilize most Intel integrated GPUs and Arc GPUs to accelerate video decoding.

-If running Frigate through Docker, you either need to run in privileged mode or
-map the `/dev/video*` devices to Frigate. With Docker Compose add:
+**Nvidia GPU**

-```yaml
-services:
-  frigate:
-    ...
-    devices:
-      - /dev/video11:/dev/video11
-```
+- [Nvidia GPU](#nvidia-gpus): Frigate can utilize most modern Nvidia GPUs to accelerate video decoding.

-Or with `docker run`:
+**Raspberry Pi 3/4**

-```bash
-docker run -d \
-  --name frigate \
-  ...
-  --device /dev/video11 \
-  ghcr.io/blakeblackshear/frigate:stable
-```
+- [Raspberry Pi](#raspberry-pi-34): Frigate can utilize the media engine in the Raspberry Pi 3 and 4 to slightly accelerate video decoding.

-`/dev/video11` is the correct device (on Raspberry Pi 4B). You can check
-by running the following and looking for `H264`:
+**Nvidia Jetson** <CommunityBadge />

-```bash
-for d in /dev/video*; do
-  echo -e "---\n$d"
-  v4l2-ctl --list-formats-ext -d $d
-done
-```
+- [Jetson](#nvidia-jetson): Frigate can utilize the media engine in Jetson hardware to accelerate video decoding.

-Or map in all the `/dev/video*` devices.
+**Rockchip** <CommunityBadge />
+
+- [RKNN](#rockchip-platform): Frigate can utilize the media engine in RockChip SOCs to accelerate video decoding.
+
+**Other Hardware**
+
+Depending on your system, these presets may not be compatible, and you may need to use manual hwaccel args to take advantage of your hardware. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro

 :::

 ## Intel-based CPUs

 Frigate can utilize most Intel integrated GPUs and Arc GPUs to accelerate video decoding.

 :::info

 **Recommended hwaccel Preset**

-| CPU Generation | Intel Driver | Recommended Preset  | Notes                                |
-| -------------- | ------------ | ------------------- | ------------------------------------ |
-| gen1 - gen5    | i965         | preset-vaapi        | qsv is not supported                 |
-| gen6 - gen7    | iHD          | preset-vaapi        | qsv is not supported                 |
-| gen8 - gen12   | iHD          | preset-vaapi        | preset-intel-qsv-\* can also be used |
-| gen13+         | iHD / Xe     | preset-intel-qsv-\* |                                      |
-| Intel Arc GPU  | iHD / Xe     | preset-intel-qsv-\* |                                      |
+| CPU Generation | Intel Driver | Recommended Preset  | Notes                                       |
+| -------------- | ------------ | ------------------- | ------------------------------------------- |
+| gen1 - gen5    | i965         | preset-vaapi        | qsv is not supported, may not support H.265 |
+| gen6 - gen7    | iHD          | preset-vaapi        | qsv is not supported                        |
+| gen8 - gen12   | iHD          | preset-vaapi        | preset-intel-qsv-\* can also be used        |
+| gen13+         | iHD / Xe     | preset-intel-qsv-\* |                                             |
+| Intel Arc GPU  | iHD / Xe     | preset-intel-qsv-\* |                                             |

 :::
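Applying the recommended preset from the table above is a one-line config change. A minimal sketch using the `preset-vaapi` preset named in the table:

```yaml
ffmpeg:
  hwaccel_args: preset-vaapi
```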
@@ -195,15 +182,17 @@ telemetry:

 If you are passing in a device path, make sure you've passed the device through to the container.

-## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver
+## AMD-based CPUs

-VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
+Frigate can utilize modern AMD integrated GPUs and AMD GPUs to accelerate video decoding using VAAPI.

-:::note
+### Configuring Radeon Driver

 You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars).

-:::
+### Via VAAPI
+
+VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.

 ```yaml
 ffmpeg:
@@ -264,7 +253,7 @@ processes:

 :::note

-`nvidia-smi` may not show `ffmpeg` processes when run inside the container [due to docker limitations](https://github.com/NVIDIA/nvidia-docker/issues/179#issuecomment-645579458).
+`nvidia-smi` will not show `ffmpeg` processes when run inside the container [due to docker limitations](https://github.com/NVIDIA/nvidia-docker/issues/179#issuecomment-645579458).

 :::
@@ -300,12 +289,63 @@ If you do not see these processes, check the `docker logs` for the container and

These instructions were originally based on the [Jellyfin documentation](https://jellyfin.org/docs/general/administration/hardware-acceleration.html#nvidia-hardware-acceleration-on-docker-linux).

## Raspberry Pi 3/4

Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory).
If you are using the HA Add-on, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration.

```yaml
# if you want to decode a h264 stream
ffmpeg:
  hwaccel_args: preset-rpi-64-h264

# if you want to decode a h265 (hevc) stream
ffmpeg:
  hwaccel_args: preset-rpi-64-h265
```

:::note

If running Frigate through Docker, you either need to run in privileged mode or
map the `/dev/video*` devices to Frigate. With Docker Compose add:

```yaml
services:
  frigate:
    ...
    devices:
      - /dev/video11:/dev/video11
```

Or with `docker run`:

```bash
docker run -d \
  --name frigate \
  ...
  --device /dev/video11 \
  ghcr.io/blakeblackshear/frigate:stable
```

`/dev/video11` is the correct device (on Raspberry Pi 4B). You can check
by running the following and looking for `H264`:

```bash
for d in /dev/video*; do
  echo -e "---\n$d"
  v4l2-ctl --list-formats-ext -d $d
done
```

Or map in all the `/dev/video*` devices.

:::

# Community Supported

## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano)
## NVIDIA Jetson

A separate set of docker images is available that is based on Jetpack/L4T. They come with an `ffmpeg` build
with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so Frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection.
A separate set of docker images is available for Jetson devices. They come with an `ffmpeg` build with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so Frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection.

You will need to use the image with the nvidia container runtime:

@@ -15,7 +15,7 @@ The jsmpeg live view will use more browser and client GPU resources. Using go2rt
| ------ | ------------------------------------- | ---------- | ---------------------------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| jsmpeg | same as `detect -> fps`, capped at 10 | 720p | no | no | Resolution is configurable, but go2rtc is recommended if you want higher resolutions and better frame rates. jsmpeg is Frigate's default without go2rtc configured. |
| mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. |
| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration, doesn't support h.265. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. |
| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. |

### Camera Settings Recommendations

@@ -127,7 +127,8 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req
```

- For access through Tailscale, the Frigate system's Tailscale IP must be added as a WebRTC candidate. Tailscale IPs all start with `100.`, and are reserved within the `100.64.0.0/10` CIDR block.
- Note that WebRTC does not support H.265.
- Note that some browsers may not support H.265 (HEVC). You can check your browser's current version for H.265 compatibility [here](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness).

:::tip

@@ -146,18 +146,18 @@ detectors:

### EdgeTPU Supported Models

| Model | Notes |
| ------------------------------------- | ------------------------------------------- |
| [MobileNet v2](#ssdlite-mobilenet-v2) | Default model |
| [YOLOv9](#yolo-v9) | More accurate but slower than default model |
| Model | Notes |
| ----------------------- | ------------------------------------------- |
| [Mobiledet](#mobiledet) | Default model |
| [YOLOv9](#yolov9) | More accurate but slower than default model |

#### SSDLite MobileNet v2
#### Mobiledet

A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.

#### YOLO v9
#### YOLOv9

[YOLOv9](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite) models that are compiled for Tensorflow Lite and properly quantized are supported, but not included by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. Note that the model may require a custom label file (eg. [use this 17 label file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) for the model linked above.)
YOLOv9 models that are compiled for TensorFlow Lite and properly quantized are supported, but not included by default. [Download the model](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite), bind mount the file into the container, and provide the path with `model.path`. Note that the linked model requires a 17-label [labelmap file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) that includes only 17 COCO classes.

<details>
<summary>YOLOv9 Setup & Config</summary>
@@ -178,7 +178,7 @@ model:
  labelmap_path: /config/labels-coco17.txt
```

Note that the labelmap uses a subset of the complete COCO label set that has only 17 objects.
Note that due to hardware limitations of the Coral, the labelmap is a subset of the COCO labels and includes only 17 object classes.

</details>

@@ -477,7 +477,7 @@ After placing the downloaded onnx model in your config/model_cache folder, you c
detectors:
  ov:
    type: openvino
    device: GPU
    device: CPU

model:
  model_type: dfine
@@ -569,10 +569,10 @@ When using Docker Compose:
```yaml
services:
  frigate:
    ---
    devices:
      - /dev/dri
      - /dev/kfd
    ...
    devices:
      - /dev/dri
      - /dev/kfd
```

For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed).
@@ -600,9 +600,9 @@ When using Docker Compose:
```yaml
services:
  frigate:

    environment:
      HSA_OVERRIDE_GFX_VERSION: "10.0.0"
    ...
    environment:
      HSA_OVERRIDE_GFX_VERSION: "10.0.0"
```

Figuring out what version you need can be complicated as you can't tell the chipset name and driver from the AMD brand name.
@@ -1508,17 +1508,17 @@ COPY --from=build /dfine/output/dfine_${MODEL_SIZE}_obj2coco.onnx /dfine-${MODEL
EOF
```

### Download RF-DETR Model
### Downloading RF-DETR Model

RF-DETR can be exported as ONNX by running the command below. You can copy and paste the whole thing into your terminal and execute it, setting `MODEL_SIZE` in the first line to `Nano`, `Small`, or `Medium`.

```sh
docker build . --build-arg MODEL_SIZE=Nano --output . -f- <<'EOF'
docker build . --build-arg MODEL_SIZE=Nano --rm --output . -f- <<'EOF'
FROM python:3.11 AS build
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
WORKDIR /rfdetr
RUN uv pip install --system rfdetr[onnxexport] torch==2.8.0 onnxscript
RUN uv pip install --system rfdetr[onnxexport] torch==2.8.0 onnx==1.19.1 onnxscript
ARG MODEL_SIZE
RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export(simplify=True)"
FROM scratch

@@ -11,7 +11,7 @@ This adds features including the ability to deep link directly into the app.

In order to install Frigate as a PWA, the following requirements must be met:

- Frigate must be accessed via a secure context (localhost, secure https, etc.)
- Frigate must be accessed via a secure context (localhost, secure https, VPN, etc.)
- On Android, Firefox, Chrome, Edge, Opera, and Samsung Internet Browser all support installing PWAs.
- On iOS 16.4 and later, PWAs can be installed from the Share menu in Safari, Chrome, Edge, Firefox, and Orion.

@@ -22,3 +22,7 @@ Installation varies slightly based on the device that is being used:

- Desktop: Use the install button typically found in the right edge of the address bar
- Android: Use the `Install as App` button in the more options menu for Chrome, and the `Add app to Home screen` button for Firefox
- iOS: Use the `Add to Homescreen` button in the share menu

## Usage

Once set up, the Frigate app can be used wherever it has access to Frigate. This means it can be set up as local-only, VPN-only, or fully accessible depending on your needs.

@@ -185,10 +185,35 @@ In this configuration:

- `front_door` stream is used by Frigate for viewing, recording, and detection. The `#backchannel=0` parameter prevents go2rtc from establishing the audio output backchannel, so it won't block two-way talk access (see the sketch below).
- `front_door_twoway` stream is used for two-way talk functionality. This stream can be used by Frigate's WebRTC viewer when two-way talk is enabled, or by other applications (like Home Assistant Advanced Camera Card) that need access to the camera's audio output channel.
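
The go2rtc stream definitions these bullets refer to sit above this hunk and are not shown here; a minimal sketch of the layout, with a placeholder RTSP URL, might look like:

```yaml
go2rtc:
  streams:
    front_door:
      # placeholder camera URL; #backchannel=0 keeps go2rtc from claiming the audio output channel
      - rtsp://user:pass@camera-ip/stream#backchannel=0
    front_door_twoway:
      # same camera, with the backchannel left available for two-way talk
      - rtsp://user:pass@camera-ip/stream
```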

## Security: Restricted Stream Sources

For security reasons, the `echo:`, `expr:`, and `exec:` stream sources are disabled by default in go2rtc. These sources allow arbitrary command execution and can pose security risks if misconfigured.

If you attempt to use these sources in your configuration, the streams will be removed and an error message will be printed in the logs.

To enable these sources, you must set the environment variable `GO2RTC_ALLOW_ARBITRARY_EXEC=true`. This can be done in your Docker Compose file or container environment:

```yaml
environment:
  - GO2RTC_ALLOW_ARBITRARY_EXEC=true
```

:::warning

Enabling arbitrary exec sources allows execution of arbitrary commands through go2rtc stream configurations. Only enable this if you understand the security implications and trust all sources of your configuration.

:::

## Advanced Restream Configurations

The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:

:::warning

The `exec:`, `echo:`, and `expr:` sources are disabled by default for security. You must set `GO2RTC_ALLOW_ARBITRARY_EXEC=true` to use them. See [Security: Restricted Stream Sources](#security-restricted-stream-sources) for more information.

:::

NOTE: The output will need to be passed with two curly braces `{{output}}`

```yaml

@@ -20,7 +20,7 @@ Here are some of the cameras I recommend:

- <a href="https://amzn.to/4fwoNWA" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T549M-ALED-S3</a> (affiliate link)
- <a href="https://amzn.to/3YXpcMw" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T54IR-AS</a> (affiliate link)
- <a href="https://amzn.to/3AvBHoY" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-AI-V3</a> (affiliate link)
- <a href="https://amzn.to/4ltOpaC" target="_blank" rel="nofollow noopener sponsored">HIKVISION DS-2CD2387G2P-LSU/SL ColorVu 8MP Panoramic Turret IP Camera</a> (affiliate link)
- <a href="https://www.bhphotovideo.com/c/product/1705511-REG/hikvision_colorvu_ds_2cd2387g2p_lsu_sl_8mp_network.html" target="_blank" rel="nofollow noopener">HIKVISION DS-2CD2387G2P-LSU/SL ColorVu 8MP Panoramic Turret IP Camera</a>

I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.

@@ -38,9 +38,11 @@ If the EQ13 is out of stock, the link below may take you to a suggested alternat

:::

| Name | Coral Inference Speed | Coral Compatibility | Notes |
| --------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | ----------------------------------------------------------------------------------------- |
| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
| Name | Capabilities | Notes |
| --------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | --------------------------------------------------- |
| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | Can run object detection on several 1080p cameras with low-medium activity | Dual gigabit NICs for easy isolated camera network. |
| Intel i3-1220P ([Amazon](https://www.amazon.com/Beelink-i3-1220P-Computer-Display-Gigabit/dp/B0DDCKT9YP)) | Can handle a large number of 1080p cameras with high activity | |
| Intel 125H ([Amazon](https://www.amazon.com/MINISFORUM-Pro-125H-Barebone-Computer-HDMI2-1/dp/B0FH21FSZM)) | Can handle a significant number of 1080p cameras with high activity | Includes NPU for more efficient detection in 0.17+ |

## Detectors

@@ -125,10 +127,16 @@ In real-world deployments, even with multiple cameras running concurrently, Frig

### Google Coral TPU

:::warning

The Coral is no longer recommended for new Frigate installations, except in deployments with particularly low power requirements or hardware incapable of utilizing alternative AI accelerators for object detection. Instead, we suggest using one of the numerous other supported object detectors. Frigate will continue to provide support for the Coral TPU for as long as practicably possible, given it's still one of the most power-efficient devices for executing object detection models.

:::

Frigate supports both the USB and M.2 versions of the Google Coral.

- The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions.
- The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai
- The PCIe and M.2 versions require installation of a driver on the host. https://github.com/jnicolson/gasket-builder should be used.

A single Coral can handle many cameras using the default model and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed.

@@ -94,6 +94,10 @@ $ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576

The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.

## Extra Steps for Specific Hardware

The following sections contain additional setup steps that are only required if you are using specific hardware. If you are not using any of these hardware types, you can skip to the [Docker](#docker) installation section.

### Raspberry Pi 3/4

By default, the Raspberry Pi limits the amount of memory available to the GPU. In order to use ffmpeg hardware acceleration, you must increase the available memory by setting `gpu_mem` to the maximum recommended value in `config.txt` as described in the [official docs](https://www.raspberrypi.org/documentation/computers/config_txt.html#memory-options).
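
For example, a hedged sketch of the relevant entry in `config.txt` (the value `256` is illustrative; use the maximum recommended for your Pi model from the official docs):

```ini
# allocate more memory to the GPU for hardware video decoding
gpu_mem=256
```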
@@ -106,14 +110,107 @@ The Hailo-8 and Hailo-8L AI accelerators are available in both M.2 and HAT form

#### Installation

For Raspberry Pi 5 users with the AI Kit, installation is straightforward. Simply follow this [guide](https://www.raspberrypi.com/documentation/accessories/ai-kit.html#ai-kit-installation) to install the driver and software.
:::warning

For other installations, follow these steps for installation:
The Raspberry Pi kernel includes an older version of the Hailo driver that is incompatible with Frigate. You **must** follow the installation steps below to install the correct driver version, and you **must** disable the built-in kernel driver as described in step 1.

1. Install the driver from the [Hailo GitHub repository](https://github.com/hailo-ai/hailort-drivers). A convenient script for Linux is available to clone the repository, build the driver, and install it.
2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/dev/docker/hailo8l/user_installation.sh).
3. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
4. Run the script with `./user_installation.sh`
:::

1. **Disable the built-in Hailo driver (Raspberry Pi only)**:

   :::note

   If you are **not** using a Raspberry Pi, skip this step and proceed directly to step 2.

   :::

   If you are using a Raspberry Pi, you need to blacklist the built-in kernel Hailo driver to prevent conflicts. First, check if the driver is currently loaded:

   ```bash
   lsmod | grep hailo
   ```

   If it shows `hailo_pci`, unload it:

   ```bash
   sudo rmmod hailo_pci
   ```

   Now blacklist the driver to prevent it from loading on boot:

   ```bash
   echo "blacklist hailo_pci" | sudo tee /etc/modprobe.d/blacklist-hailo_pci.conf
   ```

   Update initramfs to ensure the blacklist takes effect:

   ```bash
   sudo update-initramfs -u
   ```

   Reboot your Raspberry Pi:

   ```bash
   sudo reboot
   ```

   After rebooting, verify the built-in driver is not loaded:

   ```bash
   lsmod | grep hailo
   ```

   This command should return no results. If it still shows `hailo_pci`, the blacklist did not take effect properly and you may need to check for other Hailo packages installed via apt that are loading the driver.

2. **Run the installation script**:

   Download the installation script:

   ```bash
   wget https://raw.githubusercontent.com/blakeblackshear/frigate/dev/docker/hailo8l/user_installation.sh
   ```

   Make it executable:

   ```bash
   sudo chmod +x user_installation.sh
   ```

   Run the script:

   ```bash
   ./user_installation.sh
   ```

   The script will:

   - Install necessary build dependencies
   - Clone and build the Hailo driver from the official repository
   - Install the driver
   - Download and install the required firmware
   - Set up udev rules

3. **Reboot your system**:

   After the script completes successfully, reboot to load the firmware:

   ```bash
   sudo reboot
   ```

4. **Verify the installation**:

   After rebooting, verify that the Hailo device is available:

   ```bash
   ls -l /dev/hailo0
   ```

   You should see the device listed. You can also verify the driver is loaded:

   ```bash
   lsmod | grep hailo_pci
   ```

#### Setup

@@ -302,7 +399,7 @@ services:
    shm_size: "512mb" # update for your cameras based on calculation above
    devices:
      - /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
      - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
      - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder
      - /dev/video11:/dev/video11 # For Raspberry Pi 4B
      - /dev/dri/renderD128:/dev/dri/renderD128 # AMD / Intel GPU, needs to be updated for your hardware
      - /dev/accel:/dev/accel # Intel NPU
@@ -368,6 +465,7 @@ There are important limitations in HA OS to be aware of:

- Separate local storage for media is not yet supported by Home Assistant
- AMD GPUs are not supported because HA OS does not include the mesa driver.
- Intel NPUs are not supported because HA OS does not include the NPU firmware.
- Nvidia GPUs are not supported because addons do not support the nvidia runtime.

:::

@@ -5,7 +5,7 @@ title: Updating

# Updating Frigate

The current stable version of Frigate is **0.16.2**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.2).
The current stable version of Frigate is **0.17.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.17.0).

Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.

@@ -33,21 +33,21 @@ If you’re running Frigate via Docker (recommended method), follow these steps:
2. **Update and Pull the Latest Image**:

   - If using Docker Compose:
     - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.2` instead of `0.15.2`). For example:
     - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.17.0` instead of `0.16.3`). For example:
       ```yaml
       services:
         frigate:
           image: ghcr.io/blakeblackshear/frigate:0.16.2
           image: ghcr.io/blakeblackshear/frigate:0.17.0
       ```
     - Then pull the image:
       ```bash
       docker pull ghcr.io/blakeblackshear/frigate:0.16.2
       docker pull ghcr.io/blakeblackshear/frigate:0.17.0
       ```
     - **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
   - If using `docker run`:
     - Pull the image with the appropriate tag (e.g., `0.16.2`, `0.16.2-tensorrt`, or `stable`):
     - Pull the image with the appropriate tag (e.g., `0.17.0`, `0.17.0-tensorrt`, or `stable`):
       ```bash
       docker pull ghcr.io/blakeblackshear/frigate:0.16.2
       docker pull ghcr.io/blakeblackshear/frigate:0.17.0
       ```

3. **Start the Container**:

@@ -105,8 +105,8 @@ If an update causes issues:
1. Stop Frigate.
2. Restore your backed-up config file and database.
3. Revert to the previous image version:
   - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.15.2`) in your `docker run` command.
   - For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.15.2`), and re-run `docker compose up -d`.
   - For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`) in your `docker run` command.
   - For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`), and re-run `docker compose up -d`.
   - For Home Assistant: Reinstall the previous addon version manually via the repository if needed and restart the addon.
4. Verify the old version is running again. (A Docker Compose sketch of this rollback follows this list.)
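
For the Docker Compose path, the rollback amounts to a short sketch like this (`0.16.3` is the example tag used above; substitute the version you are reverting to):

```bash
docker compose down
# restore your backed-up config file and database, then pin the old tag
# in docker-compose.yml, e.g. image: ghcr.io/blakeblackshear/frigate:0.16.3
docker compose up -d
```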
@@ -134,31 +134,13 @@ Now you should be able to start Frigate by running `docker compose up -d` from w

This section assumes that you already have an environment set up as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution.

### Step 1: Add a detect stream
### Step 1: Start Frigate

First we will add the detect stream for the camera:
At this point you should be able to start Frigate and a basic config will be created automatically.

```yaml
mqtt:
  enabled: False
### Step 2: Add a camera

cameras:
  name_of_your_camera: # <------ Name the camera
    enabled: True
    ffmpeg:
      inputs:
        - path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection
          roles:
            - detect
```

### Step 2: Start Frigate

At this point you should be able to start Frigate and see the video feed in the UI.

If you get an error image from the camera, this means ffmpeg was not able to get the video feed from your camera. Check the logs for error messages from ffmpeg. The default ffmpeg arguments are designed to work with H264 RTSP cameras that support TCP connections.

FFmpeg arguments for other types of cameras can be found [here](../configuration/camera_specific.md).
You can click the `Add Camera` button to use the camera setup wizard to get your first camera added into Frigate.

### Step 3: Configure hardware acceleration (recommended)

@@ -173,7 +155,7 @@ services:
  frigate:
    ...
    devices:
      - /dev/dri/renderD128:/dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
      - /dev/dri/renderD128:/dev/dri/renderD128 # for intel & amd hwaccel, needs to be updated for your hardware
    ...
```

@@ -202,7 +184,7 @@ services:
    ...
    devices:
      - /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions
      - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
      - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder
    ...
```

@@ -245,6 +245,12 @@ To load a preview gif of a review item:
https://HA_URL/api/frigate/notifications/<review-id>/review_preview.gif
```

To load the thumbnail of a review item:

```
https://HA_URL/api/frigate/notifications/<review-id>/<camera>/review_thumbnail.webp
```

<a name="streams"></a>

## RTSP stream

@@ -280,7 +280,7 @@ Topic with current state of notifications. Published values are `ON` and `OFF`.

## Frigate Camera Topics

### `frigate/<camera_name>/<role>/status`
### `frigate/<camera_name>/status/<role>`

Publishes the current health status of each role that is enabled (`audio`, `detect`, `record`). Possible values are:

@@ -38,3 +38,7 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht

## [Periscope](https://github.com/maksz42/periscope)

[Periscope](https://github.com/maksz42/periscope) is a lightweight Android app that turns old devices into live viewers for Frigate. It works on Android 2.2 and above, including Android TV. It supports authentication and HTTPS.

## [Scrypted - Frigate bridge plugin](https://github.com/apocaliss92/scrypted-frigate-bridge)

[Scrypted - Frigate bridge](https://github.com/apocaliss92/scrypted-frigate-bridge) is a plugin that ingests Frigate detections, motion, and video clips into Scrypted, and provides templates for exporting rebroadcast configurations to Frigate.

@@ -15,13 +15,11 @@ There are three model types offered in Frigate+, `mobiledet`, `yolonas`, and `yo

Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models.

| Model Type  | Description |
| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas`   | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
| `yolov9`    | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX\*, Apple Silicon\*, and Rockchip NPUs. |

_\* Support coming in 0.17_
| Model Type  | Description |
| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas`   | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
| `yolov9`    | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX, Apple Silicon, and Rockchip NPUs. |

### YOLOv9 Details

@@ -39,7 +37,7 @@ If you have a Hailo device, you will need to specify the hardware you have when

#### Rockchip (RKNN) Support

For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is coming in 0.17.
For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is available in 0.17 and later.

## Supported detector types

@@ -55,7 +53,7 @@ Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVi
| [Hailo8/Hailo8L/Hailo8R](/configuration/object_detectors#hailo-8) | `hailo8l` | `yolov9` |
| [Rockchip NPU](/configuration/object_detectors#rockchip-platform)\* | `rknn` | `yolov9` |

_\* Requires manual conversion in 0.16. Automatic conversion coming in 0.17._
_\* Requires manual conversion in 0.16. Automatic conversion available in 0.17 and later._

## Improving your model

73
docs/docs/troubleshooting/cpu.md
Normal file
@@ -0,0 +1,73 @@
---
id: cpu
title: High CPU Usage
---

High CPU usage can impact Frigate's performance and responsiveness. This guide outlines the most effective configuration changes to help reduce CPU consumption and optimize resource usage.

## 1. Hardware Acceleration for Video Decoding

**Priority: Critical**

Video decoding is one of the most CPU-intensive tasks in Frigate. While an AI accelerator handles object detection, it does not assist with decoding video streams. Hardware acceleration (hwaccel) offloads this work to your GPU or specialized video decode hardware, significantly reducing CPU usage and enabling you to support more cameras on the same hardware.

### Key Concepts

**Resolution & FPS Impact:** The decoding burden scales directly with resolution and frame rate. A 4K stream requires roughly 4 times the processing power of a 1080p stream at the same frame rate, and doubling the frame rate doubles the decode workload. This is why hardware acceleration becomes critical when working with multiple high-resolution cameras.

**Hardware Acceleration Benefits:** By using dedicated video decode hardware, you can:

- Significantly reduce CPU usage per camera stream
- Support 2-3x more cameras on the same hardware
- Free up CPU resources for motion detection and other Frigate processes
- Reduce system heat and power consumption

### Configuration

Frigate provides preset configurations for common hardware acceleration scenarios. Set up `hwaccel_args` based on your hardware in your [configuration](../configuration/reference) as described in the [getting started guide](../guides/getting_started).
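
For example, a minimal sketch for an Intel or AMD GPU using the VAAPI preset (pick the preset that matches your hardware from the configuration reference):

```yaml
ffmpeg:
  hwaccel_args: preset-vaapi
```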
### Troubleshooting Hardware Acceleration

If hardware acceleration isn't working:

1. Check Frigate logs for FFmpeg errors related to hwaccel
2. Verify the hardware device is accessible inside the container (see the sketch after this list)
3. Ensure your camera streams use H.264 or H.265 codecs (most common)
4. Try different presets if the automatic detection fails
5. Check that your GPU drivers are properly installed on the host system
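
For step 2, a quick hedged check, assuming a container named `frigate` and an Intel or AMD GPU passed through at `/dev/dri`:

```bash
# the render device (e.g. renderD128) should be listed if it was passed through correctly
docker exec frigate ls -l /dev/dri
```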
## 2. Detector Selection and Configuration

**Priority: Critical**

Choosing the right detector for your hardware is the single most important factor for detection performance. The detector is responsible for running the AI model that identifies objects in video frames. Different detector types have vastly different performance characteristics and hardware requirements, as detailed in the [hardware documentation](../frigate/hardware).

### Understanding Detector Performance

Frigate uses motion detection as a first-line check before running expensive object detection, as explained in the [motion detection documentation](../configuration/motion_detection). When motion is detected, Frigate creates a "region" (the green boxes in the debug viewer) and sends it to the detector. The detector's inference speed determines how many detections per second your system can handle.

**Calculating Detector Capacity:** Your detector has a finite capacity measured in detections per second. With an inference speed of 10ms, your detector can handle approximately 100 detections per second (1000ms / 10ms = 100). If your cameras collectively require more than this capacity, you'll experience delays, missed detections, or the system will fall behind.
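
The same arithmetic as a one-liner, assuming a measured inference speed of 10 ms:

```bash
# capacity (detections per second) = 1000 ms / inference speed in ms
python3 -c 'inference_ms = 10; print(1000 / inference_ms, "detections per second")'
```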
### Choosing the Right Detector

Different detectors have vastly different performance characteristics; see the expected performance for object detectors in [the hardware docs](../frigate/hardware).

### Multiple Detector Instances

When a single detector cannot keep up with your camera count, some detector types (`openvino`, `onnx`) allow you to define multiple detector instances to share the workload. This is particularly useful with GPU-based detectors that have sufficient VRAM to run multiple inference processes.

For detailed instructions on configuring multiple detectors, see the [Object Detectors documentation](../configuration/object_detectors).
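
A minimal sketch of what two instances might look like, assuming the `onnx` detector type; the instance names `onnx_0` and `onnx_1` are illustrative, and the exact options depend on your detector:

```yaml
detectors:
  onnx_0:
    type: onnx
  onnx_1:
    type: onnx
```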

**When to add a second detector:**

- Skipped FPS is consistently > 0 even during normal activity

### Model Selection and Optimization

The model you use significantly impacts detector performance. Frigate provides default models optimized for each detector type, but you can customize them as described in the [detector documentation](../configuration/object_detectors).

**Model Size Trade-offs:**

- Smaller models (320x320): Faster inference; Frigate is specifically optimized for a 320x320 size model.
- Larger models (640x640): Slower inference; can sometimes have higher accuracy on very large objects that take up a majority of the frame.
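
If you supply a custom model, its input size is declared in the model config; a hedged sketch of pinning the 320x320 size (see the detector documentation for the full set of fields):

```yaml
model:
  width: 320
  height: 320
```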
60
docs/docs/troubleshooting/dummy-camera.md
Normal file
@@ -0,0 +1,60 @@
---
id: dummy-camera
title: Analyzing Object Detection
---

When investigating object detection or tracking problems, it can be helpful to replay an exported video as a temporary "dummy" camera. This lets you reproduce issues locally, iterate on configuration (detections, zones, enrichment settings), and capture logs and clips for analysis.

## When to use

- Replaying an exported clip to reproduce incorrect detections
- Testing configuration changes (model settings, trackers, filters) against a known clip
- Gathering deterministic logs and recordings for debugging or issue reports

## Example Config

Place the clip you want to replay in a location accessible to Frigate (for example `/media/frigate/` or the repository `debug/` folder when developing). Then add a temporary camera to your `config/config.yml` like this:

```yaml
cameras:
  test:
    ffmpeg:
      inputs:
        - path: /media/frigate/car-stopping.mp4
          input_args: -re -stream_loop -1 -fflags +genpts
          roles:
            - detect
    detect:
      enabled: true
    record:
      enabled: false
    snapshots:
      enabled: false
```

- `-re -stream_loop -1` tells `ffmpeg` to play the file in realtime and loop indefinitely, which is useful for long debugging sessions.
- `-fflags +genpts` helps generate presentation timestamps when they are missing in the file.

## Steps

1. Export or copy the clip you want to replay to the Frigate host (e.g., `/media/frigate/` or `debug/clips/`); a sketch of one way to do this follows this list.
2. Add the temporary camera to `config/config.yml` (example above). Use a unique name such as `test` or `replay_camera` so it's easy to remove later.
   - If you're debugging a specific camera, copy the settings from that camera (frame rate, model/enrichment settings, zones, etc.) into the temporary camera so the replay closely matches the original environment. Leave `record` and `snapshots` disabled unless you are specifically debugging recording or snapshot behavior.
3. Restart Frigate.
4. Observe the Debug view in the UI and logs as the clip is replayed. Watch detections, zones, or any feature you're looking to debug, and note any errors in the logs to reproduce the issue.
5. Iterate on camera or enrichment settings (model, fps, zones, filters) and re-check the replay until the behavior is resolved.
6. Remove the temporary camera from your config after debugging to avoid spurious telemetry or recordings.
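
For step 1, one hedged way to stage the clip when Frigate runs in Docker (the container name `frigate` is assumed):

```bash
# copy the exported clip into the container's media path
docker cp ./car-stopping.mp4 frigate:/media/frigate/car-stopping.mp4
```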
## Variables to consider in object tracking

- The exported video will not always line up exactly with how it originally ran through Frigate (or even with the last loop). Different frames may be used on replay, which can change detections and tracking.
- Motion detection depends on the frames used; small frame shifts can change motion regions and therefore what gets passed to the detector.
- Object detection is not deterministic: models and post-processing can yield different results across runs, so you may not get identical detections or track IDs every time.

When debugging, treat the replay as a close approximation rather than a byte-for-byte replay. Capture multiple runs, enable recording if helpful, and examine logs and saved event clips to understand variability.

## Troubleshooting

- No video: verify the path is correct and accessible from the Frigate process/container.
- FFmpeg errors: check the log output for ffmpeg-specific flags and adjust `input_args` accordingly for your file/container. You may also need to disable hardware acceleration (`hwaccel_args: ""`) for the dummy camera.
- No detections: confirm the camera `roles` include `detect`, and model/detector configuration is enabled.
@@ -1,6 +1,6 @@
---
id: edgetpu
title: Troubleshooting EdgeTPU
title: EdgeTPU Errors
---

## USB Coral Not Detected
@@ -68,8 +68,7 @@ The USB Coral can become stuck and need to be restarted, this can happen for a n

The most common reason for the PCIe Coral not being detected is that the driver has not been installed. This process varies based on which OS and kernel are being run.

- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
- For some newer Linux distros (for example, Ubuntu 22.04+), https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
- In most cases https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.

## Attempting to load TPU as pci & Fatal Python error: Illegal instruction

@@ -1,6 +1,6 @@
---
id: gpu
title: Troubleshooting GPU
title: GPU Errors
---

## OpenVINO

@@ -1,6 +1,6 @@
---
id: memory
title: Memory Troubleshooting
title: Memory Usage
---

Frigate includes built-in memory profiling using [memray](https://bloomberg.github.io/memray/) to help diagnose memory issues. This feature allows you to profile specific Frigate modules to identify memory leaks, excessive allocations, or other memory-related problems.
@@ -9,8 +9,20 @@ Frigate includes built-in memory profiling using [memray](https://bloomberg.gith

Memory profiling is controlled via the `FRIGATE_MEMRAY_MODULES` environment variable. Set it to a comma-separated list of module names you want to profile:

```yaml
# docker-compose example
services:
  frigate:
    ...
    environment:
      - FRIGATE_MEMRAY_MODULES=frigate.embeddings,frigate.capture
```

```bash
export FRIGATE_MEMRAY_MODULES="frigate.review_segment_manager,frigate.capture"
# docker run example
docker run -e FRIGATE_MEMRAY_MODULES="frigate.embeddings" \
  ...
  --name frigate <frigate_image>
```
### Module Names
@@ -28,7 +40,7 @@ Frigate processes are named using a module-based naming scheme. Common module na
You can also specify the full process name (including camera-specific identifiers) if you want to profile a specific camera:

```bash
export FRIGATE_MEMRAY_MODULES="frigate.capture:front_door"
FRIGATE_MEMRAY_MODULES=frigate.capture:front_door
```

When you specify a module name (e.g., `frigate.capture`), all processes with that module prefix will be profiled. For example, `frigate.capture` will profile all camera capture processes.
@@ -55,11 +67,20 @@ After a process exits normally, you'll find HTML reports in `/config/memray_repo

If a process crashes or you want to generate a report from an existing binary file, you can manually create the HTML report:

- Run `memray` inside the Frigate container:

  ```bash
  memray flamegraph /config/memray_reports/<module_name>.bin
  docker-compose exec frigate memray flamegraph /config/memray_reports/<module_name>.bin
  # or
  docker exec -it <container_name_or_id> memray flamegraph /config/memray_reports/<module_name>.bin
  ```

This will generate an HTML file that you can open in your browser.
- You can also copy the `.bin` file to the host and run `memray` locally if you have it installed:

  ```bash
  docker cp <container_name_or_id>:/config/memray_reports/<module_name>.bin /tmp/
  memray flamegraph /tmp/<module_name>.bin
  ```

## Understanding the Reports

@@ -110,20 +131,4 @@ The interactive HTML reports allow you to:
- Check that memray is properly installed (included by default in Frigate)
- Verify the process actually started and ran (check process logs)

## Example Usage

```bash
# Enable profiling for review and capture modules
export FRIGATE_MEMRAY_MODULES="frigate.review_segment_manager,frigate.capture"

# Start Frigate
# ... let it run for a while ...

# Check for reports
ls -lh /config/memray_reports/

# If a process crashed, manually generate report
memray flamegraph /config/memray_reports/frigate_capture_front_door.bin
```

For more information about memray and interpreting reports, see the [official memray documentation](https://bloomberg.github.io/memray/).
@@ -1,6 +1,6 @@
---
id: recordings
title: Troubleshooting Recordings
title: Recordings Errors
---

## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?

@@ -170,7 +170,7 @@ const config: Config = {
        ],
      },
    ],
    copyright: `Copyright © ${new Date().getFullYear()} Frigate LLC`,
    copyright: `Copyright © ${new Date().getFullYear()} Frigate, Inc.`,
  },
},
plugins: [
@@ -129,9 +129,27 @@ const sidebars: SidebarsConfig = {
    Troubleshooting: [
      "troubleshooting/faqs",
      "troubleshooting/recordings",
      "troubleshooting/gpu",
      "troubleshooting/edgetpu",
      "troubleshooting/memory",
      "troubleshooting/dummy-camera",
      {
        type: "category",
        label: "Troubleshooting Hardware",
        link: {
          type: "generated-index",
          title: "Troubleshooting Hardware",
          description: "Troubleshooting Problems with Hardware",
        },
        items: ["troubleshooting/gpu", "troubleshooting/edgetpu"],
      },
      {
        type: "category",
        label: "Troubleshooting Resource Usage",
        link: {
          type: "generated-index",
          title: "Troubleshooting Resource Usage",
          description: "Troubleshooting issues with resource usage",
        },
        items: ["troubleshooting/cpu", "troubleshooting/memory"],
      },
    ],
    Development: [
      "development/contributing",
99
docs/static/frigate-api.yaml
vendored
@@ -17,20 +17,25 @@ paths:
      summary: Authenticate request
      description: |-
        Authenticates the current request based on proxy headers or JWT token.
        Returns user role and permissions for camera access.
        This endpoint verifies authentication credentials and manages JWT token refresh.
        On success, no JSON body is returned; authentication state is communicated via response headers and cookies.
      operationId: auth_auth_get
      responses:
        "200":
          description: Successful Response
          content:
            application/json:
              schema: {}
        "202":
          description: Authentication Accepted
          content:
            application/json:
              schema: {}
          description: Authentication Accepted (no response body, different headers depending on auth method)
          headers:
            remote-user:
              description: Authenticated username or "viewer" in proxy-only mode
              schema:
                type: string
            remote-role:
              description: Resolved role (e.g., admin, viewer, or custom)
              schema:
                type: string
            Set-Cookie:
              description: May include refreshed JWT cookie ("frigate-token") when applicable
              schema:
                type: string
        "401":
          description: Authentication Failed
  /profile:
@@ -611,6 +616,32 @@ paths:
          application/json:
            schema:
              $ref: "#/components/schemas/HTTPValidationError"
  /classification/attributes:
    get:
      tags:
        - Classification
      summary: Get custom classification attributes
      description: |-
        Returns custom classification attributes for a given object type.
        Only includes models with classification_type set to 'attribute'.
        By default returns a flat sorted list of all attribute labels.
        If group_by_model is true, returns attributes grouped by model name.
      operationId: get_custom_attributes_classification_attributes_get
      parameters:
        - name: object_type
          in: query
          schema:
            type: string
        - name: group_by_model
          in: query
          schema:
            type: boolean
            default: false
      responses:
        "200":
          description: Successful Response
        "422":
          description: Validation Error
  /classification/{name}/dataset:
    get:
      tags:
@@ -2907,6 +2938,42 @@ paths:
          application/json:
            schema:
              $ref: "#/components/schemas/HTTPValidationError"
  /events/{event_id}/attributes:
    post:
      tags:
        - Events
      summary: Set custom classification attributes
      description: |-
        Sets an event's custom classification attributes for all attribute-type
        models that apply to the event's object type.
        Returns a success message or an error if the event is not found.
      operationId: set_attributes_events__event_id__attributes_post
      parameters:
        - name: event_id
          in: path
          required: true
          schema:
            type: string
            title: Event Id
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: "#/components/schemas/EventsAttributesBody"
      responses:
        "200":
          description: Successful Response
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/GenericResponse"
        "422":
          description: Validation Error
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/HTTPValidationError"
  /events/{event_id}/description:
    post:
      tags:
@@ -4954,6 +5021,18 @@ components:
      required:
        - subLabel
      title: EventsSubLabelBody
    EventsAttributesBody:
      properties:
        attributes:
          type: object
          title: Attributes
          description: Object with model names as keys and attribute values
          additionalProperties:
            type: string
      type: object
      required:
        - attributes
      title: EventsAttributesBody
    ExportModel:
      properties:
        id:
10
docs/static/img/branding/LICENSE.md
vendored
@@ -1,12 +1,12 @@
# COPYRIGHT AND TRADEMARK NOTICE

The images, logos, and icons contained in this directory (the "Brand Assets") are
proprietary to Frigate LLC and are NOT covered by the MIT License governing the
proprietary to Frigate, Inc. and are NOT covered by the MIT License governing the
rest of this repository.

1. TRADEMARK STATUS
   The "Frigate" name and the accompanying logo are common law trademarks™ of
   Frigate LLC. Frigate LLC reserves all rights to these marks.
   Frigate, Inc. Frigate, Inc. reserves all rights to these marks.

2. LIMITED PERMISSION FOR USE
   Permission is hereby granted to display these Brand Assets strictly for the
@@ -17,9 +17,9 @@ rest of this repository.
3. RESTRICTIONS
   You may NOT:
   a. Use these Brand Assets to represent a derivative work (fork) as an official
      product of Frigate LLC.
      product of Frigate, Inc.
   b. Use these Brand Assets in a way that implies endorsement, sponsorship, or
      commercial affiliation with Frigate LLC.
      commercial affiliation with Frigate, Inc.
   c. Modify or alter the Brand Assets.

If you fork this repository with the intent to distribute a modified or competing
@@ -27,4 +27,4 @@ version of the software, you must replace these Brand Assets with your own
original content.

ALL RIGHTS RESERVED.
Copyright (c) 2025 Frigate LLC.
Copyright (c) 2026 Frigate, Inc.

BIN
docs/static/img/frigate-autotracking-example.gif
vendored
Binary file not shown. Before: 28 MiB | After: 12 MiB
@@ -143,17 +143,6 @@ def require_admin_by_default():
     return admin_checker


-def _is_authenticated(request: Request) -> bool:
-    """
-    Helper to determine if a request is from an authenticated user.
-
-    Returns True if the request has a valid authenticated user (not anonymous).
-    Port 5000 internal requests are considered anonymous despite having admin role.
-    """
-    username = request.headers.get("remote-user")
-    return username is not None and username != "anonymous"
-
-
 def allow_public():
     """
     Override dependency to allow unauthenticated access to an endpoint.
@@ -173,27 +162,24 @@ def allow_public():

 def allow_any_authenticated():
     """
-    Override dependency to allow any authenticated user (bypass admin requirement).
+    Override dependency to allow any request that passed through the /auth endpoint.

     Allows:
-    - Port 5000 internal requests (have admin role despite anonymous user)
-    - Any authenticated user with a real username (not "anonymous")
+    - Port 5000 internal requests (remote-user: "anonymous", remote-role: "admin")
+    - Authenticated users with JWT tokens (remote-user: username)
+    - Unauthenticated requests when auth is disabled (remote-user: "viewer")

     Rejects:
-    - Port 8971 requests with anonymous user (auth disabled, no proxy auth)
+    - Requests with no remote-user header (did not pass through /auth endpoint)

     Example:
         @router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())])
     """

     async def auth_checker(request: Request):
-        # Port 5000 requests have admin role and should be allowed
-        role = request.headers.get("remote-role")
-        if role == "admin":
-            return
-
-        # Otherwise require a real authenticated user (not anonymous)
-        if not _is_authenticated(request):
+        # Ensure a remote-user has been set by the /auth endpoint
+        username = request.headers.get("remote-user")
+        if username is None:
             raise HTTPException(status_code=401, detail="Authentication required")
         return
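To make the new gate concrete, here is a minimal, self-contained sketch of the dependency pattern this hunk establishes; the demo app, route, and wiring are illustrative and not part of the change:

```python
# Sketch only: mirrors the post-change auth_checker contract. Any request that
# passed through /auth carries a remote-user header (a username, "viewer", or
# "anonymous" for internal port 5000 traffic); requests without it are rejected.
from fastapi import Depends, FastAPI, HTTPException, Request

app = FastAPI()


def allow_any_authenticated():
    async def auth_checker(request: Request):
        if request.headers.get("remote-user") is None:
            raise HTTPException(status_code=401, detail="Authentication required")

    return auth_checker


@app.get("/demo", dependencies=[Depends(allow_any_authenticated())])
def demo():
    return {"ok": True}
```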
@@ -553,7 +539,32 @@ def resolve_role(
     "/auth",
     dependencies=[Depends(allow_public())],
     summary="Authenticate request",
-    description="Authenticates the current request based on proxy headers or JWT token. Returns user role and permissions for camera access.",
+    description=(
+        "Authenticates the current request based on proxy headers or JWT token. "
+        "This endpoint verifies authentication credentials and manages JWT token refresh. "
+        "On success, no JSON body is returned; authentication state is communicated via response headers and cookies."
+    ),
+    status_code=202,
+    responses={
+        202: {
+            "description": "Authentication Accepted (no response body)",
+            "headers": {
+                "remote-user": {
+                    "description": 'Authenticated username or "viewer" in proxy-only mode',
+                    "schema": {"type": "string"},
+                },
+                "remote-role": {
+                    "description": "Resolved role (e.g., admin, viewer, or custom)",
+                    "schema": {"type": "string"},
+                },
+                "Set-Cookie": {
+                    "description": "May include refreshed JWT cookie when applicable",
+                    "schema": {"type": "string"},
+                },
+            },
+        },
+        401: {"description": "Authentication Failed"},
+    },
 )
 def auth(request: Request):
     auth_config: AuthConfig = request.app.frigate_config.auth
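A hypothetical probe of the reworked /auth contract; the host, port, and TLS handling below are placeholders for illustration:

```python
# Probe the /auth endpoint and inspect the headers it now documents.
# "frigate.local" and port 8971 are assumptions; adjust for your deployment.
import requests

resp = requests.get("https://frigate.local:8971/api/auth", verify=False)
print(resp.status_code)                 # 202 when accepted, 401 otherwise
print(resp.headers.get("remote-user"))  # e.g. "viewer" in proxy-only mode
print(resp.headers.get("remote-role"))  # e.g. "admin", "viewer", or a custom role
```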
@@ -581,12 +592,12 @@ def auth(request: Request):
     # if auth is disabled, just apply the proxy header map and return success
     if not auth_config.enabled:
         # pass the user header value from the upstream proxy if a mapping is specified
-        # or use anonymous if none are specified
+        # or use viewer if none are specified
         user_header = proxy_config.header_map.user
         success_response.headers["remote-user"] = (
-            request.headers.get(user_header, default="anonymous")
+            request.headers.get(user_header, default="viewer")
             if user_header
-            else "anonymous"
+            else "viewer"
         )

     # parse header and resolve a valid role
@@ -698,10 +709,10 @@ def auth(request: Request):
     "/profile",
     dependencies=[Depends(allow_any_authenticated())],
     summary="Get user profile",
-    description="Returns the current authenticated user's profile including username, role, and allowed cameras.",
+    description="Returns the current authenticated user's profile including username, role, and allowed cameras. This endpoint requires authentication and returns information about the user's permissions.",
 )
 def profile(request: Request):
-    username = request.headers.get("remote-user", "anonymous")
+    username = request.headers.get("remote-user", "viewer")
     role = request.headers.get("remote-role", "viewer")

     all_camera_names = set(request.app.frigate_config.cameras.keys())
@@ -717,7 +728,7 @@ def profile(request: Request):
     "/logout",
     dependencies=[Depends(allow_public())],
     summary="Logout user",
-    description="Logs out the current user by clearing the session cookie.",
+    description="Logs out the current user by clearing the session cookie. After logout, subsequent requests will require re-authentication.",
 )
 def logout(request: Request):
     auth_config: AuthConfig = request.app.frigate_config.auth
@@ -733,7 +744,7 @@ limiter = Limiter(key_func=get_remote_addr)
     "/login",
     dependencies=[Depends(allow_public())],
     summary="Login with credentials",
-    description="Authenticates a user with username and password. Returns a JWT token as a secure HTTP-only cookie that can be used for subsequent API requests. The token can also be retrieved and used as a Bearer token in the Authorization header.",
+    description='Authenticates a user with username and password. Returns a JWT token as a secure HTTP-only cookie that can be used for subsequent API requests. The JWT token can also be retrieved from the response and used as a Bearer token in the Authorization header.\n\nExample using Bearer token:\n```\ncurl -H "Authorization: Bearer <token_value>" https://frigate_ip:8971/api/profile\n```',
 )
 @limiter.limit(limit_value=rateLimiter.get_limit)
 def login(request: Request, body: AppPostLoginBody):
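The same flow in Python rather than curl; the host and the body field names are assumptions here, not confirmed by this diff:

```python
# Hypothetical login flow. The cookie-based JWT is handled transparently by
# the session; field names in the JSON body are assumed to match AppPostLoginBody.
import requests

session = requests.Session()
session.post(
    "https://frigate.local:8971/api/login",
    json={"user": "admin", "password": "SuperSecret1!"},
    verify=False,
)
# Subsequent requests reuse the HTTP-only JWT cookie automatically.
profile = session.get("https://frigate.local:8971/api/profile", verify=False)
print(profile.json())
```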
@@ -776,7 +787,7 @@ def login(request: Request, body: AppPostLoginBody):
     "/users",
     dependencies=[Depends(require_role(["admin"]))],
     summary="Get all users",
-    description="Returns a list of all users with their usernames and roles. Requires admin role.",
+    description="Returns a list of all users with their usernames and roles. Requires admin role. Each user object contains the username and assigned role.",
 )
 def get_users():
     exports = (
@@ -789,7 +800,7 @@ def get_users():
     "/users",
     dependencies=[Depends(require_role(["admin"]))],
     summary="Create new user",
-    description="Creates a new user with the specified username, password, and role. Requires admin role. Password must meet strength requirements.",
+    description='Creates a new user with the specified username, password, and role. Requires admin role. Password must meet strength requirements: minimum 8 characters, at least one uppercase letter, at least one digit, and at least one special character (!@#$%^&*(),.?":{} |<>).',
 )
 def create_user(
     request: Request,
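The strength rules named in the new description can be checked client-side with a sketch like the following; this is an illustration of the documented rules, not the server's actual implementation:

```python
# Documented requirements: >= 8 chars, one uppercase letter, one digit,
# and one special character from !@#$%^&*(),.?":{}|<>
import re


def meets_strength_requirements(password: str) -> bool:
    return (
        len(password) >= 8
        and re.search(r"[A-Z]", password) is not None
        and re.search(r"\d", password) is not None
        and re.search(r'[!@#$%^&*(),.?":{}|<>]', password) is not None
    )


assert meets_strength_requirements("Frigate1!")
assert not meets_strength_requirements("password")
```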
@@ -823,7 +834,7 @@ def create_user(
     "/users/{username}",
     dependencies=[Depends(require_role(["admin"]))],
     summary="Delete user",
-    description="Deletes a user by username. The built-in admin user cannot be deleted. Requires admin role.",
+    description="Deletes a user by username. The built-in admin user cannot be deleted. Requires admin role. Returns success message or error if user not found.",
 )
 def delete_user(request: Request, username: str):
     # Prevent deletion of the built-in admin user
@@ -840,7 +851,7 @@ def delete_user(request: Request, username: str):
     "/users/{username}/password",
     dependencies=[Depends(allow_any_authenticated())],
     summary="Update user password",
-    description="Updates a user's password. Users can only change their own password unless they have admin role. Requires the current password to verify identity. Password must meet strength requirements (minimum 8 characters, uppercase letter, digit, and special character).",
+    description="Updates a user's password. Users can only change their own password unless they have admin role. Requires the current password to verify identity for non-admin users. Password must meet strength requirements: minimum 8 characters, at least one uppercase letter, at least one digit, and at least one special character (!@#$%^&*(),.?\":{} |<>). If user changes their own password, a new JWT cookie is automatically issued.",
 )
 async def update_password(
     request: Request,
@@ -868,13 +879,9 @@ async def update_password(
     except DoesNotExist:
         return JSONResponse(content={"message": "User not found"}, status_code=404)

-    # Require old_password when:
-    # 1. Non-admin user is changing another user's password (admin only action)
-    # 2. Any user is changing their own password
-    is_changing_own_password = current_username == username
-    is_non_admin = current_role != "admin"
-
-    if is_changing_own_password or is_non_admin:
+    # Require old_password when non-admin user is changing any password
+    # Admin users changing passwords do NOT need to provide the current password
+    if current_role != "admin":
         if not body.old_password:
             return JSONResponse(
                 content={"message": "Current password is required"},
@@ -926,7 +933,7 @@ async def update_password(
     "/users/{username}/role",
     dependencies=[Depends(require_role(["admin"]))],
     summary="Update user role",
-    description="Updates a user's role. The built-in admin user's role cannot be modified. Requires admin role.",
+    description="Updates a user's role. The built-in admin user's role cannot be modified. Requires admin role. Valid roles are defined in the configuration.",
 )
 async def update_role(
     request: Request,
@@ -31,6 +31,7 @@ from frigate.api.defs.response.generic_response import GenericResponse
 from frigate.api.defs.tags import Tags
 from frigate.config import FrigateConfig
 from frigate.config.camera import DetectConfig
+from frigate.config.classification import ObjectClassificationType
 from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Event
@@ -39,6 +40,7 @@ from frigate.util.classification import (
     collect_state_classification_examples,
     get_dataset_image_count,
     read_training_metadata,
+    write_training_metadata,
 )
 from frigate.util.file import get_event_snapshot
@@ -622,6 +624,59 @@ def get_classification_dataset(name: str):
     )


+@router.get(
+    "/classification/attributes",
+    summary="Get custom classification attributes",
+    description="""Returns custom classification attributes for a given object type.
+    Only includes models with classification_type set to 'attribute'.
+    By default returns a flat sorted list of all attribute labels.
+    If group_by_model is true, returns attributes grouped by model name.""",
+)
+def get_custom_attributes(
+    request: Request, object_type: str = None, group_by_model: bool = False
+):
+    models_with_attributes = {}
+
+    for (
+        model_key,
+        model_config,
+    ) in request.app.frigate_config.classification.custom.items():
+        if (
+            not model_config.enabled
+            or not model_config.object_config
+            or model_config.object_config.classification_type
+            != ObjectClassificationType.attribute
+        ):
+            continue
+
+        model_objects = getattr(model_config.object_config, "objects", []) or []
+        if object_type is not None and object_type not in model_objects:
+            continue
+
+        dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(model_key), "dataset")
+        if not os.path.exists(dataset_dir):
+            continue
+
+        attributes = []
+        for category_name in os.listdir(dataset_dir):
+            category_dir = os.path.join(dataset_dir, category_name)
+            if os.path.isdir(category_dir) and category_name != "none":
+                attributes.append(category_name)
+
+        if attributes:
+            model_name = model_config.name or model_key
+            models_with_attributes[model_name] = sorted(attributes)
+
+    if group_by_model:
+        return JSONResponse(content=models_with_attributes)
+    else:
+        # Flatten to a unique sorted list
+        all_attributes = set()
+        for attributes in models_with_attributes.values():
+            all_attributes.update(attributes)
+        return JSONResponse(content=sorted(list(all_attributes)))
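A hypothetical pair of calls against the new endpoint; the base URL, model name, and attribute labels below are placeholders:

```python
# Both response shapes of /classification/attributes, illustrated.
import requests

base = "http://frigate.local:5000/api"

# Flat, de-duplicated list of attribute labels for person-type models
flat = requests.get(
    f"{base}/classification/attributes", params={"object_type": "person"}
)
print(flat.json())  # e.g. ["backpack", "hat", "uniform"]

# Grouped by model name instead
grouped = requests.get(
    f"{base}/classification/attributes",
    params={"object_type": "person", "group_by_model": "true"},
)
print(grouped.json())  # e.g. {"person_attributes": ["backpack", "hat", "uniform"]}
```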

 @router.get(
     "/classification/{name}/train",
     summary="Get classification train images",
@@ -788,6 +843,12 @@ def rename_classification_category(
     try:
         os.rename(old_folder, new_folder)

+        # Mark dataset as ready to train by resetting training metadata
+        # This ensures the dataset is marked as changed after renaming
+        sanitized_name = sanitize_filename(name)
+        write_training_metadata(sanitized_name, 0)
+
         return JSONResponse(
             content=(
                 {
@@ -12,6 +12,7 @@ class EventsQueryParams(BaseModel):
     labels: Optional[str] = "all"
     sub_label: Optional[str] = "all"
     sub_labels: Optional[str] = "all"
+    attributes: Optional[str] = "all"
     zone: Optional[str] = "all"
     zones: Optional[str] = "all"
     limit: Optional[int] = 100
@@ -58,6 +59,8 @@ class EventsSearchQueryParams(BaseModel):
     limit: Optional[int] = 50
     cameras: Optional[str] = "all"
     labels: Optional[str] = "all"
+    sub_labels: Optional[str] = "all"
+    attributes: Optional[str] = "all"
     zones: Optional[str] = "all"
     after: Optional[float] = None
     before: Optional[float] = None
@@ -24,6 +24,13 @@ class EventsLPRBody(BaseModel):
     )


+class EventsAttributesBody(BaseModel):
+    attributes: List[str] = Field(
+        title="Selected classification attributes for the event",
+        default_factory=list,
+    )
+
+
 class EventsDescriptionBody(BaseModel):
     description: Union[str, None] = Field(title="The description of the event")
@@ -1,4 +1,4 @@
-from typing import Union
+from typing import Optional, Union

 from pydantic import BaseModel, Field
 from pydantic.json_schema import SkipJsonSchema
@@ -16,5 +16,5 @@ class ExportRecordingsBody(BaseModel):
     source: PlaybackSourceEnum = Field(
         default=PlaybackSourceEnum.recordings, title="Playback source"
     )
-    name: str = Field(title="Friendly name", default=None, max_length=256)
+    name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)
     image_path: Union[str, SkipJsonSchema[None]] = None
@@ -37,6 +37,7 @@ from frigate.api.defs.query.regenerate_query_parameters import (
     RegenerateQueryParameters,
 )
 from frigate.api.defs.request.events_body import (
+    EventsAttributesBody,
     EventsCreateBody,
     EventsDeleteBody,
     EventsDescriptionBody,
@@ -55,6 +56,7 @@ from frigate.api.defs.response.event_response import (
 from frigate.api.defs.response.generic_response import GenericResponse
 from frigate.api.defs.tags import Tags
 from frigate.comms.event_metadata_updater import EventMetadataTypeEnum
+from frigate.config.classification import ObjectClassificationType
 from frigate.const import CLIPS_DIR, TRIGGER_DIR
 from frigate.embeddings import EmbeddingsContext
 from frigate.models import Event, ReviewSegment, Timeline, Trigger
@@ -99,6 +101,8 @@ def events(
     if sub_labels == "all" and sub_label != "all":
         sub_labels = sub_label

+    attributes = unquote(params.attributes)
+
     zone = params.zone
     zones = params.zones
@@ -187,6 +191,17 @@ def events(
         sub_label_clause = reduce(operator.or_, sub_label_clauses)
         clauses.append((sub_label_clause))

+    if attributes != "all":
+        # Custom classification results are stored as data[model_name] = result_value
+        filtered_attributes = attributes.split(",")
+        attribute_clauses = []
+
+        for attr in filtered_attributes:
+            attribute_clauses.append(Event.data.cast("text") % f'*:"{attr}"*')
+
+        attribute_clause = reduce(operator.or_, attribute_clauses)
+        clauses.append(attribute_clause)
+
     if recognized_license_plate != "all":
         filtered_recognized_license_plates = recognized_license_plate.split(",")
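To make the attribute matching concrete: the clause matches the event's JSON data column serialized as text against a `*:"<attr>"*` wildcard pattern. In the illustration below, `fnmatch` stands in for the database's wildcard matching, and compact JSON separators are assumed:

```python
# Illustration only, not the actual query engine. Shows which serialized
# event data the *:"<attr>"* pattern would and would not hit.
import fnmatch
import json

event_data = {"person_attributes": "uniform", "top_score": 0.82}
serialized = json.dumps(event_data, separators=(",", ":"))

print(fnmatch.fnmatch(serialized, '*:"uniform"*'))   # True  -> event included
print(fnmatch.fnmatch(serialized, '*:"raincoat"*'))  # False -> event filtered out
```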
@@ -492,6 +507,8 @@ def events_search(
     # Filters
     cameras = params.cameras
     labels = params.labels
+    sub_labels = params.sub_labels
+    attributes = params.attributes
     zones = params.zones
     after = params.after
     before = params.before
@@ -566,6 +583,38 @@ def events_search(
     if labels != "all":
         event_filters.append((Event.label << labels.split(",")))

+    if sub_labels != "all":
+        # use matching so joined sub labels are included
+        # for example a sub label 'bob' would get events
+        # with sub labels 'bob' and 'bob, john'
+        sub_label_clauses = []
+        filtered_sub_labels = sub_labels.split(",")
+
+        if "None" in filtered_sub_labels:
+            filtered_sub_labels.remove("None")
+            sub_label_clauses.append((Event.sub_label.is_null()))
+
+        for label in filtered_sub_labels:
+            sub_label_clauses.append(
+                (Event.sub_label.cast("text") == label)
+            )  # include exact matches
+
+            # include this label when part of a list
+            sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label},*"))
+            sub_label_clauses.append((Event.sub_label.cast("text") % f"*, {label}*"))
+
+        event_filters.append((reduce(operator.or_, sub_label_clauses)))
+
+    if attributes != "all":
+        # Custom classification results are stored as data[model_name] = result_value
+        filtered_attributes = attributes.split(",")
+        attribute_clauses = []
+
+        for attr in filtered_attributes:
+            attribute_clauses.append(Event.data.cast("text") % f'*:"{attr}"*')
+
+        event_filters.append(reduce(operator.or_, attribute_clauses))
+
     if zones != "all":
         zone_clauses = []
         filtered_zones = zones.split(",")
@@ -1351,6 +1400,107 @@ async def set_plate(
     )


+@router.post(
+    "/events/{event_id}/attributes",
+    response_model=GenericResponse,
+    dependencies=[Depends(require_role(["admin"]))],
+    summary="Set custom classification attributes",
+    description=(
+        "Sets an event's custom classification attributes for all attribute-type "
+        "models that apply to the event's object type."
+    ),
+)
+async def set_attributes(
+    request: Request,
+    event_id: str,
+    body: EventsAttributesBody,
+):
+    try:
+        event: Event = Event.get(Event.id == event_id)
+        await require_camera_access(event.camera, request=request)
+    except DoesNotExist:
+        return JSONResponse(
+            content=({"success": False, "message": f"Event {event_id} not found."}),
+            status_code=404,
+        )
+
+    object_type = event.label
+    selected_attributes = set(body.attributes or [])
+    applied_updates: list[dict[str, str | float | None]] = []
+
+    for (
+        model_key,
+        model_config,
+    ) in request.app.frigate_config.classification.custom.items():
+        # Only apply to enabled attribute classifiers that target this object type
+        if (
+            not model_config.enabled
+            or not model_config.object_config
+            or model_config.object_config.classification_type
+            != ObjectClassificationType.attribute
+            or object_type not in (model_config.object_config.objects or [])
+        ):
+            continue
+
+        # Get available labels from dataset directory
+        dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(model_key), "dataset")
+        available_labels = set()
+
+        if os.path.exists(dataset_dir):
+            for category_name in os.listdir(dataset_dir):
+                category_dir = os.path.join(dataset_dir, category_name)
+                if os.path.isdir(category_dir):
+                    available_labels.add(category_name)
+
+        if not available_labels:
+            logger.warning(
+                "No dataset found for custom attribute model %s at %s",
+                model_key,
+                dataset_dir,
+            )
+            continue
+
+        # Find all selected attributes that apply to this model
+        model_name = model_config.name or model_key
+        matching_attrs = selected_attributes & available_labels
+
+        if matching_attrs:
+            # Publish updates for each selected attribute
+            for attr in matching_attrs:
+                request.app.event_metadata_updater.publish(
+                    (event_id, model_name, attr, 1.0),
+                    EventMetadataTypeEnum.attribute.value,
+                )
+                applied_updates.append(
+                    {"model": model_name, "label": attr, "score": 1.0}
+                )
+        else:
+            # Clear this model's attribute
+            request.app.event_metadata_updater.publish(
+                (event_id, model_name, None, None),
+                EventMetadataTypeEnum.attribute.value,
+            )
+            applied_updates.append({"model": model_name, "label": None, "score": None})
+
+    if len(applied_updates) == 0:
+        return JSONResponse(
+            content={
+                "success": False,
+                "message": "No matching attributes found for this object type.",
+            },
+            status_code=400,
+        )
+
+    return JSONResponse(
+        content={
+            "success": True,
+            "message": f"Updated {len(applied_updates)} attribute(s)",
+            "applied": applied_updates,
+        },
+        status_code=200,
+    )
+
+
 @router.post(
     "/events/{event_id}/description",
     response_model=GenericResponse,
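A hypothetical request against the new attributes endpoint; the event id, attribute label, and use of the unauthenticated internal port are placeholders, and the endpoint requires an admin role when called through the authenticated port:

```python
# Set (or clear, by omitting) attribute labels on an event.
import requests

resp = requests.post(
    "http://frigate.local:5000/api/events/1700000000.123456-abc123/attributes",
    json={"attributes": ["uniform"]},
)
print(resp.json())
# e.g. {"success": True, "message": "Updated 1 attribute(s)",
#       "applied": [{"model": "person_attributes", "label": "uniform", "score": 1.0}]}
```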
@@ -1935,7 +1935,7 @@ async def label_clip(request: Request, camera_name: str, label: str):
     try:
         event = event_query.get()

-        return await event_clip(request, event.id)
+        return await event_clip(request, event.id, 0)
     except DoesNotExist:
         return JSONResponse(
             content={"success": False, "message": "Event not found"}, status_code=404
@@ -100,6 +100,10 @@ class FrigateApp:
         )
         if (
             config.semantic_search.enabled
+            or any(
+                c.objects.genai.enabled or c.review.genai.enabled
+                for c in config.cameras.values()
+            )
             or config.lpr.enabled
             or config.face_recognition.enabled
             or len(config.classification.custom) > 0
@@ -225,7 +225,8 @@ class MqttClient(Communicator):
             "birdseye_mode",
             "review_alerts",
             "review_detections",
-            "genai",
+            "object_descriptions",
+            "review_descriptions",
         ]

         for name in self.config.cameras.keys():
@@ -388,7 +388,7 @@ class WebPushClient(Communicator):
             else:
                 title = base_title

-            message = payload["after"]["data"]["metadata"]["scene"]
+            message = payload["after"]["data"]["metadata"]["shortSummary"]
         else:
             zone_names = payload["after"]["data"]["zones"]
             formatted_zone_names = []
@@ -28,6 +28,7 @@ from frigate.util.builtin import (
     get_ffmpeg_arg_list,
 )
 from frigate.util.config import (
+    CURRENT_CONFIG_VERSION,
     StreamInfoRetriever,
     convert_area_to_pixels,
     find_config_file,
@@ -76,11 +77,12 @@ logger = logging.getLogger(__name__)

 yaml = YAML()

-DEFAULT_CONFIG = """
+DEFAULT_CONFIG = f"""
 mqtt:
   enabled: False

-cameras: {} # No cameras defined, UI wizard should be used
+cameras: {{}} # No cameras defined, UI wizard should be used
+version: {CURRENT_CONFIG_VERSION}
 """

 DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
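For reference, a sketch of what the f-string now renders, using a placeholder version (the real value comes from CURRENT_CONFIG_VERSION); note that `{{}}` inside an f-string emits a literal `{}`:

```python
# Rendering sketch: the version below is an assumption for illustration.
CURRENT_CONFIG_VERSION = "0.17-0"  # placeholder, not the actual constant

DEFAULT_CONFIG = f"""
mqtt:
  enabled: False

cameras: {{}} # No cameras defined, UI wizard should be used
version: {CURRENT_CONFIG_VERSION}
"""
print(DEFAULT_CONFIG)  # cameras: {} ... version: 0.17-0
```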
@@ -753,8 +755,7 @@ class FrigateConfig(FrigateBaseModel):
             if new_config and f.tell() == 0:
                 f.write(DEFAULT_CONFIG)
                 logger.info(
-                    "Created default config file, see the getting started docs \
-    for configuration https://docs.frigate.video/guides/getting_started"
+                    "Created default config file, see the getting started docs for configuration: https://docs.frigate.video/guides/getting_started"
                 )

             f.seek(0)
@@ -77,6 +77,9 @@ FFMPEG_HWACCEL_RKMPP = "preset-rkmpp"
 FFMPEG_HWACCEL_AMF = "preset-amd-amf"
 FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]

+# RKNN constants
+SUPPORTED_RK_SOCS = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
+
 # Regex constants

 REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"
@@ -374,6 +374,9 @@ class LicensePlateProcessingMixin:
                         combined_plate = re.sub(
                             pattern, replacement, combined_plate
                         )
+                        logger.debug(
+                            f"{camera}: Processing replace rule: '{pattern}' -> '{replacement}', result: '{combined_plate}'"
+                        )
                     except re.error as e:
                         logger.warning(
                             f"{camera}: Invalid regex in replace_rules '{pattern}': {e}"
@@ -381,7 +384,7 @@ class LicensePlateProcessingMixin:

                 if combined_plate != original_combined:
                     logger.debug(
-                        f"{camera}: Rules applied: '{original_combined}' -> '{combined_plate}'"
+                        f"{camera}: All rules applied: '{original_combined}' -> '{combined_plate}'"
                     )

             # Compute the combined area for qualifying boxes
@@ -131,8 +131,9 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):
                 },
             )

-            # Embed the description
-            self.embeddings.embed_description(event_id, transcription)
+            # Embed the description if semantic search is enabled
+            if self.config.semantic_search.enabled:
+                self.embeddings.embed_description(event_id, transcription)

         except DoesNotExist:
             logger.debug("No recording found for audio transcription post-processing")
@@ -86,7 +86,11 @@ class ObjectDescriptionProcessor(PostProcessorApi):
             and data["id"] not in self.early_request_sent
         ):
             if data["has_clip"] and data["has_snapshot"]:
-                event: Event = Event.get(Event.id == data["id"])
+                try:
+                    event: Event = Event.get(Event.id == data["id"])
+                except DoesNotExist:
+                    logger.error(f"Event {data['id']} not found")
+                    return

                 if (
                     not camera_config.objects.genai.objects
@@ -131,6 +135,8 @@ class ObjectDescriptionProcessor(PostProcessorApi):
                 )
             ):
                 self._process_genai_description(event, camera_config, thumbnail)
+            else:
+                self.cleanup_event(event.id)

     def __regenerate_description(self, event_id: str, source: str, force: bool) -> None:
         """Regenerate the description for an event."""
@@ -204,6 +210,17 @@ class ObjectDescriptionProcessor(PostProcessorApi):
             )
             return None

+    def cleanup_event(self, event_id: str) -> None:
+        """Clean up tracked event data to prevent memory leaks.
+
+        This should be called when an event ends, regardless of whether
+        genai processing is triggered.
+        """
+        if event_id in self.tracked_events:
+            del self.tracked_events[event_id]
+        if event_id in self.early_request_sent:
+            del self.early_request_sent[event_id]
+
     def _read_and_crop_snapshot(self, event: Event) -> bytes | None:
         """Read, decode, and crop the snapshot image."""
@@ -299,9 +316,8 @@ class ObjectDescriptionProcessor(PostProcessorApi):
             ),
         ).start()

-        # Delete tracked events based on the event_id
-        if event.id in self.tracked_events:
-            del self.tracked_events[event.id]
+        # Clean up tracked events and early request state
+        self.cleanup_event(event.id)

     def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
         """Embed the description for an event."""
@@ -92,7 +92,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):

         pixels_per_image = width * height
         tokens_per_image = pixels_per_image / 1250
-        prompt_tokens = 3500
+        prompt_tokens = 3800
         response_tokens = 300
         available_tokens = context_size - prompt_tokens - response_tokens
         max_frames = int(available_tokens / tokens_per_image)
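A worked example of the revised frame budget; the context size and frame resolution below are assumptions for illustration:

```python
# How the larger prompt reserve shrinks the frame budget.
context_size = 16000           # assumed model context, in tokens
width, height = 640, 360       # assumed frame resolution

pixels_per_image = width * height             # 230_400
tokens_per_image = pixels_per_image / 1250    # ~184.3 tokens per frame
available_tokens = context_size - 3800 - 300  # prompt + response reserve

print(int(available_tokens / tokens_per_image))  # 64 frames (66 with the old 3500)
```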
@@ -311,6 +311,7 @@ class ReviewDescriptionProcessor(PostProcessorApi):
             start_ts,
             end_ts,
             events_with_context,
+            self.config.review.genai.preferred_language,
             self.config.review.genai.debug_save_thumbnails,
         )
     else:
@@ -8,6 +8,9 @@ class ReviewMetadata(BaseModel):
     scene: str = Field(
         description="A comprehensive description of the setting and entities, including relevant context and plausible inferences if supported by visual evidence."
     )
+    shortSummary: str = Field(
+        description="A brief 2-sentence summary of the scene, suitable for notifications. Should capture the key activity and context without full detail."
+    )
     confidence: float = Field(
         description="A float between 0 and 1 representing your overall confidence in this analysis."
     )
@@ -13,7 +13,7 @@ from frigate.comms.event_metadata_updater import (
 )
 from frigate.config import FrigateConfig
 from frigate.const import MODEL_CACHE_DIR
-from frigate.log import redirect_output_to_logger
+from frigate.log import suppress_stderr_during
 from frigate.util.object import calculate_region

 from ..types import DataProcessorMetrics
@@ -80,13 +80,14 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
         except Exception as e:
             logger.error(f"Failed to download {path}: {e}")

-    @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
-        self.interpreter = Interpreter(
-            model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
-            num_threads=2,
-        )
-        self.interpreter.allocate_tensors()
+        # Suppress TFLite delegate creation messages that bypass Python logging
+        with suppress_stderr_during("tflite_interpreter_init"):
+            self.interpreter = Interpreter(
+                model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
+                num_threads=2,
+            )
+            self.interpreter.allocate_tensors()
         self.tensor_input_details = self.interpreter.get_input_details()
         self.tensor_output_details = self.interpreter.get_output_details()
@@ -21,7 +21,7 @@ from frigate.config.classification import (
     ObjectClassificationType,
 )
 from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR
-from frigate.log import redirect_output_to_logger
+from frigate.log import suppress_stderr_during
 from frigate.types import TrackedObjectUpdateTypesEnum
 from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels
 from frigate.util.object import box_overlaps, calculate_region
@@ -52,7 +52,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         self.requestor = requestor
         self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
         self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
-        self.interpreter: Interpreter | None = None
+        self.interpreter: Interpreter = None
         self.tensor_input_details: dict[str, Any] | None = None
         self.tensor_output_details: dict[str, Any] | None = None
         self.labelmap: dict[int, str] = {}
@@ -72,8 +72,12 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         self.last_run = datetime.datetime.now().timestamp()
         self.__build_detector()

-    @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
+        try:
+            from tflite_runtime.interpreter import Interpreter
+        except ModuleNotFoundError:
+            from tensorflow.lite.python.interpreter import Interpreter
+
         model_path = os.path.join(self.model_dir, "model.tflite")
         labelmap_path = os.path.join(self.model_dir, "labelmap.txt")

@@ -84,11 +88,13 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
             self.labelmap = {}
             return

-        self.interpreter = Interpreter(
-            model_path=model_path,
-            num_threads=2,
-        )
-        self.interpreter.allocate_tensors()
+        # Suppress TFLite delegate creation messages that bypass Python logging
+        with suppress_stderr_during("tflite_interpreter_init"):
+            self.interpreter = Interpreter(
+                model_path=model_path,
+                num_threads=2,
+            )
+            self.interpreter.allocate_tensors()
         self.tensor_input_details = self.interpreter.get_input_details()
         self.tensor_output_details = self.interpreter.get_output_details()
         self.labelmap = load_labels(labelmap_path, prefill=0)
@@ -224,28 +230,34 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
         if not should_run:
             return

-        x, y, x2, y2 = calculate_region(
-            frame.shape,
-            crop[0],
-            crop[1],
-            crop[2],
-            crop[3],
-            224,
-            1.0,
-        )
-
         rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
-        frame = rgb[
-            y:y2,
-            x:x2,
-        ]
+        height, width = rgb.shape[:2]

-        if frame.shape != (224, 224):
-            try:
-                resized_frame = cv2.resize(frame, (224, 224))
-            except Exception:
-                logger.warning("Failed to resize image for state classification")
-                return
+        # Convert normalized crop coordinates to pixel values
+        x1 = int(camera_config.crop[0] * width)
+        y1 = int(camera_config.crop[1] * height)
+        x2 = int(camera_config.crop[2] * width)
+        y2 = int(camera_config.crop[3] * height)
+
+        # Clip coordinates to frame boundaries
+        x1 = max(0, min(x1, width))
+        y1 = max(0, min(y1, height))
+        x2 = max(0, min(x2, width))
+        y2 = max(0, min(y2, height))
+
+        if x2 <= x1 or y2 <= y1:
+            logger.warning(
+                f"Invalid crop coordinates for {camera}: [{x1}, {y1}, {x2}, {y2}]"
+            )
+            return
+
+        frame = rgb[y1:y2, x1:x2]
+
+        try:
+            resized_frame = cv2.resize(frame, (224, 224))
+        except Exception:
+            logger.warning("Failed to resize image for state classification")
+            return

         if self.interpreter is None:
             # When interpreter is None, always save (score is 0.0, which is < 1.0)
@@ -345,7 +357,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
         self.model_config = model_config
         self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
         self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
-        self.interpreter: Interpreter | None = None
+        self.interpreter: Interpreter = None
         self.sub_label_publisher = sub_label_publisher
         self.requestor = requestor
         self.tensor_input_details: dict[str, Any] | None = None
@@ -366,7 +378,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):

         self.__build_detector()

-    @redirect_output_to_logger(logger, logging.DEBUG)
     def __build_detector(self) -> None:
         model_path = os.path.join(self.model_dir, "model.tflite")
         labelmap_path = os.path.join(self.model_dir, "labelmap.txt")
@@ -378,11 +389,13 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
             self.labelmap = {}
             return

-        self.interpreter = Interpreter(
-            model_path=model_path,
-            num_threads=2,
-        )
-        self.interpreter.allocate_tensors()
+        # Suppress TFLite delegate creation messages that bypass Python logging
+        with suppress_stderr_during("tflite_interpreter_init"):
+            self.interpreter = Interpreter(
+                model_path=model_path,
+                num_threads=2,
+            )
+            self.interpreter.allocate_tensors()
         self.tensor_input_details = self.interpreter.get_input_details()
         self.tensor_output_details = self.interpreter.get_output_details()
         self.labelmap = load_labels(labelmap_path, prefill=0)
@@ -508,6 +521,13 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
                 0.0,
                 max_files=save_attempts,
             )
+
+            # Still track history even when model doesn't exist to respect MAX_OBJECT_CLASSIFICATIONS
+            # Add an entry with "unknown" label so the history limit is enforced
+            if object_id not in self.classification_history:
+                self.classification_history[object_id] = []
+
+            self.classification_history[object_id].append(("unknown", 0.0, now))
             return

         input = np.expand_dims(resized_crop, axis=0)
@@ -649,5 +669,5 @@ def write_classification_attempt(

         if len(files) > max_files:
             os.unlink(os.path.join(folder, files[-1]))
-    except FileNotFoundError:
+    except (FileNotFoundError, OSError):
         pass
@@ -131,6 +131,7 @@ class ONNXModelRunner(BaseModelRunner):

         return model_type in [
             EnrichmentModelTypeEnum.paddleocr.value,
+            EnrichmentModelTypeEnum.yolov9_license_plate.value,
             EnrichmentModelTypeEnum.jina_v1.value,
             EnrichmentModelTypeEnum.jina_v2.value,
             EnrichmentModelTypeEnum.facenet.value,
@@ -138,8 +139,31 @@ class ONNXModelRunner(BaseModelRunner):
             ModelTypeEnum.dfine.value,
         ]

-    def __init__(self, ort: ort.InferenceSession):
+    @staticmethod
+    def is_concurrent_model(model_type: str | None) -> bool:
+        """Check if model requires thread locking for concurrent inference.
+
+        Some models (like JinaV2) share one runner between text and vision embeddings
+        called from different threads, requiring thread synchronization.
+        """
+        if not model_type:
+            return False
+
+        # Import here to avoid circular imports
+        from frigate.embeddings.types import EnrichmentModelTypeEnum
+
+        return model_type == EnrichmentModelTypeEnum.jina_v2.value
+
+    def __init__(self, ort: ort.InferenceSession, model_type: str | None = None):
         self.ort = ort
+        self.model_type = model_type
+
+        # Thread lock to prevent concurrent inference (needed for JinaV2 which shares
+        # one runner between text and vision embeddings called from different threads)
+        if self.is_concurrent_model(model_type):
+            self._inference_lock = threading.Lock()
+        else:
+            self._inference_lock = None

     def get_input_names(self) -> list[str]:
         return [input.name for input in self.ort.get_inputs()]
@@ -149,6 +173,10 @@ class ONNXModelRunner(BaseModelRunner):
         return self.ort.get_inputs()[0].shape[3]

     def run(self, input: dict[str, Any]) -> Any | None:
+        if self._inference_lock:
+            with self._inference_lock:
+                return self.ort.run(None, input)
+
         return self.ort.run(None, input)
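A minimal sketch of the serialization this lock provides; the dummy session below stands in for an ort.InferenceSession and is purely illustrative:

```python
# Two threads driving one runner: with the lock, only one thread is ever
# inside the session at a time, mirroring ONNXModelRunner.run for jina_v2.
import threading


class DummySession:
    def run(self, _outputs, inputs):
        return [sum(inputs["x"])]


lock = threading.Lock()
session = DummySession()


def run_locked(inputs):
    with lock:
        return session.run(None, inputs)


threads = [
    threading.Thread(target=run_locked, args=({"x": [i, i + 1]},)) for i in range(4)
]
for t in threads:
    t.start()
for t in threads:
    t.join()
```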
@@ -169,6 +197,7 @@ class CudaGraphRunner(BaseModelRunner):

         return model_type not in [
             ModelTypeEnum.yolonas.value,
+            ModelTypeEnum.dfine.value,
             EnrichmentModelTypeEnum.paddleocr.value,
             EnrichmentModelTypeEnum.jina_v1.value,
             EnrichmentModelTypeEnum.jina_v2.value,
@@ -574,5 +603,6 @@ def get_optimized_runner(
             ),
             providers=providers,
             provider_options=options,
-        )
+        ),
+        model_type=model_type,
     )
@@ -5,7 +5,7 @@ from typing_extensions import Literal

 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import BaseDetectorConfig
-from frigate.log import redirect_output_to_logger
+from frigate.log import suppress_stderr_during

 from ..detector_utils import tflite_detect_raw, tflite_init

@@ -28,12 +28,13 @@ class CpuDetectorConfig(BaseDetectorConfig):
 class CpuTfl(DetectionApi):
     type_key = DETECTOR_KEY

-    @redirect_output_to_logger(logger, logging.DEBUG)
     def __init__(self, detector_config: CpuDetectorConfig):
-        interpreter = Interpreter(
-            model_path=detector_config.model.path,
-            num_threads=detector_config.num_threads or 3,
-        )
+        # Suppress TFLite delegate creation messages that bypass Python logging
+        with suppress_stderr_during("tflite_interpreter_init"):
+            interpreter = Interpreter(
+                model_path=detector_config.model.path,
+                num_threads=detector_config.num_threads or 3,
+            )

         tflite_init(self, interpreter)
@@ -8,7 +8,7 @@ import cv2
 import numpy as np
 from pydantic import Field

-from frigate.const import MODEL_CACHE_DIR
+from frigate.const import MODEL_CACHE_DIR, SUPPORTED_RK_SOCS
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detection_runners import RKNNModelRunner
 from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
@@ -19,8 +19,6 @@ logger = logging.getLogger(__name__)

 DETECTOR_KEY = "rknn"

-supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
-
 supported_models = {
     ModelTypeEnum.yologeneric: "^frigate-fp16-yolov9-[cemst]$",
     ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$",
@@ -82,9 +80,9 @@ class Rknn(DetectionApi):
         except FileNotFoundError:
             raise Exception("Make sure to run docker in privileged mode.")

-        if soc not in supported_socs:
+        if soc not in SUPPORTED_RK_SOCS:
             raise Exception(
-                f"Your SoC is not supported. Your SoC is: {soc}. Currently these SoCs are supported: {supported_socs}."
+                f"Your SoC is not supported. Your SoC is: {soc}. Currently these SoCs are supported: {SUPPORTED_RK_SOCS}."
             )

         return soc
@@ -203,7 +203,9 @@ class EmbeddingMaintainer(threading.Thread):
         # post processors
         self.post_processors: list[PostProcessorApi] = []

-        if any(c.review.genai.enabled_in_config for c in self.config.cameras.values()):
+        if self.genai_client is not None and any(
+            c.review.genai.enabled_in_config for c in self.config.cameras.values()
+        ):
             self.post_processors.append(
                 ReviewDescriptionProcessor(
                     self.config, self.requestor, self.metrics, self.genai_client
@@ -244,7 +246,9 @@ class EmbeddingMaintainer(threading.Thread):
             )
             self.post_processors.append(semantic_trigger_processor)

-        if any(c.objects.genai.enabled_in_config for c in self.config.cameras.values()):
+        if self.genai_client is not None and any(
+            c.objects.genai.enabled_in_config for c in self.config.cameras.values()
+        ):
             self.post_processors.append(
                 ObjectDescriptionProcessor(
                     self.config,
@@ -522,6 +526,8 @@ class EmbeddingMaintainer(threading.Thread):
             )
         elif isinstance(processor, ObjectDescriptionProcessor):
             if not updated_db:
+                # Still need to cleanup tracked events even if not processing
+                processor.cleanup_event(event_id)
                 continue

             processor.process_data(
@@ -627,7 +633,7 @@ class EmbeddingMaintainer(threading.Thread):

         camera, frame_name, _, _, motion_boxes, _ = data

-        if not camera or len(motion_boxes) == 0:
+        if not camera or len(motion_boxes) == 0 or camera not in self.config.cameras:
             return

         camera_config = self.config.cameras[camera]
@@ -8,7 +8,7 @@ import numpy as np
 from frigate.const import MODEL_CACHE_DIR
 from frigate.detectors.detection_runners import get_optimized_runner
 from frigate.embeddings.types import EnrichmentModelTypeEnum
-from frigate.log import redirect_output_to_logger
+from frigate.log import suppress_stderr_during
 from frigate.util.downloader import ModelDownloader

 from ...config import FaceRecognitionConfig
@@ -57,17 +57,18 @@ class FaceNetEmbedding(BaseEmbedding):
             self._load_model_and_utils()
             logger.debug(f"models are already downloaded for {self.model_name}")

-    @redirect_output_to_logger(logger, logging.DEBUG)
     def _load_model_and_utils(self):
         if self.runner is None:
             if self.downloader:
                 self.downloader.wait_for_download()

-            self.runner = Interpreter(
-                model_path=os.path.join(MODEL_CACHE_DIR, "facedet/facenet.tflite"),
-                num_threads=2,
-            )
-            self.runner.allocate_tensors()
+            # Suppress TFLite delegate creation messages that bypass Python logging
+            with suppress_stderr_during("tflite_interpreter_init"):
+                self.runner = Interpreter(
+                    model_path=os.path.join(MODEL_CACHE_DIR, "facedet/facenet.tflite"),
+                    num_threads=2,
+                )
+                self.runner.allocate_tensors()
             self.tensor_input_details = self.runner.get_input_details()
             self.tensor_output_details = self.runner.get_output_details()
@@ -186,6 +186,9 @@ class JinaV1ImageEmbedding(BaseEmbedding):
                 download_func=self._download_model,
             )
             self.downloader.ensure_model_files()
+            # Avoid lazy loading in worker threads: block until downloads complete
+            # and load the model on the main thread during initialization.
+            self._load_model_and_utils()
         else:
             self.downloader = None
             ModelDownloader.mark_files_state(
@@ -3,6 +3,7 @@
 import io
 import logging
 import os
+import threading

 import numpy as np
 from PIL import Image
@@ -53,6 +54,11 @@ class JinaV2Embedding(BaseEmbedding):
         self.tokenizer = None
         self.image_processor = None
         self.runner = None
+
+        # Lock to prevent concurrent calls (text and vision share this instance)
+        self._call_lock = threading.Lock()
+
         # download the model and tokenizer
         files_names = list(self.download_urls.keys()) + [self.tokenizer_file]
         if not all(
             os.path.exists(os.path.join(self.download_path, n)) for n in files_names
@@ -65,6 +71,9 @@ class JinaV2Embedding(BaseEmbedding):
                 download_func=self._download_model,
             )
             self.downloader.ensure_model_files()
+            # Avoid lazy loading in worker threads: block until downloads complete
+            # and load the model on the main thread during initialization.
+            self._load_model_and_utils()
         else:
             self.downloader = None
             ModelDownloader.mark_files_state(
@@ -197,37 +206,40 @@ class JinaV2Embedding(BaseEmbedding):
     def __call__(
         self, inputs: list[str] | list[Image.Image] | list[str], embedding_type=None
     ) -> list[np.ndarray]:
-        self.embedding_type = embedding_type
-        if not self.embedding_type:
-            raise ValueError(
-                "embedding_type must be specified either in __init__ or __call__"
-            )
+        # Lock the entire call to prevent race conditions when text and vision
+        # embeddings are called concurrently from different threads
+        with self._call_lock:
+            self.embedding_type = embedding_type
+            if not self.embedding_type:
+                raise ValueError(
+                    "embedding_type must be specified either in __init__ or __call__"
+                )

-        self._load_model_and_utils()
-        processed = self._preprocess_inputs(inputs)
-        batch_size = len(processed)
+            self._load_model_and_utils()
+            processed = self._preprocess_inputs(inputs)
+            batch_size = len(processed)

-        # Prepare ONNX inputs with matching batch sizes
-        onnx_inputs = {}
-        if self.embedding_type == "text":
-            onnx_inputs["input_ids"] = np.stack([x[0] for x in processed])
-            onnx_inputs["pixel_values"] = np.zeros(
-                (batch_size, 3, 512, 512), dtype=np.float32
-            )
-        elif self.embedding_type == "vision":
-            onnx_inputs["input_ids"] = np.zeros((batch_size, 16), dtype=np.int64)
-            onnx_inputs["pixel_values"] = np.stack([x[0] for x in processed])
-        else:
-            raise ValueError("Invalid embedding type")
+            # Prepare ONNX inputs with matching batch sizes
+            onnx_inputs = {}
+            if self.embedding_type == "text":
+                onnx_inputs["input_ids"] = np.stack([x[0] for x in processed])
+                onnx_inputs["pixel_values"] = np.zeros(
+                    (batch_size, 3, 512, 512), dtype=np.float32
+                )
+            elif self.embedding_type == "vision":
+                onnx_inputs["input_ids"] = np.zeros((batch_size, 16), dtype=np.int64)
+                onnx_inputs["pixel_values"] = np.stack([x[0] for x in processed])
+            else:
+                raise ValueError("Invalid embedding type")

-        # Run inference
-        outputs = self.runner.run(onnx_inputs)
-        if self.embedding_type == "text":
-            embeddings = outputs[2]  # text embeddings
-        elif self.embedding_type == "vision":
-            embeddings = outputs[3]  # image embeddings
-        else:
-            raise ValueError("Invalid embedding type")
+            # Run inference
+            outputs = self.runner.run(onnx_inputs)
+            if self.embedding_type == "text":
+                embeddings = outputs[2]  # text embeddings
+            elif self.embedding_type == "vision":
+                embeddings = outputs[3]  # image embeddings
+            else:
+                raise ValueError("Invalid embedding type")

-        embeddings = self._postprocess_outputs(embeddings)
-        return [embedding for embedding in embeddings]
+            embeddings = self._postprocess_outputs(embeddings)
+            return [embedding for embedding in embeddings]
@@ -34,7 +34,7 @@ from frigate.data_processing.real_time.audio_transcription import (
     AudioTranscriptionRealTimeProcessor,
 )
 from frigate.ffmpeg_presets import parse_preset_input
-from frigate.log import LogPipe, redirect_output_to_logger
+from frigate.log import LogPipe, suppress_stderr_during
 from frigate.object_detection.base import load_labels
 from frigate.util.builtin import get_ffmpeg_arg_list
 from frigate.util.process import FrigateProcess
@@ -367,17 +367,17 @@ class AudioEventMaintainer(threading.Thread):


 class AudioTfl:
-    @redirect_output_to_logger(logger, logging.DEBUG)
     def __init__(self, stop_event: threading.Event, num_threads=2):
         self.stop_event = stop_event
         self.num_threads = num_threads
         self.labels = load_labels("/audio-labelmap.txt", prefill=521)
-        self.interpreter = Interpreter(
-            model_path="/cpu_audio_model.tflite",
-            num_threads=self.num_threads,
-        )
-
-        self.interpreter.allocate_tensors()
+        # Suppress TFLite delegate creation messages that bypass Python logging
+        with suppress_stderr_during("tflite_interpreter_init"):
+            self.interpreter = Interpreter(
+                model_path="/cpu_audio_model.tflite",
+                num_threads=self.num_threads,
+            )
+            self.interpreter.allocate_tensors()

         self.tensor_input_details = self.interpreter.get_input_details()
         self.tensor_output_details = self.interpreter.get_output_details()
@@ -46,7 +46,7 @@ def should_update_state(prev_event: Event, current_event: Event) -> bool:
     if prev_event["sub_label"] != current_event["sub_label"]:
         return True

-    if len(prev_event["current_zones"]) < len(current_event["current_zones"]):
+    if set(prev_event["current_zones"]) != set(current_event["current_zones"]):
         return True

     return False
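Why the set comparison matters: an equal-length zone change, or a zone exit, was invisible to the old length check. For example:

```python
# The object moved from one zone to another; the zone count did not grow.
prev_zones = ["yard"]
current_zones = ["driveway"]

print(len(prev_zones) < len(current_zones))   # False -> old check skipped the update
print(set(prev_zones) != set(current_zones))  # True  -> new check publishes the update
```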
@@ -153,7 +153,7 @@ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
     FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {3} {1} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {2}",
     "preset-intel-qsv-h264": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
     "preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v main -level:v 4.1 -async_depth:v 1 {2}",
-    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -hwaccel cuda -hwaccel_device {3} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
+    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
     "preset-jetson-h264": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
     "preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile main {2}",
     FFMPEG_HWACCEL_RKMPP: "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",
@@ -101,6 +101,7 @@ When forming your description:
 Your response MUST be a flat JSON object with:
 - `title` (string): A concise, direct title that describes the primary action or event in the sequence, not just what you literally see. Use spatial context when available to make titles more meaningful. When multiple objects/actions are present, prioritize whichever is most prominent or occurs first. Use names from "Objects in Scene" based on what you visually observe. If you see both a name and an unidentified object of the same type but visually observe only one person/object, use ONLY the name. Examples: "Joe walking dog", "Person taking out trash", "Vehicle arriving in driveway", "Joe accessing vehicle", "Person leaving porch for driveway".
 - `scene` (string): A narrative description of what happens across the sequence from start to finish, in chronological order. Start by describing how the sequence begins, then describe the progression of events. **Describe all significant movements and actions in the order they occur.** For example, if a vehicle arrives and then a person exits, describe both actions sequentially. **Only describe actions you can actually observe happening in the frames provided.** Do not infer or assume actions that aren't visible (e.g., if you see someone walking but never see them sit, don't say they sat down). Include setting, detected objects, and their observable actions. Avoid speculation or filling in assumed behaviors. Your description should align with and support the threat level you assign.
+- `shortSummary` (string): A brief 2-sentence summary of the scene, suitable for notifications. Should capture the key activity and context without full detail. This should be a condensed version of the scene description above.
 - `confidence` (float): 0-1 confidence in your analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous. Lower confidence when the sequence is unclear, objects are partially obscured, or context is ambiguous.
 - `potential_threat_level` (integer): 0, 1, or 2 as defined in "Normal Activity Patterns for This Property" above. Your threat level must be consistent with your scene description and the guidance above.
 {get_concern_prompt()}
@@ -178,6 +179,7 @@ Each line represents a detection state, not necessarily unique individuals. Pare
     start_ts: float,
     end_ts: float,
     events: list[dict[str, Any]],
+    preferred_language: str | None,
     debug_save: bool,
 ) -> str | None:
     """Generate a summary of review item descriptions over a period of time."""
@@ -191,6 +193,8 @@ Input format: Each event is a JSON object with:
 - "title", "scene", "confidence", "potential_threat_level" (0-2), "other_concerns", "camera", "time", "start_time", "end_time"
 - "context": array of related events from other cameras that occurred during overlapping time periods

+**Note: Use the "scene" field for event descriptions in the report. Ignore any "shortSummary" field if present.**
+
 Report Structure - Use this EXACT format:

 # Security Summary - {time_range}
@@ -232,6 +236,9 @@ Guidelines:
     for event in events:
         timeline_summary_prompt += f"\n{event}\n"

+    if preferred_language:
+        timeline_summary_prompt += f"\nProvide your answer in {preferred_language}"
+
     if debug_save:
         with open(
             os.path.join(
@@ -3,7 +3,7 @@
 import logging
 from typing import Any, Optional

-from httpx import TimeoutException
+from httpx import RemoteProtocolError, TimeoutException
 from ollama import Client as ApiClient
 from ollama import ResponseError

@@ -68,7 +68,12 @@ class OllamaClient(GenAIClient):
                 f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}"
             )
             return result["response"].strip()
-        except (TimeoutException, ResponseError, ConnectionError) as e:
+        except (
+            TimeoutException,
+            ResponseError,
+            RemoteProtocolError,
+            ConnectionError,
+        ) as e:
             logger.warning("Ollama returned an error: %s", str(e))
             return None
|
||||
|
||||
|
||||
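For context: httpx raises RemoteProtocolError when the server closes the connection before a complete response arrives, which can happen if Ollama is restarted or killed mid-generation, so it belongs in the same recovery path as timeouts. A minimal sketch of the pattern (the client wiring here is illustrative, not Frigate's exact code):

    from httpx import RemoteProtocolError, TimeoutException
    from ollama import Client as ApiClient
    from ollama import ResponseError

    def safe_generate(client: ApiClient, model: str, prompt: str) -> str | None:
        # Degrade to None on transport/server errors instead of crashing the caller.
        try:
            result = client.generate(model=model, prompt=prompt)
            return result["response"].strip()
        except (TimeoutException, ResponseError, RemoteProtocolError, ConnectionError):
            return None
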
@@ -80,10 +80,15 @@ def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None:
log_levels = {
"absl": LogLevel.error,
"httpx": LogLevel.error,
"h5py": LogLevel.error,
"keras": LogLevel.error,
"matplotlib": LogLevel.error,
"tensorflow": LogLevel.error,
"tensorflow.python": LogLevel.error,
"werkzeug": LogLevel.error,
"ws4py": LogLevel.error,
"PIL": LogLevel.warning,
"numba": LogLevel.warning,
**log_levels,
}

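A note on what a mapping like this feeds into: each key is a logger name and the value its minimum level, applied through the standard logging API. A generic sketch of that application step (not Frigate's exact implementation):

    import logging

    def apply_levels(levels: dict[str, str]) -> None:
        # Silence noisy third-party loggers by raising their thresholds.
        for name, level in levels.items():
            logging.getLogger(name).setLevel(level)

    apply_levels({"absl": "ERROR", "PIL": "WARNING", "numba": "WARNING"})
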
@@ -318,3 +323,31 @@ def suppress_os_output(func: Callable) -> Callable:
return result

return wrapper


@contextmanager
def suppress_stderr_during(operation_name: str) -> Generator[None, None, None]:
"""
Context manager to suppress stderr output during a specific operation.

Useful for silencing LLVM debug output, CUDA messages, and other native
library logging that cannot be controlled via Python logging or environment
variables. Completely redirects file descriptor 2 (stderr) to /dev/null.

Usage:
with suppress_stderr_during("model_conversion"):
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()

Args:
operation_name: Name of the operation for debugging purposes
"""
original_stderr_fd = os.dup(2)
devnull = os.open(os.devnull, os.O_WRONLY)
try:
os.dup2(devnull, 2)
yield
finally:
os.dup2(original_stderr_fd, 2)
os.close(devnull)
os.close(original_stderr_fd)

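The reason this works at the file-descriptor level: reassigning sys.stderr only affects Python-level writes, while LLVM, CUDA, and other native libraries write straight to fd 2. A quick illustration of the difference:

    import os
    import sys

    sys.stderr = open(os.devnull, "w")  # silences print(..., file=sys.stderr)
    os.write(2, b"native writes to fd 2 bypass sys.stderr\n")  # still visible
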
@@ -139,9 +139,11 @@ class OutputProcess(FrigateProcess):
if CameraConfigUpdateEnum.add in updates:
for camera in updates["add"]:
jsmpeg_cameras[camera] = JsmpegCamera(
cam_config, self.stop_event, websocket_server
self.config.cameras[camera], self.stop_event, websocket_server
)
preview_recorders[camera] = PreviewRecorder(
self.config.cameras[camera]
)
preview_recorders[camera] = PreviewRecorder(cam_config)
preview_write_times[camera] = 0

if (

@@ -119,6 +119,7 @@ class RecordingCleanup(threading.Thread):
Recordings.path,
Recordings.objects,
Recordings.motion,
Recordings.dBFS,
)
.where(
(Recordings.camera == config.name)
@@ -126,6 +127,7 @@ class RecordingCleanup(threading.Thread):
(
(Recordings.end_time < continuous_expire_date)
& (Recordings.motion == 0)
& (Recordings.dBFS == 0)
)
| (Recordings.end_time < motion_expire_date)
)
@@ -185,6 +187,7 @@ class RecordingCleanup(threading.Thread):
mode == RetainModeEnum.motion
and recording.motion == 0
and recording.objects == 0
and recording.dBFS == 0
)
or (mode == RetainModeEnum.active_objects and recording.objects == 0)
):

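Taken together, these hunks make audio count as activity during cleanup: under the motion retain mode a recording segment is now deleted only when it has no motion, no objects, and no measured audio. The decision distilled into a hypothetical helper (not Frigate's actual function):

    def should_delete(mode: str, motion: int, objects: int, dbfs: float) -> bool:
        # motion mode: keep anything with motion, objects, or audio
        if mode == "motion":
            return motion == 0 and objects == 0 and dbfs == 0
        # active_objects mode: keep only segments with tracked objects
        if mode == "active_objects":
            return objects == 0
        return False
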
@@ -67,7 +67,7 @@ class SegmentInfo:
if (
not keep
and retain_mode == RetainModeEnum.motion
and (self.motion_count > 0 or self.average_dBFS > 0)
and (self.motion_count > 0 or self.average_dBFS != 0)
):
keep = True

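The operator change matters because dBFS (decibels relative to full scale) peaks at 0 and is negative for anything quieter, so average_dBFS > 0 could never be true for real audio; != 0 treats any measured level as activity. The scale, as a hypothetical helper:

    import math

    def dbfs(rms: float, full_scale: float) -> float:
        # 0 dBFS at full scale; quieter signals come out negative (e.g. -30.0).
        return 20 * math.log10(rms / full_scale)
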
@@ -42,11 +42,10 @@ def get_latest_version(config: FrigateConfig) -> str:
"https://api.github.com/repos/blakeblackshear/frigate/releases/latest",
timeout=10,
)
response = request.json()
except (RequestException, JSONDecodeError):
return "unknown"

response = request.json()

if request.ok and response and "tag_name" in response:
return str(response.get("tag_name").replace("v", ""))
else:
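
Moving request.json() inside the try block is the substantive fix here: parsing happens only when json() is called, so a 200 response with a non-JSON body (say, an HTML error page from a proxy) would previously raise outside the except clause. The corrected shape, sketched with the names from the hunk (import paths assumed, not copied from Frigate):

    import requests
    from json import JSONDecodeError
    from requests.exceptions import RequestException

    def get_latest_version() -> str:
        try:
            request = requests.get(
                "https://api.github.com/repos/blakeblackshear/frigate/releases/latest",
                timeout=10,
            )
            response = request.json()  # may raise, so it must live inside the try
        except (RequestException, JSONDecodeError):
            return "unknown"
        if request.ok and response and "tag_name" in response:
            return str(response.get("tag_name").replace("v", ""))
        return "unknown"
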
@@ -86,11 +86,11 @@ class TimelineProcessor(threading.Thread):
event_data: dict[Any, Any],
) -> bool:
"""Handle object detection."""
save = False
camera_config = self.config.cameras[camera]
event_id = event_data["id"]

timeline_entry = {
# Base timeline entry data that all entries will share
base_entry = {
Timeline.timestamp: event_data["frame_time"],
Timeline.camera: camera,
Timeline.source: "tracked_object",
@@ -123,40 +123,64 @@ class TimelineProcessor(threading.Thread):
e[Timeline.data]["sub_label"] = event_data["sub_label"]

if event_type == EventStateEnum.start:
timeline_entry = base_entry.copy()
timeline_entry[Timeline.class_type] = "visible"
save = True
self.insert_or_save(timeline_entry, prev_event_data, event_data)
elif event_type == EventStateEnum.update:
# Check all conditions and create timeline entries for each change
entries_to_save = []

# Check for zone changes
prev_zones = set(prev_event_data["current_zones"])
current_zones = set(event_data["current_zones"])
zones_changed = prev_zones != current_zones

# Only save "entered_zone" events when the object is actually IN zones
if (
len(prev_event_data["current_zones"]) < len(event_data["current_zones"])
zones_changed
and not event_data["stationary"]
and len(current_zones) > 0
):
timeline_entry[Timeline.class_type] = "entered_zone"
timeline_entry[Timeline.data]["zones"] = event_data["current_zones"]
save = True
elif prev_event_data["stationary"] != event_data["stationary"]:
timeline_entry[Timeline.class_type] = (
zone_entry = base_entry.copy()
zone_entry[Timeline.class_type] = "entered_zone"
zone_entry[Timeline.data] = base_entry[Timeline.data].copy()
zone_entry[Timeline.data]["zones"] = event_data["current_zones"]
entries_to_save.append(zone_entry)

# Check for stationary status change
if prev_event_data["stationary"] != event_data["stationary"]:
stationary_entry = base_entry.copy()
stationary_entry[Timeline.class_type] = (
"stationary" if event_data["stationary"] else "active"
)
save = True
elif prev_event_data["attributes"] == {} and event_data["attributes"] != {}:
timeline_entry[Timeline.class_type] = "attribute"
timeline_entry[Timeline.data]["attribute"] = list(
stationary_entry[Timeline.data] = base_entry[Timeline.data].copy()
entries_to_save.append(stationary_entry)

# Check for new attributes
if prev_event_data["attributes"] == {} and event_data["attributes"] != {}:
attribute_entry = base_entry.copy()
attribute_entry[Timeline.class_type] = "attribute"
attribute_entry[Timeline.data] = base_entry[Timeline.data].copy()
attribute_entry[Timeline.data]["attribute"] = list(
event_data["attributes"].keys()
)[0]

if len(event_data["current_attributes"]) > 0:
timeline_entry[Timeline.data]["attribute_box"] = to_relative_box(
attribute_entry[Timeline.data]["attribute_box"] = to_relative_box(
camera_config.detect.width,
camera_config.detect.height,
event_data["current_attributes"][0]["box"],
)

save = True
elif event_type == EventStateEnum.end:
timeline_entry[Timeline.class_type] = "gone"
save = True
entries_to_save.append(attribute_entry)

if save:
# Save all entries
for entry in entries_to_save:
self.insert_or_save(entry, prev_event_data, event_data)

elif event_type == EventStateEnum.end:
timeline_entry = base_entry.copy()
timeline_entry[Timeline.class_type] = "gone"
self.insert_or_save(timeline_entry, prev_event_data, event_data)

def handle_api_entry(

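One detail worth calling out in this refactor: dict.copy() is shallow, which is why every entry built from base_entry also re-copies the nested Timeline.data dict before mutating it. Without that, a zone entry and a stationary entry created in the same update would share one data dict and overwrite each other. The hazard in miniature (generic Python, not Frigate code):

    base = {"data": {"label": "person"}}

    a = base.copy()               # shallow: a["data"] IS base["data"]
    a["data"]["zones"] = ["porch"]
    print(base["data"])           # {'label': 'person', 'zones': ['porch']} -- mutated

    b = base.copy()
    b["data"] = base["data"].copy()  # re-copy the nested dict first
    b["data"]["zones"] = ["yard"]    # base["data"] is untouched this time
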
@@ -19,9 +19,10 @@ from frigate.const import (
PROCESS_PRIORITY_LOW,
UPDATE_MODEL_STATE,
)
from frigate.log import redirect_output_to_logger
from frigate.log import redirect_output_to_logger, suppress_stderr_during
from frigate.models import Event, Recordings, ReviewSegment
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.process import FrigateProcess
@@ -121,6 +122,10 @@ def get_dataset_image_count(model_name: str) -> int:

class ClassificationTrainingProcess(FrigateProcess):
def __init__(self, model_name: str) -> None:
self.BASE_WEIGHT_URL = os.environ.get(
"TF_KERAS_MOBILENET_V2_WEIGHTS_URL",
"",
)
super().__init__(
stop_event=None,
priority=PROCESS_PRIORITY_LOW,
@@ -179,11 +184,23 @@ class ClassificationTrainingProcess(FrigateProcess):
)
return False

weights_path = "imagenet"
# Download MobileNetV2 weights if not present
if self.BASE_WEIGHT_URL:
weights_path = os.path.join(
MODEL_CACHE_DIR, "MobileNet", "mobilenet_v2_weights.h5"
)
if not os.path.exists(weights_path):
logger.info("Downloading MobileNet V2 weights file")
ModelDownloader.download_from_url(
self.BASE_WEIGHT_URL, weights_path
)

# Start with imagenet base model with 35% of channels in each layer
base_model = MobileNetV2(
input_shape=(224, 224, 3),
include_top=False,
weights="imagenet",
weights=weights_path,
alpha=0.35,
)
base_model.trainable = False # Freeze pre-trained layers
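
This substitution works because the Keras applications API accepts either the literal string "imagenet", None, or a filesystem path for the weights argument, so a pre-downloaded .h5 file behaves exactly like the implicit download. The selection logic on its own (paths illustrative):

    import os

    # Unset (falsy) env var keeps the default Keras-hosted "imagenet" weights.
    mirror_url = os.environ.get("TF_KERAS_MOBILENET_V2_WEIGHTS_URL", "")
    weights = (
        os.path.join("model_cache", "MobileNet", "mobilenet_v2_weights.h5")
        if mirror_url
        else "imagenet"
    )
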
@@ -233,15 +250,20 @@ class ClassificationTrainingProcess(FrigateProcess):
logger.debug(f"Converting {self.model_name} to TFLite...")

# convert model to tflite
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = (
self.__generate_representative_dataset_factory(dataset_dir)
)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model = converter.convert()
# Suppress stderr during conversion to avoid LLVM debug output
# (fully_quantize, inference_type, MLIR optimization messages, etc)
with suppress_stderr_during("tflite_conversion"):
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = (
self.__generate_representative_dataset_factory(dataset_dir)
)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model = converter.convert()

# write model
model_path = os.path.join(model_dir, "model.tflite")
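
As background on the conversion settings: full-integer quantization needs a representative dataset so the converter can calibrate activation ranges, and setting the inference input/output types to uint8 quantizes the model's interface as well. A representative dataset is just a callable yielding sample input batches; a sketch with random data standing in for real training images:

    import numpy as np

    def representative_dataset():
        # Yield ~100 batches shaped like the model input (1, 224, 224, 3)
        # so the converter can observe realistic activation ranges.
        for _ in range(100):
            yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]
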
@@ -338,8 +360,6 @@ def collect_state_classification_examples(
cameras: Dict mapping camera names to normalized crop coordinates [x1, y1, x2, y2] (0-1)
"""
dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
temp_dir = os.path.join(dataset_dir, "temp")
os.makedirs(temp_dir, exist_ok=True)

# Step 1: Get review items for the cameras
camera_names = list(cameras.keys())
@@ -354,6 +374,10 @@ def collect_state_classification_examples(
logger.warning(f"No review items found for cameras: {camera_names}")
return

# The temp directory is only created when there are review_items.
temp_dir = os.path.join(dataset_dir, "temp")
os.makedirs(temp_dir, exist_ok=True)

# Step 2: Create balanced timestamp selection (100 samples)
timestamps = _select_balanced_timestamps(review_items, target_count=100)

@@ -482,6 +506,10 @@ def _extract_keyframes(
"""
Extract keyframes from recordings at specified timestamps and crop to specified regions.

This implementation batches work by running multiple ffmpeg snapshot commands
concurrently, which significantly reduces total runtime compared to
processing each timestamp serially.

Args:
ffmpeg_path: Path to ffmpeg binary
timestamps: List of timestamp dicts from _select_balanced_timestamps
@@ -491,15 +519,21 @@ def _extract_keyframes(
Returns:
List of paths to successfully extracted and cropped keyframe images
"""
keyframe_paths = []
from concurrent.futures import ThreadPoolExecutor, as_completed

for idx, ts_info in enumerate(timestamps):
if not timestamps:
return []

# Limit the number of concurrent ffmpeg processes so we don't overload the host.
max_workers = min(5, len(timestamps))

def _process_timestamp(idx: int, ts_info: dict) -> tuple[int, str | None]:
camera = ts_info["camera"]
timestamp = ts_info["timestamp"]

if camera not in camera_crops:
logger.warning(f"No crop coordinates for camera {camera}")
continue
return idx, None

norm_x1, norm_y1, norm_x2, norm_y2 = camera_crops[camera]

@@ -516,7 +550,7 @@ def _extract_keyframes(
.get()
)
except Exception:
continue
return idx, None

relative_time = timestamp - recording.start_time

@@ -530,38 +564,57 @@ def _extract_keyframes(
height=None,
)

if image_data:
nparr = np.frombuffer(image_data, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
if not image_data:
return idx, None

if img is not None:
height, width = img.shape[:2]
nparr = np.frombuffer(image_data, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

x1 = int(norm_x1 * width)
y1 = int(norm_y1 * height)
x2 = int(norm_x2 * width)
y2 = int(norm_y2 * height)
if img is None:
return idx, None

x1_clipped = max(0, min(x1, width))
y1_clipped = max(0, min(y1, height))
x2_clipped = max(0, min(x2, width))
y2_clipped = max(0, min(y2, height))
height, width = img.shape[:2]

if x2_clipped > x1_clipped and y2_clipped > y1_clipped:
cropped = img[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
resized = cv2.resize(cropped, (224, 224))
x1 = int(norm_x1 * width)
y1 = int(norm_y1 * height)
x2 = int(norm_x2 * width)
y2 = int(norm_y2 * height)

output_path = os.path.join(output_dir, f"frame_{idx:04d}.jpg")
cv2.imwrite(output_path, resized)
keyframe_paths.append(output_path)
x1_clipped = max(0, min(x1, width))
y1_clipped = max(0, min(y1, height))
x2_clipped = max(0, min(x2, width))
y2_clipped = max(0, min(y2, height))

if x2_clipped <= x1_clipped or y2_clipped <= y1_clipped:
return idx, None

cropped = img[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
resized = cv2.resize(cropped, (224, 224))

output_path = os.path.join(output_dir, f"frame_{idx:04d}.jpg")
cv2.imwrite(output_path, resized)
return idx, output_path
except Exception as e:
logger.debug(
f"Failed to extract frame from {recording.path} at {relative_time}s: {e}"
)
continue
return idx, None

return keyframe_paths
keyframes_with_index: list[tuple[int, str]] = []

with ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_idx = {
executor.submit(_process_timestamp, idx, ts_info): idx
for idx, ts_info in enumerate(timestamps)
}

for future in as_completed(future_to_idx):
_, path = future.result()
if path:
keyframes_with_index.append((future_to_idx[future], path))

keyframes_with_index.sort(key=lambda item: item[0])
return [path for _, path in keyframes_with_index]


def _select_distinct_images(

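The rework follows a common bounded fan-out pattern: submit one task per timestamp, cap concurrency with max_workers, collect results as they complete, then sort by the original index because as_completed yields in finish order rather than submission order. The skeleton, reduced to its essentials (worker body illustrative):

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def process(idx: int, item: str) -> tuple[int, str]:
        return idx, item.upper()  # stand-in for the per-timestamp ffmpeg work

    def run_all(items: list[str]) -> list[str]:
        if not items:
            return []
        results: list[tuple[int, str]] = []
        with ThreadPoolExecutor(max_workers=min(5, len(items))) as executor:
            futures = {executor.submit(process, i, it): i for i, it in enumerate(items)}
            for future in as_completed(futures):
                results.append(future.result())
        results.sort(key=lambda pair: pair[0])  # restore submission order
        return [value for _, value in results]
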
@@ -65,10 +65,15 @@ class FrigateProcess(BaseProcess):
logging.basicConfig(handlers=[], force=True)
logging.getLogger().addHandler(QueueHandler(self.__log_queue))

# Always apply base log level suppressions for noisy third-party libraries
# even if no specific logConfig is provided
if logConfig:
frigate.log.apply_log_levels(
logConfig.default.value.upper(), logConfig.logs
)
else:
# Apply default INFO level with standard library suppressions
frigate.log.apply_log_levels("INFO", {})

self._setup_memray()

@@ -8,6 +8,7 @@ import time
from pathlib import Path
from typing import Optional

from frigate.const import SUPPORTED_RK_SOCS
from frigate.util.file import FileLock

logger = logging.getLogger(__name__)
@@ -68,9 +69,20 @@ def is_rknn_compatible(model_path: str, model_type: str | None = None) -> bool:
True if the model is RKNN-compatible, False otherwise
"""
soc = get_soc_type()

if soc is None:
return False

# Check if the SoC is actually a supported RK device
# This prevents false positives on non-RK devices (e.g., macOS Docker)
# where /proc/device-tree/compatible might exist but contain non-RK content
if soc not in SUPPORTED_RK_SOCS:
logger.debug(
f"SoC '{soc}' is not a supported RK device for RKNN conversion. "
f"Supported SoCs: {SUPPORTED_RK_SOCS}"
)
return False

if not model_type:
model_type = get_rknn_model_type(model_path)

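For context, SoC detection on these boards typically comes from the device-tree compatible string, which explains the false-positive risk called out in the comment: the file can exist inside Docker on non-Rockchip hosts. A hedged sketch of that style of detection (SoC list abbreviated and illustrative, not Frigate's get_soc_type):

    def detect_rk_soc() -> str | None:
        # Illustrative only: read the device-tree compatible string if present.
        try:
            with open("/proc/device-tree/compatible", "rb") as f:
                compatible = f.read().decode(errors="ignore")
        except OSError:
            return None
        for soc in ("rk3562", "rk3566", "rk3568", "rk3576", "rk3588"):
            if soc in compatible:
                return soc
        return None
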
@@ -1,12 +1,12 @@
# COPYRIGHT AND TRADEMARK NOTICE

The images, logos, and icons contained in this directory (the "Brand Assets") are
proprietary to Frigate LLC and are NOT covered by the MIT License governing the
proprietary to Frigate, Inc. and are NOT covered by the MIT License governing the
rest of this repository.

1. TRADEMARK STATUS
The "Frigate" name and the accompanying logo are common law trademarks™ of
Frigate LLC. Frigate LLC reserves all rights to these marks.
Frigate, Inc. Frigate, Inc. reserves all rights to these marks.

2. LIMITED PERMISSION FOR USE
Permission is hereby granted to display these Brand Assets strictly for the
@@ -17,9 +17,9 @@ rest of this repository.
3. RESTRICTIONS
You may NOT:
a. Use these Brand Assets to represent a derivative work (fork) as an official
product of Frigate LLC.
product of Frigate, Inc.
b. Use these Brand Assets in a way that implies endorsement, sponsorship, or
commercial affiliation with Frigate LLC.
commercial affiliation with Frigate, Inc.
c. Modify or alter the Brand Assets.

If you fork this repository with the intent to distribute a modified or competing
@@ -30,4 +30,4 @@ For full usage guidelines, strictly see the TRADEMARK.md file in the
repository root.

ALL RIGHTS RESERVED.
Copyright (c) 2025 Frigate LLC.
Copyright (c) 2026 Frigate, Inc.

@@ -132,5 +132,9 @@
},
"count_one": "{{count}} Classe",
"count_other": "{{count}} Classes"
},
"attributes": {
"label": "Atributs de classificació",
"all": "Tots els atributs"
}
}

@@ -54,7 +54,7 @@
},
"renameCategory": {
"title": "Reanomena la classe",
"desc": "Introduïu un nom nou per {{name}}. Se us requerirà que torneu a entrenar el model per al canvi de nom a afectar."
"desc": "Introduïu un nom nou per {{name}}. Se us requerirà que torneu a entrenar el model per al canvi de nom afectar."
},
"description": {
"invalidName": "Nom no vàlid. Els noms només poden incloure lletres, números, espais, apòstrofs, guions baixos i guions."
@@ -116,7 +116,8 @@
"classesUnique": "Els noms de classe han de ser únics",
"stateRequiresTwoClasses": "Els models d'estat requereixen almenys 2 classes",
"objectLabelRequired": "Seleccioneu una etiqueta d'objecte",
"objectTypeRequired": "Seleccioneu un tipus de classificació"
"objectTypeRequired": "Seleccioneu un tipus de classificació",
"noneNotAllowed": "La classe 'none' no està permesa"
},
"states": "Estats"
},
@@ -172,7 +173,9 @@
"states": "Estats"
},
"details": {
"scoreInfo": "La puntuació representa la confiança mitjana de la classificació en totes les deteccions d'aquest objecte."
"scoreInfo": "La puntuació representa la confiança mitjana de la classificació en totes les deteccions d'aquest objecte.",
"none": "Cap",
"unknown": "Desconegut"
},
"edit": {
"title": "Edita el model de classificació",

@@ -100,13 +100,15 @@
"updatedSublabel": "Subetiqueta actualitzada amb èxit.",
"updatedLPR": "Matrícula actualitzada amb èxit.",
"regenerate": "El {{provider}} ha sol·licitat una nova descripció. En funció de la velocitat del vostre proveïdor, la nova descripció pot trigar un temps a regenerar-se.",
"audioTranscription": "S'ha sol·licitat correctament la transcripció d'àudio. Depenent de la velocitat del vostre servidor Frigate, la transcripció pot trigar una estona a completar-se."
"audioTranscription": "S'ha sol·licitat correctament la transcripció d'àudio. Depenent de la velocitat del vostre servidor Frigate, la transcripció pot trigar una estona a completar-se.",
"updatedAttributes": "Els atributs s'han actualitzat correctament."
},
"error": {
"regenerate": "No s'ha pogut contactar amb {{provider}} per obtenir una nova descripció: {{errorMessage}}",
"updatedSublabelFailed": "No s'ha pogut actualitzar la subetiqueta: {{errorMessage}}",
"updatedLPRFailed": "No s'ha pogut actualitzar la matrícula: {{errorMessage}}",
"audioTranscription": "Error en demanar la transcripció d'audio {{errorMessage}}"
"audioTranscription": "Error en demanar la transcripció d'audio {{errorMessage}}",
"updatedAttributesFailed": "No s'han pogut actualitzar els atributs: {{errorMessage}}"
}
},
"title": "Revisar detalls de l'element",
@@ -162,7 +164,12 @@
},
"score": {
"label": "Puntuació"
}
},
"editAttributes": {
"title": "Edita els atributs",
"desc": "Seleccioneu els atributs de classificació per a aquesta {{label}}"
},
"attributes": "Atributs de classificació"
},
"searchResult": {
"tooltip": "S'ha identificat {{type}} amb una confiança del {{confidence}}%",

@@ -15,7 +15,8 @@
"max_speed": "Velocitat màxima",
"recognized_license_plate": "Matrícula reconeguda",
"has_clip": "Té Clip",
"has_snapshot": "Té instantània"
"has_snapshot": "Té instantània",
"attributes": "Atributs"
},
"searchType": {
"thumbnail": "Miniatura",

@@ -484,7 +484,7 @@
"users": {
"table": {
"username": "Usuari",
"password": "Contrasenya",
"password": "Restableix la contrasenya",
"deleteUser": "Suprimir usuari",
"noUsers": "No s'han trobat usuaris.",
"changeRole": "Canviar la funció d’usuari",
@@ -595,7 +595,7 @@
"title": "Gestió d'usuaris",
"desc": "Gestioneu els comptes d'usuari d'aquesta instància de Frigate."
},
"updatePassword": "Actualitzar contrasenya"
"updatePassword": "Restableix la contrasenya"
},
"frigatePlus": {
"snapshotConfig": {
@@ -696,7 +696,7 @@
"title": "Classificació d'ocells",
"desc": "La classificació d’ocells identifica ocells coneguts mitjançant un model TensorFlow quantitzat. Quan es reconeix un ocell conegut, el seu nom comú s’afegeix com a subetiqueta. Aquesta informació es mostra a la interfície d’usuari, als filtres i també a les notificacions."
},
"title": "Parmàmetres complementaris",
"title": "Configuració dels enriquiments",
"toast": {
"error": "No s'han pogut guardar els canvis de configuració: {{errorMessage}}",
"success": "Els paràmetres complementaris s'han desat. Reinicia Frigate per aplicar els canvis."
@@ -805,7 +805,7 @@
"documentTitle": "Disparadors",
"management": {
"title": "Activadors",
"desc": "Gestionar els disparadors de {{camera}}. Usa les tipus de miniatures per disparar miniatures similars a l'objecte a seguir seleccionat, i el tipus de descripció per disparar en cas de descripcions similars a l'especificada."
"desc": "Gestionar els disparadors de {{camera}}. Usa els tipus de miniatures per disparar miniatures similars a l'objecte a seguir seleccionat, i el tipus de descripció per disparar en cas de descripcions similars a l'especificada."
},
"addTrigger": "Afegir disaprador",
"semanticSearch": {

@@ -190,7 +190,10 @@
"review_description_events_per_second": "Descripció de la revisió",
"object_description": "Descripció de l'objecte",
"object_description_speed": "Velocitat de la descripció de l'objecte",
"object_description_events_per_second": "Descripció de l'objecte"
"object_description_events_per_second": "Descripció de l'objecte",
"classification": "{{name}} Classificació",
"classification_speed": "Velocitat de classificació de {{name}}",
"classification_events_per_second": "{{name}} Esdeveniments de classificació per segon"
},
"infPerSecond": "Inferències per segon",
"averageInf": "Temps mitjà d'inferència"

@@ -1,34 +1,47 @@
{
"documentTitle": "Klasifikační modely",
"documentTitle": "Klasifikační modely - Frigate",
"button": {
"deleteClassificationAttempts": "Odstrániť Klasifikačné obrazy",
"renameCategory": "Premenovať triedu",
"deleteCategory": "Zmazať triedu",
"deleteImages": "Zmazať obrázok",
"trainModel": "Trenovací model",
"addClassification": "Pridať klasifikáciu",
"deleteModels": "Zmazať modeli",
"editModel": "Upraviť model"
"renameCategory": "Přejmenovat třídu",
"deleteCategory": "Smazat třídu",
"deleteImages": "Smazat obrázek",
"trainModel": "Trénovat model",
"addClassification": "Přidat klasifikaci",
"deleteModels": "Smazat modely",
"editModel": "Upravit model"
},
"details": {
"scoreInfo": "Skóre predstavuje priemernú istotu klasifikácie naprieč detekciami tohoto objektu."
"scoreInfo": "Skóre predstavuje priemernú istotu klasifikácie naprieč detekciami tohoto objektu.",
"none": "Nic",
"unknown": "Neznámý"
},
"tooltip": {
"trainingInProgress": "Model se práve trénuje",
"noNewImages": "Žiadne nové obrázky na trénovanie. Najskôr klasifikujte viac obrazkov v datasete.",
"noChanges": "Od posledného treningu nedošlo k žiadnym zmenám v datasete.",
"modelNotReady": "Model nieje pripravený na trénovanie."
"trainingInProgress": "Model se právě trénuje",
"noNewImages": "Žádné obrázky pro trénování. Nejdříve klasifikujte obrázky pro dataset.",
"noChanges": "Od posledního trénování nedošlo k žádné změně.",
"modelNotReady": "Model není připravený na trénování."
},
"toast": {
"success": {
"deletedImage": "Zmazať obrazky",
"deletedModel_one": "Úspešne odstranený {{count}} model",
"deletedModel_few": "Úspešne odstranené {{count}} modely",
"deletedModel_other": "Úspěšne ostranených {{count}} modelov",
"deletedCategory": "Zmazať triedu",
"deletedImage": "Smazat obrázky",
"deletedModel_one": "Úspěšně odstraněný {{count}} model",
"deletedModel_few": "Úspěšně odstraněné {{count}} modely",
"deletedModel_other": "Úspěšně odstraněných {{count}} modelů",
"deletedCategory": "Smazat třídu",
"categorizedImage": "Obrázek úspěšně klasifikován",
"trainedModel": "Úspěšně vytrénovaný model.",
"trainingModel": "Trénování modelu bylo úspěšně zahájeno."
"trainingModel": "Trénování modelu bylo úspěšně zahájeno.",
"updatedModel": "Konfigurace modelu úspěšně aktualizována.",
"renamedCategory": "Třída úspěšně přejmenována na {{name}}"
},
"error": {
"deleteImageFailed": "Chyba při mazání: {{errorMessage}}",
"deleteCategoryFailed": "Chyba při mazání třídy: {{errorMessage}}",
"deleteModelFailed": "Chyba při mazání modelu: {{errorMessage}}",
"categorizeFailed": "Chyba při mazání obrázku: {{errorMessage}}"
}
},
"train": {
"titleShort": "Nedávný"
}
}

@@ -43,6 +43,7 @@
"label": "Detail",
"noDataFound": "Žádná detailní data k prohlédnutí",
"aria": "Přepnout detailní zobrazení",
"trackedObject_other": "{{count}} objektů"
"trackedObject_other": "{{count}} objektů",
"trackedObject_one": "{{count}} objektů"
}
}

@@ -38,7 +38,8 @@
"train": {
"title": "Nedávná rozpoznání",
"empty": "Nejsou zde žádné předchozí pokusy o rozpoznání obličeje",
"aria": "Vybrat trénink"
"aria": "Vybrat poslední rozpoznávání",
"titleShort": "Nedávný"
},
"description": {
"addFace": "Přidejte novou kolekci do Knihovny obličejů nahráním prvního obrázku.",
@@ -76,7 +77,7 @@
"deletedName_one": "{{count}} obličej byl úspěšně odstraněn.",
"deletedName_few": "{{count}} tváře byly úspěšně odstraněny.",
"deletedName_other": "{{count}} tváře byly úspěšně odstraněny.",
"updatedFaceScore": "Úspěšně aktualizováno skóre obličeje.",
"updatedFaceScore": "Úspěšně aktualizováno skóre obličeje na {{name}} ({{score}}).",
"addFaceLibrary": "{{name}} byl(a) úspěšně přidán(a) do Knihovny obličejů!"
},
"error": {

@@ -26,7 +26,8 @@
"min_score": "Minimální Skóre",
"recognized_license_plate": "Rozpoznaná SPZ",
"has_clip": "Má Klip",
"has_snapshot": "Má Snímek"
"has_snapshot": "Má Snímek",
"attributes": "Atributy"
},
"tips": {
"desc": {

@@ -8,7 +8,7 @@
"masksAndZones": "Editor masky a zón - Frigate",
"motionTuner": "Ladění detekce pohybu - Frigate",
"object": "Ladění - Frigate",
"general": "Nastavení rozhraní- Frigate",
"general": "Nastavení rozhraní - Frigate",
"frigatePlus": "Frigate+ nastavení - Frigate",
"enrichments": "Nastavení obohacení - Frigate",
"cameraManagement": "Správa kamer - Frigate",

@@ -1,5 +1,5 @@
{
"noRecordingsFoundForThisTime": "Ingen optagelser fundet i det angivet tidsrum",
"noRecordingsFoundForThisTime": "Ingen optagelser fundet i det angivne tidsrum",
"noPreviewFound": "Ingen forhåndsvisning fundet",
"cameraDisabled": "Kamera er deaktiveret",
"noPreviewFoundFor": "Ingen forhåndsvisning fundet for {{cameraName}}",

@@ -1,7 +1,8 @@
{
"documentTitle": "Klassifikationsmodeller",
"details": {
"scoreInfo": "Scoren repræsenterer den gennemsnitlige klassifikationssikkerhed på tværs af alle registreringer af dette objekt."
"scoreInfo": "Scoren repræsenterer den gennemsnitlige klassifikationssikkerhed på tværs af alle registreringer af dette objekt.",
"unknown": "Ukendt"
},
"description": {
"invalidName": "Ugyldigt navn. Navne må kun indeholde bogstaver, tal, mellemrum, apostroffer, understregninger og bindestreger."

@@ -12,7 +12,7 @@
"24hours": "24 Stunden",
"month_one": "{{time}} Monat",
"month_other": "{{time}} Monate",
"d": "{{time}} Tag",
"d": "{{time}} Tg.",
"day_one": "{{time}} Tag",
"day_other": "{{time}} Tage",
"m": "{{time}} Min",
@@ -37,12 +37,12 @@
"30minutes": "30 Minuten",
"1hour": "1 Stunde",
"lastWeek": "Letzte Woche",
"h": "{{time}} Stunde",
"h": "{{time}} Std.",
"ago": "vor {{timeAgo}}",
"untilRestart": "Bis zum Neustart",
"justNow": "Gerade",
"pm": "nachmittags",
"mo": "{{time}}Monat",
"mo": "{{time}} Mon.",
"formattedTimestamp": {
"12hour": "d. MMM, hh:mm:ss aaa",
"24hour": "dd. MMM, hh:mm:ss aaa"
@@ -82,7 +82,7 @@
"12hour": "d. MMM yyyy",
"24hour": "d. MMM yyyy"
},
"inProgress": "In Bearbeitung",
"inProgress": "Im Gange",
"invalidStartTime": "Ungültige Startzeit",
"invalidEndTime": "Ungültige Endzeit"
},

@@ -132,5 +132,9 @@
},
"count_one": "{{count}} Klasse",
"count_other": "{{count}} Klassen"
},
"attributes": {
"label": "Klassifizierungsattribute",
"all": "Alle Attribute"
}
}