Compare commits


1 Commit

Author SHA1 Message Date
dependabot[bot]
cf5f89598d Bump prettier-plugin-tailwindcss from 0.6.5 to 0.7.1 in /web
Bumps [prettier-plugin-tailwindcss](https://github.com/tailwindlabs/prettier-plugin-tailwindcss) from 0.6.5 to 0.7.1.
- [Release notes](https://github.com/tailwindlabs/prettier-plugin-tailwindcss/releases)
- [Changelog](https://github.com/tailwindlabs/prettier-plugin-tailwindcss/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tailwindlabs/prettier-plugin-tailwindcss/compare/v0.6.5...v0.7.1)

---
updated-dependencies:
- dependency-name: prettier-plugin-tailwindcss
  dependency-version: 0.7.1
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-20 12:01:56 +00:00
952 changed files with 14028 additions and 52554 deletions
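For reference, a bump like this can be reproduced or verified locally before merging; a minimal sketch, assuming the frontend lives in `/web` and is managed with npm:

```bash
# Install the updated dev dependency in the web app
cd web
npm install --save-dev prettier-plugin-tailwindcss@0.7.1

# Confirm the resolved version matches the bump
npm ls prettier-plugin-tailwindcss

# Re-run prettier to make sure formatting is unchanged with the new plugin version
npx prettier --check .
```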

View File

@@ -22,7 +22,6 @@ autotrack
autotracked
autotracker
autotracking
backchannel
balena
Beelink
BGRA
@@ -192,7 +191,6 @@ ONVIF
openai
opencv
openvino
overfitting
OWASP
paddleocr
paho
@@ -317,4 +315,4 @@ yolo
yolonas
yolox
zeep
zerolatency
zerolatency

View File

@@ -1,6 +0,0 @@
---
globs: ["**/*.ts", "**/*.tsx"]
alwaysApply: false
---
Never write strings in the frontend directly, always write to and reference the relevant translations file.

View File

@@ -1,129 +0,0 @@
title: "[Beta Support]: "
labels: ["support", "triage", "beta"]
body:
- type: markdown
attributes:
value: |
Thank you for testing Frigate beta versions! Use this form for support with beta releases.
**Note:** Beta versions may have incomplete features, known issues, or unexpected behavior. Please check the [release notes](https://github.com/blakeblackshear/frigate/releases) and [recent discussions][discussions] for known beta issues before submitting.
Before submitting, read the [beta documentation][docs].
[docs]: https://deploy-preview-19787--frigate-docs.netlify.app/
- type: textarea
id: description
attributes:
label: Describe the problem you are having
description: Please be as detailed as possible. Include what you expected to happen vs what actually happened.
validations:
required: true
- type: input
id: version
attributes:
label: Beta Version
description: Visible on the System page in the Web UI. Please include the full version including the build identifier (e.g., 0.17.0-beta1)
placeholder: "0.17.0-beta1"
validations:
required: true
- type: dropdown
id: issue-category
attributes:
label: Issue Category
description: What area is your issue related to? This helps us understand the context.
options:
- Object Detection / Detectors
- Hardware Acceleration
- Configuration / Setup
- WebUI / Frontend
- Recordings / Storage
- Notifications / Events
- Integration (Home Assistant, etc)
- Performance / Stability
- Installation / Updates
- Other
validations:
required: true
- type: textarea
id: config
attributes:
label: Frigate config file
description: This will be automatically formatted into code, so no need for backticks. Remove any sensitive information like passwords or URLs.
render: yaml
validations:
required: true
- type: textarea
id: frigatelogs
attributes:
label: Relevant Frigate log output
description: Please copy and paste any relevant Frigate log output. Include logs before and after your exact error when possible. This will be automatically formatted into code, so no need for backticks.
render: shell
validations:
required: true
- type: textarea
id: go2rtclogs
attributes:
label: Relevant go2rtc log output (if applicable)
description: If your issue involves cameras, streams, or playback, please include go2rtc logs. Logs can be viewed via the Frigate UI, Docker, or the go2rtc dashboard. This will be automatically formatted into code, so no need for backticks.
render: shell
- type: dropdown
id: install-method
attributes:
label: Install method
options:
- Home Assistant Add-on
- Docker Compose
- Docker CLI
- Proxmox via Docker
- Proxmox via TTeck Script
- Windows WSL2
validations:
required: true
- type: textarea
id: docker
attributes:
label: docker-compose file or Docker CLI command
description: This will be automatically formatted into code, so no need for backticks. Include relevant environment variables and device mappings.
render: yaml
validations:
required: true
- type: dropdown
id: os
attributes:
label: Operating system
options:
- Home Assistant OS
- Debian
- Ubuntu
- Other Linux
- Proxmox
- UNRAID
- Windows
- Other
validations:
required: true
- type: input
id: hardware
attributes:
label: CPU / GPU / Hardware
description: Provide details about your hardware (e.g., Intel i5-9400, NVIDIA RTX 3060, Raspberry Pi 4, etc)
placeholder: "Intel i7-10700, NVIDIA GTX 1660"
- type: textarea
id: screenshots
attributes:
label: Screenshots
description: Screenshots of the issue, System metrics pages, or any relevant UI. Drag and drop or paste images directly.
- type: textarea
id: steps-to-reproduce
attributes:
label: Steps to reproduce
description: If applicable, provide detailed steps to reproduce the issue
placeholder: |
1. Go to '...'
2. Click on '...'
3. See error
- type: textarea
id: other
attributes:
label: Any other information that may be helpful
description: Additional context, related issues, when the problem started appearing, etc.

View File

@@ -6,8 +6,6 @@ body:
value: |
Use this form to submit a reproducible bug in Frigate or Frigate's UI.
**⚠️ If you are running a beta version (0.17.0-beta or similar), please use the [Beta Support template](https://github.com/blakeblackshear/frigate/discussions/new?category=beta-support) instead.**
Before submitting your bug report, please ask the AI with the "Ask AI" button on the [official documentation site][ai] about your issue, [search the discussions][discussions], look at recent open and closed [pull requests][prs], read the [official Frigate documentation][docs], and read the [Frigate FAQ][faq] pinned at the Discussion page to see if your bug has already been fixed by the developers or reported by the community.
**If you are unsure if your issue is actually a bug or not, please submit a support request first.**

View File

@@ -1,2 +0,0 @@
Never write strings in the frontend directly, always write to and reference the relevant translations file.
Always conform new and refactored code to the existing coding style in the project.

View File

@@ -15,7 +15,7 @@ concurrency:
cancel-in-progress: true
env:
PYTHON_VERSION: 3.11
PYTHON_VERSION: 3.9
jobs:
amd64_build:
@@ -23,7 +23,7 @@ jobs:
name: AMD64 Build
steps:
- name: Check out code
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Set up QEMU and Buildx
@@ -47,7 +47,7 @@ jobs:
name: ARM Build
steps:
- name: Check out code
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Set up QEMU and Buildx
@@ -82,7 +82,7 @@ jobs:
name: Jetson Jetpack 6
steps:
- name: Check out code
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Set up QEMU and Buildx
@@ -113,7 +113,7 @@ jobs:
- amd64_build
steps:
- name: Check out code
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Set up QEMU and Buildx
@@ -136,6 +136,7 @@ jobs:
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-tensorrt,mode=max
- name: AMD/ROCm general build
env:
AMDGPU: gfx
HSA_OVERRIDE: 0
uses: docker/bake-action@v6
with:
@@ -154,7 +155,7 @@ jobs:
- arm64_build
steps:
- name: Check out code
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Set up QEMU and Buildx
@@ -179,7 +180,7 @@ jobs:
- arm64_build
steps:
- name: Check out code
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Set up QEMU and Buildx

View File

@@ -16,12 +16,12 @@ jobs:
name: Web - Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
persist-credentials: false
- uses: actions/setup-node@v6
- uses: actions/setup-node@master
with:
node-version: 20.x
node-version: 16.x
- run: npm install
working-directory: ./web
- name: Lint
@@ -32,10 +32,10 @@ jobs:
name: Web - Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
persist-credentials: false
- uses: actions/setup-node@v6
- uses: actions/setup-node@master
with:
node-version: 20.x
- run: npm install
@@ -52,7 +52,7 @@ jobs:
name: Python Checks
steps:
- name: Check out the repository
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
persist-credentials: false
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
@@ -75,10 +75,10 @@ jobs:
name: Python Tests
steps:
- name: Check out code
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
persist-credentials: false
- uses: actions/setup-node@v6
- uses: actions/setup-node@master
with:
node-version: 20.x
- name: Install devcontainer cli

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
persist-credentials: false
- id: lowercaseRepo

.gitignore
View File

@@ -15,7 +15,6 @@ frigate/version.py
web/build
web/node_modules
web/coverage
web/.env
core
!/web/**/*.ts
.idea/*

View File

@@ -1,6 +1,6 @@
The MIT License
Copyright (c) 2026 Frigate, Inc. (Frigate™)
Copyright (c) 2020 Blake Blackshear
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
SOFTWARE.

View File

@@ -14,7 +14,6 @@ push-boards: $(BOARDS:%=push-%)
version:
echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py
echo 'VITE_GIT_COMMIT_HASH=$(COMMIT_HASH)' > web/.env
local: version
docker buildx build --target=frigate --file docker/main/Dockerfile . \

View File

@@ -1,10 +1,8 @@
<p align="center">
<img align="center" alt="logo" src="docs/static/img/branding/frigate.png">
<img align="center" alt="logo" src="docs/static/img/frigate.png">
</p>
# Frigate NVR™ - Realtime Object Detection for IP Cameras
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
# Frigate - NVR With Realtime Object Detection for IP Cameras
<a href="https://hosted.weblate.org/engage/frigate-nvr/">
<img src="https://hosted.weblate.org/widget/frigate-nvr/language-badge.svg" alt="Translation status" />
@@ -14,7 +12,7 @@
A complete and local NVR designed for [Home Assistant](https://www.home-assistant.io) with AI object detection. Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras.
Use of a GPU or AI accelerator is highly recommended. AI accelerators will outperform even the best CPUs with very little overhead. See Frigate's supported [object detectors](https://docs.frigate.video/configuration/object_detectors/).
Use of a GPU or AI accelerator such as a [Google Coral](https://coral.ai/products/) or [Hailo](https://hailo.ai/) is highly recommended. AI accelerators will outperform even the best CPUs with very little overhead.
- Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration)
- Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary
@@ -35,15 +33,6 @@ View the documentation at https://docs.frigate.video
If you would like to make a donation to support development, please use [Github Sponsors](https://github.com/sponsors/blakeblackshear).
## License
This project is licensed under the **MIT License**.
- **Code:** The source code, configuration files, and documentation in this repository are available under the [MIT License](LICENSE). You are free to use, modify, and distribute the code as long as you include the original copyright notice.
- **Trademarks:** The "Frigate" name, the "Frigate NVR" brand, and the Frigate logo are **trademarks of Frigate, Inc.** and are **not** covered by the MIT License.
Please see our [Trademark Policy](TRADEMARK.md) for details on acceptable use of our brand assets.
## Screenshots
### Live dashboard
@@ -77,7 +66,3 @@ We use [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) to support la
<a href="https://hosted.weblate.org/engage/frigate-nvr/">
<img src="https://hosted.weblate.org/widget/frigate-nvr/multi-auto.svg" alt="Translation status" />
</a>
---
**Copyright © 2026 Frigate, Inc.**

View File

@@ -1,31 +1,28 @@
<p align="center">
<img align="center" alt="logo" src="docs/static/img/branding/frigate.png">
<img align="center" alt="logo" src="docs/static/img/frigate.png">
</p>
# Frigate NVR™ - A Local NVR with Realtime Object Detection
# Frigate - A Local NVR with Realtime Object Detection
[English](https://github.com/blakeblackshear/frigate) | \[Simplified Chinese\]
<a href="https://hosted.weblate.org/engage/frigate-nvr/-/zh_Hans/">
<img src="https://hosted.weblate.org/widget/frigate-nvr/-/zh_Hans/svg-badge.svg" alt="Translation status" />
</a>
[English](https://github.com/blakeblackshear/frigate) | \[Simplified Chinese\]
A complete local network video recorder (NVR) designed for [Home Assistant](https://www.home-assistant.io) with AI object detection. Uses OpenCV and TensorFlow to perform realtime object detection locally for IP cameras.
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
A complete local network video recorder (NVR) designed for [Home Assistant](https://www.home-assistant.io) with AI object detection. Uses OpenCV and TensorFlow to perform realtime object detection locally for IP cameras.
Use of a GPU or AI accelerator (such as a [Google Coral accelerator](https://coral.ai/products/) or [Hailo](https://hailo.ai/)) is highly recommended. They are far more efficient than today's best CPUs and draw very little power.
- Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration)
- Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary
Use of a GPU or AI accelerator (such as a [Google Coral accelerator](https://coral.ai/products/) or [Hailo](https://hailo.ai/)) is highly recommended. They outperform even the best current CPUs while drawing far less power.
- Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration)
- Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary
- Makes heavy use of multiprocessing, emphasizing realtime operation over processing every frame
- Uses very low-overhead frame-change (motion) detection to determine where to run object detection
- Object detection runs with TensorFlow in a separate process for maximum FPS
- Communicates over MQTT for easy integration into other systems
- Uses very low-overhead motion detection to determine where to run object detection
- Object detection runs with TensorFlow in a separate process for maximum FPS
- Communicates over MQTT for easy integration into other systems
- Video recording with retention settings based on detected objects
- 24/7 recording
- Re-streaming via RTSP to reduce the number of connections to your camera
- WebRTC & MSE support for low-latency live view
- 24/7 recording
- Re-streaming via RTSP to reduce the number of connections to your camera
- WebRTC & MSE support for low-latency live view
## Community Chinese Translation of the Documentation
@@ -35,56 +32,39 @@
If you would like to make a donation to support development, please use [Github Sponsors](https://github.com/sponsors/blakeblackshear).
## License
This project is licensed under the **MIT License**.
**Code:** The source code, configuration files, and documentation in this repository are available under the [MIT License](LICENSE). You are free to use, modify, and distribute the code as long as you retain the original copyright notice.
**Trademarks:** The "Frigate" name, the "Frigate NVR" brand, and the Frigate logo are **trademarks of Frigate, Inc.** and are **not** covered by the MIT License.
Please see our [Trademark Policy](TRADEMARK.md) for details on acceptable use of our brand assets.
## Screenshots
### Live dashboard
<div>
<img width="800" alt="Live dashboard" src="https://github.com/blakeblackshear/frigate/assets/569905/5e713cb9-9db5-41dc-947a-6937c3bc376e">
</div>
### Streamlined review workflow
<div>
<img width="800" alt="Streamlined review workflow" src="https://github.com/blakeblackshear/frigate/assets/569905/6fed96e8-3b18-40e5-9ddc-31e6f3c9f2ff">
</div>
### Multi-camera scrubbing through time
<div>
<img width="800" alt="Multi-camera scrubbing through time" src="https://github.com/blakeblackshear/frigate/assets/569905/d6788a15-0eeb-4427-a8d4-80b93cae3d74">
</div>
### Built-in mask and zone editor
<div>
<img width="800" alt="Built-in mask and zone editor" src="https://github.com/blakeblackshear/frigate/assets/569905/d7885fc3-bfe6-452f-b7d0-d957cb3e31f5">
</div>
## Translations
## Translations
We use [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) to support translations; everyone is welcome to help improve them.
## Unofficial Chinese Discussion Community
Join the Chinese discussion QQ group: [1043861059](https://qm.qq.com/q/7vQKsTmSz)
## Unofficial Chinese Discussion Community
Join the Chinese discussion QQ group: [1043861059](https://qm.qq.com/q/7vQKsTmSz)
Bilibili: https://space.bilibili.com/3546894915602564
## Chinese Community Sponsors
## Chinese Community Sponsors
[![EdgeOne](https://edgeone.ai/media/34fe3a45-492d-4ea4-ae5d-ea1087ca7b4b.png)](https://edgeone.ai/zh?from=github)
CDN acceleration and security protection for this project are sponsored by Tencent EdgeOne
---
**Copyright © 2026 Frigate, Inc.**

View File

@@ -1,58 +0,0 @@
# Trademark Policy
**Last Updated:** November 2025
This document outlines the policy regarding the use of the trademarks associated with the Frigate NVR project.
## 1. Our Trademarks
The following terms and visual assets are trademarks (the "Marks") of **Frigate, Inc.**:
- **Frigate™**
- **Frigate NVR™**
- **Frigate+™**
- **The Frigate Logo**
**Note on Common Law Rights:**
Frigate, Inc. asserts all common law rights in these Marks. The absence of a federal registration symbol (®) does not constitute a waiver of our intellectual property rights.
## 2. Interaction with the MIT License
The software in this repository is licensed under the [MIT License](LICENSE).
**Crucial Distinction:**
- The **Code** is free to use, modify, and distribute under the MIT terms.
- The **Brand (Trademarks)** is **NOT** licensed under MIT.
You may not use the Marks in any way that is not explicitly permitted by this policy or by written agreement with Frigate, Inc.
## 3. Acceptable Use
You may use the Marks without prior written permission in the following specific contexts:
- **Referential Use:** To truthfully refer to the software (e.g., _"I use Frigate NVR for my home security"_).
- **Compatibility:** To indicate that your product or project works with the software (e.g., _"MyPlugin for Frigate NVR"_ or _"Compatible with Frigate"_).
- **Commentary:** In news articles, blog posts, or tutorials discussing the software.
## 4. Prohibited Use
You may **NOT** use the Marks in the following ways:
- **Commercial Products:** You may not use "Frigate" in the name of a commercial product, service, or app (e.g., selling an app named _"Frigate Viewer"_ is prohibited).
- **Implying Affiliation:** You may not use the Marks in a way that suggests your project is official, sponsored by, or endorsed by Frigate, Inc.
- **Confusing Forks:** If you fork this repository to create a derivative work, you **must** remove the Frigate logo and rename your project to avoid user confusion. You cannot distribute a modified version of the software under the name "Frigate".
- **Domain Names:** You may not register domain names containing "Frigate" that are likely to confuse users (e.g., `frigate-official-support.com`).
## 5. The Logo
The Frigate logo (the bird icon) is a visual trademark.
- You generally **cannot** use the logo on your own website or product packaging without permission.
- If you are building a dashboard or integration that interfaces with Frigate, you may use the logo only to represent the Frigate node/service, provided it does not imply you _are_ Frigate.
## 6. Questions & Permissions
If you are unsure if your intended use violates this policy, or if you wish to request a specific license to use the Marks (e.g., for a partnership), please contact us at:
**help@frigate.video**

View File

@@ -237,18 +237,8 @@ ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
# Set HailoRT to disable logging
ENV HAILORT_LOGGER_PATH=NONE
# TensorFlow C++ logging suppression (must be set before import)
# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
# TensorFlow error only
ENV TF_CPP_MIN_LOG_LEVEL=3
# Suppress verbose logging from TensorFlow C++ code
ENV TF_CPP_MIN_VLOG_LEVEL=3
# Disable oneDNN optimization messages ("optimized with oneDNN...")
ENV TF_ENABLE_ONEDNN_OPTS=0
# Suppress AutoGraph verbosity during conversion
ENV AUTOGRAPH_VERBOSITY=0
# Google Logging (GLOG) suppression for TensorFlow components
ENV GLOG_minloglevel=3
ENV GLOG_logtostderr=0
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"

View File

@@ -5,27 +5,21 @@ set -euxo pipefail
SQLITE3_VERSION="3.46.1"
PYSQLITE3_VERSION="0.5.3"
# Install libsqlite3-dev if not present (needed for some base images like NVIDIA TensorRT)
if ! dpkg -l | grep -q libsqlite3-dev; then
echo "Installing libsqlite3-dev for compilation..."
apt-get update && apt-get install -y libsqlite3-dev && rm -rf /var/lib/apt/lists/*
fi
# Fetch the pre-built sqlite amalgamation instead of building from source
if [[ ! -d "sqlite" ]]; then
mkdir sqlite
cd sqlite
# Download the pre-built amalgamation from sqlite.org
# For SQLite 3.46.1, the amalgamation version is 3460100
SQLITE_AMALGAMATION_VERSION="3460100"
wget https://www.sqlite.org/2024/sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}.zip -O sqlite-amalgamation.zip
unzip sqlite-amalgamation.zip
mv sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}/* .
rmdir sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}
rm sqlite-amalgamation.zip
cd ../
fi

View File

@@ -20,7 +20,6 @@ apt-get -qq install --no-install-recommends -y \
libgl1 \
libglib2.0-0 \
libusb-1.0.0 \
python3-h2 \
libgomp1 # memryx detector
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
@@ -145,6 +144,6 @@ rm -rf /var/lib/apt/lists/*
# Install yq, for frigate-prepare and go2rtc echo source
curl -fsSL \
"https://github.com/mikefarah/yq/releases/download/v4.48.2/yq_linux_$(dpkg --print-architecture)" \
"https://github.com/mikefarah/yq/releases/download/v4.33.3/yq_linux_$(dpkg --print-architecture)" \
--output /usr/local/bin/yq
chmod +x /usr/local/bin/yq

View File

@@ -2,9 +2,9 @@
set -e
# Download the MxAccl for Frigate github release
wget https://github.com/memryx/mx_accl_frigate/archive/refs/tags/v2.1.0.zip -O /tmp/mxaccl.zip
wget https://github.com/memryx/mx_accl_frigate/archive/refs/heads/main.zip -O /tmp/mxaccl.zip
unzip /tmp/mxaccl.zip -d /tmp
mv /tmp/mx_accl_frigate-2.1.0 /opt/mx_accl_frigate
mv /tmp/mx_accl_frigate-main /opt/mx_accl_frigate
rm /tmp/mxaccl.zip
# Install Python dependencies

View File

@@ -21,7 +21,7 @@ onvif-zeep-async == 4.0.*
paho-mqtt == 2.1.*
pandas == 2.2.*
peewee == 3.17.*
peewee_migrate == 1.14.*
peewee_migrate == 1.13.*
psutil == 7.1.*
pydantic == 2.10.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml
@@ -56,7 +56,7 @@ pywebpush == 2.0.*
# alpr
pyclipper == 1.3.*
shapely == 2.0.*
rapidfuzz==3.12.*
Levenshtein==0.26.*
# HailoRT Wheels
appdirs==1.4.*
argcomplete==2.0.*
@@ -81,5 +81,3 @@ librosa==0.11.*
soundfile==0.13.*
# DeGirum detector
degirum == 0.16.*
# Memory profiling
memray == 1.15.*

View File

@@ -1 +1,2 @@
scikit-build == 0.18.*
nvidia-pyindex

View File

@@ -55,7 +55,7 @@ function setup_homekit_config() {
if [[ ! -f "${config_path}" ]]; then
echo "[INFO] Creating empty HomeKit config file..."
echo 'homekit: {}' > "${config_path}"
echo '{}' > "${config_path}"
fi
# Convert YAML to JSON for jq processing
@@ -70,14 +70,12 @@ function setup_homekit_config() {
jq '
# Keep only the homekit section if it exists, otherwise empty object
if has("homekit") then {homekit: .homekit} else {homekit: {}} end
' "${temp_json}" > "${cleaned_json}" 2>/dev/null || {
echo '{"homekit": {}}' > "${cleaned_json}"
}
' "${temp_json}" > "${cleaned_json}" 2>/dev/null || echo '{"homekit": {}}' > "${cleaned_json}"
# Convert back to YAML and write to the config file
yq eval -P "${cleaned_json}" > "${config_path}" 2>/dev/null || {
echo "[WARNING] Failed to convert cleaned config to YAML, creating minimal config"
echo 'homekit: {}' > "${config_path}"
echo '{"homekit": {}}' > "${config_path}"
}
# Clean up temp files

View File

@@ -73,8 +73,6 @@ http {
vod_manifest_segment_durations_mode accurate;
vod_ignore_edit_list on;
vod_segment_duration 10000;
# MPEG-TS settings (not used when fMP4 is enabled, kept for reference)
vod_hls_mpegts_align_frames off;
vod_hls_mpegts_interleave_frames on;
@@ -107,10 +105,6 @@ http {
aio threads;
vod hls;
# Use fMP4 (fragmented MP4) instead of MPEG-TS for better performance
# Smaller segments, faster generation, better browser compatibility
vod_hls_container_format fmp4;
secure_token $args;
secure_token_types application/vnd.apple.mpegurl;
@@ -280,18 +274,6 @@ http {
include proxy.conf;
}
# Allow unauthenticated access to the first_time_login endpoint
# so the login page can load help text before authentication.
location /api/auth/first_time_login {
auth_request off;
limit_except GET {
deny all;
}
rewrite ^/api(/.*)$ $1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}
location /api/stats {
include auth_request.conf;
access_log off;
@@ -320,12 +302,6 @@ http {
add_header Cache-Control "public";
}
location /fonts/ {
access_log off;
expires 1y;
add_header Cache-Control "public";
}
location /locales/ {
access_log off;
add_header Cache-Control "public";

View File

@@ -18,10 +18,6 @@ proxy_set_header X-Forwarded-User $http_x_forwarded_user;
proxy_set_header X-Forwarded-Groups $http_x_forwarded_groups;
proxy_set_header X-Forwarded-Email $http_x_forwarded_email;
proxy_set_header X-Forwarded-Preferred-Username $http_x_forwarded_preferred_username;
proxy_set_header X-Auth-Request-User $http_x_auth_request_user;
proxy_set_header X-Auth-Request-Groups $http_x_auth_request_groups;
proxy_set_header X-Auth-Request-Email $http_x_auth_request_email;
proxy_set_header X-Auth-Request-Preferred-Username $http_x_auth_request_preferred_username;
proxy_set_header X-authentik-username $http_x_authentik_username;
proxy_set_header X-authentik-groups $http_x_authentik_groups;
proxy_set_header X-authentik-email $http_x_authentik_email;

View File

@@ -24,13 +24,10 @@ echo "Adding MemryX GPG key and repository..."
wget -qO- https://developer.memryx.com/deb/memryx.asc | sudo tee /etc/apt/trusted.gpg.d/memryx.asc >/dev/null
echo 'deb https://developer.memryx.com/deb stable main' | sudo tee /etc/apt/sources.list.d/memryx.list >/dev/null
# Update and install specific SDK 2.1 packages
echo "Installing MemryX SDK 2.1 packages..."
# Update and install memx-drivers
echo "Installing memx-drivers..."
sudo apt update
sudo apt install -y memx-drivers=2.1.* memx-accl=2.1.* mxa-manager=2.1.*
# Hold packages to prevent automatic upgrades
sudo apt-mark hold memx-drivers memx-accl mxa-manager
sudo apt install -y memx-drivers
# ARM-specific board setup
if [[ "$arch" == "aarch64" || "$arch" == "arm64" ]]; then
@@ -40,5 +37,11 @@ fi
echo -e "\n\n\033[1;31mYOU MUST RESTART YOUR COMPUTER NOW\033[0m\n\n"
echo "MemryX SDK 2.1 installation complete!"
# Install other runtime packages
packages=("memx-accl" "mxa-manager")
for pkg in "${packages[@]}"; do
echo "Installing $pkg..."
sudo apt install -y "$pkg"
done
echo "MemryX installation complete!"

View File

@@ -3,6 +3,7 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive
ARG ROCM=1
ARG AMDGPU=gfx900
ARG HSA_OVERRIDE_GFX_VERSION
ARG HSA_OVERRIDE
@@ -10,10 +11,11 @@ ARG HSA_OVERRIDE
FROM wget AS rocm
ARG ROCM
ARG AMDGPU
RUN apt update -qq && \
apt install -y wget gpg && \
wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.1.1/ubuntu/jammy/amdgpu-install_7.1.1.70101-1_all.deb && \
wget -O rocm.deb https://repo.radeon.com/amdgpu-install/7.0.2/ubuntu/jammy/amdgpu-install_7.0.2.70002-1_all.deb && \
apt install -y ./rocm.deb && \
apt update && \
apt install -qq -y rocm
@@ -34,10 +36,7 @@ FROM deps AS deps-prelim
COPY docker/rocm/debian-backports.sources /etc/apt/sources.list.d/debian-backports.sources
RUN apt-get update && \
apt-get install -y libnuma1 && \
apt-get install -qq -y -t bookworm-backports mesa-va-drivers mesa-vulkan-drivers && \
# Install C++ standard library headers for HIPRTC kernel compilation fallback
apt-get install -qq -y libstdc++-12-dev && \
rm -rf /var/lib/apt/lists/*
apt-get install -qq -y -t bookworm-backports mesa-va-drivers mesa-vulkan-drivers
WORKDIR /opt/frigate
COPY --from=rootfs / /
@@ -55,14 +54,12 @@ RUN pip3 uninstall -y onnxruntime \
FROM scratch AS rocm-dist
ARG ROCM
ARG AMDGPU
COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
# Copy MIOpen database files for gfx10xx and gfx11xx only (RDNA2/RDNA3)
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx10* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx11* /opt/rocm-$ROCM/share/miopen/db/
# Copy rocBLAS library files for gfx10xx and gfx11xx only
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*gfx10* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*gfx11* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /
#######################################################################

View File

@@ -1 +1 @@
onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.1.0/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl
onnxruntime-migraphx @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v7.0.2/onnxruntime_migraphx-1.23.1-cp311-cp311-linux_x86_64.whl

View File

@@ -1,5 +1,8 @@
variable "AMDGPU" {
default = "gfx900"
}
variable "ROCM" {
default = "7.1.1"
default = "7.0.2"
}
variable "HSA_OVERRIDE_GFX_VERSION" {
default = ""
@@ -35,6 +38,7 @@ target rocm {
}
platforms = ["linux/amd64"]
args = {
AMDGPU = AMDGPU,
ROCM = ROCM,
HSA_OVERRIDE_GFX_VERSION = HSA_OVERRIDE_GFX_VERSION,
HSA_OVERRIDE = HSA_OVERRIDE

View File

@@ -1,15 +1,53 @@
BOARDS += rocm
# AMD/ROCm is chunky, so we build a couple of smaller images for specific chipsets
ROCM_CHIPSETS:=gfx900:9.0.0 gfx1030:10.3.0 gfx1100:11.0.0
local-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) \
--load \
&&) true
unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=frigate:latest-rocm \
--load
build-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
&&) true
unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm
push-rocm: build-rocm
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
--push \
&&) true
unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm \
--push
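As a usage note, a sketch of how these targets are typically invoked from the repository root (assuming the top-level Makefile includes the board makefiles via `BOARDS`; the repo and ref values below are placeholders):

```bash
# Build all per-chipset images plus the generic rocm image locally
make local-rocm

# Build and push CI-style tags; IMAGE_REPO and GITHUB_REF_NAME are placeholders here
make push-rocm IMAGE_REPO=ghcr.io/your_org/frigate GITHUB_REF_NAME=dev
```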

View File

@@ -21,7 +21,7 @@ FROM deps AS frigate-tensorrt
ARG PIP_BREAK_SYSTEM_PACKAGES
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 uninstall -y onnxruntime \
pip3 uninstall -y onnxruntime tensorflow-cpu \
&& pip3 install -U /deps/trt-wheels/*.whl
COPY --from=rootfs / /

View File

@@ -112,7 +112,7 @@ RUN apt-get update \
&& apt-get install -y protobuf-compiler libprotobuf-dev \
&& rm -rf /var/lib/apt/lists/*
RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \
pip3 wheel --wheel-dir=/trt-model-wheels --no-deps -r /requirements-tensorrt-models.txt
pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt
FROM wget AS jetson-ffmpeg
ARG DEBIAN_FRONTEND
@@ -145,8 +145,7 @@ COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
--mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
pip3 uninstall -y onnxruntime \
&& pip3 install -U /deps/trt-wheels/*.whl \
&& pip3 install -U /deps/trt-model-wheels/*.whl \
&& pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \
&& ldconfig
WORKDIR /opt/frigate/

View File

@@ -13,6 +13,7 @@ nvidia_cusolver_cu12==11.6.3.*; platform_machine == 'x86_64'
nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
tensorflow==2.19.*; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@@ -1,2 +1 @@
cuda-python == 12.6.*; platform_machine == 'aarch64'
numpy == 1.26.*; platform_machine == 'aarch64'

View File

@@ -25,7 +25,7 @@ Examples of available modules are:
- `frigate.app`
- `frigate.mqtt`
- `frigate.object_detection.base`
- `frigate.object_detection`
- `detector.<detector_name>`
- `watchdog.<camera_name>`
- `ffmpeg.<camera_name>.<sorted_roles>` NOTE: All FFmpeg logs are sent as `error` level.
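Putting the module list together with the `logger` structure used elsewhere in these docs, a minimal sketch that raises verbosity for a couple of modules (the camera name `front_door` is a placeholder):

```yaml
logger:
  default: info
  logs:
    frigate.mqtt: debug
    # "front_door" is a placeholder camera name
    watchdog.front_door: debug
```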
@@ -53,17 +53,6 @@ environment_vars:
VARIABLE_NAME: variable_value
```
#### TensorFlow Thread Configuration
If you encounter thread creation errors during classification model training, you can limit TensorFlow's thread usage:
```yaml
environment_vars:
TF_INTRA_OP_PARALLELISM_THREADS: "2" # Threads within operations (0 = use default)
TF_INTER_OP_PARALLELISM_THREADS: "2" # Threads between operations (0 = use default)
TF_DATASET_THREAD_POOL_SIZE: "2" # Data pipeline threads (0 = use default)
```
### `database`
Tracked object and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.
@@ -258,7 +247,7 @@ curl -X POST http://frigate_host:5000/api/config/save -d @config.json
If you'd like, you can use your yaml config directly by using [`yq`](https://github.com/mikefarah/yq) to convert it to json:
```bash
yq -o=json '.' config.yaml | curl -X POST 'http://frigate_host:5000/api/config/save?save_option=saveonly' --data-binary @-
yq r -j config.yml | curl -X POST http://frigate_host:5000/api/config/save -d @-
```
### Via Command Line

View File

@@ -75,13 +75,7 @@ audio:
### Audio Transcription
Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. The goal of this feature is to support Semantic Search for `speech` audio events. Frigate is not intended to act as a continuous, fully-automatic speech transcription service — automatically transcribing all speech (or queuing many audio events for transcription) requires substantial CPU (or GPU) resources and is impractical on most systems. For this reason, transcriptions for events are initiated manually from the UI or the API rather than being run continuously in the background.
Transcription accuracy also depends heavily on the quality of your camera's microphone and recording conditions. Many cameras use inexpensive microphones, and distance to the speaker, low audio bitrate, or background noise can significantly reduce transcription quality. If you need higher accuracy, more robust long-running queues, or large-scale automatic transcription, consider using the HTTP API in combination with an automation platform and a cloud transcription service.
#### Configuration
To enable transcription, enable it in your config. Note that audio detection must also be enabled as described above in order to use audio transcription features.
Frigate supports fully local audio transcription using either `sherpa-onnx` or OpenAI's open-source Whisper models via `faster-whisper`. To enable transcription, enable it in your config. Note that audio detection must also be enabled as described above in order to use audio transcription features.
```yaml
audio_transcription:
@@ -150,28 +144,4 @@ In order to use transcription and translation for past events, you must enable a
The transcribed/translated speech will appear in the description box in the Tracked Object Details pane. If Semantic Search is enabled, embeddings are generated for the transcription text and are fully searchable using the description search type.
:::note
Only one `speech` event may be transcribed at a time. Frigate does not automatically transcribe `speech` events or implement a queue for long-running transcription model inference.
:::
Recorded `speech` events will always use a `whisper` model, regardless of the `model_size` config setting. Without a supported Nvidia GPU, generating transcriptions for longer `speech` events may take a fair amount of time, so be patient.
#### FAQ
1. Why doesn't Frigate automatically transcribe all `speech` events?
Frigate does not implement a queue mechanism for speech transcription, and adding one is not trivial. A proper queue would need backpressure, prioritization, memory/disk buffering, retry logic, crash recovery, and safeguards to prevent unbounded growth when events outpace processing. That's a significant amount of complexity for a feature that, in most real-world environments, would mostly just churn through low-value noise.
Because transcription is **serialized (one event at a time)** and speech events can be generated far faster than they can be processed, an auto-transcribe toggle would very quickly create an ever-growing backlog and degrade core functionality. For the amount of engineering and risk involved, it adds **very little practical value** for the majority of deployments, which are often on low-powered, edge hardware.
If you hear speech that's actually important and worth saving/indexing for the future, **just press the transcribe button in Explore** on that specific `speech` event - that keeps things explicit, reliable, and under your control.
Other options are being considered for future versions of Frigate to add transcription options that support external `whisper` Docker containers. A single transcription service could then be shared by Frigate and other applications (for example, Home Assistant Voice), and run on more powerful machines when available.
2. Why don't you save live transcription text and use that for `speech` events?
There's no guarantee that a `speech` event is even created from the exact audio that went through the transcription model. Live transcription and `speech` event creation are **separate, asynchronous processes**. Even when both are correctly configured, trying to align the **precise start and end time of a speech event** with whatever audio the model happened to be processing at that moment is unreliable.
Automatically persisting that data would often result in **misaligned, partial, or irrelevant transcripts**, while still incurring all of the CPU, storage, and privacy costs of transcription. That's why Frigate treats transcription as an **explicit, user-initiated action** rather than an automatic side-effect of every `speech` event.
Recorded `speech` events will always use a `whisper` model, regardless of the `model_size` config setting. Without a GPU, generating transcriptions for longer `speech` events may take a fair amount of time, so be patient.

View File

@@ -270,42 +270,3 @@ To use role-based access control, you must connect to Frigate via the **authenti
1. Log in as an **admin** user via port `8971`.
2. Navigate to **Settings > Users**.
3. Edit a user's role by selecting **admin** or **viewer**.
## API Authentication Guide
### Getting a Bearer Token
To use the Frigate API, you need to authenticate first. Follow these steps to obtain a Bearer token:
#### 1. Login
Make a POST request to `/login` with your credentials:
```bash
curl -i -X POST https://frigate_ip:8971/api/login \
-H "Content-Type: application/json" \
-d '{"user": "admin", "password": "your_password"}'
```
:::note
You may need to include `-k` in the argument list in these steps (eg: `curl -k -i -X POST ...`) if your Frigate instance is using a self-signed certificate.
:::
The response will contain a cookie with the JWT token.
#### 2. Using the Bearer Token
Once you have the token, include it in the Authorization header for subsequent requests:
```bash
curl -H "Authorization: Bearer <your_token>" https://frigate_ip:8971/api/profile
```
#### 3. Token Lifecycle
- Tokens are valid for the configured session length
- Tokens are automatically refreshed when you visit the `/auth` endpoint
- Tokens are invalidated when the user's password is changed
- Use `/logout` to clear your session cookie
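As a worked example of the flow above, a minimal sketch that logs in, pulls the JWT out of the returned cookie, and calls an authenticated endpoint. The cookie name `frigate_token` is an assumption here; adjust it if your instance uses a different name:

```bash
# Log in and capture the Set-Cookie header (-k only needed for self-signed certificates)
TOKEN=$(curl -sk -X POST https://frigate_ip:8971/api/login \
  -H "Content-Type: application/json" \
  -d '{"user": "admin", "password": "your_password"}' \
  -D - -o /dev/null | sed -n 's/.*frigate_token=\([^;]*\).*/\1/p')

# Use the token as a Bearer token on subsequent requests
curl -sk -H "Authorization: Bearer ${TOKEN}" https://frigate_ip:8971/api/profile
```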

View File

@@ -164,35 +164,13 @@ According to [this discussion](https://github.com/blakeblackshear/frigate/issues
Cameras connected via a Reolink NVR can be connected with the http stream; use `channel[0..15]` in the stream URL for the additional channels.
The main stream can also be set up via RTSP, but it isn't always reliable on all hardware versions. The example configuration works with the oldest hardware version, an RLN16-410 device, with multiple types of cameras.
<details>
<summary>Example Config</summary>
:::tip
Reolink's latest cameras support two-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary RTSP stream can be added that will be used for the two-way audio only.
NOTE: The RTSP stream cannot be prefixed with `ffmpeg:`, as go2rtc needs to handle the stream to support two-way audio.
Ensure HTTP is enabled in the camera's advanced network settings. To use two-way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
:::
```yaml
go2rtc:
streams:
# example for connecting to a standard Reolink camera
your_reolink_camera:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
your_reolink_camera_sub:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
# example for connecting to a Reolink camera that supports two-way talk
your_reolink_camera_twt:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
- "rtsp://username:password@reolink_ip/Preview_01_sub"
your_reolink_camera_twt_sub:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
- "rtsp://username:password@reolink_ip/Preview_01_sub"
# example for connecting to a Reolink NVR
your_reolink_camera_via_nvr:
- "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_main.bcs&user=username&password=password" # channel numbers are 0-15
- "ffmpeg:your_reolink_camera_via_nvr#audio=aac"
@@ -223,16 +201,25 @@ cameras:
roles:
- detect
```
</details>
#### Reolink Doorbell
The Reolink doorbell supports two-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary RTSP stream can be added that will be used for the two-way audio only.
Ensure HTTP is enabled in the camera's advanced network settings. To use two-way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
```yaml
go2rtc:
streams:
your_reolink_doorbell:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
- rtsp://reolink_ip/Preview_01_sub
your_reolink_doorbell_sub:
- "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
```
### Unifi Protect Cameras
:::note
Unifi G5s cameras and newer need a Unifi Protect server to enable the rtsps stream; it's not possible to enable it in standalone mode.
:::
Unifi protect cameras require the rtspx stream to be used with go2rtc.
To utilize a Unifi protect camera, modify the rtsps link to begin with rtspx.
Additionally, remove the "?enableSrtp" from the end of the Unifi link.
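For illustration, a minimal go2rtc sketch after making those two edits (the IP, port, and token below are placeholders in the shape of a typical Unifi Protect rtsps link):

```yaml
go2rtc:
  streams:
    # original link from Unifi Protect: rtsps://192.168.1.1:7441/abcdEFGH123?enableSrtp
    # change the scheme to rtspx and drop "?enableSrtp"
    your_unifi_camera:
      - "rtspx://192.168.1.1:7441/abcdEFGH123"
```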
@@ -258,10 +245,6 @@ ffmpeg:
TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded footage. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`.
### Wyze Wireless Cameras
Some community members have found better performance on Wyze cameras by using an alternative firmware known as [Thingino](https://thingino.com/).
## USB Cameras (aka Webcams)
To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's [FFmpeg Device](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg-device) support:
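As an illustrative sketch only, a go2rtc stream using that FFmpeg Device source; the exact query parameters depend on your platform and are an assumption here, so check the linked go2rtc documentation:

```yaml
go2rtc:
  streams:
    # a hypothetical USB webcam exposed as video device 0, transcoded to H.264
    usb_camera:
      - "ffmpeg:device?video=0#video=h264"
```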

View File

@@ -94,19 +94,18 @@ This list of working and non-working PTZ cameras is based on user feedback. If y
The FeatureList on the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/) can provide a starting point to determine a camera's compatibility with Frigate's autotracking. Look to see if a camera lists `PTZRelative`, `PTZRelativePanTilt` and/or `PTZRelativeZoom`. These features are required for autotracking, but some cameras still fail to respond even if they claim support. If they are missing, autotracking will not work (though basic PTZ in the WebUI might). Avoid cameras with no database entry unless they are confirmed as working below.
| Brand or specific camera | PTZ Controls | Autotracking | Notes |
| ---------------------------- | :----------: | :----------: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| ---------------------------- | :----------: | :----------: | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- |
| Amcrest | ✅ | ✅ | ⛔️ Generally, Amcrest should work, but some older models (like the common IP2M-841) don't support autotracking |
| Amcrest ASH21 | ✅ | ❌ | ONVIF service port: 80 |
| Amcrest IP4M-S2112EW-AI | ✅ | ❌ | FOV relative movement not supported. |
| Amcrest IP5M-1190EW | ✅ | ❌ | ONVIF Port: 80. FOV relative movement not supported. |
| Annke CZ504 | ✅ | ✅ | Annke support provides specific firmware ([V5.7.1 build 250227](https://github.com/pierrepinon/annke_cz504/raw/refs/heads/main/digicap_V5-7-1_build_250227.dav)) to fix an issue with ONVIF "TranslationSpaceFov" |
| Axis Q-6155E | ✅ | ❌ | ONVIF service port: 80; Camera does not support MoveStatus. |
| Ctronics PTZ | ✅ | ❌ | |
| Dahua | ✅ | ✅ | Some low-end Dahuas (lite series, picoo series (commonly), among others) have been reported to not support autotracking. These models usually don't have a four digit model number with chassis prefix and options postfix (e.g. DH-P5AE-PV vs DH-SD49825GB-HNR). |
| Dahua DH-SD2A500HB | ✅ | ❌ | |
| Dahua DH-SD49825GB-HNR | ✅ | ✅ | |
| Dahua DH-P5AE-PV | ❌ | ❌ | |
| Foscam | ✅ | ❌ | In general support PTZ, but not relative move. There are no official ONVIF certifications and tests available on the ONVIF Conformant Products Database |
| Foscam | ✅ | ❌ | In general support PTZ, but not relative move. There are no official ONVIF certifications and tests available on the ONVIF Conformant Products Database | |
| Foscam R5 | ✅ | ❌ | |
| Foscam SD4 | ✅ | ❌ | |
| Hanwha XNP-6550RH | ✅ | ❌ | |

View File

@@ -3,28 +3,16 @@ id: object_classification
title: Object Classification
---
Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API.
Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object.
## Minimum System Requirements
Object classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
Training the model does briefly use a high amount of system resources for about 1-3 minutes per training run. On lower-power devices, training may take longer.
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
A CPU with AVX instructions is required for training and inference.
## Classes
Classes are the categories your model will learn to distinguish between. Each class represents a distinct visual category that the model will predict.
For object classification:
- Define classes that represent different types or attributes of the detected object
- Examples: For `person` objects, classes might be `delivery_person`, `resident`, `stranger`
- Include a `none` class for objects that don't fit any specific category
- Keep classes visually distinct to improve accuracy
### Classification Type
### Sub label vs Attribute
- **Sub label**:
@@ -33,24 +21,9 @@ For object classification:
- Example: `cat``Leo`, `Charlie`, `None`.
- **Attribute**:
- Added as metadata to the object, visible in the Tracked Object Details pane in Explore, `frigate/events` MQTT messages, and the HTTP API response as `<model_name>: <predicted_value>`.
- Added as metadata to the object (visible in /events): `<model_name>: <predicted_value>`.
- Ideal when multiple attributes can coexist independently.
- Example: Detecting if a `person` in a construction yard is wearing a helmet or not, and if they are wearing a yellow vest or not.
:::note
A tracked object can only have a single sub label. If you are using Triggers or Face Recognition and you configure an object classification model for `person` using the sub label type, your sub label may not be assigned correctly as it depends on which enrichment completes its analysis first. This could also occur with `car` objects that are assigned a sub label for a delivery carrier. Consider using the `attribute` type instead.
:::
## Assignment Requirements
Sub labels and attributes are only assigned when both conditions are met:
1. **Threshold**: Each classification attempt must have a confidence score that meets or exceeds the configured `threshold` (default: `0.8`).
2. **Class Consensus**: After at least 3 classification attempts, 60% of attempts must agree on the same class label. If the consensus class is `none`, no assignment is made.
This two-step verification prevents false positives by requiring consistent predictions across multiple frames before assigning a sub label or attribute.
- Example: Detecting if a `person` in a construction yard is wearing a helmet or not.
## Example use cases
@@ -81,50 +54,20 @@ classification:
classification_type: sub_label # or: attribute
```
An optional config, `save_attempts`, can be set as a key under the model name. This defines the number of classification attempts to save in the Recent Classifications tab. For object classification models, the default is 200.
## Training the model
Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of two steps:
Creating and training the model is done within the Frigate UI using the `Classification` page.
### Step 1: Name and Define
Enter a name for your model, select the object label to classify (e.g., `person`, `dog`, `car`), choose the classification type (sub label or attribute), and define your classes. Frigate will automatically include a `none` class for objects that don't fit any specific category.
For example: To classify your two cats, create a model named "Our Cats" and create two classes, "Charlie" and "Leo". A third class, "none", will be created automatically for other neighborhood cats that are not your own.
### Step 2: Assign Training Examples
The system will automatically generate example images from detected objects matching your selected label. You'll be guided through each class one at a time to select which images represent that class. Any images not assigned to a specific class will automatically be assigned to `none` when you complete the last class. Once all images are processed, training will begin automatically.
### Getting Started
When choosing which objects to classify, start with a small number of visually distinct classes and ensure your training samples match camera viewpoints and distances typical for those objects.
If examples for some of your classes do not appear in the grid, you can continue configuring the model without them. New images will begin to appear in the Recent Classifications view. When your missing classes are seen, classify them from this view and retrain your model.
// TODO add this section once UI is implemented. Explain process of selecting objects and curating training examples.
### Improving the Model
- **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
- **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day, weather, and distances.
- **Data collection**: Use the model's Train tab to gather balanced examples across times of day, weather, and distances.
- **Preprocessing**: Ensure examples reflect object crops similar to Frigate's boxes; keep the subject centered.
- **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels.
- **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation.
## Debugging Classification Models
To troubleshoot issues with object classification models, enable debug logging to see detailed information about classification attempts, scores, and consensus calculations.
Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change.
```yaml
logger:
default: info
logs:
frigate.data_processing.real_time.custom_classification: debug
```
The debug logs will show:
- Classification probabilities for each attempt
- Whether scores meet the threshold requirement
- Consensus calculations and when assignments are made
- Object classification history and weighted scores

View File

@@ -3,26 +3,14 @@ id: state_classification
title: State Classification
---
State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate/<camera_name>/classification/<model_name>` MQTT topic and in Home Assistant sensors via the official Frigate integration.
State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region.
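For example, the published state can be watched with any MQTT client; a minimal sketch using `mosquitto_sub`, where the broker address, camera name, and model name are placeholders:

```bash
# Subscribe to state updates for a hypothetical "garage_door" model on the "garage" camera
mosquitto_sub -h mqtt_broker_ip -t "frigate/garage/classification/garage_door"
```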
## Minimum System Requirements
State classification models are lightweight and run very fast on CPU. Inference should be usable on virtually any machine that can run Frigate.
Training the model does briefly use a high amount of system resources for about 1-3 minutes per training run. On lower-power devices, training may take longer.
A CPU with AVX instructions is required for training and inference.
## Classes
Classes are the different states an area on your camera can be in. Each class represents a distinct visual state that the model will learn to recognize.
For state classification:
- Define classes that represent mutually exclusive states
- Examples: `open` and `closed` for a garage door, `on` and `off` for lights
- Use at least 2 classes (typically binary states work best)
- Keep class names clear and descriptive
When running the `-tensorrt` image, Nvidia GPUs will automatically be used to accelerate training.
## Example use cases
@@ -48,60 +36,17 @@ classification:
crop: [0, 180, 220, 400]
```
An optional config, `save_attempts`, can be set as a key under the model name. This defines the number of classification attempts to save in the Recent Classifications tab. For state classification models, the default is 100.
## Training the model
Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of three steps:
Creating and training the model is done within the Frigate UI using the `Classification` page.
### Step 1: Name and Define
### Getting Started
Enter a name for your model and define at least 2 classes (states) that represent mutually exclusive states. For example, `open` and `closed` for a door, or `on` and `off` for lights.
When choosing a portion of the camera frame for state classification, it is important to make the crop tight around the area of interest to avoid extra signals unrelated to what is being classified.
### Step 2: Select the Crop Area
Choose one or more cameras and draw a rectangle over the area of interest for each camera. The crop should be tight around the region you want to classify to avoid extra signals unrelated to what is being classified. You can drag and resize the rectangle to adjust the crop area.
### Step 3: Assign Training Examples
The system will automatically generate example images from your camera feeds. You'll be guided through each class one at a time to select which images represent that state. It's not strictly required to select all images you see. If a state is missing from the samples, you can train it from the Recent tab later.
Once some images are assigned, training will begin automatically.
// TODO add this section once UI is implemented. Explain process of selecting a crop.
### Improving the Model
- **Problem framing**: Keep classes visually distinct and state-focused (e.g., `open`, `closed`, `unknown`). Avoid combining object identity with state in a single model unless necessary.
- **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather.
- **When to train**: Focus on cases where the model is entirely incorrect or flips between states when it should not. There's no need to train additional images when the model is already working consistently.
- **Selecting training images**: Images scoring below 100% due to new conditions (e.g., first snow of the year, seasonal changes) or variations (e.g., objects temporarily in view, insects at night) are good candidates for training, as they represent scenarios different from the default state. Training these lower-scoring images that differ from existing training data helps prevent overfitting. Avoid training large quantities of images that look very similar, especially if they already score 100% as this can lead to overfitting.
## Debugging Classification Models
To troubleshoot issues with state classification models, enable debug logging to see detailed information about classification attempts, scores, and state verification.
Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change.
```yaml
logger:
default: info
logs:
frigate.data_processing.real_time.custom_classification: debug
```
The debug logs will show:
- Classification probabilities for each attempt
- Whether scores meet the threshold requirement
- State verification progress (consecutive detections needed)
- When state changes are published
### Recent Classifications
For state classification, images are only added to recent classifications under specific circumstances:
- **First detection**: The first classification attempt for a camera is always saved
- **State changes**: Images are saved when the detected state differs from the current verified state
- **Pending verification**: Images are saved when there's a pending state change being verified (requires 3 consecutive identical states)
- **Low confidence**: Images with scores below 100% are saved even if the state matches the current state (useful for training)
Images are **not** saved when the state is stable (detected state matches current state) **and** the score is 100%. This prevents unnecessary storage of redundant high-confidence classifications.
- **Data collection**: Use the model's Train tab to gather balanced examples across times of day and weather.

View File

@@ -70,7 +70,7 @@ Fine-tune face recognition with these optional parameters at the global level of
- `min_faces`: Min face recognitions for the sub label to be applied to the person object.
- Default: `1`
- `save_attempts`: Number of images of recognized faces to save for training.
- Default: `200`.
- Default: `100`.
- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
- Default: `True`.
- `device`: Target a specific device to run the face recognition model on (multi-GPU installation).
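Taken together, a minimal sketch of these options at the global level might look like this (assuming the `face_recognition` config key):

```yaml
face_recognition:
  enabled: true
  min_faces: 1                  # min recognitions before the sub label is applied
  save_attempts: 100            # face images kept for training
  blur_confidence_filter: true  # reduce confidence for blurry faces
```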
@@ -114,9 +114,9 @@ When choosing images to include in the face training set it is recommended to al
:::
### Understanding the Recent Recognitions Tab
### Understanding the Train Tab
The Recent Recognitions tab in the face library displays recent face recognition attempts. Detected face images are grouped according to the person they were identified as potentially matching.
The Train tab in the face library displays recent face recognition attempts. Detected face images are grouped according to the person they were identified as potentially matching.
Each face image is labeled with a name (or `Unknown`) along with the confidence score of the recognition attempt. While each image can be used to train the system for a specific person, not all images are suitable for training.
@@ -140,7 +140,7 @@ Once front-facing images are performing well, start choosing slightly off-angle
Start with the [Usage](#usage) section and re-read the [Model Requirements](#model-requirements) above.
1. Ensure `person` is being _detected_. A `person` will automatically be scanned by Frigate for a face. Any detected faces will appear in the Recent Recognitions tab in the Frigate UI's Face Library.
1. Ensure `person` is being _detected_. A `person` will automatically be scanned by Frigate for a face. Any detected faces will appear in the Train tab in the Frigate UI's Face Library.
If you are using a Frigate+ or `face` detecting model:
@@ -161,8 +161,6 @@ Start with the [Usage](#usage) section and re-read the [Model Requirements](#mod
Accuracy is definitely going to be improved with higher quality cameras / streams. It is important to look at the DORI (Detection Observation Recognition Identification) range of your camera, if that specification is posted. This specification explains the distance from the camera at which a person can be detected, observed, recognized, and identified. The identification range is the most relevant here, and the distance listed by the camera is the furthest that face recognition will realistically work.
Some users have also noted that setting the stream in camera firmware to a constant bit rate (CBR) leads to better image clarity than with a variable bit rate (VBR).
### Why can't I bulk upload photos?
It is important to methodically add photos to the library; bulk importing photos (especially from a general photo library) will lead to over-fitting in that particular scenario and hurt recognition performance.
@@ -188,7 +186,7 @@ Avoid training on images that already score highly, as this can lead to over-fit
No, face recognition does not support negative training (i.e., explicitly telling it who someone is _not_). Instead, the best approach is to improve the training data by using a more diverse and representative set of images for each person.
For more guidance, refer to the section above on improving recognition accuracy.
### I see scores above the threshold in the Recent Recognitions tab, but a sub label wasn't assigned?
### I see scores above the threshold in the train tab, but a sub label wasn't assigned?
Frigate considers the recognition scores across all recognition attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to a person object if that person is confidently recognized consistently. This avoids cases where a single high confidence recognition would throw off the results.

View File

@@ -17,17 +17,18 @@ To use Generative AI, you must define a single provider at the global level of y
genai:
provider: gemini
api_key: "{FRIGATE_GEMINI_API_KEY}"
model: gemini-2.0-flash
model: gemini-1.5-flash
cameras:
front_camera:
objects:
genai:
enabled: True # <- enable GenAI for your front camera
use_snapshot: True
objects:
- person
required_zones:
- steps
enabled: True # <- enable GenAI for your front camera
use_snapshot: True
objects:
- person
required_zones:
- steps
indoor_camera:
objects:
genai:
@@ -48,29 +49,15 @@ Using Ollama on CPU is not recommended, high inference times make using Generati
:::
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose a `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://docs.ollama.com/faq#how-does-ollama-handle-concurrent-requests).
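As an illustration, these variables could be set on the Ollama container in a Compose file (the values shown are examples to tune for your hardware):

```yaml
services:
  ollama:
    image: ollama/ollama
    environment:
      - OLLAMA_NUM_PARALLEL=1       # required for use with Frigate
      - OLLAMA_MAX_QUEUE=64         # example value
      - OLLAMA_MAX_LOADED_MODELS=1  # example value
```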
### Model Types: Instruct vs Thinking
Most vision-language models are available as **instruct** models, which are fine-tuned to follow instructions and respond concisely to prompts. However, some models (such as certain Qwen-VL or minigpt variants) offer both **instruct** and **thinking** versions.
- **Instruct models** are always recommended for use with Frigate. These models generate direct, relevant, actionable descriptions that best fit Frigate's object and event summary use case.
- **Thinking models** are fine-tuned for more free-form, open-ended, and speculative outputs, which are typically not concise and may not provide the practical summaries Frigate expects. For this reason, Frigate does **not** recommend or support using thinking models.
Some models are labeled as **hybrid** (capable of both thinking and instruct tasks). In these cases, Frigate will always use instruct-style prompts and specifically disables thinking-mode behaviors to ensure concise, useful responses.
**Recommendation:**
Always select the `-instruct` or documented instruct/tagged variant of any model you use in your Frigate configuration. If in doubt, refer to your model provider's documentation or model library for guidance on the correct model variant to use.
Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose a `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/search?c=vision). Note that Frigate will not automatically download the model you specify in your config, you must download the model to your local instance of Ollama first i.e. by running `ollama pull qwen3-vl:2b-instruct` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`. Note that Frigate will not automatically download the model you specify in your config, you must download the model to your local instance of Ollama first i.e. by running `ollama pull llava:7b` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
:::note
@@ -78,17 +65,13 @@ You should have at least 8 GB of RAM available (or VRAM if running on GPU) to ru
:::
#### Ollama Cloud models
Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).
### Configuration
```yaml
genai:
provider: ollama
base_url: http://localhost:11434
model: qwen3-vl:4b
model: llava:7b
```
## Google Gemini
@@ -97,7 +80,7 @@ Google Gemini has a free tier allowing [15 queries per minute](https://ai.google
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini).
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini). At the time of writing, this includes `gemini-1.5-pro` and `gemini-1.5-flash`.
### Get API Key
@@ -114,7 +97,7 @@ To start using Gemini, you must first get an API key from [Google AI Studio](htt
genai:
provider: gemini
api_key: "{FRIGATE_GEMINI_API_KEY}"
model: gemini-2.0-flash
model: gemini-1.5-flash
```
:::note
@@ -129,7 +112,7 @@ OpenAI does not have a free tier for their API. With the release of gpt-4o, pric
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models).
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
### Get API Key
@@ -156,19 +139,18 @@ Microsoft offers several vision models through Azure OpenAI. A subscription is r
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
### Create Resource and Get API Key
To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key, model name, and resource URL, which must include the `api-version` parameter (see the example below).
To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key and resource URL, which must include the `api-version` parameter (see the example below). The model field is not required in your configuration as the model is part of the deployment name you chose when deploying the resource.
### Configuration
```yaml
genai:
provider: azure_openai
base_url: https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview
model: gpt-5-mini
base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
api_key: "{FRIGATE_OPENAI_API_KEY}"
```
@@ -211,13 +193,13 @@ You are also able to define custom prompts in your configuration.
genai:
provider: ollama
base_url: http://localhost:11434
model: qwen3-vl:8b-instruct
model: llava
objects:
prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
object_prompts:
person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
object_prompts:
person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
```
Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.

View File

@@ -35,18 +35,18 @@ Each model is available in multiple parameter sizes (3b, 4b, 8b, etc.). Larger s
:::tip
If you are trying to use a single model for Frigate and HomeAssistant, it will need to support vision and tools calling. qwen3-VL supports vision and tools simultaneously in Ollama.
If you are trying to use a single model for Frigate and HomeAssistant, it will need to support vision and tools calling. https://github.com/skye-harris/ollama-modelfiles contains optimized model configs for this task.
:::
The following models are recommended:
| Model | Notes |
| ----------------- | -------------------------------------------------------------------- |
| `qwen3-vl` | Strong visual and situational understanding, higher vram requirement |
| `Intern3.5VL` | Relatively fast with good vision comprehension |
| `gemma3` | Strong frame-to-frame understanding, slower inference times |
| `qwen2.5-vl` | Fast but capable model with good vision comprehension |
| Model | Notes |
| ----------------- | ----------------------------------------------------------- |
| `Intern3.5VL`     | Relatively fast with good vision comprehension               |
| `gemma3` | Strong frame-to-frame understanding, slower inference times |
| `qwen2.5vl` | Fast but capable model with good vision comprehension |
| `llava-phi3` | Lightweight and fast model with vision comprehension |
:::note

View File

@@ -39,10 +39,9 @@ You are also able to define custom prompts in your configuration.
genai:
provider: ollama
base_url: http://localhost:11434
model: qwen3-vl:8b-instruct
model: llava
objects:
genai:
prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
object_prompts:
person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."

View File

@@ -7,96 +7,38 @@ Generative AI can be used to automatically generate structured summaries of revi
Summaries are requested automatically from your AI provider for alert review items when the activity has ended; they can also optionally be enabled for detections.
Generative AI review summaries can also be toggled dynamically for a [camera via MQTT](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset).
Generative AI review summaries can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/review_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_namereviewdescriptionsset).
## Review Summary Usage and Best Practices
Review summaries provide structured JSON responses that are saved for each review item:
```
- `title` (string): A concise, direct title that describes the purpose or overall action (e.g., "Person taking out trash", "Joe walking dog").
- `scene` (string): A narrative description of what happens across the sequence from start to finish, including setting, detected objects, and their observable actions.
- `shortSummary` (string): A brief 2-sentence summary of the scene, suitable for notifications. This is a condensed version of the scene description.
- `confidence` (float): 0-1 confidence in the analysis. Higher confidence when objects/actions are clearly visible and context is unambiguous.
- `scene` (string): A full description including setting, entities, actions, and any plausible supported inferences.
- `confidence` (float): 0-1 confidence in the analysis.
- `other_concerns` (list): List of user-defined concerns that may need additional investigation.
- `potential_threat_level` (integer): 0, 1, or 2 as defined below.
Threat-level definitions:
- 0 — Typical or expected activity for this location/time (includes residents, guests, or known animals engaged in normal activities, even if they glance around or scan surroundings).
- 1 — Unusual or suspicious activity: At least one security-relevant behavior is present **and not explainable by a normal residential activity**.
- 2 — Active or immediate threat: Breaking in, vandalism, aggression, weapon display.
```
This will show in multiple places in the UI to give additional context about each activity, and allow viewing more details when extra attention is required. Frigate's built-in notifications will automatically show the title and `shortSummary` when the data is available, while the full `scene` description can be viewed in the UI for detailed review.
This will show in the UI as a list of concerns that each review item has along with the general description.
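For illustration only, a response following this schema might look like the following (all values are hypothetical):

```json
{
  "title": "Person delivering a package",
  "scene": "A delivery driver walks up the front path carrying a box, places it on the porch, and returns to the street.",
  "shortSummary": "A delivery driver left a package on the front porch. No unusual behavior was observed.",
  "confidence": 0.92,
  "other_concerns": [],
  "potential_threat_level": 0
}
```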
### Defining Typical Activity
Each installation and even camera can have different parameters for what is considered suspicious activity. Frigate allows the `activity_context_prompt` to be defined globally and at the camera level, which allows you to define more specifically what should be considered normal activity. It is important that this is not overly specific as it can sway the output of the response.
<details>
<summary>Default Activity Context Prompt</summary>
Each installation and even camera can have different parameters for what is considered suspicious activity. Frigate allows the `activity_context_prompt` to be defined globally and at the camera level, which allows you to define more specifically what should be considered normal activity. It is important that this is not overly specific as it can sway the output of the response. The default `activity_context_prompt` is below:
```
### Normal Activity Indicators (Level 0)
- Known/verified people in any zone at any time
- People with pets in residential areas
- Deliveries or services during daytime/evening (6 AM - 10 PM): carrying packages to doors/porches, placing items, leaving
- Services/maintenance workers with visible tools, uniforms, or service vehicles during daytime
- Activity confined to public areas only (sidewalks, streets) without entering property at any time
### Suspicious Activity Indicators (Level 1)
- **Testing or attempting to open doors/windows/handles on vehicles or buildings** — ALWAYS Level 1 regardless of time or duration
- **Unidentified person in private areas (driveways, near vehicles/buildings) during late night/early morning (11 PM - 5 AM)** — ALWAYS Level 1 regardless of activity or duration
- Taking items that don't belong to them (packages, objects from porches/driveways)
- Climbing or jumping fences/barriers to access property
- Attempting to conceal actions or items from view
- Prolonged loitering: remaining in same area without visible purpose throughout most of the sequence
### Critical Threat Indicators (Level 2)
- Holding break-in tools (crowbars, pry bars, bolt cutters)
- Weapons visible (guns, knives, bats used aggressively)
- Forced entry in progress
- Physical aggression or violence
- Active property damage or theft in progress
### Assessment Guidance
Evaluate in this order:
1. **If person is verified/known** → Level 0 regardless of time or activity
2. **If person is unidentified:**
- Check time: If late night/early morning (11 PM - 5 AM) AND in private areas (driveways, near vehicles/buildings) → Level 1
- Check actions: If testing doors/handles, taking items, climbing → Level 1
- Otherwise, if daytime/evening (6 AM - 10 PM) with clear legitimate purpose (delivery, service worker) → Level 0
3. **Escalate to Level 2 if:** Weapons, break-in tools, forced entry in progress, violence, or active property damage visible (escalates from Level 0 or 1)
The mere presence of an unidentified person in private areas during late night hours is inherently suspicious and warrants human review, regardless of what activity they appear to be doing or how brief the sequence is.
- **Zone context is critical**: Private enclosed spaces (back yards, back decks, fenced areas, inside garages) are resident territory where brief transient activity, routine tasks, and pet care are expected and normal. Front yards, driveways, and porches are semi-public but still resident spaces where deliveries, parking, and coming/going are routine. Consider whether the zone and activity align with normal residential use.
- **Person + Pet = Normal Activity**: When both "Person" and "Dog" (or "Cat") are detected together in residential zones, this is routine pet care activity (walking, letting out, playing, supervising). Assign Level 0 unless there are OTHER strong suspicious behaviors present (like testing doors, taking items, etc.). A person with their pet in a residential zone is baseline normal activity.
- Brief appearances in private zones (back yards, garages) are normal residential patterns.
- Normal residential activity includes: residents, family members, guests, deliveries, services, maintenance workers, routine property use (parking, unloading, mail pickup, trash removal).
- Brief movement with legitimate items (bags, packages, tools, equipment) in appropriate zones is routine.
```
</details>
### Image Source
By default, review summaries use preview images (cached preview frames) which have a lower resolution but use fewer tokens per image. For better image quality and more detailed analysis, you can configure Frigate to extract frames directly from recordings at a higher resolution:
```yaml
review:
genai:
enabled: true
image_source: recordings # Options: "preview" (default) or "recordings"
```
When using `recordings`, frames are extracted at 480px height while maintaining the camera's original aspect ratio, providing better detail for the LLM while being mindful of context window size. This is particularly useful for scenarios where fine details matter, such as identifying license plates, reading text, or analyzing distant objects.
The number of frames sent to the LLM is dynamically calculated based on:
- Your LLM provider's context window size
- The camera's resolution and aspect ratio (ultrawide cameras like 32:9 use more tokens per image)
- The image source (recordings use more tokens than preview images)
Frame counts are automatically optimized to use ~98% of the available context window while capping at 20 frames maximum to ensure reasonable inference times. Note that using recordings will:
- Provide higher quality images to the LLM (480p vs 180p preview images)
- Use more tokens per image due to higher resolution
- Result in fewer frames being sent for ultrawide cameras due to larger image size
- Require that recordings are enabled for the camera
If recordings are not available for a given time period, the system will automatically fall back to using preview frames.
### Additional Concerns
Along with the concern of suspicious activity or immediate threat, you may have concerns such as animals in your garden or a gate being left open. These concerns can be configured so that the review summaries will make note of them if the activity requires additional review. For example:
@@ -111,10 +53,4 @@ review:
## Review Reports
Along with individual review item summaries, Generative AI provides the ability to request a report of a given time period. For example, while on vacation you can get a daily report of any suspicious activity or other concerns that may require review.
### Requesting Reports Programmatically
Review reports can be requested via the [API](/integrations/api#review-summarization) by sending a POST request to `/api/review/summarize/start/{start_ts}/end/{end_ts}` with Unix timestamps.
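As an example, a report for a 24-hour window could be requested like this (host, port, and timestamps are placeholders):

```bash
curl -X POST "http://frigate.local:5000/api/review/summarize/start/1735689600/end/1735776000"
```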
For Home Assistant users, there is a built-in service (`frigate.review_summarize`) that makes it easy to request review reports as part of automations or scripts. This allows you to automatically generate daily summaries, vacation reports, or custom time period reports based on your specific needs.
Along with individual review item summaries, Generative AI provides the ability to request a report of a given time period. For example, you can get a daily report while on a vacation of any suspicious activity or other concerns that may require review.

View File

@@ -5,7 +5,7 @@ title: Enrichments
# Enrichments
Some of Frigate's enrichments can use a discrete GPU or integrated GPU for accelerated processing.
Some of Frigate's enrichments can use a discrete GPU / NPU for accelerated processing.
## Requirements
@@ -13,15 +13,13 @@ Object detection and enrichments (like Semantic Search, Face Recognition, and Li
- **AMD**
- ROCm support in the `-rocm` Frigate image is automatically detected for enrichments, but only some enrichment models are available due to ROCm's focus on LLMs and limited stability with certain neural network models. Frigate disables models that perform poorly or are unstable to ensure reliable operation, so only compatible enrichments may be active.
- ROCm will automatically be detected and used for enrichments in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used for enrichments in the default Frigate image.
- **Note:** Intel NPUs have limited model support for enrichments. GPU is recommended for enrichments when available.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image.

View File

@@ -3,65 +3,78 @@ id: hardware_acceleration_video
title: Video Decoding
---
import CommunityBadge from '@site/src/components/CommunityBadge';
# Video Decoding
It is highly recommended to use an integrated or discrete GPU for hardware accelerated video decoding in Frigate.
It is highly recommended to use a GPU for hardware accelerated video decoding in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg.
Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg. To verify that hardware acceleration is working:
- Check the logs: A message will either say that hardware acceleration was automatically detected, or there will be a warning that no hardware acceleration was automatically detected
- If hardware acceleration is specified in the config, verification can be done by ensuring the logs are free from errors. There is no CPU fallback for hardware acceleration.
Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro
:::info
Frigate supports presets for optimal hardware accelerated video decoding:
## Raspberry Pi 3/4
**AMD**
Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory).
If you are using the HA Add-on, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration.
- [AMD](#amd-based-cpus): Frigate can utilize modern AMD integrated GPUs and AMD discrete GPUs to accelerate video decoding.
```yaml
# if you want to decode a h264 stream
ffmpeg:
hwaccel_args: preset-rpi-64-h264
**Intel**
# if you want to decode a h265 (hevc) stream
ffmpeg:
hwaccel_args: preset-rpi-64-h265
```
- [Intel](#intel-based-cpus): Frigate can utilize most Intel integrated GPUs and Arc GPUs to accelerate video decoding.
:::note
**Nvidia GPU**
If running Frigate through Docker, you either need to run in privileged mode or
map the `/dev/video*` devices to Frigate. With Docker Compose add:
- [Nvidia GPU](#nvidia-gpus): Frigate can utilize most modern Nvidia GPUs to accelerate video decoding.
```yaml
services:
frigate:
...
devices:
- /dev/video11:/dev/video11
```
**Raspberry Pi 3/4**
Or with `docker run`:
- [Raspberry Pi](#raspberry-pi-34): Frigate can utilize the media engine in the Raspberry Pi 3 and 4 to slightly accelerate video decoding.
```bash
docker run -d \
--name frigate \
...
--device /dev/video11 \
ghcr.io/blakeblackshear/frigate:stable
```
**Nvidia Jetson** <CommunityBadge />
`/dev/video11` is the correct device (on Raspberry Pi 4B). You can check
by running the following and looking for `H264`:
- [Jetson](#nvidia-jetson): Frigate can utilize the media engine in Jetson hardware to accelerate video decoding.
```bash
for d in /dev/video*; do
echo -e "---\n$d"
v4l2-ctl --list-formats-ext -d $d
done
```
**Rockchip** <CommunityBadge />
- [RKNN](#rockchip-platform): Frigate can utilize the media engine in RockChip SOCs to accelerate video decoding.
**Other Hardware**
Depending on your system, these presets may not be compatible, and you may need to use manual hwaccel args to take advantage of your hardware. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro
Or map in all the `/dev/video*` devices.
:::
## Intel-based CPUs
Frigate can utilize most Intel integrated GPUs and Arc GPUs to accelerate video decoding.
:::info
**Recommended hwaccel Preset**
| CPU Generation | Intel Driver | Recommended Preset | Notes |
| -------------- | ------------ | ------------------- | ------------------------------------------- |
| gen1 - gen5 | i965 | preset-vaapi | qsv is not supported, may not support H.265 |
| gen6 - gen7 | iHD | preset-vaapi | qsv is not supported |
| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-\* can also be used |
| gen13+ | iHD / Xe | preset-intel-qsv-\* | |
| Intel Arc GPU | iHD / Xe | preset-intel-qsv-\* | |
| CPU Generation | Intel Driver | Recommended Preset | Notes |
| -------------- | ------------ | ------------------- | ------------------------------------ |
| gen1 - gen5 | i965 | preset-vaapi | qsv is not supported |
| gen6 - gen7 | iHD | preset-vaapi | qsv is not supported |
| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-\* can also be used |
| gen13+ | iHD / Xe | preset-intel-qsv-\* | |
| Intel Arc GPU | iHD / Xe | preset-intel-qsv-\* | |
:::
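For example, on a gen8-gen12 integrated GPU the VAAPI preset from the table above could be configured like this:

```yaml
ffmpeg:
  hwaccel_args: preset-vaapi
```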
@@ -182,17 +195,15 @@ telemetry:
If you are passing in a device path, make sure you've passed the device through to the container.
## AMD-based CPUs
## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver
Frigate can utilize modern AMD integrated GPUs and AMD discrete GPUs to accelerate video decoding using VAAPI.
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
### Configuring Radeon Driver
:::note
You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars).
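As a minimal sketch, the variable could be added to a Compose file like this (service name and layout are illustrative):

```yaml
services:
  frigate:
    environment:
      - LIBVA_DRIVER_NAME=radeonsi
```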
### Via VAAPI
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
:::
```yaml
ffmpeg:
@@ -253,7 +264,7 @@ processes:
:::note
`nvidia-smi` will not show `ffmpeg` processes when run inside the container [due to docker limitations](https://github.com/NVIDIA/nvidia-docker/issues/179#issuecomment-645579458).
`nvidia-smi` may not show `ffmpeg` processes when run inside the container [due to docker limitations](https://github.com/NVIDIA/nvidia-docker/issues/179#issuecomment-645579458).
:::
@@ -289,63 +300,12 @@ If you do not see these processes, check the `docker logs` for the container and
These instructions were originally based on the [Jellyfin documentation](https://jellyfin.org/docs/general/administration/hardware-acceleration.html#nvidia-hardware-acceleration-on-docker-linux).
## Raspberry Pi 3/4
Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory).
If you are using the HA Add-on, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration.
```yaml
# if you want to decode a h264 stream
ffmpeg:
hwaccel_args: preset-rpi-64-h264
# if you want to decode a h265 (hevc) stream
ffmpeg:
hwaccel_args: preset-rpi-64-h265
```
:::note
If running Frigate through Docker, you either need to run in privileged mode or
map the `/dev/video*` devices to Frigate. With Docker Compose add:
```yaml
services:
frigate:
...
devices:
- /dev/video11:/dev/video11
```
Or with `docker run`:
```bash
docker run -d \
--name frigate \
...
--device /dev/video11 \
ghcr.io/blakeblackshear/frigate:stable
```
`/dev/video11` is the correct device (on Raspberry Pi 4B). You can check
by running the following and looking for `H264`:
```bash
for d in /dev/video*; do
echo -e "---\n$d"
v4l2-ctl --list-formats-ext -d $d
done
```
Or map in all the `/dev/video*` devices.
:::
# Community Supported
## NVIDIA Jetson
## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano)
A separate set of docker images is available for Jetson devices. They come with an `ffmpeg` build with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so Frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection.
A separate set of docker images is available that is based on Jetpack/L4T. They come with an `ffmpeg` build
with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so Frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection.
You will need to use the image with the nvidia container runtime:

View File

@@ -3,18 +3,18 @@ id: license_plate_recognition
title: License Plate Recognition (LPR)
---
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a [known](#matching) name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. When a vehicle becomes stationary, LPR continues to run for a short time after to attempt recognition.
When a plate is recognized, the details are:
- Added as a `sub_label` (if [known](#matching)) or the `recognized_license_plate` field (if unknown) to a tracked object.
- Viewable in the Details pane in Review/History.
- Added as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown) to a tracked object.
- Viewable in the Review Item Details pane in Review (sub labels).
- Viewable in the Tracked Object Details pane in Explore (sub labels and recognized license plates).
- Filterable through the More Filters menu in Explore.
- Published via the `frigate/events` MQTT topic as a `sub_label` ([known](#matching)) or `recognized_license_plate` (unknown) for the `car` or `motorcycle` tracked object.
- Published via the `frigate/tracked_object_update` MQTT topic with `name` (if [known](#matching)) and `plate`.
- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the `car` or `motorcycle` tracked object.
- Published via the `frigate/tracked_object_update` MQTT topic with `name` (if known) and `plate`.
## Model Requirements
@@ -30,7 +30,7 @@ In the default mode, Frigate's LPR needs to first detect a `car` or `motorcycle`
## Minimum System Requirements
License plate recognition works by running AI models locally on your system. The YOLOv9 plate detector model and the OCR models ([PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)) are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.
License plate recognition works by running AI models locally on your system. The models are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.
## Configuration
@@ -74,8 +74,8 @@ Fine-tune the LPR feature using these optional parameters at the global level of
- Default: `small`
- This can be `small` or `large`.
- The `small` model is fast and identifies groups of Latin and Chinese characters.
- The `large` model identifies Latin characters only, and uses an enhanced text detector to find characters on multi-line plates. It is significantly slower than the `small` model.
- If your country or region does not use multi-line plates, you should use the `small` model as performance is much better for single-line plates.
- The `large` model identifies Latin characters only, but uses an enhanced text detector and is more capable at finding characters on multi-line plates. It is significantly slower than the `small` model. Note that using the `large` model does not improve _text recognition_, but it may improve _text detection_.
- For most users, the `small` model is recommended.
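A minimal sketch of this option at the global level, assuming the `lpr` config key used elsewhere on this page:

```yaml
lpr:
  enabled: true
  model_size: small   # use "large" only if you need multi-line plate detection
```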
### Recognition
@@ -107,23 +107,23 @@ Fine-tune the LPR feature using these optional parameters at the global level of
### Normalization Rules
- **`replace_rules`**: List of regex replacement rules to normalize detected plates. These rules are applied sequentially and are applied _before_ the `format` regex, if specified. Each rule must have a `pattern` (which can be a string or a regex) and `replacement` (a string, which also supports [backrefs](https://docs.python.org/3/library/re.html#re.sub) like `\1`). These rules are useful for dealing with common OCR issues like noise characters, separators, or confusions (e.g., 'O'→'0').
- **`replace_rules`**: List of regex replacement rules to normalize detected plates. These rules are applied sequentially. Each rule must have a `pattern` (which can be a string or a regex, prepended by `r`) and `replacement` (a string, which also supports [backrefs](https://docs.python.org/3/library/re.html#re.sub) like `\1`). These rules are useful for dealing with common OCR issues like noise characters, separators, or confusions (e.g., 'O'→'0').
These rules must be defined at the global level of your `lpr` config.
```yaml
lpr:
replace_rules:
- pattern: "[%#*?]" # Remove noise symbols
- pattern: r'[%#*?]' # Remove noise symbols
replacement: ""
- pattern: "[= ]" # Normalize = or space to dash
- pattern: r'[= ]' # Normalize = or space to dash
replacement: "-"
- pattern: "O" # Swap 'O' to '0' (common OCR error)
replacement: "0"
- pattern: "I" # Swap 'I' to '1'
- pattern: r'I' # Swap 'I' to '1'
replacement: "1"
- pattern: '(\w{3})(\w{3})' # Split 6 chars into groups (e.g., ABC123 → ABC-123) - use single quotes to preserve backslashes
replacement: '\1-\2'
- pattern: r'(\w{3})(\w{3})' # Split 6 chars into groups (e.g., ABC123 → ABC-123)
replacement: r'\1-\2'
```
- Rules fire in order. In the example above: clean noise first, then separators, then swaps, then splits.
@@ -178,7 +178,7 @@ lpr:
:::note
If a camera is configured to detect `car` or `motorcycle` but you don't want Frigate to run LPR for that camera, disable LPR at the camera level:
If you want to detect cars on cameras but don't want to use resources to run LPR on those cars, you should disable LPR for those specific cameras.
```yaml
cameras:
@@ -306,7 +306,7 @@ With this setup:
- Review items will always be classified as a `detection`.
- Snapshots will always be saved.
- Zones and object masks are **not** used.
- The `frigate/events` MQTT topic will **not** publish tracked object updates with the license plate bounding box and score, though `frigate/reviews` will publish if recordings are enabled. If a plate is recognized as a [known](#matching) plate, publishing will occur with an updated `sub_label` field. If characters are recognized, publishing will occur with an updated `recognized_license_plate` field.
- The `frigate/events` MQTT topic will **not** publish tracked object updates with the license plate bounding box and score, though `frigate/reviews` will publish if recordings are enabled. If a plate is recognized as a known plate, publishing will occur with an updated `sub_label` field. If characters are recognized, publishing will occur with an updated `recognized_license_plate` field.
- License plate snapshots are saved at the highest-scoring moment and appear in Explore.
- Debug view will not show `license_plate` bounding boxes.
@@ -374,19 +374,9 @@ Use `match_distance` to allow small character mismatches. Alternatively, define
Start with ["Why isn't my license plate being detected and recognized?"](#why-isnt-my-license-plate-being-detected-and-recognized). If you are still having issues, work through these steps.
1. Start with a simplified LPR config.
1. Enable debug logs to see exactly what Frigate is doing.
- Remove or comment out everything in your LPR config, including `min_area`, `min_plate_length`, `format`, `known_plates`, or `enhancement` values so that the only values left are `enabled` and `debug_save_plates`. This will run LPR with Frigate's default values.
```yaml
lpr:
enabled: true
debug_save_plates: true
```
2. Enable debug logs to see exactly what Frigate is doing.
- Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only keep this enabled when necessary. Restart Frigate after this change.
- Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only keep this enabled when necessary.
```yaml
logger:
@@ -395,7 +385,7 @@ Start with ["Why isn't my license plate being detected and recognized?"](#why-is
frigate.data_processing.common.license_plate: debug
```
3. Ensure your plates are being _detected_.
2. Ensure your plates are being _detected_.
If you are using a Frigate+ or `license_plate` detecting model:
@@ -408,7 +398,7 @@ Start with ["Why isn't my license plate being detected and recognized?"](#why-is
- Watch the debug logs for messages from the YOLOv9 plate detector.
- You may need to adjust your `detection_threshold` if your plates are not being detected.
4. Ensure the characters on detected plates are being _recognized_.
3. Ensure the characters on detected plates are being _recognized_.
- Enable `debug_save_plates` to save images of detected text on plates to the clips directory (`/media/frigate/clips/lpr`). Ensure these images are readable and the text is clear.
- Watch the debug view to see plates recognized in real-time. For non-dedicated LPR cameras, the `car` or `motorcycle` label will change to the recognized plate when LPR is enabled and working.

View File

@@ -15,7 +15,7 @@ The jsmpeg live view will use more browser and client GPU resources. Using go2rt
| ------ | ------------------------------------- | ---------- | ---------------------------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| jsmpeg | same as `detect -> fps`, capped at 10 | 720p | no | no | Resolution is configurable, but go2rtc is recommended if you want higher resolutions and better frame rates. jsmpeg is Frigate's default without go2rtc configured. |
| mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. |
| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. |
| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration, doesn't support h.265. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. |
### Camera Settings Recommendations
@@ -127,8 +127,7 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req
```
- For access through Tailscale, the Frigate system's Tailscale IP must be added as a WebRTC candidate. Tailscale IPs all start with `100.`, and are reserved within the `100.64.0.0/10` CIDR block.
- Note that some browsers may not support H.265 (HEVC). You can check your browser's current version for H.265 compatibility [here](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness).
- Note that WebRTC does not support H.265.
:::tip
@@ -175,12 +174,10 @@ For devices that support two way talk, Frigate can be configured to use the feat
- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
- For the Home Assistant Frigate card, [follow the docs](http://card.camera/#/usage/2-way-audio) for the correct source.
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-cameras)
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
As a starting point to check compatibility for your camera, view the list of cameras supported for two-way talk on the [go2rtc repository](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#two-way-audio). For cameras in the category `ONVIF Profile T`, you can use the [ONVIF Conformant Products Database](https://www.onvif.org/conformant-products/)'s FeatureList to check for the presence of `AudioOutput`. A camera that supports `ONVIF Profile T` _usually_ supports this, but due to inconsistent support, a camera that explicitly lists this feature may still not work. If no entry for your camera exists in the database, it is recommended not to buy it, or to consult the manufacturer's support about feature availability.
To prevent go2rtc from blocking other applications from accessing your camera's two-way audio, you must configure your stream with `#backchannel=0`. See [preventing go2rtc from blocking two-way audio](/configuration/restream#two-way-talk-restream) in the restream documentation.
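As a sketch, appending the flag to a go2rtc stream source might look like this (stream name, address, and credentials are placeholders):

```yaml
go2rtc:
  streams:
    doorbell_record:   # hypothetical stream used only for recording/detection
      - rtsp://user:pass@192.168.1.10:554/stream#backchannel=0  # disables two-way audio on this connection
```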
### Streaming options on camera group dashboards
Frigate provides a dialog in the Camera Group Edit pane with several options for streaming on a camera group's dashboard. These settings are _per device_ and are saved in your device's local storage.
@@ -217,42 +214,6 @@ For restreamed cameras, go2rtc remains active but does not use system resources
Note that disabling a camera through the config file (`enabled: False`) removes all related UI elements, including historical footage access. To retain access while disabling the camera, keep it enabled in the config and use the UI or MQTT to disable it temporarily.
### Live player error messages
When your browser runs into problems playing back your camera streams, it will log short error messages to the browser console. They indicate playback, codec, or network issues on the client/browser side, not something server side with Frigate itself. Below are the common messages you may see and simple actions you can take to try to resolve them.
- **startup**
- What it means: The player failed to initialize or connect to the live stream (network or startup error).
- What to try: Reload the Live view or click _Reset_. Verify `go2rtc` is running and the camera stream is reachable. Try switching to a different stream from the Live UI dropdown (if available) or use a different browser.
- Possible console messages from the player code:
- `Error opening MediaSource.`
- `Browser reported a network error.`
- `Max error count ${errorCount} exceeded.` (the numeric value will vary)
- **mse-decode**
- What it means: The browser reported a decoding error while trying to play the stream, which usually is a result of a codec incompatibility or corrupted frames.
- What to try: Check the browser console for the supported and negotiated codecs. Ensure your camera/restream is using H.264 video and AAC audio (these are the most compatible). If your camera uses a non-standard audio codec, configure `go2rtc` to transcode the stream to AAC. Try another browser (some browsers have stricter MSE/codec support) and, for iPhone, ensure you're on iOS 17.1 or newer.
- Possible console messages from the player code:
- `Safari cannot open MediaSource.`
- `Safari reported InvalidStateError.`
- `Safari reported decoding errors.`
- **stalled**
- What it means: Playback has stalled because the player has fallen too far behind live (extended buffering or no data arriving).
- What to try: This is usually indicative of the browser struggling to decode too many high-resolution streams at once. Try selecting a lower-bandwidth stream (substream), reduce the number of live streams open, improve the network connection, or lower the camera resolution. Also check your camera's keyframe (I-frame) interval — shorter intervals make playback start and recover faster. You can also try increasing the timeout value in the UI pane of Frigate's settings.
- Possible console messages from the player code:
- `Buffer time (10 seconds) exceeded, browser may not be playing media correctly.`
- `Media playback has stalled after <n> seconds due to insufficient buffering or a network interruption.` (the seconds value will vary)
## Live view FAQ
1. **Why don't I have audio in my Live view?**
@@ -316,38 +277,3 @@ When your browser runs into problems playing back your camera streams, it will l
7. **My camera streams have lots of visual artifacts / distortion.**
Some cameras don't include the hardware to support multiple connections to the high resolution stream, and this can cause unexpected behavior. In this case it is recommended to [restream](./restream.md) the high resolution stream so that it can be used for live view and recordings.
8. **Why does my camera stream switch aspect ratios on the Live dashboard?**
Your camera may change aspect ratios on the dashboard because Frigate uses different streams for different purposes. With go2rtc and Smart Streaming, Frigate shows a static image from the `detect` stream when no activity is present, and switches to the live stream when motion is detected. The camera image will change size if your streams use different aspect ratios.
To prevent this, make the `detect` stream match the go2rtc live stream's aspect ratio (resolution does not need to match, just the aspect ratio). You can either adjust the camera's output resolution or set the `width` and `height` values in your config's `detect` section to a resolution with an aspect ratio that matches.
Example: Resolutions from two streams
- Mismatched (may cause aspect ratio switching on the dashboard):
- Live/go2rtc stream: 1920x1080 (16:9)
- Detect stream: 640x352 (~1.82:1, not 16:9)
- Matched (prevents switching):
- Live/go2rtc stream: 1920x1080 (16:9)
- Detect stream: 640x360 (16:9)
You can update the detect settings in your camera config to match the aspect ratio of your go2rtc live stream. For example:
```yaml
cameras:
front_door:
detect:
width: 640
height: 360 # set this to 360 instead of 352
ffmpeg:
inputs:
- path: rtsp://127.0.0.1:8554/front_door # main stream 1920x1080
roles:
- record
- path: rtsp://127.0.0.1:8554/front_door_sub # sub stream 640x352
roles:
- detect
```
View File
@@ -28,6 +28,7 @@ To create a poly mask:
5. Click the plus icon under the type of mask or zone you would like to create
6. Click on the camera's latest image to create the points for a masked area. Click the first point again to close the polygon.
7. When you've finished creating your mask, press Save.
8. Restart Frigate to apply your changes.
Your config file will be updated with the relative coordinates of the mask/zone:
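A sketch of what the saved config may look like (the coordinates are illustrative; zones use the same coordinate format):

```yaml
motion:
  mask:
    - 0.000,0.427,0.002,0.000,0.999,0.000,0.999,0.781
```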
View File
@@ -3,8 +3,6 @@ id: object_detectors
title: Object Detectors
---
import CommunityBadge from '@site/src/components/CommunityBadge';
# Supported Hardware
:::info
@@ -13,10 +11,10 @@ Frigate supports multiple different detectors that work on different types of ha
**Most Hardware**
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB, Mini PCIe, and m.2 formats allowing for a wide range of compatibility with devices.
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
- <CommunityBadge /> [MemryX](#memryx-mx3): The MX3 Acceleration module is available in m.2 format, offering broad compatibility across various platforms.
- <CommunityBadge /> [DeGirum](#degirum): Service for using hardware devices in the cloud or locally. Hardware and models are provided in the cloud on [their website](https://hub.degirum.com).
- [MemryX](#memryx-mx3): The MX3 Acceleration module is available in m.2 format, offering broad compatibility across various platforms.
- [DeGirum](#degirum): Service for using hardware devices in the cloud or locally. Hardware and models are provided in the cloud on [their website](https://hub.degirum.com).
**AMD**
@@ -36,16 +34,16 @@ Frigate supports multiple different detectors that work on different types of ha
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
**Nvidia Jetson** <CommunityBadge />
**Nvidia Jetson**
- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Jetson devices, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt-jp6` Frigate image when a supported ONNX model is configured.
**Rockchip** <CommunityBadge />
**Rockchip**
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
**Synaptics** <CommunityBadge />
**Synaptics**
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs.
@@ -69,10 +67,12 @@ Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8
## Edge TPU Detector
The Edge TPU detector type runs TensorFlow Lite models utilizing the Google Coral delegate for hardware acceleration. To configure an Edge TPU detector, set the `"type"` attribute to `"edgetpu"`.
The Edge TPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an Edge TPU detector, set the `"type"` attribute to `"edgetpu"`.
The Edge TPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). If not set, the delegate will use the first device it finds.
A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.
:::tip
See [common Edge TPU troubleshooting steps](/troubleshooting/edgetpu) if the Edge TPU is not detected.
@@ -144,44 +144,6 @@ detectors:
device: pci
```
### EdgeTPU Supported Models
| Model | Notes |
| ----------------------- | ------------------------------------------- |
| [Mobiledet](#mobiledet) | Default model |
| [YOLOv9](#yolov9) | More accurate but slower than default model |
#### Mobiledet
A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.
#### YOLOv9
YOLOv9 models that are compiled for TensorFlow Lite and properly quantized are supported, but not included by default. [Download the model](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite), bind mount the file into the container, and provide the path with `model.path`. Note that the linked model requires a 17-label [labelmap file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) that includes only 17 COCO classes.
<details>
<summary>YOLOv9 Setup & Config</summary>
After placing the downloaded files for the tflite model and labels in your config folder, you can use the following configuration:
```yaml
detectors:
coral:
type: edgetpu
device: usb
model:
model_type: yolo-generic
width: 320 # <--- should match the imgsize of the model, typically 320
height: 320 # <--- should match the imgsize of the model, typically 320
path: /config/model_cache/yolov9-s-relu6-best_320_int8_edgetpu.tflite
labelmap_path: /config/labels-coco17.txt
```
Note that due to hardware limitations of the Coral, the labelmap is a subset of the COCO labels and includes only 17 object classes.
</details>
---
## Hailo-8
@@ -299,8 +261,6 @@ OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will al
:::tip
**NPU + GPU Systems:** If you have both NPU and GPU available (Intel Core Ultra processors), use NPU for object detection and GPU for enrichments (semantic search, face recognition, etc.) for best performance and compatibility.
When using many cameras, one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:
```yaml
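# a sketch: two OpenVINO detector instances sharing one GPU (detector names are arbitrary)
detectors:
  ov_0:
    type: openvino
    device: GPU
  ov_1:
    type: openvino
    device: GPU
```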
@@ -323,7 +283,7 @@ detectors:
| [RF-DETR](#rf-detr) | ✅ | ✅ | Requires XE iGPU or Arc |
| [YOLO-NAS](#yolo-nas) | ✅ | ✅ | |
| [MobileNet v2](#ssdlite-mobilenet-v2) | ✅ | ✅ | Fast and lightweight model, less accurate than larger models |
| [YOLOX](#yolox) | ✅ | ? | |
| [D-FINE](#d-fine) | ❌ | ❌ | |
#### SSDLite MobileNet v2
@@ -400,7 +360,7 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
:::warning
If you are using a Frigate+ model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
:::
@@ -477,7 +437,7 @@ After placing the downloaded onnx model in your config/model_cache folder, you c
detectors:
ov:
type: openvino
device: CPU
device: GPU
model:
model_type: dfine
@@ -569,10 +529,10 @@ When using Docker Compose:
```yaml
services:
frigate:
...
devices:
- /dev/dri
- /dev/kfd
---
devices:
- /dev/dri
- /dev/kfd
```
For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed).
@@ -600,9 +560,9 @@ When using Docker Compose:
```yaml
services:
frigate:
...
environment:
HSA_OVERRIDE_GFX_VERSION: "10.0.0"
environment:
HSA_OVERRIDE_GFX_VERSION: "10.0.0"
```
Figuring out which version you need can be complicated, as you can't determine the chipset and driver from the AMD brand name alone.
@@ -740,7 +700,7 @@ The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv
:::warning
If you are using a Frigate+ model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
If you are using a Frigate+ YOLOv9 model, you should not define any of the below `model` parameters in your config except for `path`. See [the Frigate+ model docs](/plus/first_model#step-3-set-your-model-id-in-the-config) for more information on setting up your model.
:::
@@ -1000,6 +960,7 @@ model:
# path: /config/yolov9.zip
# The .zip file must contain:
# ├── yolov9.dfp (a file ending with .dfp)
# └── yolov9_post.onnx (optional; only if the model includes a cropped post-processing network)
```
#### YOLOX
@@ -1494,7 +1455,7 @@ COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
WORKDIR /dfine
RUN git clone https://github.com/Peterande/D-FINE.git .
RUN uv pip install --system -r requirements.txt
RUN uv pip install --system onnx onnxruntime onnxsim onnxscript
RUN uv pip install --system onnx onnxruntime onnxsim
# Create output directory and download checkpoint
RUN mkdir -p output
ARG MODEL_SIZE
@@ -1508,19 +1469,19 @@ COPY --from=build /dfine/output/dfine_${MODEL_SIZE}_obj2coco.onnx /dfine-${MODEL
EOF
```
### Downloading RF-DETR Model
### Download RF-DETR Model
RF-DETR can be exported as ONNX by running the command below. You can copy and paste the whole command into your terminal and execute it, setting `MODEL_SIZE=Nano` in the first line to `Nano`, `Small`, or `Medium`.
```sh
docker build . --build-arg MODEL_SIZE=Nano --rm --output . -f- <<'EOF'
docker build . --build-arg MODEL_SIZE=Nano --output . -f- <<'EOF'
FROM python:3.11 AS build
RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
WORKDIR /rfdetr
RUN uv pip install --system rfdetr[onnxexport] torch==2.8.0 onnx==1.19.1 onnxscript
RUN uv pip install --system rfdetr onnx onnxruntime onnxsim onnx-graphsurgeon
ARG MODEL_SIZE
RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export(simplify=True)"
RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export()"
FROM scratch
ARG MODEL_SIZE
COPY --from=build /rfdetr/output/inference_model.onnx /rfdetr-${MODEL_SIZE}.onnx
@@ -1568,7 +1529,7 @@ COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
WORKDIR /yolov9
ADD https://github.com/WongKinYiu/yolov9.git .
RUN uv pip install --system -r requirements.txt
RUN uv pip install --system onnx==1.18.0 onnxruntime 'onnx-simplifier>=0.4.1' onnxscript
RUN uv pip install --system onnx==1.18.0 onnxruntime 'onnx-simplifier>=0.4.1'
ARG MODEL_SIZE
ARG IMG_SIZE
ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt
View File
@@ -11,7 +11,7 @@ This adds features including the ability to deep link directly into the app.
In order to install Frigate as a PWA, the following requirements must be met:
- Frigate must be accessed via a secure context (localhost, secure https, VPN, etc.)
- Frigate must be accessed via a secure context (localhost, secure https, etc.)
- On Android, Firefox, Chrome, Edge, Opera, and Samsung Internet Browser all support installing PWAs.
- On iOS 16.4 and later, PWAs can be installed from the Share menu in Safari, Chrome, Edge, Firefox, and Orion.
@@ -22,7 +22,3 @@ Installation varies slightly based on the device that is being used:
- Desktop: Use the install button typically found at the right edge of the address bar
- Android: Use the `Install as App` button in the more options menu for Chrome, and the `Add app to Home screen` button for Firefox
- iOS: Use the `Add to Homescreen` button in the share menu
## Usage
Once setup, the Frigate app can be used wherever it has access to Frigate. This means it can be setup as local-only, VPN-only, or fully accessible depending on your needs.
View File
@@ -123,7 +123,7 @@ auth:
# Optional: Refresh time in seconds (default: shown below)
# When the session is going to expire in less time than this setting,
# it will be refreshed back to the session_length.
refresh_time: 1800 # 30 minutes
refresh_time: 43200 # 12 hours
# Optional: Rate limiting for login failures to help prevent brute force
# login attacks (default: shown below)
# See the docs for more information on valid values
@@ -246,7 +246,7 @@ birdseye:
# Optional: ffmpeg configuration
# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets
ffmpeg:
# Optional: ffmpeg binary path (default: shown below)
# can also be set to `7.0` or `5.0` to specify one of the included versions
# or can be set to any path that holds `bin/ffmpeg` & `bin/ffprobe`
path: "default"
@@ -429,15 +429,6 @@ review:
alerts: True
# Optional: Enable GenAI review summaries for detections (default: shown below)
detections: False
# Optional: Activity Context Prompt giving the GenAI context about what activity is and is not suspicious.
# It is important to be direct and detailed. See the documentation for the default prompt structure.
activity_context_prompt: |
  Define what is and is not suspicious
# Optional: Image source for GenAI (default: preview)
# Options: "preview" (uses cached preview frames at ~180p) or "recordings" (extracts frames from recordings at 480p)
# Using "recordings" provides better image quality but uses more tokens per image.
# Frame count is automatically calculated based on context window size, aspect ratio, and image source (capped at 20 frames).
image_source: preview
# Optional: Additional concerns that the GenAI should make note of (default: None)
additional_concerns:
- Animals in the garden
@@ -639,7 +630,7 @@ face_recognition:
# Optional: Min face recognitions for the sub label to be applied to the person object (default: shown below)
min_faces: 1
# Optional: Number of images of recognized faces to save for training (default: shown below)
save_attempts: 200
save_attempts: 100
# Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below)
blur_confidence_filter: True
# Optional: Set the model size used face recognition. (default: shown below)
@@ -680,18 +671,20 @@ lpr:
# Optional: List of regex replacement rules to normalize detected plates (default: shown below)
replace_rules: {}
# Optional: Configuration for AI / LLM provider
# Optional: Configuration for AI generated tracked object descriptions
# WARNING: Depending on the provider, this will send thumbnails over the internet
# to Google or OpenAI's LLMs to generate descriptions. GenAI features can be configured at
# the camera level to enhance privacy for indoor cameras.
# to Google or OpenAI's LLMs to generate descriptions. It can be overridden at
# the camera level (enabled: False) to enhance privacy for indoor cameras.
genai:
# Required: Provider must be one of ollama, gemini, or openai
# Optional: Enable AI description generation (default: shown below)
enabled: False
# Required if enabled: Provider must be one of ollama, gemini, or openai
provider: ollama
# Required if provider is ollama. May also be used for an OpenAI API compatible backend with the openai provider.
base_url: http://localhost:11434
# Required if gemini or openai
api_key: "{FRIGATE_GENAI_API_KEY}"
# Required: The model to use with the provider.
# Required if enabled: The model to use with the provider.
model: gemini-1.5-flash
# Optional additional args to pass to the GenAI Provider (default: None)
provider_options:
@@ -700,54 +693,16 @@ genai:
# Optional: Configuration for audio transcription
# NOTE: only the enabled option can be overridden at the camera level
audio_transcription:
# Optional: Enable live and speech event audio transcription (default: shown below)
# Optional: Enable audio transcription (default: shown below)
enabled: False
# Optional: The device to run the models on for live transcription. (default: shown below)
# Optional: The device to run the models on (default: shown below)
device: CPU
# Optional: Set the model size used for live transcription. (default: shown below)
# Optional: Set the model size used for transcription. (default: shown below)
model_size: small
# Optional: Set the language used for transcription translation. (default: shown below)
# List of language codes: https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
language: en
# Optional: Configuration for classification models
classification:
# Optional: Configuration for bird classification
bird:
# Optional: Enable bird classification (default: shown below)
enabled: False
# Optional: Minimum classification score required to be considered a match (default: shown below)
threshold: 0.9
custom:
# Required: name of the classification model
model_name:
# Optional: Enable running the model (default: shown below)
enabled: True
# Optional: Name of classification model (default: shown below)
name: None
# Optional: Classification score threshold to change the state (default: shown below)
threshold: 0.8
# Optional: Number of classification attempts to save in the recent classifications tab (default: shown below)
# NOTE: Defaults to 200 for object classification and 100 for state classification if not specified
save_attempts: None
# Optional: Object classification configuration
object_config:
# Required: Object types to classify
objects: [dog]
# Optional: Type of classification that is applied (default: shown below)
classification_type: sub_label
# Optional: State classification configuration
state_config:
# Required: Cameras to run classification on
cameras:
camera_name:
# Required: Crop of image frame on this camera to run classification on
crop: [0, 180, 220, 400]
# Optional: If classification should be run when motion is detected in the crop (default: shown below)
motion: False
# Optional: Interval to run classification on in seconds (default: shown below)
interval: None
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
# NOTE: The default go2rtc API port (1984) must be used,
@@ -848,8 +803,6 @@ cameras:
# NOTE: This must be different than any camera names, but can match with another zone on another
# camera.
front_steps:
# Optional: A friendly name or descriptive text for the zone
friendly_name: ""
# Required: List of x,y coordinates to define the polygon of the zone.
# NOTE: Presence in a zone is evaluated only based on the bottom center of the object's bounding box.
coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428
@@ -911,7 +864,7 @@ cameras:
user: admin
# Optional: password for login.
password: admin
# Optional: Skip TLS verification and disable digest authentication for the ONVIF server (default: shown below)
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
# Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
@@ -969,13 +922,10 @@ cameras:
type: thumbnail
# Reference data for matching, either an event ID for `thumbnail` or a text string for `description`. (default: none)
data: 1751565549.853251-b69j73
# Similarity threshold for triggering. (default: shown below)
threshold: 0.8
# Similarity threshold for triggering. (default: none)
threshold: 0.7
# List of actions to perform when the trigger fires. (default: none)
# Available options:
# - `notification` (send a webpush notification)
# - `sub_label` (add trigger friendly name as a sub label to the triggering tracked object)
# - `attribute` (add trigger's name and similarity score as a data attribute to the triggering tracked object)
# Available options: `notification` (send a webpush notification)
actions:
- notification
@@ -1002,6 +952,10 @@ ui:
# full: 8:15:22 PM Mountain Standard Time
# (default: shown below).
time_style: medium
# Optional: Ability to manually override the date / time styling to use strftime format
# https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html
# possible values are shown above (default: not set)
strftime_fmt: "%Y/%m/%d %H:%M"
# Optional: Set the unit system to either "imperial" or "metric" (default: metric)
# Used in the UI and in MQTT topics
unit_system: metric
View File
@@ -24,12 +24,11 @@ birdseye:
restream: True
```
:::tip
To improve connection speed when using Birdseye via restream you can enable a small idle heartbeat by setting `birdseye.idle_heartbeat_fps` to a low value (e.g. `12`). This makes Frigate periodically push the last frame even when no motion is detected, reducing initial connection latency.
:::
### Securing Restream With Authentication
The go2rtc restream can be secured with RTSP based username / password authentication. Ex:
@@ -160,31 +159,6 @@ go2rtc:
See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-2242296489) for more information.
## Preventing go2rtc from blocking two-way audio {#two-way-talk-restream}
For cameras that support two-way talk, go2rtc will automatically establish an audio output backchannel when connecting to an RTSP stream. This backchannel blocks access to the camera's audio output for two-way talk functionality, preventing both Frigate and other applications from using it.
To prevent this, you must configure two separate stream instances:
1. One stream instance with `#backchannel=0` for Frigate's viewing, recording, and detection (prevents go2rtc from establishing the blocking backchannel)
2. A second stream instance without `#backchannel=0` for two-way talk functionality (can be used by Frigate's WebRTC viewer or other applications)
Configuration example:
```yaml
go2rtc:
streams:
front_door:
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#backchannel=0
front_door_twoway:
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
```
In this configuration:
- `front_door` stream is used by Frigate for viewing, recording, and detection. The `#backchannel=0` parameter prevents go2rtc from establishing the audio output backchannel, so it won't block two-way talk access.
- `front_door_twoway` stream is used for two-way talk functionality. This stream can be used by Frigate's WebRTC viewer when two-way talk is enabled, or by other applications (like Home Assistant Advanced Camera Card) that need access to the camera's audio output channel.
## Advanced Restream Configurations
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
@@ -195,4 +169,4 @@ NOTE: The output will need to be passed with two curly braces `{{output}}`
go2rtc:
streams:
stream1: exec:ffmpeg -hide_banner -re -stream_loop -1 -i /media/BigBuckBunny.mp4 -c copy -rtsp_transport tcp -f rtsp {{output}}
```
View File
@@ -78,7 +78,7 @@ Switching between V1 and V2 requires reindexing your embeddings. The embeddings
### GPU Acceleration
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU / NPU hardware, when available. This depends on the Docker build that is used. You can also target a specific device in a multi-GPU installation.
```yaml
semantic_search:
@@ -90,7 +90,7 @@ semantic_search:
:::info
If the correct build is used for your GPU / NPU and the `large` model is configured, then the GPU will be detected and used automatically.
If the correct build is used for your GPU / NPU and the `large` model is configured, then the GPU / NPU will be detected and used automatically.
Specify the `device` option to target a specific GPU in a multi-GPU system (see [onnxruntime's provider options](https://onnxruntime.ai/docs/execution-providers/)).
If you do not specify a device, the first available GPU will be used.
@@ -119,7 +119,7 @@ Semantic Search must be enabled to use Triggers.
### Configuration
Triggers are defined within the `semantic_search` configuration for each camera in your Frigate configuration file or through the UI. Each trigger consists of a `friendly_name`, a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires - `notification`, `sub_label`, and `attribute`.
Triggers are defined within the `semantic_search` configuration for each camera in your Frigate configuration file or through the UI. Each trigger consists of a `friendly_name`, a `type` (either `thumbnail` or `description`), a `data` field (the reference image event ID or text), a `threshold` for similarity matching, and a list of `actions` to perform when the trigger fires.
Triggers are best configured through the Frigate UI.
@@ -128,20 +128,17 @@ Triggers are best configured through the Frigate UI.
1. Navigate to the **Settings** page and select the **Triggers** tab.
2. Choose a camera from the dropdown menu to view or manage its triggers.
3. Click **Add Trigger** to create a new trigger or use the pencil icon to edit an existing one.
4. In the **Create Trigger** wizard:
- Enter a **Name** for the trigger (e.g., "Red Car Alert").
4. In the **Create Trigger** dialog:
- Enter a **Name** for the trigger (e.g., "red_car_alert").
- Enter a descriptive **Friendly Name** for the trigger (e.g., "Red car on the driveway camera").
- Select the **Type** (`Thumbnail` or `Description`).
- For `Thumbnail`, select an image to trigger this action when a similar thumbnail image is detected, based on the threshold.
- For `Description`, enter text to trigger this action when a similar tracked object description is detected.
- Set the **Threshold** for similarity matching.
- Select **Actions** to perform when the trigger fires.
If native webpush notifications are enabled, check the `Send Notification` box to send a notification.
Check the `Add Sub Label` box to add the trigger's friendly name as a sub label to any triggering tracked objects.
Check the `Add Attribute` box to add the trigger's internal ID (e.g., "red_car_alert") to a data attribute on the tracked object that can be processed via the API or MQTT.
5. Save the trigger to update the configuration and store the embedding in the database.
When a trigger fires, the UI highlights the trigger with a blue dot for 3 seconds for easy identification. Additionally, the UI will show the last date/time and tracked object ID that activated your trigger. The last triggered timestamp is not saved to the database or persisted through restarts of Frigate.
When a trigger fires, the UI highlights the trigger with a blue outline for 3 seconds for easy identification.
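For reference, a minimal sketch of what a trigger might look like in the config file (the camera name, trigger name, and data text here are assumptions; see the full configuration reference for the exact schema):

```yaml
cameras:
  driveway:
    semantic_search:
      triggers:
        red_car_alert:
          friendly_name: Red car on the driveway camera
          type: description
          data: a red car parked in the driveway
          threshold: 0.75
          actions:
            - notification
            - sub_label
```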
### Usage and Best Practices
View File
@@ -27,7 +27,6 @@ cameras:
- entire_yard
zones:
entire_yard:
friendly_name: Entire yard # You can use characters from any language
coordinates: ...
```
@@ -45,10 +44,8 @@ cameras:
- edge_yard
zones:
edge_yard:
friendly_name: Edge yard # You can use characters from any language
coordinates: ...
inner_yard:
friendly_name: Inner yard # You can use characters from any language
coordinates: ...
```
@@ -62,7 +59,6 @@ cameras:
- entire_yard
zones:
entire_yard:
friendly_name: Entire yard
coordinates: ...
```
@@ -86,7 +82,6 @@ cameras:
Only car objects can trigger the `front_yard_street` zone and only `person` objects can trigger the `entire_yard`. Objects will be tracked for any `person` that enters anywhere in the yard, and for cars only if they enter the street.
### Zone Loitering
Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time after which the object will be considered in the zone.
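A minimal sketch of a loitering zone (the camera name, zone name, and values are illustrative; `loitering_time` is in seconds):

```yaml
cameras:
  back_yard:
    zones:
      porch:
        coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428
        loitering_time: 10 # object must remain in the zone this long before being considered present
```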
View File
@@ -3,8 +3,6 @@ id: hardware
title: Recommended hardware
---
import CommunityBadge from '@site/src/components/CommunityBadge';
## Cameras
Cameras that output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. It is also helpful if your camera supports multiple substreams to allow different resolutions to be used for detection, streaming, and recordings without re-encoding.
@@ -20,7 +18,7 @@ Here are some of the cameras I recommend:
- <a href="https://amzn.to/4fwoNWA" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T549M-ALED-S3</a> (affiliate link)
- <a href="https://amzn.to/3YXpcMw" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T54IR-AS</a> (affiliate link)
- <a href="https://amzn.to/3AvBHoY" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-AI-V3</a> (affiliate link)
- <a href="https://www.bhphotovideo.com/c/product/1705511-REG/hikvision_colorvu_ds_2cd2387g2p_lsu_sl_8mp_network.html" target="_blank" rel="nofollow noopener">HIKVISION DS-2CD2387G2P-LSU/SL ColorVu 8MP Panoramic Turret IP Camera</a> (affiliate link)
- <a href="https://amzn.to/4ltOpaC" target="_blank" rel="nofollow noopener sponsored">HIKVISION DS-2CD2387G2P-LSU/SL ColorVu 8MP Panoramic Turret IP Camera</a> (affiliate link)
I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
@@ -38,11 +36,9 @@ If the EQ13 is out of stock, the link below may take you to a suggested alternat
:::
| Name | Capabilities | Notes |
| ------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | --------------------------------------------------- |
| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | Can run object detection on several 1080p cameras with low-medium activity | Dual gigabit NICs for easy isolated camera network. |
| Intel i3-1220P ([Amazon](https://www.amazon.com/Beelink-i3-1220P-Computer-Display-Gigabit/dp/B0DDCKT9YP)) | Can handle a large number of 1080p cameras with high activity | |
| Intel 125H ([Amazon](https://www.amazon.com/MINISFORUM-Pro-125H-Barebone-Computer-HDMI2-1/dp/B0FH21FSZM)) | Can handle a significant number of 1080p cameras with high activity | Includes NPU for more efficient detection in 0.17+ |
| Name | Coral Inference Speed | Coral Compatibility | Notes |
| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | ----------------------------------------------------------------------------------------- |
| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
## Detectors
@@ -63,7 +59,7 @@ Frigate supports multiple different detectors that work on different types of ha
- [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector)
- <CommunityBadge /> [MemryX](#memryx-mx3): The MX3 M.2 accelerator module is available in m.2 format allowing for a wide range of compatibility with devices.
- [MemryX](#memryx-mx3): The MX3 M.2 accelerator module is available in m.2 format allowing for a wide range of compatibility with devices.
- [Supports many model architectures](../../configuration/object_detectors#memryx-mx3)
- Runs best with tiny, small, or medium-size models
@@ -88,26 +84,32 @@ Frigate supports multiple different detectors that work on different types of ha
**Nvidia**
- [TensorRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs to provide efficient object detection.
- [TensorRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs and Jetson devices.
- [Supports majority of model architectures via ONNX](../../configuration/object_detectors#onnx-supported-models)
- Runs well with any size models including large
- <CommunityBadge /> [Jetson](#nvidia-jetson): Jetson devices are supported via the TensorRT or ONNX detectors when running Jetpack 6.
**Rockchip** <CommunityBadge />
**Rockchip**
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs to provide efficient object detection.
- [Supports limited model architectures](../../configuration/object_detectors#choosing-a-model)
- Runs best with tiny or small size models
- Runs efficiently on low power hardware
**Synaptics** <CommunityBadge />
**Synaptics**
- [Synaptics](#synaptics): synap models can run on Synaptics devices (e.g. Astra Machina) with included NPUs to provide efficient object detection.
:::
### Synaptics
- **Synaptics**: The default model is **mobilenet**
| Name | Synaptics SL1680 Inference Time |
| ---------------- | ------------------------------- |
| ssd mobilenet | ~ 25 ms |
| yolov5m | ~ 118 ms |
### Hailo-8
Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms, including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn't provided.
@@ -127,16 +129,10 @@ In real-world deployments, even with multiple cameras running concurrently, Frig
### Google Coral TPU
:::warning
The Coral is no longer recommended for new Frigate installations, except in deployments with particularly low power requirements or hardware incapable of utilizing alternative AI accelerators for object detection. Instead, we suggest using one of the numerous other supported object detectors. Frigate will continue to provide support for the Coral TPU for as long as practicably possible given it's still one of the most power-efficient devices for executing object detection models.
:::
Frigate supports both the USB and M.2 versions of the Google Coral.
- The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions.
- The PCIe and M.2 versions require installation of a driver on the host. https://github.com/jnicolson/gasket-builder should be used.
- The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai
A single Coral can handle many cameras using the default model and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed.
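As a quick sketch of that calculation (the `10` here is an example inference speed in milliseconds):

```bash
python3 -c 'inference_ms = 10; print("Theoretical max: {:.0f} detections/sec".format(1000 / inference_ms))'
```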
@@ -167,7 +163,7 @@ Inference speeds vary greatly depending on the CPU or GPU used, some known examp
| Intel HD 530 | 15 - 35 ms | | | | Can only run one detector instance |
| Intel HD 620 | 15 - 25 ms | | 320: ~ 35 ms | | |
| Intel HD 630 | ~ 15 ms | | 320: ~ 30 ms | | |
| Intel UHD 730 | ~ 10 ms | t-320: 14ms s-320: 24ms t-640: 34ms s-640: 65ms | 320: ~ 19 ms 640: ~ 54 ms | | |
| Intel UHD 730 | ~ 10 ms | | 320: ~ 19 ms 640: ~ 54 ms | | |
| Intel UHD 770 | ~ 15 ms | t-320: ~ 16 ms s-320: ~ 20 ms s-640: ~ 40 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
| Intel N100 | ~ 15 ms | s-320: 30 ms | 320: ~ 25 ms | | Can only run one detector instance |
| Intel N150 | ~ 15 ms | t-320: 16 ms s-320: 24 ms | | | |
@@ -265,7 +261,7 @@ Inference speeds may vary depending on the host platform. The above data was mea
### Nvidia Jetson
Jetson devices are supported via the TensorRT or ONNX detectors when running Jetpack 6. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
Inference speed will vary depending on the YOLO model, Jetson platform, and Jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.
@@ -286,15 +282,6 @@ Frigate supports hardware video processing on all Rockchip boards. However, hard
The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms for yolo-nas s.
### Synaptics
- **Synaptics**: The default model is **mobilenet**
| Name | Synaptics SL1680 Inference Time |
| ------------- | ------------------------------- |
| ssd mobilenet | ~ 25 ms |
| yolov5m | ~ 118 ms |
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.
View File
@@ -56,7 +56,7 @@ services:
volumes:
- /path/to/your/config:/config
- /path/to/your/storage:/media/frigate
- type: tmpfs # Recommended: 1GB of memory
- type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
target: /tmp/cache
tmpfs:
size: 1000000000
@@ -94,10 +94,6 @@ $ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
## Extra Steps for Specific Hardware
The following sections contain additional setup steps that are only required if you are using specific hardware. If you are not using any of these hardware types, you can skip to the [Docker](#docker) installation section.
### Raspberry Pi 3/4
By default, the Raspberry Pi limits the amount of memory available to the GPU. In order to use ffmpeg hardware acceleration, you must increase the available memory by setting `gpu_mem` to the maximum recommended value in `config.txt` as described in the [official docs](https://www.raspberrypi.org/documentation/computers/config_txt.html#memory-options).
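A sketch of the relevant `config.txt` line (check the linked docs for the maximum recommended value for your board):

```
# /boot/config.txt (path varies by OS release)
gpu_mem=256
```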
@@ -110,107 +106,14 @@ The Hailo-8 and Hailo-8L AI accelerators are available in both M.2 and HAT form
#### Installation
:::warning
For Raspberry Pi 5 users with the AI Kit, installation is straightforward. Simply follow this [guide](https://www.raspberrypi.com/documentation/accessories/ai-kit.html#ai-kit-installation) to install the driver and software.
The Raspberry Pi kernel includes an older version of the Hailo driver that is incompatible with Frigate. You **must** follow the installation steps below to install the correct driver version, and you **must** disable the built-in kernel driver as described in step 1.
For other installations, follow these steps for installation:
:::
1. **Disable the built-in Hailo driver (Raspberry Pi only)**:
:::note
If you are **not** using a Raspberry Pi, skip this step and proceed directly to step 2.
:::
If you are using a Raspberry Pi, you need to blacklist the built-in kernel Hailo driver to prevent conflicts. First, check if the driver is currently loaded:
```bash
lsmod | grep hailo
```
If it shows `hailo_pci`, unload it:
```bash
sudo rmmod hailo_pci
```
Now blacklist the driver to prevent it from loading on boot:
```bash
echo "blacklist hailo_pci" | sudo tee /etc/modprobe.d/blacklist-hailo_pci.conf
```
Update initramfs to ensure the blacklist takes effect:
```bash
sudo update-initramfs -u
```
Reboot your Raspberry Pi:
```bash
sudo reboot
```
After rebooting, verify the built-in driver is not loaded:
```bash
lsmod | grep hailo
```
This command should return no results. If it still shows `hailo_pci`, the blacklist did not take effect properly and you may need to check for other Hailo packages installed via apt that are loading the driver.
2. **Run the installation script**:
Download the installation script:
```bash
wget https://raw.githubusercontent.com/blakeblackshear/frigate/dev/docker/hailo8l/user_installation.sh
```
Make it executable:
```bash
sudo chmod +x user_installation.sh
```
Run the script:
```bash
./user_installation.sh
```
The script will:
- Install necessary build dependencies
- Clone and build the Hailo driver from the official repository
- Install the driver
- Download and install the required firmware
- Set up udev rules
3. **Reboot your system**:
After the script completes successfully, reboot to load the firmware:
```bash
sudo reboot
```
4. **Verify the installation**:
After rebooting, verify that the Hailo device is available:
```bash
ls -l /dev/hailo0
```
You should see the device listed. You can also verify the driver is loaded:
```bash
lsmod | grep hailo_pci
```
1. Install the driver from the [Hailo GitHub repository](https://github.com/hailo-ai/hailort-drivers). A convenient script for Linux is available to clone the repository, build the driver, and install it.
2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/dev/docker/hailo8l/user_installation.sh).
3. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
4. Run the script with `./user_installation.sh`
#### Setup
@@ -232,7 +135,6 @@ Finally, configure [hardware object detection](/configuration/object_detectors#h
### MemryX MX3
The MemryX MX3 Accelerator is available in the M.2 2280 form factor (like an NVMe SSD), and supports a variety of configurations:
- x86 (Intel/AMD) PCs
- Raspberry Pi 5
- Orange Pi 5 Plus/Max
@@ -240,6 +142,7 @@ The MemryX MX3 Accelerator is available in the M.2 2280 form factor (like an NVM
#### Configuration
#### Installation
To get started with MX3 hardware setup for your system, refer to the [Hardware Setup Guide](https://developer.memryx.com/get_started/hardware_setup.html).
@@ -253,7 +156,7 @@ Then follow these steps for installing the correct driver/runtime configuration:
#### Setup
To set up Frigate, follow the default installation instructions, for example: `ghcr.io/blakeblackshear/frigate:stable`
Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
@@ -270,7 +173,7 @@ In your `docker-compose.yml`, also add:
privileged: true
volumes:
- /run/mxa_manager:/run/mxa_manager
```
If you can't use Docker Compose, you can run the container with something similar to this:
@@ -399,7 +302,7 @@ services:
shm_size: "512mb" # update for your cameras based on calculation above
devices:
- /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
- /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder
- /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
- /dev/video11:/dev/video11 # For Raspberry Pi 4B
- /dev/dri/renderD128:/dev/dri/renderD128 # AMD / Intel GPU, needs to be updated for your hardware
- /dev/accel:/dev/accel # Intel NPU
@@ -407,7 +310,7 @@ services:
- /etc/localtime:/etc/localtime:ro
- /path/to/your/config:/config
- /path/to/your/storage:/media/frigate
- type: tmpfs # Recommended: 1GB of memory
- type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
target: /tmp/cache
tmpfs:
size: 1000000000
@@ -465,7 +368,6 @@ There are important limitations in HA OS to be aware of:
- Separate local storage for media is not yet supported by Home Assistant
- AMD GPUs are not supported because HA OS does not include the mesa driver.
- Intel NPUs are not supported because HA OS does not include the NPU firmware.
- Nvidia GPUs are not supported because addons do not support the nvidia runtime.
:::
@@ -509,7 +411,7 @@ To install make sure you have the [community app plugin here](https://forums.unr
## Proxmox
[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct) it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn't possible with containers. Ensure that ballooning is **disabled**, especially if you are passing through a GPU to the VM.
[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct) it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn't possible with containers.
:::warning
View File
@@ -5,7 +5,7 @@ title: Updating
# Updating Frigate
The current stable version of Frigate is **0.17.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.17.0).
The current stable version of Frigate is **0.16.1**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.1).
Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.
@@ -33,21 +33,21 @@ If you're running Frigate via Docker (recommended method), follow these steps:
2. **Update and Pull the Latest Image**:
- If using Docker Compose:
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.17.0` instead of `0.16.3`). For example:
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.1` instead of `0.15.2`). For example:
```yaml
services:
frigate:
image: ghcr.io/blakeblackshear/frigate:0.17.0
image: ghcr.io/blakeblackshear/frigate:0.16.1
```
- Then pull the image:
```bash
docker pull ghcr.io/blakeblackshear/frigate:0.17.0
docker pull ghcr.io/blakeblackshear/frigate:0.16.1
```
- **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you dont need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
- If using `docker run`:
- Pull the image with the appropriate tag (e.g., `0.17.0`, `0.17.0-tensorrt`, or `stable`):
- Pull the image with the appropriate tag (e.g., `0.16.1`, `0.16.1-tensorrt`, or `stable`):
```bash
docker pull ghcr.io/blakeblackshear/frigate:0.17.0
docker pull ghcr.io/blakeblackshear/frigate:0.16.1
```
3. **Start the Container**:
@@ -105,8 +105,8 @@ If an update causes issues:
1. Stop Frigate.
2. Restore your backed-up config file and database.
3. Revert to the previous image version:
- For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`) in your `docker run` command.
- For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.16.3`), and re-run `docker compose up -d`.
- For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.15.2`) in your `docker run` command.
- For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.15.2`), and re-run `docker compose up -d`.
- For Home Assistant: Reinstall the previous addon version manually via the repository if needed and restart the addon.
4. Verify the old version is running again.
View File
@@ -113,8 +113,7 @@ section.
1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera).
2. You can [set up WebRTC](/configuration/live#webrtc-extra-configuration) if your camera supports two-way talk. Note that WebRTC only supports specific audio formats and may require opening ports on your router.
3. If your camera supports two-way talk, you must configure your stream with `#backchannel=0` to prevent go2rtc from blocking other applications from accessing the camera's audio output. See [preventing go2rtc from blocking two-way audio](/configuration/restream#two-way-talk-restream) in the restream documentation.
## Homekit Configuration
To add camera streams to HomeKit, Frigate must be configured in Docker to use `host` networking mode. Once that is done, you can use the go2rtc WebUI (accessed via port 1984, which is disabled by default) to export a camera to HomeKit. Any changes made will automatically be saved to `/config/go2rtc_homekit.yml`.
View File
@@ -134,13 +134,31 @@ Now you should be able to start Frigate by running `docker compose up -d` from w
This section assumes that you already have an environment set up as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution.
### Step 1: Start Frigate
### Step 1: Add a detect stream
At this point you should be able to start Frigate and a basic config will be created automatically.
First we will add the detect stream for the camera:
### Step 2: Add a camera
```yaml
mqtt:
enabled: False
You can click the `Add Camera` button to use the camera setup wizard to add your first camera to Frigate.
cameras:
name_of_your_camera: # <------ Name the camera
enabled: True
ffmpeg:
inputs:
- path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection
roles:
- detect
```
### Step 2: Start Frigate
At this point you should be able to start Frigate and see the video feed in the UI.
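For example, assuming a Docker Compose setup with the service named `frigate`:

```bash
docker compose up -d     # start the container in the background
docker logs -f frigate   # watch startup output, including any ffmpeg errors
```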
If you get an error image from the camera, this means ffmpeg was not able to get the video feed from your camera. Check the logs for error messages from ffmpeg. The default ffmpeg arguments are designed to work with H264 RTSP cameras that support TCP connections.
FFmpeg arguments for other types of cameras can be found [here](../configuration/camera_specific.md).
### Step 3: Configure hardware acceleration (recommended)
@@ -155,7 +173,7 @@ services:
frigate:
...
devices:
- /dev/dri/renderD128:/dev/dri/renderD128 # for intel & amd hwaccel, needs to be updated for your hardware
- /dev/dri/renderD128:/dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
...
```
@@ -184,7 +202,7 @@ services:
...
devices:
- /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions
- /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder
- /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
...
```
View File
@@ -245,12 +245,6 @@ To load a preview gif of a review item:
https://HA_URL/api/frigate/notifications/<review-id>/review_preview.gif
```
To load the thumbnail of a review item:
```
https://HA_URL/api/frigate/notifications/<review-id>/<camera>/review_thumbnail.webp
```
<a name="streams"></a>
## RTSP stream
View File
@@ -159,49 +159,9 @@ Message published for updates to tracked object metadata, for example:
}
```
#### Object Classification Update
Message published when [object classification](/configuration/custom_classification/object_classification) reaches consensus on a classification result.
**Sub label type:**
```json
{
"type": "classification",
"id": "1607123955.475377-mxklsc",
"camera": "front_door_cam",
"timestamp": 1607123958.748393,
"model": "person_classifier",
"sub_label": "delivery_person",
"score": 0.87
}
```
**Attribute type:**
```json
{
"type": "classification",
"id": "1607123955.475377-mxklsc",
"camera": "front_door_cam",
"timestamp": 1607123958.748393,
"model": "helmet_detector",
"attribute": "yes",
"score": 0.92
}
```
### `frigate/reviews`
Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated.
An `update` with the same ID will be published when:
- The severity changes from `detection` to `alert`
- Additional objects are detected
- An object is recognized via face, lpr, etc.
When the review activity has ended a final `end` message is published.
Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, an `update` message with the same id will be published. When the review activity has ended, a final `end` message is published.
```json
{
@@ -280,7 +240,7 @@ Topic with current state of notifications. Published values are `ON` and `OFF`.
## Frigate Camera Topics
### `frigate/<camera_name>/status/<role>`
### `frigate/<camera_name>/<role>/status`
Publishes the current health status of each role that is enabled (`audio`, `detect`, `record`). Possible values are:
@@ -341,11 +301,6 @@ Publishes transcribed text for audio detected on this camera.
**NOTE:** Requires audio detection and transcription to be enabled
### `frigate/<camera_name>/classification/<model_name>`
Publishes the current state detected by a state classification model for the camera. The topic name includes the model name as configured in your classification settings.
The published value is the detected state class name (e.g., `open`, `closed`, `on`, `off`). The state is only published when it changes, helping to reduce unnecessary MQTT traffic.
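For example, to watch this topic with the `mosquitto_sub` CLI (the broker address, camera name, and model name are placeholders):

```bash
mosquitto_sub -h 192.168.1.5 -t "frigate/front_door/classification/door_state"
```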
### `frigate/<camera_name>/enabled/set`
Topic to turn Frigate's processing of a camera on and off. Expected values are `ON` and `OFF`.
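For example, to disable processing for a camera with `mosquitto_pub` (placeholders as above):

```bash
mosquitto_pub -h 192.168.1.5 -t "frigate/front_door/enabled/set" -m "OFF"
```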
View File
@@ -38,7 +38,3 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht
## [Periscope](https://github.com/maksz42/periscope)
[Periscope](https://github.com/maksz42/periscope) is a lightweight Android app that turns old devices into live viewers for Frigate. It works on Android 2.2 and above, including Android TV. It supports authentication and HTTPS.
## [Scrypted - Frigate bridge plugin](https://github.com/apocaliss92/scrypted-frigate-bridge)
[Scrypted - Frigate bridge](https://github.com/apocaliss92/scrypted-frigate-bridge) is a plugin that lets you ingest Frigate detections, motion, and video clips into Scrypted, as well as provide templates to export rebroadcast configurations to Frigate.

View File

@@ -42,7 +42,6 @@ Misidentified objects should have a correct label added. For example, if a perso
| `w` | Add box |
| `d` | Toggle difficult |
| `s` | Switch to the next label |
| `Shift + s` | Switch to the previous label |
| `tab` | Select next largest box |
| `del` | Delete current box |
| `esc` | Deselect/Cancel |

View File

@@ -15,11 +15,13 @@ There are three model types offered in Frigate+, `mobiledet`, `yolonas`, and `yo
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models.
| Model Type | Description |
| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX, Apple Silicon, and Rockchip NPUs. |
| Model Type | Description |
| ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `mobiledet` | Based on the same architecture as the default model included with Frigate. Runs on Google Coral devices and CPUs. |
| `yolonas` | A newer architecture that offers slightly higher accuracy and improved detection of small objects. Runs on Intel, NVidia GPUs, and AMD GPUs. |
| `yolov9` | A leading SOTA (state of the art) object detection model with similar performance to yolonas, but on a wider range of hardware options. Runs on Intel, NVidia GPUs, AMD GPUs, Hailo, MemryX\*, Apple Silicon\*, and Rockchip NPUs. |
_\* Support coming in 0.17_
### YOLOv9 Details
@@ -37,7 +39,7 @@ If you have a Hailo device, you will need to specify the hardware you have when
#### Rockchip (RKNN) Support
For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is available in 0.17 and later.
For 0.16, YOLOv9 onnx models will need to be manually converted. First, you will need to configure Frigate to use the model id for your YOLOv9 onnx model so it downloads the model to your `model_cache` directory. From there, you can follow the [documentation](/configuration/object_detectors.md#converting-your-own-onnx-model-to-rknn-format) to convert it. Automatic conversion is coming in 0.17.
## Supported detector types
@@ -53,7 +55,7 @@ Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVi
| [Hailo8/Hailo8L/Hailo8R](/configuration/object_detectors#hailo-8) | `hailo8l` | `yolov9` |
| [Rockchip NPU](/configuration/object_detectors#rockchip-platform)\* | `rknn` | `yolov9` |
_\* Requires manual conversion in 0.16. Automatic conversion available in 0.17 and later._
_\* Requires manual conversion in 0.16. Automatic conversion coming in 0.17._
## Improving your model

View File

@@ -1,73 +0,0 @@
---
id: cpu
title: High CPU Usage
---
High CPU usage can impact Frigate's performance and responsiveness. This guide outlines the most effective configuration changes to help reduce CPU consumption and optimize resource usage.
## 1. Hardware Acceleration for Video Decoding
**Priority: Critical**
Video decoding is one of the most CPU-intensive tasks in Frigate. While an AI accelerator handles object detection, it does not assist with decoding video streams. Hardware acceleration (hwaccel) offloads this work to your GPU or specialized video decode hardware, significantly reducing CPU usage and enabling you to support more cameras on the same hardware.
### Key Concepts
**Resolution & FPS Impact:** The decoding burden grows quickly with resolution and frame rate, scaling roughly with the number of pixels processed per second. A 4K stream at 30 FPS requires roughly 4 times the processing power of a 1080p stream at the same frame rate, and doubling the frame rate doubles the decode workload. This is why hardware acceleration becomes critical when working with multiple high-resolution cameras.
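As a back-of-the-envelope check on the numbers above, decode workload can be approximated by pixels processed per second (width * height * FPS). The sketch below uses illustrative values only.
```python
# Rough approximation: decode workload ~ pixels per second (width * height * fps).
# Values are illustrative only.
def pixels_per_second(width: int, height: int, fps: int) -> int:
    return width * height * fps

p1080_30 = pixels_per_second(1920, 1080, 30)
p4k_30 = pixels_per_second(3840, 2160, 30)
p1080_60 = pixels_per_second(1920, 1080, 60)

print(f"4K30 vs 1080p30: {p4k_30 / p1080_30:.1f}x the decode work")      # 4.0x
print(f"1080p60 vs 1080p30: {p1080_60 / p1080_30:.1f}x the decode work")  # 2.0x
```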
**Hardware Acceleration Benefits:** By using dedicated video decode hardware, you can:
- Significantly reduce CPU usage per camera stream
- Support 2-3x more cameras on the same hardware
- Free up CPU resources for motion detection and other Frigate processes
- Reduce system heat and power consumption
### Configuration
Frigate provides preset configurations for common hardware acceleration scenarios. Set up `hwaccel_args` based on your hardware in your [configuration](../configuration/reference) as described in the [getting started guide](../guides/getting_started).
### Troubleshooting Hardware Acceleration
If hardware acceleration isn't working:
1. Check Frigate logs for FFmpeg errors related to hwaccel
2. Verify the hardware device is accessible inside the container
3. Ensure your camera streams use H.264 or H.265 codecs (most common)
4. Try different presets if the automatic detection fails
5. Check that your GPU drivers are properly installed on the host system
## 2. Detector Selection and Configuration
**Priority: Critical**
Choosing the right detector for your hardware is the single most important factor for detection performance. The detector is responsible for running the AI model that identifies objects in video frames. Different detector types have vastly different performance characteristics and hardware requirements, as detailed in the [hardware documentation](../frigate/hardware).
### Understanding Detector Performance
Frigate uses motion detection as a first-line check before running expensive object detection, as explained in the [motion detection documentation](../configuration/motion_detection). When motion is detected, Frigate creates a "region" (the green boxes in the debug viewer) and sends it to the detector. The detector's inference speed determines how many detections per second your system can handle.
**Calculating Detector Capacity:** Your detector has a finite capacity measured in detections per second. With an inference speed of 10ms, your detector can handle approximately 100 detections per second (1000ms / 10ms = 100). If your cameras collectively require more than this capacity, you'll experience delays, missed detections, or the system will fall behind.
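A minimal sketch of this capacity math is shown below; the inference speed and per-camera detection rates are illustrative assumptions, so substitute the values reported by your own system.
```python
# Minimal sketch of the detector capacity estimate described above.
# Inference speed and per-camera rates are illustrative assumptions.
inference_speed_ms = 10.0                      # assumed inference speed
capacity_dps = 1000.0 / inference_speed_ms     # ~100 detections per second

# assumed detections per second each camera needs during busy periods
camera_dps = {"front_door": 20, "driveway": 35, "backyard": 30}
required = sum(camera_dps.values())

print(f"capacity: {capacity_dps:.0f} dps, required: {required} dps")
if required > capacity_dps:
    print("The detector will fall behind; consider a faster detector or a second instance.")
else:
    print(f"Headroom: {capacity_dps - required:.0f} dps")
```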
### Choosing the Right Detector
Different detectors have vastly different performance characteristics; see the expected performance for object detectors in [the hardware docs](../frigate/hardware).
### Multiple Detector Instances
When a single detector cannot keep up with your camera count, some detector types (`openvino`, `onnx`) allow you to define multiple detector instances to share the workload. This is particularly useful with GPU-based detectors that have sufficient VRAM to run multiple inference processes.
For detailed instructions on configuring multiple detectors, see the [Object Detectors documentation](../configuration/object_detectors).
**When to add a second detector:**
- Skipped FPS is consistently > 0 even during normal activity
### Model Selection and Optimization
The model you use significantly impacts detector performance. Frigate provides default models optimized for each detector type, but you can customize them as described in the [detector documentation](../configuration/object_detectors).
**Model Size Trade-offs:**
- Smaller models (320x320): Faster inference; Frigate is specifically optimized for a 320x320 size model.
- Larger models (640x640): Slower inference; can sometimes have higher accuracy on very large objects that take up a majority of the frame.

View File

@@ -1,60 +0,0 @@
---
id: dummy-camera
title: Analyzing Object Detection
---
When investigating object detection or tracking problems, it can be helpful to replay an exported video as a temporary "dummy" camera. This lets you reproduce issues locally, iterate on configuration (detections, zones, enrichment settings), and capture logs and clips for analysis.
## When to use
- Replaying an exported clip to reproduce incorrect detections
- Testing configuration changes (model settings, trackers, filters) against a known clip
- Gathering repeatable logs and recordings for debugging or issue reports
## Example Config
Place the clip you want to replay in a location accessible to Frigate (for example `/media/frigate/` or the repository `debug/` folder when developing). Then add a temporary camera to your `config/config.yml` like this:
```yaml
cameras:
test:
ffmpeg:
inputs:
- path: /media/frigate/car-stopping.mp4
input_args: -re -stream_loop -1 -fflags +genpts
roles:
- detect
detect:
enabled: true
record:
enabled: false
snapshots:
enabled: false
```
- `-re -stream_loop -1` tells `ffmpeg` to play the file in realtime and loop indefinitely, which is useful for long debugging sessions.
- `-fflags +genpts` helps generate presentation timestamps when they are missing in the file.
## Steps
1. Export or copy the clip you want to replay to the Frigate host (e.g., `/media/frigate/` or `debug/clips/`).
2. Add the temporary camera to `config/config.yml` (example above). Use a unique name such as `test` or `replay_camera` so it's easy to remove later.
- If you're debugging a specific camera, copy the settings from that camera (frame rate, model/enrichment settings, zones, etc.) into the temporary camera so the replay closely matches the original environment. Leave `record` and `snapshots` disabled unless you are specifically debugging recording or snapshot behavior.
3. Restart Frigate.
4. Observe the Debug view in the UI and logs as the clip is replayed. Watch detections, zones, or any feature you're looking to debug, and note any errors in the logs to reproduce the issue.
5. Iterate on camera or enrichment settings (model, fps, zones, filters) and re-check the replay until the behavior is resolved.
6. Remove the temporary camera from your config after debugging to avoid spurious telemetry or recordings.
## Variables to consider in object tracking
- The exported video will not always line up exactly with how it originally ran through Frigate (or even with the last loop). Different frames may be used on replay, which can change detections and tracking.
- Motion detection depends on the frames used; small frame shifts can change motion regions and therefore what gets passed to the detector.
- Object detection is not deterministic: models and post-processing can yield different results across runs, so you may not get identical detections or track IDs every time.
When debugging, treat the replay as a close approximation rather than a byte-for-byte replay. Capture multiple runs, enable recording if helpful, and examine logs and saved event clips to understand variability.
## Troubleshooting
- No video: verify the path is correct and accessible from the Frigate process/container.
- FFmpeg errors: check the log output for ffmpeg-specific flags and adjust `input_args` accordingly for your file/container. You may also need to disable hardware acceleration (`hwaccel_args: ""`) for the dummy camera.
- No detections: confirm the camera `roles` include `detect`, and model/detector configuration is enabled.

View File

@@ -1,6 +1,6 @@
---
id: edgetpu
title: EdgeTPU Errors
title: Troubleshooting EdgeTPU
---
## USB Coral Not Detected
@@ -68,7 +68,8 @@ The USB Coral can become stuck and need to be restarted, this can happen for a n
The most common reason for the PCIe Coral not being detected is that the driver has not been installed. This process varies based on which OS and kernel are being run.
- In most cases https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
- For some newer Linux distros (for example, Ubuntu 22.04+), https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
## Attempting to load TPU as pci & Fatal Python error: Illegal instruction

View File

@@ -1,6 +1,6 @@
---
id: gpu
title: GPU Errors
title: Troubleshooting GPU
---
## OpenVINO

View File

@@ -1,134 +0,0 @@
---
id: memory
title: Memory Usage
---
Frigate includes built-in memory profiling using [memray](https://bloomberg.github.io/memray/) to help diagnose memory issues. This feature allows you to profile specific Frigate modules to identify memory leaks, excessive allocations, or other memory-related problems.
## Enabling Memory Profiling
Memory profiling is controlled via the `FRIGATE_MEMRAY_MODULES` environment variable. Set it to a comma-separated list of module names you want to profile:
```yaml
# docker-compose example
services:
frigate:
...
environment:
- FRIGATE_MEMRAY_MODULES=frigate.embeddings,frigate.capture
```
```bash
# docker run example
docker run -e FRIGATE_MEMRAY_MODULES="frigate.embeddings" \
...
--name frigate <frigate_image>
```
### Module Names
Frigate processes are named using a module-based naming scheme. Common module names include:
- `frigate.review_segment_manager` - Review segment processing
- `frigate.recording_manager` - Recording management
- `frigate.capture` - Camera capture processes (all cameras with this module name)
- `frigate.process` - Camera processing/tracking (all cameras with this module name)
- `frigate.output` - Output processing
- `frigate.audio_manager` - Audio processing
- `frigate.embeddings` - Embeddings processing
You can also specify the full process name (including camera-specific identifiers) if you want to profile a specific camera:
```bash
FRIGATE_MEMRAY_MODULES=frigate.capture:front_door
```
When you specify a module name (e.g., `frigate.capture`), all processes with that module prefix will be profiled. For example, `frigate.capture` will profile all camera capture processes.
## How It Works
1. **Binary File Creation**: When profiling is enabled, memray creates a binary file (`.bin`) in `/config/memray_reports/` that is updated continuously in real-time as the process runs.
2. **Automatic HTML Generation**: On normal process exit, Frigate automatically:
- Stops memray tracking
- Generates an HTML flamegraph report
- Saves it to `/config/memray_reports/<module_name>.html`
3. **Crash Recovery**: If a process crashes (SIGKILL, segfault, etc.), the binary file is preserved with all data up to the crash point. You can manually generate the HTML report from the binary file.
## Viewing Reports
### Automatic Reports
After a process exits normally, you'll find HTML reports in `/config/memray_reports/`. Open these files in a web browser to view interactive flamegraphs showing memory usage patterns.
### Manual Report Generation
If a process crashes or you want to generate a report from an existing binary file, you can manually create the HTML report:
- Run `memray` inside the Frigate container:
```bash
docker-compose exec frigate memray flamegraph /config/memray_reports/<module_name>.bin
# or
docker exec -it <container_name_or_id> memray flamegraph /config/memray_reports/<module_name>.bin
```
- You can also copy the `.bin` file to the host and run `memray` locally if you have it installed:
```bash
docker cp <container_name_or_id>:/config/memray_reports/<module_name>.bin /tmp/
memray flamegraph /tmp/<module_name>.bin
```
## Understanding the Reports
Memray flamegraphs show:
- **Memory allocations over time**: See where memory is being allocated in your code
- **Call stacks**: Understand the full call chain leading to allocations
- **Memory hotspots**: Identify functions or code paths that allocate the most memory
- **Memory leaks**: Spot patterns where memory is allocated but not freed
The interactive HTML reports allow you to:
- Zoom into specific time ranges
- Filter by function names
- View detailed allocation information
- Export data for further analysis
## Best Practices
1. **Profile During Issues**: Enable profiling when you're experiencing memory issues, not all the time, as it adds some overhead.
2. **Profile Specific Modules**: Instead of profiling everything, focus on the modules you suspect are causing issues.
3. **Let Processes Run**: Allow processes to run for a meaningful duration to capture representative memory usage patterns.
4. **Check Binary Files**: If HTML reports aren't generated automatically (e.g., after a crash), check for `.bin` files in `/config/memray_reports/` and generate reports manually.
5. **Compare Reports**: Generate reports at different times to compare memory usage patterns and identify trends.
## Troubleshooting
### No Reports Generated
- Check that the environment variable is set correctly
- Verify the module name matches exactly (case-sensitive)
- Check logs for memray-related errors
- Ensure `/config/memray_reports/` directory exists and is writable
### Process Crashed Before Report Generation
- Look for `.bin` files in `/config/memray_reports/`
- Manually generate HTML reports using: `memray flamegraph <file>.bin`
- The binary file contains all data up to the crash point
### Reports Show No Data
- Ensure the process ran long enough to generate meaningful data
- Check that memray is properly installed (included by default in Frigate)
- Verify the process actually started and ran (check process logs)
For more information about memray and interpreting reports, see the [official memray documentation](https://bloomberg.github.io/memray/).

View File

@@ -1,6 +1,6 @@
---
id: recordings
title: Recordings Errors
title: Troubleshooting Recordings
---
## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?

View File

@@ -10,7 +10,7 @@ const config: Config = {
baseUrl: "/",
onBrokenLinks: "throw",
onBrokenMarkdownLinks: "warn",
favicon: "img/branding/favicon.ico",
favicon: "img/favicon.ico",
organizationName: "blakeblackshear",
projectName: "frigate",
themes: [
@@ -116,8 +116,8 @@ const config: Config = {
title: "Frigate",
logo: {
alt: "Frigate",
src: "img/branding/logo.svg",
srcDark: "img/branding/logo-dark.svg",
src: "img/logo.svg",
srcDark: "img/logo-dark.svg",
},
items: [
{
@@ -170,7 +170,7 @@ const config: Config = {
],
},
],
copyright: `Copyright © ${new Date().getFullYear()} Frigate, Inc.`,
copyright: `Copyright © ${new Date().getFullYear()} Blake Blackshear`,
},
},
plugins: [

docs/package-lock.json generated (3199 changed lines)
View File

File diff suppressed because it is too large

View File

@@ -18,14 +18,14 @@
},
"dependencies": {
"@docusaurus/core": "^3.7.0",
"@docusaurus/plugin-content-docs": "^3.7.0",
"@docusaurus/plugin-content-docs": "^3.6.3",
"@docusaurus/preset-classic": "^3.7.0",
"@docusaurus/theme-mermaid": "^3.7.0",
"@docusaurus/theme-mermaid": "^3.6.3",
"@inkeep/docusaurus": "^2.0.16",
"@mdx-js/react": "^3.1.0",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.5.1",
"docusaurus-theme-openapi-docs": "^4.5.1",
"docusaurus-plugin-openapi-docs": "^4.3.1",
"docusaurus-theme-openapi-docs": "^4.3.1",
"prism-react-renderer": "^2.4.1",
"raw-loader": "^4.0.2",
"react": "^18.3.1",
@@ -44,9 +44,9 @@
]
},
"devDependencies": {
"@docusaurus/module-type-aliases": "^3.7.0",
"@docusaurus/types": "^3.7.0",
"@types/react": "^18.3.27"
"@docusaurus/module-type-aliases": "^3.4.0",
"@docusaurus/types": "^3.4.0",
"@types/react": "^18.3.7"
},
"engines": {
"node": ">=18.0"

View File

@@ -129,27 +129,8 @@ const sidebars: SidebarsConfig = {
Troubleshooting: [
"troubleshooting/faqs",
"troubleshooting/recordings",
"troubleshooting/dummy-camera",
{
type: "category",
label: "Troubleshooting Hardware",
link: {
type: "generated-index",
title: "Troubleshooting Hardware",
description: "Troubleshooting Problems with Hardware",
},
items: ["troubleshooting/gpu", "troubleshooting/edgetpu"],
},
{
type: "category",
label: "Troubleshooting Resource Usage",
link: {
type: "generated-index",
title: "Troubleshooting Resource Usage",
description: "Troubleshooting issues with resource usage",
},
items: ["troubleshooting/cpu", "troubleshooting/memory"],
},
"troubleshooting/gpu",
"troubleshooting/edgetpu",
],
Development: [
"development/contributing",

View File

@@ -1,23 +0,0 @@
import React from "react";
export default function CommunityBadge() {
return (
<span
title="This detector is maintained by community members who provide code, maintenance, and support. See the contributing boards documentation for more information."
style={{
display: "inline-block",
backgroundColor: "#f1f3f5",
color: "#24292f",
fontSize: "11px",
fontWeight: 600,
padding: "2px 6px",
borderRadius: "3px",
border: "1px solid #d1d9e0",
marginLeft: "4px",
cursor: "help",
}}
>
Community Supported
</span>
);
}

View File

@@ -1,18 +1,13 @@
.alert {
padding: 12px;
background: #fff8e6;
border-bottom: 1px solid #ffd166;
text-align: center;
font-size: 15px;
}
[data-theme="dark"] .alert {
background: #3b2f0b;
border-bottom: 1px solid #665c22;
}
.alert a {
color: #1890ff;
font-weight: 500;
margin-left: 6px;
}
padding: 12px;
background: #fff8e6;
border-bottom: 1px solid #ffd166;
text-align: center;
font-size: 15px;
}
.alert a {
color: #1890ff;
font-weight: 500;
margin-left: 6px;
}

View File

File diff suppressed because it is too large

View File

@@ -1,30 +0,0 @@
# COPYRIGHT AND TRADEMARK NOTICE
The images, logos, and icons contained in this directory (the "Brand Assets") are
proprietary to Frigate, Inc. and are NOT covered by the MIT License governing the
rest of this repository.
1. TRADEMARK STATUS
The "Frigate" name and the accompanying logo are common law trademarks™ of
Frigate, Inc. Frigate, Inc. reserves all rights to these marks.
2. LIMITED PERMISSION FOR USE
Permission is hereby granted to display these Brand Assets strictly for the
following purposes:
a. To execute the software interface on a local machine.
b. To identify the software in documentation or reviews (nominative use).
3. RESTRICTIONS
You may NOT:
a. Use these Brand Assets to represent a derivative work (fork) as an official
product of Frigate, Inc.
b. Use these Brand Assets in a way that implies endorsement, sponsorship, or
commercial affiliation with Frigate, Inc.
c. Modify or alter the Brand Assets.
If you fork this repository with the intent to distribute a modified or competing
version of the software, you must replace these Brand Assets with your own
original content.
ALL RIGHTS RESERVED.
Copyright (c) 2026 Frigate, Inc.

View File

@@ -23,7 +23,7 @@ from markupsafe import escape
from peewee import SQL, fn, operator
from pydantic import ValidationError
from frigate.api.auth import allow_any_authenticated, allow_public, require_role
from frigate.api.auth import require_role
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags
@@ -37,6 +37,7 @@ from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.util.builtin import (
clean_camera_user_pass,
flatten_config_data,
get_tz_modifiers,
process_config_query_string,
update_yaml_file_bulk,
)
@@ -47,7 +48,6 @@ from frigate.util.services import (
restart_frigate,
vainfo_hwaccel,
)
from frigate.util.time import get_tz_modifiers
from frigate.version import VERSION
logger = logging.getLogger(__name__)
@@ -56,33 +56,29 @@ logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.app])
@router.get(
"/", response_class=PlainTextResponse, dependencies=[Depends(allow_public())]
)
@router.get("/", response_class=PlainTextResponse)
def is_healthy():
return "Frigate is running. Alive and healthy!"
@router.get("/config/schema.json", dependencies=[Depends(allow_public())])
@router.get("/config/schema.json")
def config_schema(request: Request):
return Response(
content=request.app.frigate_config.schema_json(), media_type="application/json"
)
@router.get(
"/version", response_class=PlainTextResponse, dependencies=[Depends(allow_public())]
)
@router.get("/version", response_class=PlainTextResponse)
def version():
return VERSION
@router.get("/stats", dependencies=[Depends(allow_any_authenticated())])
@router.get("/stats")
def stats(request: Request):
return JSONResponse(content=request.app.stats_emitter.get_latest_stats())
@router.get("/stats/history", dependencies=[Depends(allow_any_authenticated())])
@router.get("/stats/history")
def stats_history(request: Request, keys: str = None):
if keys:
keys = keys.split(",")
@@ -90,7 +86,7 @@ def stats_history(request: Request, keys: str = None):
return JSONResponse(content=request.app.stats_emitter.get_stats_history(keys))
@router.get("/metrics", dependencies=[Depends(allow_any_authenticated())])
@router.get("/metrics")
def metrics(request: Request):
"""Expose Prometheus metrics endpoint and update metrics with latest stats"""
# Retrieve the latest statistics and update the Prometheus metrics
@@ -107,7 +103,7 @@ def metrics(request: Request):
return Response(content=content, media_type=content_type)
@router.get("/config", dependencies=[Depends(allow_any_authenticated())])
@router.get("/config")
def config(request: Request):
config_obj: FrigateConfig = request.app.frigate_config
config: dict[str, dict[str, Any]] = config_obj.model_dump(
@@ -183,37 +179,7 @@ def config(request: Request):
return JSONResponse(content=config)
@router.get("/config/raw_paths", dependencies=[Depends(require_role(["admin"]))])
def config_raw_paths(request: Request):
"""Admin-only endpoint that returns camera paths and go2rtc streams without credential masking."""
config_obj: FrigateConfig = request.app.frigate_config
raw_paths = {"cameras": {}, "go2rtc": {"streams": {}}}
# Extract raw camera ffmpeg input paths
for camera_name, camera in config_obj.cameras.items():
raw_paths["cameras"][camera_name] = {
"ffmpeg": {
"inputs": [
{"path": input.path, "roles": input.roles}
for input in camera.ffmpeg.inputs
]
}
}
# Extract raw go2rtc stream URLs
go2rtc_config = config_obj.go2rtc.model_dump(
mode="json", warnings="none", exclude_none=True
)
for stream_name, stream in go2rtc_config.get("streams", {}).items():
if stream is None:
continue
raw_paths["go2rtc"]["streams"][stream_name] = stream
return JSONResponse(content=raw_paths)
@router.get("/config/raw", dependencies=[Depends(allow_any_authenticated())])
@router.get("/config/raw")
def config_raw():
config_file = find_config_file()
@@ -421,29 +387,20 @@ def config_set(request: Request, body: AppConfigSetBody):
old_config: FrigateConfig = request.app.frigate_config
request.app.frigate_config = config
if body.update_topic:
if body.update_topic.startswith("config/cameras/"):
_, _, camera, field = body.update_topic.split("/")
if body.update_topic and body.update_topic.startswith("config/cameras/"):
_, _, camera, field = body.update_topic.split("/")
if field == "add":
settings = config.cameras[camera]
elif field == "remove":
settings = old_config.cameras[camera]
else:
settings = config.get_nested_object(body.update_topic)
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
settings,
)
if field == "add":
settings = config.cameras[camera]
elif field == "remove":
settings = old_config.cameras[camera]
else:
# Generic handling for global config updates
settings = config.get_nested_object(body.update_topic)
# Publish None for removal, actual config for add/update
request.app.config_publisher.publisher.publish(
body.update_topic, settings
)
request.app.config_publisher.publish_update(
CameraConfigUpdateTopic(CameraConfigUpdateEnum[field], camera),
settings,
)
return JSONResponse(
content=(
@@ -456,7 +413,7 @@ def config_set(request: Request, body: AppConfigSetBody):
)
@router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())])
@router.get("/vainfo")
def vainfo():
vainfo = vainfo_hwaccel()
return JSONResponse(
@@ -476,16 +433,12 @@ def vainfo():
)
@router.get("/nvinfo", dependencies=[Depends(allow_any_authenticated())])
@router.get("/nvinfo")
def nvinfo():
return JSONResponse(content=get_nvidia_driver_info())
@router.get(
"/logs/{service}",
tags=[Tags.logs],
dependencies=[Depends(allow_any_authenticated())],
)
@router.get("/logs/{service}", tags=[Tags.logs])
async def logs(
service: str = Path(enum=["frigate", "nginx", "go2rtc"]),
download: Optional[str] = None,
@@ -593,7 +546,7 @@ def restart():
)
@router.get("/labels", dependencies=[Depends(allow_any_authenticated())])
@router.get("/labels")
def get_labels(camera: str = ""):
try:
if camera:
@@ -611,7 +564,7 @@ def get_labels(camera: str = ""):
return JSONResponse(content=labels)
@router.get("/sub_labels", dependencies=[Depends(allow_any_authenticated())])
@router.get("/sub_labels")
def get_sub_labels(split_joined: Optional[int] = None):
try:
events = Event.select(Event.sub_label).distinct()
@@ -642,7 +595,7 @@ def get_sub_labels(split_joined: Optional[int] = None):
return JSONResponse(content=sub_labels)
@router.get("/plus/models", dependencies=[Depends(allow_any_authenticated())])
@router.get("/plus/models")
def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
if not request.app.frigate_config.plus_api.is_active():
return JSONResponse(
@@ -684,9 +637,7 @@ def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
return JSONResponse(content=validModels)
@router.get(
"/recognized_license_plates", dependencies=[Depends(allow_any_authenticated())]
)
@router.get("/recognized_license_plates")
def get_recognized_license_plates(split_joined: Optional[int] = None):
try:
query = (
@@ -720,7 +671,7 @@ def get_recognized_license_plates(split_joined: Optional[int] = None):
return JSONResponse(content=recognized_license_plates)
@router.get("/timeline", dependencies=[Depends(allow_any_authenticated())])
@router.get("/timeline")
def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = None):
clauses = []
@@ -737,11 +688,7 @@ def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = N
clauses.append((Timeline.camera == camera))
if source_id:
source_ids = [sid.strip() for sid in source_id.split(",")]
if len(source_ids) == 1:
clauses.append((Timeline.source_id == source_ids[0]))
else:
clauses.append((Timeline.source_id.in_(source_ids)))
clauses.append((Timeline.source_id == source_id))
if len(clauses) == 0:
clauses.append((True))
@@ -757,7 +704,7 @@ def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = N
return JSONResponse(content=[t for t in timeline])
@router.get("/timeline/hourly", dependencies=[Depends(allow_any_authenticated())])
@router.get("/timeline/hourly")
def hourly_timeline(params: AppTimelineHourlyQueryParameters = Depends()):
"""Get hourly summary for timeline."""
cameras = params.cameras

View File

@@ -32,180 +32,9 @@ from frigate.models import User
logger = logging.getLogger(__name__)
def require_admin_by_default():
"""
Global admin requirement dependency for all endpoints by default.
This is set as the default dependency on the FastAPI app to ensure all
endpoints require admin access unless explicitly overridden with
allow_public(), allow_any_authenticated(), or require_role().
Port 5000 (internal) always has admin role set by the /auth endpoint,
so this check passes automatically for internal requests.
Certain paths are exempted from the global admin check because they must
be accessible before authentication (login, auth) or they have their own
route-level authorization dependencies that handle access control.
"""
# Paths that have route-level auth dependencies and should bypass global admin check
# These paths still have authorization - it's handled by their route-level dependencies
EXEMPT_PATHS = {
# Public auth endpoints (allow_public)
"/auth",
"/auth/first_time_login",
"/login",
"/logout",
# Authenticated user endpoints (allow_any_authenticated)
"/profile",
# Public info endpoints (allow_public)
"/",
"/version",
"/config/schema.json",
# Authenticated user endpoints (allow_any_authenticated)
"/metrics",
"/stats",
"/stats/history",
"/config",
"/config/raw",
"/vainfo",
"/nvinfo",
"/labels",
"/sub_labels",
"/plus/models",
"/recognized_license_plates",
"/timeline",
"/timeline/hourly",
"/recordings/storage",
"/recordings/summary",
"/recordings/unavailable",
"/go2rtc/streams",
"/event_ids",
"/events",
"/exports",
}
# Path prefixes that should be exempt (for paths with parameters)
EXEMPT_PREFIXES = (
"/logs/", # /logs/{service}
"/review", # /review, /review/{id}, /review/summary, /review_ids, etc.
"/reviews/", # /reviews/viewed, /reviews/delete
"/events/", # /events/{id}/thumbnail, /events/summary, etc. (camera-scoped)
"/export/", # /export/{camera}/start/..., /export/{id}/rename, /export/{id}
"/go2rtc/streams/", # /go2rtc/streams/{camera}
"/users/", # /users/{username}/password (has own auth)
"/preview/", # /preview/{file}/thumbnail.jpg
"/exports/", # /exports/{export_id}
"/vod/", # /vod/{camera_name}/...
"/notifications/", # /notifications/pubkey, /notifications/register
)
async def admin_checker(request: Request):
path = request.url.path
# Check exact path matches
if path in EXEMPT_PATHS:
return
# Check prefix matches for parameterized paths
if path.startswith(EXEMPT_PREFIXES):
return
# Dynamic camera path exemption:
# Any path whose first segment matches a configured camera name should
# bypass the global admin requirement. These endpoints enforce access
# via route-level dependencies (e.g. require_camera_access) to ensure
# per-camera authorization. This allows non-admin authenticated users
# (e.g. viewer role) to access camera-specific resources without
# needing admin privileges.
try:
if path.startswith("/"):
first_segment = path.split("/", 2)[1]
if (
first_segment
and first_segment in request.app.frigate_config.cameras
):
return
except Exception:
pass
# For all other paths, require admin role
# Port 5000 (internal) requests have admin role set automatically
role = request.headers.get("remote-role")
if role == "admin":
return
raise HTTPException(
status_code=403,
detail="Access denied. A user with the admin role is required.",
)
return admin_checker
def allow_public():
"""
Override dependency to allow unauthenticated access to an endpoint.
Use this for endpoints that should be publicly accessible without
authentication, such as login page, health checks, or pre-auth info.
Example:
@router.get("/public-endpoint", dependencies=[Depends(allow_public())])
"""
async def public_checker(request: Request):
return # Always allow
return public_checker
def allow_any_authenticated():
"""
Override dependency to allow any request that passed through the /auth endpoint.
Allows:
- Port 5000 internal requests (remote-user: "anonymous", remote-role: "admin")
- Authenticated users with JWT tokens (remote-user: username)
- Unauthenticated requests when auth is disabled (remote-user: "viewer")
Rejects:
- Requests with no remote-user header (did not pass through /auth endpoint)
Example:
@router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())])
"""
async def auth_checker(request: Request):
# Ensure a remote-user has been set by the /auth endpoint
username = request.headers.get("remote-user")
if username is None:
raise HTTPException(status_code=401, detail="Authentication required")
return
return auth_checker
router = APIRouter(tags=[Tags.auth])
@router.get("/auth/first_time_login", dependencies=[Depends(allow_public())])
def first_time_login(request: Request):
"""Return whether the admin first-time login help flag is set in config.
This endpoint is intentionally unauthenticated so the login page can
query it before a user is authenticated.
"""
auth_config = request.app.frigate_config.auth
return JSONResponse(
content={
"admin_first_time_login": auth_config.admin_first_time_login
or auth_config.reset_admin_password
}
)
class RateLimiter:
_limit = ""
@@ -297,10 +126,7 @@ def get_jwt_secret() -> str:
)
jwt_secret = secrets.token_hex(64)
try:
fd = os.open(
jwt_secret_file, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600
)
with os.fdopen(fd, "w") as f:
with open(jwt_secret_file, "w") as f:
f.write(str(jwt_secret))
except Exception:
logger.warning(
@@ -345,35 +171,9 @@ def verify_password(password, password_hash):
return secrets.compare_digest(password_hash, compare_hash)
def validate_password_strength(password: str) -> tuple[bool, Optional[str]]:
"""
Validate password strength.
Returns a tuple of (is_valid, error_message).
"""
if not password:
return False, "Password cannot be empty"
if len(password) < 8:
return False, "Password must be at least 8 characters long"
if not any(c.isupper() for c in password):
return False, "Password must contain at least one uppercase letter"
if not any(c.isdigit() for c in password):
return False, "Password must contain at least one digit"
if not any(c in '!@#$%^&*(),.?":{}|<>' for c in password):
return False, "Password must contain at least one special character"
return True, None
def create_encoded_jwt(user, role, expiration, secret):
return jwt.encode(
{"alg": "HS256"},
{"sub": user, "role": role, "exp": expiration, "iat": int(time.time())},
secret,
{"alg": "HS256"}, {"sub": user, "role": role, "exp": expiration}, secret
)
@@ -535,37 +335,7 @@ def resolve_role(
# Endpoints
@router.get(
"/auth",
dependencies=[Depends(allow_public())],
summary="Authenticate request",
description=(
"Authenticates the current request based on proxy headers or JWT token. "
"This endpoint verifies authentication credentials and manages JWT token refresh. "
"On success, no JSON body is returned; authentication state is communicated via response headers and cookies."
),
status_code=202,
responses={
202: {
"description": "Authentication Accepted (no response body)",
"headers": {
"remote-user": {
"description": 'Authenticated username or "viewer" in proxy-only mode',
"schema": {"type": "string"},
},
"remote-role": {
"description": "Resolved role (e.g., admin, viewer, or custom)",
"schema": {"type": "string"},
},
"Set-Cookie": {
"description": "May include refreshed JWT cookie when applicable",
"schema": {"type": "string"},
},
},
},
401: {"description": "Authentication Failed"},
},
)
@router.get("/auth")
def auth(request: Request):
auth_config: AuthConfig = request.app.frigate_config.auth
proxy_config: ProxyConfig = request.app.frigate_config.proxy
@@ -592,12 +362,12 @@ def auth(request: Request):
# if auth is disabled, just apply the proxy header map and return success
if not auth_config.enabled:
# pass the user header value from the upstream proxy if a mapping is specified
# or use viewer if none are specified
# or use anonymous if none are specified
user_header = proxy_config.header_map.user
success_response.headers["remote-user"] = (
request.headers.get(user_header, default="viewer")
request.headers.get(user_header, default="anonymous")
if user_header
else "viewer"
else "anonymous"
)
# parse header and resolve a valid role
@@ -664,27 +434,13 @@ def auth(request: Request):
return fail_response
# if the jwt cookie is expiring soon
if jwt_source == "cookie" and expiration - JWT_REFRESH <= current_time:
elif jwt_source == "cookie" and expiration - JWT_REFRESH <= current_time:
logger.debug("jwt token expiring soon, refreshing cookie")
# Check if password has been changed since token was issued
# If so, force re-login by rejecting the refresh
# ensure the user hasn't been deleted
try:
user_obj = User.get_by_id(user)
if user_obj.password_changed_at is not None:
token_iat = int(token.claims.get("iat", 0))
password_changed_timestamp = int(
user_obj.password_changed_at.timestamp()
)
if token_iat < password_changed_timestamp:
logger.debug(
"jwt token issued before password change, rejecting refresh"
)
return fail_response
User.get_by_id(user)
except DoesNotExist:
logger.debug("user not found")
return fail_response
new_expiration = current_time + JWT_SESSION_LENGTH
new_encoded_jwt = create_encoded_jwt(
user, role, new_expiration, request.app.jwt_token
@@ -705,14 +461,9 @@ def auth(request: Request):
return fail_response
@router.get(
"/profile",
dependencies=[Depends(allow_any_authenticated())],
summary="Get user profile",
description="Returns the current authenticated user's profile including username, role, and allowed cameras. This endpoint requires authentication and returns information about the user's permissions.",
)
@router.get("/profile")
def profile(request: Request):
username = request.headers.get("remote-user", "viewer")
username = request.headers.get("remote-user", "anonymous")
role = request.headers.get("remote-role", "viewer")
all_camera_names = set(request.app.frigate_config.cameras.keys())
@@ -724,12 +475,7 @@ def profile(request: Request):
)
@router.get(
"/logout",
dependencies=[Depends(allow_public())],
summary="Logout user",
description="Logs out the current user by clearing the session cookie. After logout, subsequent requests will require re-authentication.",
)
@router.get("/logout")
def logout(request: Request):
auth_config: AuthConfig = request.app.frigate_config.auth
response = RedirectResponse("/login", status_code=303)
@@ -740,12 +486,7 @@ def logout(request: Request):
limiter = Limiter(key_func=get_remote_addr)
@router.post(
"/login",
dependencies=[Depends(allow_public())],
summary="Login with credentials",
description='Authenticates a user with username and password. Returns a JWT token as a secure HTTP-only cookie that can be used for subsequent API requests. The JWT token can also be retrieved from the response and used as a Bearer token in the Authorization header.\n\nExample using Bearer token:\n```\ncurl -H "Authorization: Bearer <token_value>" https://frigate_ip:8971/api/profile\n```',
)
@router.post("/login")
@limiter.limit(limit_value=rateLimiter.get_limit)
def login(request: Request, body: AppPostLoginBody):
JWT_COOKIE_NAME = request.app.frigate_config.auth.cookie_name
@@ -774,21 +515,11 @@ def login(request: Request, body: AppPostLoginBody):
set_jwt_cookie(
response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE
)
# Clear admin_first_time_login flag after successful admin login so the
# UI stops showing the first-time login documentation link.
if role == "admin":
request.app.frigate_config.auth.admin_first_time_login = False
return response
return JSONResponse(content={"message": "Login failed"}, status_code=401)
@router.get(
"/users",
dependencies=[Depends(require_role(["admin"]))],
summary="Get all users",
description="Returns a list of all users with their usernames and roles. Requires admin role. Each user object contains the username and assigned role.",
)
@router.get("/users", dependencies=[Depends(require_role(["admin"]))])
def get_users():
exports = (
User.select(User.username, User.role).order_by(User.username).dicts().iterator()
@@ -796,12 +527,7 @@ def get_users():
return JSONResponse([e for e in exports])
@router.post(
"/users",
dependencies=[Depends(require_role(["admin"]))],
summary="Create new user",
description='Creates a new user with the specified username, password, and role. Requires admin role. Password must meet strength requirements: minimum 8 characters, at least one uppercase letter, at least one digit, and at least one special character (!@#$%^&*(),.?":{} |<>).',
)
@router.post("/users", dependencies=[Depends(require_role(["admin"]))])
def create_user(
request: Request,
body: AppPostUsersBody,
@@ -830,29 +556,13 @@ def create_user(
return JSONResponse(content={"username": body.username})
@router.delete(
"/users/{username}",
dependencies=[Depends(require_role(["admin"]))],
summary="Delete user",
description="Deletes a user by username. The built-in admin user cannot be deleted. Requires admin role. Returns success message or error if user not found.",
)
def delete_user(request: Request, username: str):
# Prevent deletion of the built-in admin user
if username == "admin":
return JSONResponse(
content={"message": "Cannot delete admin user"}, status_code=403
)
@router.delete("/users/{username}")
def delete_user(username: str):
User.delete_by_id(username)
return JSONResponse(content={"success": True})
@router.put(
"/users/{username}/password",
dependencies=[Depends(allow_any_authenticated())],
summary="Update user password",
description="Updates a user's password. Users can only change their own password unless they have admin role. Requires the current password to verify identity for non-admin users. Password must meet strength requirements: minimum 8 characters, at least one uppercase letter, at least one digit, and at least one special character (!@#$%^&*(),.?\":{} |<>). If user changes their own password, a new JWT cookie is automatically issued.",
)
@router.put("/users/{username}/password")
async def update_password(
request: Request,
username: str,
@@ -874,66 +584,15 @@ async def update_password(
HASH_ITERATIONS = request.app.frigate_config.auth.hash_iterations
try:
user = User.get_by_id(username)
except DoesNotExist:
return JSONResponse(content={"message": "User not found"}, status_code=404)
# Require old_password when non-admin user is changing any password
# Admin users changing passwords do NOT need to provide the current password
if current_role != "admin":
if not body.old_password:
return JSONResponse(
content={"message": "Current password is required"},
status_code=400,
)
if not verify_password(body.old_password, user.password_hash):
return JSONResponse(
content={"message": "Current password is incorrect"},
status_code=401,
)
# Validate new password strength
is_valid, error_message = validate_password_strength(body.password)
if not is_valid:
return JSONResponse(
content={"message": error_message},
status_code=400,
)
password_hash = hash_password(body.password, iterations=HASH_ITERATIONS)
User.update(
{
User.password_hash: password_hash,
User.password_changed_at: datetime.now(),
}
).where(User.username == username).execute()
User.set_by_id(username, {User.password_hash: password_hash})
response = JSONResponse(content={"success": True})
# If user changed their own password, issue a new JWT to keep them logged in
if current_username == username:
JWT_COOKIE_NAME = request.app.frigate_config.auth.cookie_name
JWT_COOKIE_SECURE = request.app.frigate_config.auth.cookie_secure
JWT_SESSION_LENGTH = request.app.frigate_config.auth.session_length
expiration = int(time.time()) + JWT_SESSION_LENGTH
encoded_jwt = create_encoded_jwt(
username, current_role, expiration, request.app.jwt_token
)
# Set new JWT cookie on response
set_jwt_cookie(
response, JWT_COOKIE_NAME, encoded_jwt, expiration, JWT_COOKIE_SECURE
)
return response
return JSONResponse(content={"success": True})
@router.put(
"/users/{username}/role",
dependencies=[Depends(require_role(["admin"]))],
summary="Update user role",
description="Updates a user's role. The built-in admin user's role cannot be modified. Requires admin role. Valid roles are defined in the configuration.",
)
async def update_role(
request: Request,

View File

@@ -3,23 +3,13 @@
import json
import logging
import re
from importlib.util import find_spec
from pathlib import Path
from urllib.parse import quote_plus
import httpx
import requests
from fastapi import APIRouter, Depends, Query, Request, Response
from fastapi import APIRouter, Depends, Request, Response
from fastapi.responses import JSONResponse
from onvif import ONVIFCamera, ONVIFError
from zeep.exceptions import Fault, TransportError
from zeep.transports import AsyncTransport
from frigate.api.auth import (
allow_any_authenticated,
require_camera_access,
require_role,
)
from frigate.api.auth import require_role
from frigate.api.defs.tags import Tags
from frigate.config.config import FrigateConfig
from frigate.util.builtin import clean_camera_user_pass
@@ -54,7 +44,7 @@ def _is_valid_host(host: str) -> bool:
return False
@router.get("/go2rtc/streams", dependencies=[Depends(allow_any_authenticated())])
@router.get("/go2rtc/streams")
def go2rtc_streams():
r = requests.get("http://127.0.0.1:1984/api/streams")
if not r.ok:
@@ -70,9 +60,7 @@ def go2rtc_streams():
return JSONResponse(content=stream_data)
@router.get(
"/go2rtc/streams/{camera_name}", dependencies=[Depends(require_camera_access)]
)
@router.get("/go2rtc/streams/{camera_name}")
def go2rtc_camera_stream(request: Request, camera_name: str):
r = requests.get(
f"http://127.0.0.1:1984/api/streams?src={camera_name}&video=all&audio=all&microphone"
@@ -167,7 +155,7 @@ def go2rtc_delete_stream(stream_name: str):
)
@router.get("/ffprobe", dependencies=[Depends(require_role(["admin"]))])
@router.get("/ffprobe")
def ffprobe(request: Request, paths: str = "", detailed: bool = False):
path_param = paths
@@ -211,30 +199,19 @@ def ffprobe(request: Request, paths: str = "", detailed: bool = False):
request.app.frigate_config.ffmpeg, path.strip(), detailed=detailed
)
if ffprobe.returncode != 0:
try:
stderr_decoded = ffprobe.stderr.decode("utf-8")
except UnicodeDecodeError:
try:
stderr_decoded = ffprobe.stderr.decode("unicode_escape")
except Exception:
stderr_decoded = str(ffprobe.stderr)
stderr_lines = [
line.strip() for line in stderr_decoded.split("\n") if line.strip()
]
result = {
"return_code": ffprobe.returncode,
"stderr": stderr_lines,
"stdout": "",
}
else:
result = {
"return_code": ffprobe.returncode,
"stderr": [],
"stdout": json.loads(ffprobe.stdout.decode("unicode_escape").strip()),
}
result = {
"return_code": ffprobe.returncode,
"stderr": (
ffprobe.stderr.decode("unicode_escape").strip()
if ffprobe.returncode != 0
else ""
),
"stdout": (
json.loads(ffprobe.stdout.decode("unicode_escape").strip())
if ffprobe.returncode == 0
else ""
),
}
# Add detailed metadata if requested and probe was successful
if detailed and ffprobe.returncode == 0 and result["stdout"]:
@@ -464,537 +441,3 @@ def _extract_fps(r_frame_rate: str) -> float | None:
return round(float(num) / float(den), 2)
except (ValueError, ZeroDivisionError):
return None
@router.get(
"/onvif/probe",
dependencies=[Depends(require_role(["admin"]))],
summary="Probe ONVIF device",
description=(
"Probe an ONVIF device to determine capabilities and optionally test available stream URIs. "
"Query params: host (required), port (default 80), username, password, test (boolean), "
"auth_type (basic or digest, default basic)."
),
)
async def onvif_probe(
request: Request,
host: str = Query(None),
port: int = Query(80),
username: str = Query(""),
password: str = Query(""),
test: bool = Query(False),
auth_type: str = Query("basic"), # Add auth_type parameter
):
"""
Probe a single ONVIF device to determine capabilities.
Connects to an ONVIF device and queries for:
- Device information (manufacturer, model)
- Media profiles count
- PTZ support
- Available presets
- Autotracking support
Query Parameters:
host: Device host/IP address (required)
port: Device port (default 80)
username: ONVIF username (optional)
password: ONVIF password (optional)
test: run ffprobe on the stream (optional)
auth_type: Authentication type - "basic" or "digest" (default "basic")
Returns:
JSON with device capabilities information
"""
if not host:
return JSONResponse(
content={"success": False, "message": "host parameter is required"},
status_code=400,
)
# Validate host format
if not _is_valid_host(host):
return JSONResponse(
content={"success": False, "message": "Invalid host format"},
status_code=400,
)
# Validate auth_type
if auth_type not in ["basic", "digest"]:
return JSONResponse(
content={
"success": False,
"message": "auth_type must be 'basic' or 'digest'",
},
status_code=400,
)
onvif_camera = None
try:
logger.debug(f"Probing ONVIF device at {host}:{port} with {auth_type} auth")
try:
wsdl_base = None
spec = find_spec("onvif")
if spec and getattr(spec, "origin", None):
wsdl_base = str(Path(spec.origin).parent / "wsdl")
except Exception:
wsdl_base = None
onvif_camera = ONVIFCamera(
host, port, username or "", password or "", wsdl_dir=wsdl_base
)
# Configure digest authentication if requested
if auth_type == "digest" and username and password:
# Create httpx client with digest auth
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
# Replace the transport in the zeep client
transport = AsyncTransport(client=client)
# Update the xaddr before setting transport
await onvif_camera.update_xaddrs()
# Replace transport in all services
if hasattr(onvif_camera, "devicemgmt"):
onvif_camera.devicemgmt.zeep_client.transport = transport
if hasattr(onvif_camera, "media"):
onvif_camera.media.zeep_client.transport = transport
if hasattr(onvif_camera, "ptz"):
onvif_camera.ptz.zeep_client.transport = transport
logger.debug("Configured digest authentication")
else:
await onvif_camera.update_xaddrs()
# Get device information
device_info = {
"manufacturer": "Unknown",
"model": "Unknown",
"firmware_version": "Unknown",
}
try:
device_service = await onvif_camera.create_devicemgmt_service()
# Update transport for device service if digest auth
if auth_type == "digest" and username and password:
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
transport = AsyncTransport(client=client)
device_service.zeep_client.transport = transport
device_info_resp = await device_service.GetDeviceInformation()
manufacturer = getattr(device_info_resp, "Manufacturer", None) or (
device_info_resp.get("Manufacturer")
if isinstance(device_info_resp, dict)
else None
)
model = getattr(device_info_resp, "Model", None) or (
device_info_resp.get("Model")
if isinstance(device_info_resp, dict)
else None
)
firmware = getattr(device_info_resp, "FirmwareVersion", None) or (
device_info_resp.get("FirmwareVersion")
if isinstance(device_info_resp, dict)
else None
)
device_info.update(
{
"manufacturer": manufacturer or "Unknown",
"model": model or "Unknown",
"firmware_version": firmware or "Unknown",
}
)
except Exception as e:
logger.debug(f"Failed to get device info: {e}")
# Get media profiles
profiles = []
profiles_count = 0
first_profile_token = None
ptz_config_token = None
try:
media_service = await onvif_camera.create_media_service()
# Update transport for media service if digest auth
if auth_type == "digest" and username and password:
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
transport = AsyncTransport(client=client)
media_service.zeep_client.transport = transport
profiles = await media_service.GetProfiles()
profiles_count = len(profiles) if profiles else 0
if profiles and len(profiles) > 0:
p = profiles[0]
first_profile_token = getattr(p, "token", None) or (
p.get("token") if isinstance(p, dict) else None
)
# Get PTZ configuration token from the profile
ptz_configuration = getattr(p, "PTZConfiguration", None) or (
p.get("PTZConfiguration") if isinstance(p, dict) else None
)
if ptz_configuration:
ptz_config_token = getattr(ptz_configuration, "token", None) or (
ptz_configuration.get("token")
if isinstance(ptz_configuration, dict)
else None
)
except Exception as e:
logger.debug(f"Failed to get media profiles: {e}")
# Check PTZ support and capabilities
ptz_supported = False
presets_count = 0
autotrack_supported = False
try:
ptz_service = await onvif_camera.create_ptz_service()
# Update transport for PTZ service if digest auth
if auth_type == "digest" and username and password:
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
transport = AsyncTransport(client=client)
ptz_service.zeep_client.transport = transport
# Check if PTZ service is available
try:
await ptz_service.GetServiceCapabilities()
ptz_supported = True
logger.debug("PTZ service is available")
except Exception as e:
logger.debug(f"PTZ service not available: {e}")
ptz_supported = False
# Try to get presets if PTZ is supported and we have a profile
if ptz_supported and first_profile_token:
try:
presets_resp = await ptz_service.GetPresets(
{"ProfileToken": first_profile_token}
)
presets_count = len(presets_resp) if presets_resp else 0
logger.debug(f"Found {presets_count} presets")
except Exception as e:
logger.debug(f"Failed to get presets: {e}")
presets_count = 0
# Check for autotracking support - requires both FOV relative movement and MoveStatus
if ptz_supported and first_profile_token and ptz_config_token:
# First check for FOV relative movement support
pt_r_fov_supported = False
try:
config_request = ptz_service.create_type("GetConfigurationOptions")
config_request.ConfigurationToken = ptz_config_token
ptz_config = await ptz_service.GetConfigurationOptions(
config_request
)
if ptz_config:
# Check for pt-r-fov support
spaces = getattr(ptz_config, "Spaces", None) or (
ptz_config.get("Spaces")
if isinstance(ptz_config, dict)
else None
)
if spaces:
rel_pan_tilt_space = getattr(
spaces, "RelativePanTiltTranslationSpace", None
) or (
spaces.get("RelativePanTiltTranslationSpace")
if isinstance(spaces, dict)
else None
)
if rel_pan_tilt_space:
# Look for FOV space
for i, space in enumerate(rel_pan_tilt_space):
uri = None
if isinstance(space, dict):
uri = space.get("URI")
else:
uri = getattr(space, "URI", None)
if uri and "TranslationSpaceFov" in uri:
pt_r_fov_supported = True
logger.debug(
"FOV relative movement (pt-r-fov) supported"
)
break
logger.debug(f"PTZ config spaces: {ptz_config}")
except Exception as e:
logger.debug(f"Failed to check FOV relative movement: {e}")
pt_r_fov_supported = False
# Now check for MoveStatus support via GetServiceCapabilities
if pt_r_fov_supported:
try:
service_capabilities_request = ptz_service.create_type(
"GetServiceCapabilities"
)
service_capabilities = await ptz_service.GetServiceCapabilities(
service_capabilities_request
)
# Look for MoveStatus in the capabilities
move_status_capable = False
if service_capabilities:
# Try to find MoveStatus key recursively
def find_move_status(obj, key="MoveStatus"):
if isinstance(obj, dict):
if key in obj:
return obj[key]
for v in obj.values():
result = find_move_status(v, key)
if result is not None:
return result
elif hasattr(obj, key):
return getattr(obj, key)
elif hasattr(obj, "__dict__"):
for v in vars(obj).values():
result = find_move_status(v, key)
if result is not None:
return result
return None
move_status_value = find_move_status(service_capabilities)
# MoveStatus should return "true" if supported
if isinstance(move_status_value, bool):
move_status_capable = move_status_value
elif isinstance(move_status_value, str):
move_status_capable = (
move_status_value.lower() == "true"
)
logger.debug(f"MoveStatus capability: {move_status_value}")
# Autotracking is supported if both conditions are met
autotrack_supported = pt_r_fov_supported and move_status_capable
if autotrack_supported:
logger.debug(
"Autotracking fully supported (pt-r-fov + MoveStatus)"
)
else:
logger.debug(
f"Autotracking not fully supported - pt-r-fov: {pt_r_fov_supported}, MoveStatus: {move_status_capable}"
)
except Exception as e:
logger.debug(f"Failed to check MoveStatus support: {e}")
autotrack_supported = False
except Exception as e:
logger.debug(f"Failed to probe PTZ service: {e}")
result = {
"success": True,
"host": host,
"port": port,
"manufacturer": device_info["manufacturer"],
"model": device_info["model"],
"firmware_version": device_info["firmware_version"],
"profiles_count": profiles_count,
"ptz_supported": ptz_supported,
"presets_count": presets_count,
"autotrack_supported": autotrack_supported,
}
# Gather RTSP candidates
rtsp_candidates: list[dict] = []
try:
media_service = await onvif_camera.create_media_service()
# Update transport for media service if digest auth
if auth_type == "digest" and username and password:
auth = httpx.DigestAuth(username, password)
client = httpx.AsyncClient(auth=auth, timeout=10.0)
transport = AsyncTransport(client=client)
media_service.zeep_client.transport = transport
if profiles_count and media_service:
for p in profiles or []:
token = getattr(p, "token", None) or (
p.get("token") if isinstance(p, dict) else None
)
if not token:
continue
try:
stream_setup = {
"Stream": "RTP-Unicast",
"Transport": {"Protocol": "RTSP"},
}
stream_req = {
"ProfileToken": token,
"StreamSetup": stream_setup,
}
stream_uri_resp = await media_service.GetStreamUri(stream_req)
uri = (
stream_uri_resp.get("Uri")
if isinstance(stream_uri_resp, dict)
else getattr(stream_uri_resp, "Uri", None)
)
if uri:
logger.debug(
f"GetStreamUri returned for token {token}: {uri}"
)
# If credentials were provided, do NOT add the unauthenticated URI.
try:
if isinstance(uri, str) and uri.startswith("rtsp://"):
if username and password and "@" not in uri:
# Inject URL-encoded credentials and add only the
# authenticated version.
cred = f"{quote_plus(username)}:{quote_plus(password)}@"
injected = uri.replace(
"rtsp://", f"rtsp://{cred}", 1
)
rtsp_candidates.append(
{
"source": "GetStreamUri",
"profile_token": token,
"uri": injected,
}
)
else:
# No credentials provided or URI already contains
# credentials — add the URI as returned.
rtsp_candidates.append(
{
"source": "GetStreamUri",
"profile_token": token,
"uri": uri,
}
)
else:
# Non-RTSP URIs (e.g., http-flv) — add as returned.
rtsp_candidates.append(
{
"source": "GetStreamUri",
"profile_token": token,
"uri": uri,
}
)
except Exception as e:
logger.debug(
f"Skipping stream URI for token {token} due to processing error: {e}"
)
continue
except Exception:
logger.debug(
f"GetStreamUri failed for token {token}", exc_info=True
)
continue
# Add common RTSP patterns as fallback
if not rtsp_candidates:
common_paths = [
"/h264",
"/live.sdp",
"/media.amp",
"/Streaming/Channels/101",
"/Streaming/Channels/1",
"/stream1",
"/cam/realmonitor?channel=1&subtype=0",
"/11",
]
# Use URL-encoded credentials for pattern fallback URIs when provided
auth_str = (
f"{quote_plus(username)}:{quote_plus(password)}@"
if username and password
else ""
)
rtsp_port = 554
for path in common_paths:
uri = f"rtsp://{auth_str}{host}:{rtsp_port}{path}"
rtsp_candidates.append({"source": "pattern", "uri": uri})
except Exception:
logger.debug("Failed to collect RTSP candidates")
# Optionally test RTSP candidates using ffprobe_stream
tested_candidates = []
if test and rtsp_candidates:
for c in rtsp_candidates:
uri = c["uri"]
to_test = [uri]
try:
if (
username
and password
and isinstance(uri, str)
and uri.startswith("rtsp://")
and "@" not in uri
):
cred = f"{quote_plus(username)}:{quote_plus(password)}@"
cred_uri = uri.replace("rtsp://", f"rtsp://{cred}", 1)
if cred_uri not in to_test:
to_test.append(cred_uri)
except Exception:
pass
for test_uri in to_test:
try:
probe = ffprobe_stream(
request.app.frigate_config.ffmpeg, test_uri, detailed=False
)
                    logger.debug(f"ffprobe result for {test_uri}: {probe}")
ok = probe is not None and getattr(probe, "returncode", 1) == 0
tested_candidates.append(
{
"uri": test_uri,
"source": c.get("source"),
"ok": ok,
"profile_token": c.get("profile_token"),
}
)
except Exception as e:
logger.debug(f"Unable to probe stream: {e}")
tested_candidates.append(
{
"uri": test_uri,
"source": c.get("source"),
"ok": False,
"profile_token": c.get("profile_token"),
}
)
result["rtsp_candidates"] = rtsp_candidates
if test:
result["rtsp_tested"] = tested_candidates
logger.debug(f"ONVIF probe successful: {result}")
return JSONResponse(content=result)
except ONVIFError as e:
logger.warning(f"ONVIF error probing {host}:{port}: {e}")
return JSONResponse(
content={"success": False, "message": "ONVIF error"},
status_code=400,
)
except (Fault, TransportError) as e:
logger.warning(f"Connection error probing {host}:{port}: {e}")
return JSONResponse(
content={"success": False, "message": "Connection error"},
status_code=503,
)
except Exception as e:
logger.warning(f"Error probing ONVIF device at {host}:{port}, {e}")
return JSONResponse(
content={"success": False, "message": "Probe failed"},
status_code=500,
)
finally:
# Best-effort cleanup of ONVIF camera client session
if onvif_camera is not None:
try:
# Check if the camera has a close method and call it
if hasattr(onvif_camera, "close"):
await onvif_camera.close()
except Exception as e:
logger.debug(f"Error closing ONVIF camera session: {e}")

View File

@@ -3,9 +3,7 @@
import datetime
import logging
import os
import random
import shutil
import string
from typing import Any
import cv2
@@ -19,8 +17,6 @@ from frigate.api.auth import require_role
from frigate.api.defs.request.classification_body import (
AudioTranscriptionBody,
DeleteFaceImagesBody,
GenerateObjectExamplesBody,
GenerateStateExamplesBody,
RenameFaceBody,
)
from frigate.api.defs.response.classification_response import (
@@ -31,18 +27,10 @@ from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera import DetectConfig
from frigate.config.classification import ObjectClassificationType
from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
from frigate.const import CLIPS_DIR, FACE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event
from frigate.util.classification import (
collect_object_classification_examples,
collect_state_classification_examples,
get_dataset_image_count,
read_training_metadata,
write_training_metadata,
)
from frigate.util.file import get_event_snapshot
from frigate.util.path import get_event_snapshot
logger = logging.getLogger(__name__)
@@ -116,18 +104,9 @@ def reclassify_face(request: Request, body: dict = None):
context: EmbeddingsContext = request.app.embeddings
response = context.reprocess_face(training_file)
if not isinstance(response, dict):
return JSONResponse(
status_code=500,
content={
"success": False,
"message": "Could not process request.",
},
)
return JSONResponse(
status_code=200 if response.get("success", True) else 400,
content=response,
status_code=200,
)
@@ -180,7 +159,8 @@ def train_face(request: Request, name: str, body: dict = None):
new_name = f"{sanitized_name}-{datetime.datetime.now().timestamp()}.webp"
new_file_folder = os.path.join(FACE_DIR, f"{sanitized_name}")
os.makedirs(new_file_folder, exist_ok=True)
if not os.path.exists(new_file_folder):
os.mkdir(new_file_folder)
if training_file_name:
shutil.move(training_file, os.path.join(new_file_folder, new_name))
@@ -544,7 +524,6 @@ def transcribe_audio(request: Request, body: AudioTranscriptionBody):
status_code=409, # 409 Conflict
)
else:
logger.debug(f"Failed to transcribe audio, response: {response}")
return JSONResponse(
content={
"success": False,
@@ -569,112 +548,23 @@ def get_classification_dataset(name: str):
dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(name), "dataset")
if not os.path.exists(dataset_dir):
return JSONResponse(
status_code=200, content={"categories": {}, "training_metadata": None}
)
return JSONResponse(status_code=200, content={})
for category_name in os.listdir(dataset_dir):
category_dir = os.path.join(dataset_dir, category_name)
for name in os.listdir(dataset_dir):
category_dir = os.path.join(dataset_dir, name)
if not os.path.isdir(category_dir):
continue
dataset_dict[category_name] = []
dataset_dict[name] = []
for file in filter(
lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))),
os.listdir(category_dir),
):
dataset_dict[category_name].append(file)
dataset_dict[name].append(file)
# Get training metadata
metadata = read_training_metadata(sanitize_filename(name))
current_image_count = get_dataset_image_count(sanitize_filename(name))
if metadata is None:
training_metadata = {
"has_trained": False,
"last_training_date": None,
"last_training_image_count": 0,
"current_image_count": current_image_count,
"new_images_count": current_image_count,
"dataset_changed": current_image_count > 0,
}
else:
last_training_count = metadata.get("last_training_image_count", 0)
# Dataset has changed if count is different (either added or deleted images)
dataset_changed = current_image_count != last_training_count
# Only show positive count for new images (ignore deletions in the count display)
new_images_count = max(0, current_image_count - last_training_count)
training_metadata = {
"has_trained": True,
"last_training_date": metadata.get("last_training_date"),
"last_training_image_count": last_training_count,
"current_image_count": current_image_count,
"new_images_count": new_images_count,
"dataset_changed": dataset_changed,
}
return JSONResponse(
status_code=200,
content={
"categories": dataset_dict,
"training_metadata": training_metadata,
},
)
@router.get(
"/classification/attributes",
summary="Get custom classification attributes",
description="""Returns custom classification attributes for a given object type.
Only includes models with classification_type set to 'attribute'.
By default returns a flat sorted list of all attribute labels.
If group_by_model is true, returns attributes grouped by model name.""",
)
def get_custom_attributes(
request: Request, object_type: str = None, group_by_model: bool = False
):
models_with_attributes = {}
for (
model_key,
model_config,
) in request.app.frigate_config.classification.custom.items():
if (
not model_config.enabled
or not model_config.object_config
or model_config.object_config.classification_type
!= ObjectClassificationType.attribute
):
continue
model_objects = getattr(model_config.object_config, "objects", []) or []
if object_type is not None and object_type not in model_objects:
continue
dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(model_key), "dataset")
if not os.path.exists(dataset_dir):
continue
attributes = []
for category_name in os.listdir(dataset_dir):
category_dir = os.path.join(dataset_dir, category_name)
if os.path.isdir(category_dir) and category_name != "none":
attributes.append(category_name)
if attributes:
model_name = model_config.name or model_key
models_with_attributes[model_name] = sorted(attributes)
if group_by_model:
return JSONResponse(content=models_with_attributes)
else:
# Flatten to a unique sorted list
all_attributes = set()
for attributes in models_with_attributes.values():
all_attributes.update(attributes)
return JSONResponse(content=sorted(list(all_attributes)))
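# Editorial sketch (illustrative only): the two response shapes described by
# get_custom_attributes above, using made-up model and attribute names.
grouped_response = {  # group_by_model=true
    "door_state": ["closed", "open"],
    "vehicle_color": ["blue", "red"],
}
flat_response = sorted({a for attrs in grouped_response.values() for a in attrs})
# default (group_by_model omitted or false) -> ["blue", "closed", "open", "red"]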
return JSONResponse(status_code=200, content=dataset_dict)
@router.get(
@@ -765,112 +655,12 @@ def delete_classification_dataset_images(
if os.path.isfile(file_path):
os.unlink(file_path)
if os.path.exists(folder) and not os.listdir(folder) and category.lower() != "none":
os.rmdir(folder)
return JSONResponse(
content=({"success": True, "message": "Successfully deleted images."}),
content=({"success": True, "message": "Successfully deleted faces."}),
status_code=200,
)
@router.put(
"/classification/{name}/dataset/{old_category}/rename",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Rename a classification category",
description="""Renames a classification category for a given classification model.
The old category must exist and the new name must be valid. Returns a success message or an error if the name is invalid.""",
)
def rename_classification_category(
request: Request, name: str, old_category: str, body: dict = None
):
config: FrigateConfig = request.app.frigate_config
if name not in config.classification.custom:
return JSONResponse(
content=(
{
"success": False,
"message": f"{name} is not a known classification model.",
}
),
status_code=404,
)
json: dict[str, Any] = body or {}
new_category = sanitize_filename(json.get("new_category", ""))
if not new_category:
return JSONResponse(
content=(
{
"success": False,
"message": "New category name is required.",
}
),
status_code=400,
)
old_folder = os.path.join(
CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(old_category)
)
new_folder = os.path.join(
CLIPS_DIR, sanitize_filename(name), "dataset", new_category
)
if not os.path.exists(old_folder):
return JSONResponse(
content=(
{
"success": False,
"message": f"Category {old_category} does not exist.",
}
),
status_code=404,
)
if os.path.exists(new_folder):
return JSONResponse(
content=(
{
"success": False,
"message": f"Category {new_category} already exists.",
}
),
status_code=400,
)
try:
os.rename(old_folder, new_folder)
# Mark dataset as ready to train by resetting training metadata
# This ensures the dataset is marked as changed after renaming
sanitized_name = sanitize_filename(name)
write_training_metadata(sanitized_name, 0)
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully renamed category to {new_category}.",
}
),
status_code=200,
)
except Exception as e:
logger.error(f"Error renaming category: {e}")
return JSONResponse(
content=(
{
"success": False,
"message": "Failed to rename category",
}
),
status_code=500,
)
@router.post(
"/classification/{name}/dataset/categorize",
response_model=GenericResponse,
@@ -911,14 +701,13 @@ def categorize_classification_image(request: Request, name: str, body: dict = No
status_code=404,
)
random_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
timestamp = datetime.datetime.now().timestamp()
new_name = f"{category}-{timestamp}-{random_id}.png"
new_name = f"{category}-{datetime.datetime.now().timestamp()}.png"
new_file_folder = os.path.join(
CLIPS_DIR, sanitize_filename(name), "dataset", category
)
os.makedirs(new_file_folder, exist_ok=True)
if not os.path.exists(new_file_folder):
os.mkdir(new_file_folder)
    # use opencv because webp images cannot be used to train
img = cv2.imread(training_file)
@@ -926,47 +715,7 @@ def categorize_classification_image(request: Request, name: str, body: dict = No
os.unlink(training_file)
return JSONResponse(
content=({"success": True, "message": "Successfully categorized image."}),
status_code=200,
)
@router.post(
"/classification/{name}/dataset/{category}/create",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Create an empty classification category folder",
description="""Creates an empty folder for a classification category.
This is used to create folders for categories that don't have images yet.
Returns a success message or an error if the name is invalid.""",
)
def create_classification_category(request: Request, name: str, category: str):
config: FrigateConfig = request.app.frigate_config
if name not in config.classification.custom:
return JSONResponse(
content=(
{
"success": False,
"message": f"{name} is not a known classification model.",
}
),
status_code=404,
)
category_folder = os.path.join(
CLIPS_DIR, sanitize_filename(name), "dataset", sanitize_filename(category)
)
os.makedirs(category_folder, exist_ok=True)
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully created category folder: {category}",
}
),
content=({"success": True, "message": "Successfully deleted faces."}),
status_code=200,
)
@@ -1004,87 +753,6 @@ def delete_classification_train_images(request: Request, name: str, body: dict =
os.unlink(file_path)
return JSONResponse(
content=({"success": True, "message": "Successfully deleted images."}),
status_code=200,
)
@router.post(
"/classification/generate_examples/state",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Generate state classification examples",
)
async def generate_state_examples(request: Request, body: GenerateStateExamplesBody):
"""Generate examples for state classification."""
model_name = sanitize_filename(body.model_name)
cameras_normalized = {
camera_name: tuple(crop)
for camera_name, crop in body.cameras.items()
if camera_name in request.app.frigate_config.cameras
}
collect_state_classification_examples(model_name, cameras_normalized)
return JSONResponse(
content={"success": True, "message": "Example generation completed"},
status_code=200,
)
@router.post(
"/classification/generate_examples/object",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Generate object classification examples",
)
async def generate_object_examples(request: Request, body: GenerateObjectExamplesBody):
"""Generate examples for object classification."""
model_name = sanitize_filename(body.model_name)
collect_object_classification_examples(model_name, body.label)
return JSONResponse(
content={"success": True, "message": "Example generation completed"},
status_code=200,
)
@router.delete(
"/classification/{name}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete a classification model",
description="""Deletes a specific classification model and all its associated data.
Works even if the model is not in the config (e.g., partially created during wizard).
Returns a success message.""",
)
def delete_classification_model(request: Request, name: str):
sanitized_name = sanitize_filename(name)
# Delete the classification model's data directory in clips
data_dir = os.path.join(CLIPS_DIR, sanitized_name)
if os.path.exists(data_dir):
try:
shutil.rmtree(data_dir)
logger.info(f"Deleted classification data directory for {name}")
except Exception as e:
logger.debug(f"Failed to delete data directory for {name}: {e}")
# Delete the classification model's files in model_cache
model_dir = os.path.join(MODEL_CACHE_DIR, sanitized_name)
if os.path.exists(model_dir):
try:
shutil.rmtree(model_dir)
logger.info(f"Deleted classification model directory for {name}")
except Exception as e:
logger.debug(f"Failed to delete model directory for {name}: {e}")
return JSONResponse(
content=(
{
"success": True,
"message": f"Successfully deleted classification model {name}.",
}
),
content=({"success": True, "message": "Successfully deleted faces."}),
status_code=200,
)

View File

@@ -12,7 +12,6 @@ class EventsQueryParams(BaseModel):
labels: Optional[str] = "all"
sub_label: Optional[str] = "all"
sub_labels: Optional[str] = "all"
attributes: Optional[str] = "all"
zone: Optional[str] = "all"
zones: Optional[str] = "all"
limit: Optional[int] = 100
@@ -59,8 +58,6 @@ class EventsSearchQueryParams(BaseModel):
limit: Optional[int] = 50
cameras: Optional[str] = "all"
labels: Optional[str] = "all"
sub_labels: Optional[str] = "all"
attributes: Optional[str] = "all"
zones: Optional[str] = "all"
after: Optional[float] = None
before: Optional[float] = None

View File

@@ -11,7 +11,6 @@ class AppConfigSetBody(BaseModel):
class AppPutPasswordBody(BaseModel):
password: str
old_password: Optional[str] = None
class AppPostUsersBody(BaseModel):

View File

@@ -1,31 +1,17 @@
from typing import Dict, List, Tuple
from typing import List
from pydantic import BaseModel, Field
class RenameFaceBody(BaseModel):
new_name: str = Field(description="New name for the face")
new_name: str
class AudioTranscriptionBody(BaseModel):
event_id: str = Field(description="ID of the event to transcribe audio for")
event_id: str
class DeleteFaceImagesBody(BaseModel):
ids: List[str] = Field(
description="List of image filenames to delete from the face folder"
)
class GenerateStateExamplesBody(BaseModel):
model_name: str = Field(description="Name of the classification model")
cameras: Dict[str, Tuple[float, float, float, float]] = Field(
description="Dictionary mapping camera names to normalized crop coordinates in [x1, y1, x2, y2] format (values 0-1)"
)
class GenerateObjectExamplesBody(BaseModel):
model_name: str = Field(description="Name of the classification model")
label: str = Field(
description="Object label to collect examples for (e.g., 'person', 'car')"
)
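# Editorial sketch: example payloads matching the two request models above.
# The model names, camera name, and crop coordinates are illustrative only.
state_body = {
    "model_name": "garage_door",
    "cameras": {
        "driveway": [0.25, 0.10, 0.75, 0.60],  # normalized x1, y1, x2, y2
    },
}
object_body = {
    "model_name": "vehicle_type",
    "label": "car",
}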

View File

@@ -24,18 +24,12 @@ class EventsLPRBody(BaseModel):
)
class EventsAttributesBody(BaseModel):
attributes: List[str] = Field(
title="Selected classification attributes for the event",
default_factory=list,
)
class EventsDescriptionBody(BaseModel):
description: Union[str, None] = Field(title="The description of the event")
class EventsCreateBody(BaseModel):
source_type: Optional[str] = "api"
sub_label: Optional[str] = None
score: Optional[float] = 0
duration: Optional[int] = 30

View File

@@ -1,4 +1,4 @@
from typing import Optional, Union
from typing import Union
from pydantic import BaseModel, Field
from pydantic.json_schema import SkipJsonSchema
@@ -16,5 +16,5 @@ class ExportRecordingsBody(BaseModel):
source: PlaybackSourceEnum = Field(
default=PlaybackSourceEnum.recordings, title="Playback source"
)
name: Optional[str] = Field(title="Friendly name", default=None, max_length=256)
name: str = Field(title="Friendly name", default=None, max_length=256)
image_path: Union[str, SkipJsonSchema[None]] = None

View File

@@ -2,7 +2,6 @@
import base64
import datetime
import json
import logging
import os
import random
@@ -22,7 +21,6 @@ from peewee import JOIN, DoesNotExist, fn, operator
from playhouse.shortcuts import model_to_dict
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access,
require_role,
@@ -37,7 +35,6 @@ from frigate.api.defs.query.regenerate_query_parameters import (
RegenerateQueryParameters,
)
from frigate.api.defs.request.events_body import (
EventsAttributesBody,
EventsCreateBody,
EventsDeleteBody,
EventsDescriptionBody,
@@ -56,13 +53,12 @@ from frigate.api.defs.response.event_response import (
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.comms.event_metadata_updater import EventMetadataTypeEnum
from frigate.config.classification import ObjectClassificationType
from frigate.const import CLIPS_DIR, TRIGGER_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event, ReviewSegment, Timeline, Trigger
from frigate.track.object_processing import TrackedObject
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.time import get_dst_transitions, get_tz_modifiers
from frigate.util.builtin import get_tz_modifiers
from frigate.util.path import get_event_thumbnail_bytes
logger = logging.getLogger(__name__)
@@ -72,7 +68,6 @@ router = APIRouter(tags=[Tags.events])
@router.get(
"/events",
response_model=list[EventResponse],
dependencies=[Depends(allow_any_authenticated())],
summary="Get events",
description="Returns a list of events.",
)
@@ -101,8 +96,6 @@ def events(
if sub_labels == "all" and sub_label != "all":
sub_labels = sub_label
attributes = unquote(params.attributes)
zone = params.zone
zones = params.zones
@@ -191,17 +184,6 @@ def events(
sub_label_clause = reduce(operator.or_, sub_label_clauses)
clauses.append((sub_label_clause))
if attributes != "all":
# Custom classification results are stored as data[model_name] = result_value
filtered_attributes = attributes.split(",")
attribute_clauses = []
for attr in filtered_attributes:
attribute_clauses.append(Event.data.cast("text") % f'*:"{attr}"*')
attribute_clause = reduce(operator.or_, attribute_clauses)
clauses.append(attribute_clause)
if recognized_license_plate != "all":
filtered_recognized_license_plates = recognized_license_plate.split(",")
@@ -360,8 +342,7 @@ def events(
@router.get(
"/events/explore",
response_model=list[EventResponse],
dependencies=[Depends(allow_any_authenticated())],
summary="Get summary of objects",
summary="Get summary of objects.",
description="""Gets a summary of objects from the database.
Returns a list of objects with a max of `limit` objects for each label.
""",
@@ -453,8 +434,7 @@ def events_explore(
@router.get(
"/event_ids",
response_model=list[EventResponse],
dependencies=[Depends(allow_any_authenticated())],
summary="Get events by ids",
summary="Get events by ids.",
description="""Gets events by a list of ids.
Returns a list of events.
""",
@@ -487,8 +467,7 @@ async def event_ids(ids: str, request: Request):
@router.get(
"/events/search",
dependencies=[Depends(allow_any_authenticated())],
summary="Search events",
summary="Search events.",
description="""Searches for events in the database.
Returns a list of events.
""",
@@ -507,8 +486,6 @@ def events_search(
# Filters
cameras = params.cameras
labels = params.labels
sub_labels = params.sub_labels
attributes = params.attributes
zones = params.zones
after = params.after
before = params.before
@@ -583,38 +560,6 @@ def events_search(
if labels != "all":
event_filters.append((Event.label << labels.split(",")))
if sub_labels != "all":
# use matching so joined sub labels are included
# for example a sub label 'bob' would get events
# with sub labels 'bob' and 'bob, john'
sub_label_clauses = []
filtered_sub_labels = sub_labels.split(",")
if "None" in filtered_sub_labels:
filtered_sub_labels.remove("None")
sub_label_clauses.append((Event.sub_label.is_null()))
for label in filtered_sub_labels:
sub_label_clauses.append(
(Event.sub_label.cast("text") == label)
) # include exact matches
# include this label when part of a list
sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label},*"))
sub_label_clauses.append((Event.sub_label.cast("text") % f"*, {label}*"))
event_filters.append((reduce(operator.or_, sub_label_clauses)))
if attributes != "all":
# Custom classification results are stored as data[model_name] = result_value
filtered_attributes = attributes.split(",")
attribute_clauses = []
for attr in filtered_attributes:
attribute_clauses.append(Event.data.cast("text") % f'*:"{attr}"*')
event_filters.append(reduce(operator.or_, attribute_clauses))
if zones != "all":
zone_clauses = []
filtered_zones = zones.split(",")
@@ -862,12 +807,13 @@ def events_search(
return JSONResponse(content=processed_events)
@router.get("/events/summary", dependencies=[Depends(allow_any_authenticated())])
@router.get("/events/summary")
def events_summary(
params: EventsSummaryQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
tz_name = params.timezone
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(tz_name)
has_clip = params.has_clip
has_snapshot = params.has_snapshot
@@ -882,98 +828,39 @@ def events_summary(
if len(clauses) == 0:
clauses.append((True))
time_range_query = (
groups = (
Event.select(
fn.MIN(Event.start_time).alias("min_time"),
fn.MAX(Event.start_time).alias("max_time"),
Event.camera,
Event.label,
Event.sub_label,
Event.data,
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Event.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("day"),
Event.zones,
fn.COUNT(Event.id).alias("count"),
)
.where(reduce(operator.and_, clauses) & (Event.camera << allowed_cameras))
.dicts()
.get()
.group_by(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
(Event.start_time + seconds_offset).cast("int") / (3600 * 24),
Event.zones,
)
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
if min_time is None or max_time is None:
return JSONResponse(content=[])
dst_periods = get_dst_transitions(tz_name, min_time, max_time)
grouped: dict[tuple, dict] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_groups = (
Event.select(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day"),
Event.zones,
fn.COUNT(Event.id).alias("count"),
)
.where(
reduce(operator.and_, clauses)
& (Event.camera << allowed_cameras)
& (Event.start_time >= period_start)
& (Event.start_time <= period_end)
)
.group_by(
Event.camera,
Event.label,
Event.sub_label,
Event.data,
(Event.start_time + period_offset).cast("int") / (3600 * 24),
Event.zones,
)
.namedtuples()
)
for g in period_groups:
key = (
g.camera,
g.label,
g.sub_label,
json.dumps(g.data, sort_keys=True) if g.data is not None else None,
g.day,
json.dumps(g.zones, sort_keys=True) if g.zones is not None else None,
)
if key in grouped:
grouped[key]["count"] += int(g.count or 0)
else:
grouped[key] = {
"camera": g.camera,
"label": g.label,
"sub_label": g.sub_label,
"data": g.data,
"day": g.day,
"zones": g.zones,
"count": int(g.count or 0),
}
return JSONResponse(content=sorted(grouped.values(), key=lambda x: x["day"]))
return JSONResponse(content=[e for e in groups.dicts()])
@router.get(
"/events/{event_id}",
response_model=EventResponse,
dependencies=[Depends(allow_any_authenticated())],
summary="Get event by id",
summary="Get event by id.",
description="Gets an event by its id.",
)
async def event(event_id: str, request: Request):
@@ -1016,8 +903,7 @@ def set_retain(event_id: str):
@router.post(
"/events/{event_id}/plus",
response_model=EventUploadPlusResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Send event to Frigate+",
summary="Send event to Frigate+.",
description="""Sends an event to Frigate+.
Returns a success message or an error if the event is not found.
""",
@@ -1157,7 +1043,6 @@ async def send_to_plus(request: Request, event_id: str, body: SubmitPlusBody = N
@router.put(
"/events/{event_id}/false_positive",
response_model=EventUploadPlusResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Submit false positive to Frigate+",
description="""Submit an event as a false positive to Frigate+.
This endpoint is the same as the standard Frigate+ submission endpoint,
@@ -1256,7 +1141,7 @@ async def false_positive(request: Request, event_id: str):
"/events/{event_id}/retain",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Stop event from being retained indefinitely",
summary="Stop event from being retained indefinitely.",
description="""Stops an event from being retained indefinitely.
Returns a success message or an error if the event is not found.
NOTE: This is a legacy endpoint and is not supported in the frontend.
@@ -1285,7 +1170,7 @@ async def delete_retain(event_id: str, request: Request):
"/events/{event_id}/sub_label",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Set event sub label",
summary="Set event sub label.",
description="""Sets an event's sub label.
Returns a success message or an error if the event is not found.
""",
@@ -1344,7 +1229,7 @@ async def set_sub_label(
"/events/{event_id}/recognized_license_plate",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Set event license plate",
summary="Set event license plate.",
description="""Sets an event's license plate.
Returns a success message or an error if the event is not found.
""",
@@ -1400,112 +1285,11 @@ async def set_plate(
)
@router.post(
"/events/{event_id}/attributes",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Set custom classification attributes",
description=(
"Sets an event's custom classification attributes for all attribute-type "
"models that apply to the event's object type."
),
)
async def set_attributes(
request: Request,
event_id: str,
body: EventsAttributesBody,
):
try:
event: Event = Event.get(Event.id == event_id)
await require_camera_access(event.camera, request=request)
except DoesNotExist:
return JSONResponse(
content=({"success": False, "message": f"Event {event_id} not found."}),
status_code=404,
)
object_type = event.label
selected_attributes = set(body.attributes or [])
applied_updates: list[dict[str, str | float | None]] = []
for (
model_key,
model_config,
) in request.app.frigate_config.classification.custom.items():
# Only apply to enabled attribute classifiers that target this object type
if (
not model_config.enabled
or not model_config.object_config
or model_config.object_config.classification_type
!= ObjectClassificationType.attribute
or object_type not in (model_config.object_config.objects or [])
):
continue
# Get available labels from dataset directory
dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(model_key), "dataset")
available_labels = set()
if os.path.exists(dataset_dir):
for category_name in os.listdir(dataset_dir):
category_dir = os.path.join(dataset_dir, category_name)
if os.path.isdir(category_dir):
available_labels.add(category_name)
if not available_labels:
logger.warning(
"No dataset found for custom attribute model %s at %s",
model_key,
dataset_dir,
)
continue
# Find all selected attributes that apply to this model
model_name = model_config.name or model_key
matching_attrs = selected_attributes & available_labels
if matching_attrs:
# Publish updates for each selected attribute
for attr in matching_attrs:
request.app.event_metadata_updater.publish(
(event_id, model_name, attr, 1.0),
EventMetadataTypeEnum.attribute.value,
)
applied_updates.append(
{"model": model_name, "label": attr, "score": 1.0}
)
else:
# Clear this model's attribute
request.app.event_metadata_updater.publish(
(event_id, model_name, None, None),
EventMetadataTypeEnum.attribute.value,
)
applied_updates.append({"model": model_name, "label": None, "score": None})
if len(applied_updates) == 0:
return JSONResponse(
content={
"success": False,
"message": "No matching attributes found for this object type.",
},
status_code=400,
)
return JSONResponse(
content={
"success": True,
"message": f"Updated {len(applied_updates)} attribute(s)",
"applied": applied_updates,
},
status_code=200,
)
@router.post(
"/events/{event_id}/description",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Set event description",
summary="Set event description.",
description="""Sets an event's description.
Returns a success message or an error if the event is not found.
""",
@@ -1561,7 +1345,7 @@ async def set_description(
"/events/{event_id}/description/regenerate",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Regenerate event description",
summary="Regenerate event description.",
description="""Regenerates an event's description.
Returns a success message or an error if the event is not found.
""",
@@ -1613,8 +1397,8 @@ async def regenerate_description(
@router.post(
"/description/generate",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Generate description embedding",
# dependencies=[Depends(require_role(["admin"]))],
summary="Generate description embedding.",
description="""Generates an embedding for an event's description.
Returns a success message or an error if the event is not found.
""",
@@ -1679,7 +1463,7 @@ async def delete_single_event(event_id: str, request: Request) -> dict:
"/events/{event_id}",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete event",
summary="Delete event.",
description="""Deletes an event from the database.
Returns a success message or an error if the event is not found.
""",
@@ -1694,7 +1478,7 @@ async def delete_event(request: Request, event_id: str):
"/events/",
response_model=EventMultiDeleteResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete events",
summary="Delete events.",
description="""Deletes a list of events from the database.
Returns a success message or an error if the events are not found.
""",
@@ -1728,7 +1512,7 @@ async def delete_events(request: Request, body: EventsDeleteBody):
"/events/{camera_name}/{label}/create",
response_model=EventCreateResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="Create manual event",
summary="Create manual event.",
description="""Creates a manual event in the database.
Returns a success message or an error if the event is not found.
NOTES:
@@ -1770,7 +1554,7 @@ def create_event(
body.score,
body.sub_label,
body.duration,
"api",
body.source_type,
body.draw,
),
EventMetadataTypeEnum.manual_event_create.value,
@@ -1792,7 +1576,7 @@ def create_event(
"/events/{event_id}/end",
response_model=GenericResponse,
dependencies=[Depends(require_role(["admin"]))],
summary="End manual event",
summary="End manual event.",
description="""Ends a manual event.
Returns a success message or an error if the event is not found.
NOTE: This should only be used for manual events.
@@ -1802,27 +1586,10 @@ async def end_event(request: Request, event_id: str, body: EventsEndBody):
try:
event: Event = Event.get(Event.id == event_id)
await require_camera_access(event.camera, request=request)
if body.end_time is not None and body.end_time < event.start_time:
return JSONResponse(
content=(
{
"success": False,
"message": f"end_time ({body.end_time}) cannot be before start_time ({event.start_time}).",
}
),
status_code=400,
)
end_time = body.end_time or datetime.datetime.now().timestamp()
request.app.event_metadata_updater.publish(
(event_id, end_time), EventMetadataTypeEnum.manual_event_end.value
)
except DoesNotExist:
return JSONResponse(
content=({"success": False, "message": f"Event {event_id} not found."}),
status_code=404,
)
except Exception:
return JSONResponse(
content=(
@@ -1841,7 +1608,7 @@ async def end_event(request: Request, event_id: str, body: EventsEndBody):
"/trigger/embedding",
response_model=dict,
dependencies=[Depends(require_role(["admin"]))],
summary="Create trigger embedding",
summary="Create trigger embedding.",
description="""Creates a trigger embedding for a specific trigger.
Returns a success message or an error if the trigger is not found.
""",
@@ -1898,40 +1665,37 @@ def create_trigger_embedding(
if event.data.get("type") != "object":
return
# Get the thumbnail
thumbnail = get_event_thumbnail_bytes(event)
if thumbnail is None:
return JSONResponse(
content={
"success": False,
"message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
},
status_code=400,
if thumbnail := get_event_thumbnail_bytes(event):
cursor = context.db.execute_sql(
"""
SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
""",
[body.data],
)
# Try to reuse existing embedding from database
cursor = context.db.execute_sql(
"""
SELECT thumbnail_embedding FROM vec_thumbnails WHERE id = ?
""",
[body.data],
)
row = cursor.fetchone() if cursor else None
row = cursor.fetchone() if cursor else None
if row:
query_embedding = row[0]
embedding = np.frombuffer(query_embedding, dtype=np.float32)
if row:
query_embedding = row[0]
embedding = np.frombuffer(query_embedding, dtype=np.float32)
else:
# Generate new embedding
# Extract valid thumbnail
thumbnail = get_event_thumbnail_bytes(event)
if thumbnail is None:
return JSONResponse(
content={
"success": False,
"message": f"Failed to get thumbnail for {body.data} for {body.type} trigger",
},
status_code=400,
)
embedding = context.generate_image_embedding(
body.data, (base64.b64encode(thumbnail).decode("ASCII"))
)
if embedding is None or (
isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
):
if embedding is None:
return JSONResponse(
content={
"success": False,
@@ -1959,8 +1723,9 @@ def create_trigger_embedding(
logger.debug(
f"Writing thumbnail for trigger with data {body.data} in {camera_name}."
)
except Exception:
logger.exception(
except Exception as e:
logger.error(e.with_traceback())
logger.error(
f"Failed to write thumbnail for trigger with data {body.data} in {camera_name}"
)
@@ -1984,8 +1749,8 @@ def create_trigger_embedding(
status_code=200,
)
except Exception:
logger.exception("Error creating trigger embedding")
except Exception as e:
logger.error(e.with_traceback())
return JSONResponse(
content={
"success": False,
@@ -1999,7 +1764,7 @@ def create_trigger_embedding(
"/trigger/embedding/{camera_name}/{name}",
response_model=dict,
dependencies=[Depends(require_role(["admin"]))],
summary="Update trigger embedding",
summary="Update trigger embedding.",
description="""Updates a trigger embedding for a specific trigger.
Returns a success message or an error if the trigger is not found.
""",
@@ -2066,9 +1831,7 @@ def update_trigger_embedding(
body.data, (base64.b64encode(thumbnail).decode("ASCII"))
)
if embedding is None or (
isinstance(embedding, (list, np.ndarray)) and len(embedding) == 0
):
if embedding is None:
return JSONResponse(
content={
"success": False,
@@ -2096,8 +1859,9 @@ def update_trigger_embedding(
logger.debug(
f"Deleted thumbnail for trigger with data {trigger.data} in {camera_name}."
)
except Exception:
logger.exception(
except Exception as e:
logger.error(e.with_traceback())
logger.error(
f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera_name}"
)
@@ -2136,8 +1900,9 @@ def update_trigger_embedding(
logger.debug(
f"Writing thumbnail for trigger with data {body.data} in {camera_name}."
)
except Exception:
logger.exception(
except Exception as e:
logger.error(e.with_traceback())
logger.error(
f"Failed to write thumbnail for trigger with data {body.data} in {camera_name}"
)
@@ -2149,8 +1914,8 @@ def update_trigger_embedding(
status_code=200,
)
except Exception:
logger.exception("Error updating trigger embedding")
except Exception as e:
logger.error(e.with_traceback())
return JSONResponse(
content={
"success": False,
@@ -2164,7 +1929,7 @@ def update_trigger_embedding(
"/trigger/embedding/{camera_name}/{name}",
response_model=dict,
dependencies=[Depends(require_role(["admin"]))],
summary="Delete trigger embedding",
summary="Delete trigger embedding.",
description="""Deletes a trigger embedding for a specific trigger.
Returns a success message or an error if the trigger is not found.
""",
@@ -2210,8 +1975,9 @@ def delete_trigger_embedding(
logger.debug(
f"Deleted thumbnail for trigger with data {trigger.data} in {camera_name}."
)
except Exception:
logger.exception(
except Exception as e:
logger.error(e.with_traceback())
logger.error(
f"Failed to delete thumbnail for trigger with data {trigger.data} in {camera_name}"
)
@@ -2223,8 +1989,8 @@ def delete_trigger_embedding(
status_code=200,
)
except Exception:
logger.exception("Error deleting trigger embedding")
except Exception as e:
logger.error(e.with_traceback())
return JSONResponse(
content={
"success": False,
@@ -2238,7 +2004,7 @@ def delete_trigger_embedding(
"/triggers/status/{camera_name}",
response_model=dict,
dependencies=[Depends(require_role(["admin"]))],
summary="Get triggers status",
summary="Get triggers status.",
description="""Gets the status of all triggers for a specific camera.
Returns a success message or an error if the camera is not found.
""",

View File

@@ -9,12 +9,10 @@ from typing import List
import psutil
from fastapi import APIRouter, Depends, Request
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filepath
from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access,
require_role,
@@ -28,14 +26,14 @@ from frigate.api.defs.response.export_response import (
)
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import CLIPS_DIR, EXPORT_DIR
from frigate.const import EXPORT_DIR
from frigate.models import Export, Previews, Recordings
from frigate.record.export import (
PlaybackFactorEnum,
PlaybackSourceEnum,
RecordingExporter,
)
from frigate.util.time import is_current_hour
from frigate.util.builtin import is_current_hour
logger = logging.getLogger(__name__)
@@ -45,7 +43,6 @@ router = APIRouter(tags=[Tags.export])
@router.get(
"/exports",
response_model=ExportsResponse,
dependencies=[Depends(allow_any_authenticated())],
summary="Get exports",
description="""Gets all exports from the database for cameras the user has access to.
Returns a list of exports ordered by date (most recent first).""",
@@ -91,14 +88,7 @@ def export_recording(
playback_factor = body.playback
playback_source = body.source
friendly_name = body.name
existing_image = sanitize_filepath(body.image_path) if body.image_path else None
# Ensure that existing_image is a valid path
if existing_image and not existing_image.startswith(CLIPS_DIR):
return JSONResponse(
content=({"success": False, "message": "Invalid image path"}),
status_code=400,
)
existing_image = body.image_path
if playback_source == "recordings":
recordings_count = (
@@ -274,7 +264,6 @@ async def export_delete(event_id: str, request: Request):
@router.get(
"/exports/{export_id}",
response_model=ExportModel,
dependencies=[Depends(allow_any_authenticated())],
summary="Get a single export",
description="""Gets a specific export by ID. The user must have access to the camera
associated with the export.""",

View File

@@ -2,7 +2,7 @@ import logging
import re
from typing import Optional
from fastapi import Depends, FastAPI, Request
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from joserfc.jwk import OctKey
from playhouse.sqliteq import SqliteQueueDatabase
@@ -24,7 +24,7 @@ from frigate.api import (
preview,
review,
)
from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default
from frigate.api.auth import get_jwt_secret, limiter
from frigate.comms.event_metadata_updater import (
EventMetadataPublisher,
)
@@ -62,15 +62,11 @@ def create_fastapi_app(
stats_emitter: StatsEmitter,
event_metadata_updater: EventMetadataPublisher,
config_publisher: CameraConfigUpdatePublisher,
enforce_default_admin: bool = True,
):
logger.info("Starting FastAPI app")
app = FastAPI(
debug=False,
swagger_ui_parameters={"apisSorter": "alpha", "operationsSorter": "alpha"},
dependencies=[Depends(require_admin_by_default())]
if enforce_default_admin
else [],
)
# update the request_address with the x-forwarded-for header from nginx

View File

@@ -22,11 +22,7 @@ from pathvalidate import sanitize_filename
from peewee import DoesNotExist, fn, operator
from tzlocal import get_localzone_name
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access,
)
from frigate.api.auth import get_allowed_cameras_for_filter, require_camera_access
from frigate.api.defs.query.media_query_parameters import (
Extension,
MediaEventsSnapshotQueryParams,
@@ -48,9 +44,9 @@ from frigate.const import (
)
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.builtin import get_tz_modifiers
from frigate.util.image import get_image_from_recording
from frigate.util.time import get_dst_transitions
from frigate.util.path import get_event_thumbnail_bytes
logger = logging.getLogger(__name__)
@@ -397,7 +393,7 @@ async def submit_recording_snapshot_to_plus(
)
@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
@router.get("/recordings/storage")
def get_recordings_storage_usage(request: Request):
recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
"storage"
@@ -421,13 +417,14 @@ def get_recordings_storage_usage(request: Request):
return JSONResponse(content=camera_usages)
@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
@router.get("/recordings/summary")
def all_recordings_summary(
request: Request,
params: MediaRecordingsSummaryQueryParams = Depends(),
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
"""Returns true/false by day indicating if recordings exist"""
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
cameras = params.cameras
if cameras != "all":
@@ -435,72 +432,43 @@ def all_recordings_summary(
filtered = requested.intersection(allowed_cameras)
if not filtered:
return JSONResponse(content={})
camera_list = list(filtered)
cameras = ",".join(filtered)
else:
camera_list = allowed_cameras
cameras = allowed_cameras
time_range_query = (
query = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time + seconds_offset,
"unixepoch",
hour_modifier,
minute_modifier,
),
).alias("day")
)
.where(Recordings.camera << camera_list)
.dicts()
.get()
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time + seconds_offset,
"unixepoch",
hour_modifier,
minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
if params.cameras != "all":
query = query.where(Recordings.camera << cameras.split(","))
if min_time is None or max_time is None:
return JSONResponse(content={})
recording_days = query.namedtuples()
days = {day.day: True for day in recording_days}
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
days: dict[str, bool] = {}
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
period_query = (
Recordings.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day")
)
.where(
(Recordings.camera << camera_list)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
)
)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
for g in period_query:
days[g.day] = True
return JSONResponse(content=dict(sorted(days.items())))
return JSONResponse(content=days)
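# Editorial sketch: how a DST period's UTC offset in seconds becomes the SQLite
# datetime modifiers used by the summary endpoints above. The offsets below are
# illustrative values, not real timezone lookups.
def _offset_to_modifiers(period_offset: int) -> tuple[str, str]:
    hours_offset = int(period_offset / 60 / 60)
    minutes_offset = int(period_offset / 60 - hours_offset * 60)
    return f"{hours_offset} hour", f"{minutes_offset} minute"
assert _offset_to_modifiers(-18000) == ("-5 hour", "0 minute")  # e.g. UTC-5
assert _offset_to_modifiers(19800) == ("5 hour", "30 minute")   # e.g. UTC+5:30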
@router.get(
@@ -508,103 +476,61 @@ def all_recordings_summary(
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
"""Returns hourly summary for recordings of given camera"""
time_range_query = (
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(timezone)
recording_groups = (
Recordings.select(
fn.MIN(Recordings.start_time).alias("min_time"),
fn.MAX(Recordings.start_time).alias("max_time"),
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(Recordings.camera == camera_name)
.dicts()
.get()
.group_by((Recordings.start_time + seconds_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
days: dict[str, dict] = {}
if min_time is None or max_time is None:
return JSONResponse(content=list(days.values()))
dst_periods = get_dst_transitions(timezone, min_time, max_time)
for period_start, period_end, period_offset in dst_periods:
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
recording_groups = (
Recordings.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Recordings.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.SUM(Recordings.duration).alias("duration"),
fn.SUM(Recordings.motion).alias("motion"),
fn.SUM(Recordings.objects).alias("objects"),
)
.where(
(Recordings.camera == camera_name)
& (Recordings.end_time >= period_start)
& (Recordings.start_time <= period_end)
)
.group_by((Recordings.start_time + period_offset).cast("int") / 3600)
.order_by(Recordings.start_time.desc())
.namedtuples()
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time, "unixepoch", hour_modifier, minute_modifier
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.group_by((Event.start_time + seconds_offset).cast("int") / 3600)
.namedtuples()
)
event_groups = (
Event.select(
fn.strftime(
"%Y-%m-%d %H",
fn.datetime(
Event.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("hour"),
fn.COUNT(Event.id).alias("count"),
)
.where(Event.camera == camera_name, Event.has_clip)
.where(
(Event.start_time >= period_start) & (Event.start_time <= period_end)
)
.group_by((Event.start_time + period_offset).cast("int") / 3600)
.namedtuples()
)
event_map = {g.hour: g.count for g in event_groups}
event_map = {g.hour: g.count for g in event_groups}
days = {}
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day in days:
# merge counts if already present (edge-case at DST boundary)
days[day]["events"] += events_count or 0
days[day]["hours"].append(hour_data)
else:
days[day] = {
"events": events_count or 0,
"hours": [hour_data],
"day": day,
}
for recording_group in recording_groups:
parts = recording_group.hour.split()
hour = parts[1]
day = parts[0]
events_count = event_map.get(recording_group.hour, 0)
hour_data = {
"hour": hour,
"events": events_count,
"motion": recording_group.motion,
"objects": recording_group.objects,
"duration": round(recording_group.duration),
}
if day not in days:
days[day] = {"events": events_count, "hours": [hour_data], "day": day}
else:
days[day]["events"] += events_count
days[day]["hours"].append(hour_data)
return JSONResponse(content=list(days.values()))
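
The group_by expressions above bucket rows into local hours by integer-dividing offset-adjusted epoch seconds by 3600, and the loop then splits each "YYYY-MM-DD HH" key into day and hour. A rough Python equivalent of that bucketing, assuming the offset is a whole number of seconds east of UTC (the function name is illustrative, not Frigate code):

# Illustrative sketch only: grouping epoch timestamps into local-hour buckets,
# roughly what the SQL group_by/strftime pair above does per DST period.
from collections import defaultdict
from datetime import datetime, timedelta, timezone

def bucket_by_local_hour(timestamps: list[float], offset_seconds: int) -> dict[str, int]:
    tz = timezone(timedelta(seconds=offset_seconds))
    buckets: dict[str, int] = defaultdict(int)
    for ts in timestamps:
        buckets[datetime.fromtimestamp(ts, tz=tz).strftime("%Y-%m-%d %H")] += 1
    return dict(buckets)

base = datetime(2025, 1, 15, 14, 0, tzinfo=timezone.utc).timestamp()
print(bucket_by_local_hour([base, base + 600], -18000))  # both land in local hour 09 at UTC-5
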
@@ -639,11 +565,7 @@ async def recordings(
return JSONResponse(content=list(recordings))
@router.get(
"/recordings/unavailable",
response_model=list[dict],
dependencies=[Depends(allow_any_authenticated())],
)
@router.get("/recordings/unavailable", response_model=list[dict])
async def no_recordings(
request: Request,
params: MediaRecordingsAvailabilityQueryParams = Depends(),
@@ -667,7 +589,7 @@ async def no_recordings(
)
scale = params.scale
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
clauses = [(Recordings.start_time > after) & (Recordings.end_time < before)]
if cameras != "all":
camera_list = cameras.split(",")
clauses.append((Recordings.camera << camera_list))
@@ -686,39 +608,33 @@ async def no_recordings(
# Convert recordings to list of (start, end) tuples
recordings = [(r["start_time"], r["end_time"]) for r in data]
# Iterate through time segments and check if each has any recording
no_recording_segments = []
# Generate all time segments
current = after
current_gap_start = None
no_recording_segments = []
current_start = None
while current < before:
segment_end = min(current + scale, before)
# Check if this segment overlaps with any recording
segment_end = current + scale
# Check if segment overlaps with any recording
has_recording = any(
rec_start < segment_end and rec_end > current
for rec_start, rec_end in recordings
start <= segment_end and end >= current for start, end in recordings
)
if not has_recording:
# This segment has no recordings
if current_gap_start is None:
current_gap_start = current # Start a new gap
if current_start is None:
current_start = current # Start a new gap
else:
# This segment has recordings
if current_gap_start is not None:
if current_start is not None:
# End the current gap and append it
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(current)}
{"start_time": int(current_start), "end_time": int(current)}
)
current_gap_start = None
current_start = None
current = segment_end
# Append the last gap if it exists
if current_gap_start is not None:
if current_start is not None:
no_recording_segments.append(
{"start_time": int(current_gap_start), "end_time": int(before)}
{"start_time": int(current_start), "end_time": int(before)}
)
return JSONResponse(content=no_recording_segments)
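
Both versions of the scan above walk the requested window in fixed-size steps and coalesce consecutive uncovered steps into gaps; the rewrite mainly renames current_start to current_gap_start, clamps the final step at before, and uses strict overlap comparisons. A standalone sketch of that scan (names are illustrative, not Frigate code):

# Illustrative sketch only: find segments of the window with no recordings.
def find_gaps(recordings: list[tuple[float, float]], after: float, before: float, scale: float) -> list[dict]:
    gaps, gap_start, current = [], None, after
    while current < before:
        segment_end = min(current + scale, before)
        covered = any(start < segment_end and end > current for start, end in recordings)
        if not covered and gap_start is None:
            gap_start = current              # open a new gap
        elif covered and gap_start is not None:
            gaps.append({"start_time": int(gap_start), "end_time": int(current)})
            gap_start = None                 # close the gap at the first covered step
        current = segment_end
    if gap_start is not None:                # trailing gap runs to the end of the window
        gaps.append({"start_time": int(gap_start), "end_time": int(before)})
    return gaps

# A recording covering 100-200 inside a 0-300 window, scanned in 60s steps:
print(find_gaps([(100, 200)], 0, 300, 60))   # [{'start_time': 0, 'end_time': 60}, {'start_time': 240, 'end_time': 300}]
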
@@ -770,15 +686,6 @@ async def recording_clip(
.order_by(Recordings.start_time.asc())
)
if recordings.count() == 0:
return JSONResponse(
content={
"success": False,
"message": "No recordings found for the specified time range",
},
status_code=400,
)
file_name = sanitize_filename(f"playlist_{camera_name}_{start_ts}-{end_ts}.txt")
file_path = os.path.join(CACHE_DIR, file_name)
with open(file_path, "w") as file:
@@ -837,19 +744,7 @@ async def recording_clip(
dependencies=[Depends(require_camera_access)],
description="Returns an HLS playlist for the specified timestamp-range on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
async def vod_ts(
camera_name: str,
start_ts: float,
end_ts: float,
force_discontinuity: bool = False,
):
logger.debug(
"VOD: Generating VOD for %s from %s to %s with force_discontinuity=%s",
camera_name,
start_ts,
end_ts,
force_discontinuity,
)
async def vod_ts(camera_name: str, start_ts: float, end_ts: float):
recordings = (
Recordings.select(
Recordings.path,
@@ -869,19 +764,10 @@ async def vod_ts(
clips = []
durations = []
min_duration_ms = 100 # Minimum 100ms to ensure at least one video frame
max_duration_ms = MAX_SEGMENT_DURATION * 1000
recording: Recordings
for recording in recordings:
logger.debug(
"VOD: processing recording: %s start=%s end=%s duration=%s",
recording.path,
recording.start_time,
recording.end_time,
recording.duration,
)
clip = {"type": "source", "path": recording.path}
duration = int(recording.duration * 1000)
@@ -890,35 +776,19 @@ async def vod_ts(
inpoint = int((start_ts - recording.start_time) * 1000)
clip["clipFrom"] = inpoint
duration -= inpoint
logger.debug(
"VOD: applied clipFrom %sms to %s",
inpoint,
recording.path,
)
# adjust end if recording.end_time is after end_ts
if recording.end_time > end_ts:
duration -= int((recording.end_time - end_ts) * 1000)
if duration < min_duration_ms:
# skip if the clip has no valid duration (too short to contain frames)
logger.debug(
"VOD: skipping recording %s - resulting duration %sms too short",
recording.path,
duration,
)
if duration <= 0:
# skip if the clip has no valid duration
continue
if min_duration_ms <= duration < max_duration_ms:
if 0 < duration < max_duration_ms:
clip["keyFrameDurations"] = [duration]
clips.append(clip)
durations.append(duration)
logger.debug(
"VOD: added clip %s duration_ms=%s clipFrom=%s",
recording.path,
duration,
clip.get("clipFrom"),
)
else:
logger.warning(f"Recording clip is missing or empty: {recording.path}")
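
The loop above trims each recording to the requested window: a clipFrom offset drops the portion before start_ts, the tail beyond end_ts is subtracted from the duration, and results shorter than the 100 ms minimum are skipped. A small worked sketch of that arithmetic (the helper name is illustrative):

# Illustrative sketch only: trimming one recording segment to the requested
# window, in milliseconds throughout.
def trim_segment(rec_start: float, rec_end: float, duration_s: float,
                 start_ts: float, end_ts: float, min_duration_ms: int = 100):
    duration = int(duration_s * 1000)
    clip_from = None
    if rec_start < start_ts:
        clip_from = int((start_ts - rec_start) * 1000)   # drop the head before start_ts
        duration -= clip_from
    if rec_end > end_ts:
        duration -= int((rec_end - end_ts) * 1000)       # drop the tail after end_ts
    if duration < min_duration_ms:
        return None                                      # too short to contain a frame
    return clip_from, duration

# A 10s segment from t=95 to t=105, requested window 100-103:
print(trim_segment(95.0, 105.0, 10.0, 100.0, 103.0))     # (5000, 3000)
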
@@ -938,7 +808,7 @@ async def vod_ts(
return JSONResponse(
content={
"cache": hour_ago.timestamp() > start_ts,
"discontinuity": force_discontinuity,
"discontinuity": False,
"consistentSequenceMediaInfo": True,
"durations": durations,
"segment_duration": max(durations),
@@ -981,7 +851,6 @@ async def vod_hour(
@router.get(
"/vod/event/{event_id}",
dependencies=[Depends(allow_any_authenticated())],
description="Returns an HLS playlist for the specified object. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
async def vod_event(
@@ -1022,19 +891,6 @@ async def vod_event(
return vod_response
@router.get(
"/vod/clip/{camera_name}/start/{start_ts}/end/{end_ts}",
dependencies=[Depends(require_camera_access)],
description="Returns an HLS playlist for a timestamp range with HLS discontinuity enabled. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
async def vod_clip(
camera_name: str,
start_ts: float,
end_ts: float,
):
return await vod_ts(camera_name, start_ts, end_ts, force_discontinuity=True)
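
The new /vod/clip route simply delegates to vod_ts with force_discontinuity=True, so the generated playlist marks each recording boundary with an HLS discontinuity. A minimal request sketch; the host, port, /api prefix, and camera name are assumptions about a typical deployment, not taken from this diff:

# Illustrative sketch only: fetching the discontinuity-enabled clip playlist.
import requests

base = "http://frigate.local:5000/api"  # assumed deployment URL
url = f"{base}/vod/clip/front_door/start/1736949600/end/1736949900/master.m3u8"
print(requests.get(url).status_code)
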
@router.get(
"/events/{event_id}/snapshot.jpg",
description="Returns a snapshot image for the specified object id. NOTE: The query params only take effect while the event is in progress. Once the event has ended the snapshot configuration is used.",
@@ -1111,10 +967,7 @@ async def event_snapshot(
)
@router.get(
"/events/{event_id}/thumbnail.{extension}",
dependencies=[Depends(require_camera_access)],
)
@router.get("/events/{event_id}/thumbnail.{extension}")
async def event_thumbnail(
request: Request,
event_id: str,
@@ -1312,10 +1165,7 @@ def grid_snapshot(
)
@router.get(
"/events/{event_id}/snapshot-clean.webp",
dependencies=[Depends(require_camera_access)],
)
@router.get("/events/{event_id}/snapshot-clean.webp")
def event_snapshot_clean(request: Request, event_id: str, download: bool = False):
webp_bytes = None
try:
@@ -1439,9 +1289,7 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False
)
@router.get(
"/events/{event_id}/clip.mp4", dependencies=[Depends(require_camera_access)]
)
@router.get("/events/{event_id}/clip.mp4")
async def event_clip(
request: Request,
event_id: str,
@@ -1469,9 +1317,7 @@ async def event_clip(
)
@router.get(
"/events/{event_id}/preview.gif", dependencies=[Depends(require_camera_access)]
)
@router.get("/events/{event_id}/preview.gif")
def event_preview(request: Request, event_id: str):
try:
event: Event = Event.get(Event.id == event_id)
@@ -1824,7 +1670,7 @@ def preview_mp4(
)
@router.get("/review/{event_id}/preview", dependencies=[Depends(require_camera_access)])
@router.get("/review/{event_id}/preview")
def review_preview(
request: Request,
event_id: str,
@@ -1850,12 +1696,8 @@ def review_preview(
return preview_mp4(request, review.camera, start_ts, end_ts)
@router.get(
"/preview/{file_name}/thumbnail.jpg", dependencies=[Depends(require_camera_access)]
)
@router.get(
"/preview/{file_name}/thumbnail.webp", dependencies=[Depends(require_camera_access)]
)
@router.get("/preview/{file_name}/thumbnail.jpg")
@router.get("/preview/{file_name}/thumbnail.webp")
def preview_thumbnail(file_name: str):
"""Get a thumbnail from the cached preview frames."""
if len(file_name) > 1000:
@@ -1935,7 +1777,7 @@ async def label_clip(request: Request, camera_name: str, label: str):
try:
event = event_query.get()
return await event_clip(request, event.id, 0)
return await event_clip(request, event.id)
except DoesNotExist:
return JSONResponse(
content={"success": False, "message": "Event not found"}, status_code=404


@@ -5,12 +5,11 @@ import os
from typing import Any
from cryptography.hazmat.primitives import serialization
from fastapi import APIRouter, Depends, Request
from fastapi import APIRouter, Request
from fastapi.responses import JSONResponse
from peewee import DoesNotExist
from py_vapid import Vapid01, utils
from frigate.api.auth import allow_any_authenticated
from frigate.api.defs.tags import Tags
from frigate.const import CONFIG_DIR
from frigate.models import User
@@ -22,7 +21,6 @@ router = APIRouter(tags=[Tags.notifications])
@router.get(
"/notifications/pubkey",
dependencies=[Depends(allow_any_authenticated())],
summary="Get VAPID public key",
description="""Gets the VAPID public key for the notifications.
Returns the public key or an error if notifications are not enabled.
@@ -49,7 +47,6 @@ def get_vapid_pub_key(request: Request):
@router.post(
"/notifications/register",
dependencies=[Depends(allow_any_authenticated())],
summary="Register notifications",
description="""Registers a notifications subscription.
Returns a success message or an error if the subscription is not provided.


@@ -5,14 +5,10 @@ import os
from datetime import datetime, timedelta, timezone
import pytz
from fastapi import APIRouter, Depends, HTTPException
from fastapi import APIRouter, Depends
from fastapi.responses import JSONResponse
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
require_camera_access,
)
from frigate.api.auth import require_camera_access
from frigate.api.defs.response.preview_response import (
PreviewFramesResponse,
PreviewsResponse,
@@ -30,32 +26,19 @@ router = APIRouter(tags=[Tags.preview])
@router.get(
"/preview/{camera_name}/start/{start_ts}/end/{end_ts}",
response_model=PreviewsResponse,
dependencies=[Depends(allow_any_authenticated())],
dependencies=[Depends(require_camera_access)],
summary="Get preview clips for time range",
description="""Gets all preview clips for a specified camera and time range.
Returns a list of preview video clips that overlap with the requested time period,
ordered by start time. Use camera_name='all' to get previews from all cameras.
Returns an error if no previews are found.""",
)
def preview_ts(
camera_name: str,
start_ts: float,
end_ts: float,
allowed_cameras: list[str] = Depends(get_allowed_cameras_for_filter),
):
def preview_ts(camera_name: str, start_ts: float, end_ts: float):
"""Get all mp4 previews relevant for time period."""
if camera_name != "all":
if camera_name not in allowed_cameras:
raise HTTPException(status_code=403, detail="Access denied for camera")
camera_list = [camera_name]
camera_clause = Previews.camera == camera_name
else:
camera_list = allowed_cameras
if not camera_list:
return JSONResponse(
content={"success": False, "message": "No previews found."},
status_code=404,
)
camera_clause = True
previews = (
Previews.select(
@@ -70,7 +53,7 @@ def preview_ts(
| Previews.end_time.between(start_ts, end_ts)
| ((start_ts > Previews.start_time) & (end_ts < Previews.end_time))
)
.where(Previews.camera << camera_list)
.where(camera_clause)
.order_by(Previews.start_time.asc())
.dicts()
.iterator()
@@ -105,21 +88,14 @@ def preview_ts(
@router.get(
"/preview/{year_month}/{day}/{hour}/{camera_name}/{tz_name}",
response_model=PreviewsResponse,
dependencies=[Depends(allow_any_authenticated())],
dependencies=[Depends(require_camera_access)],
summary="Get preview clips for specific hour",
description="""Gets all preview clips for a specific hour in a given timezone.
Converts the provided date/time from the specified timezone to UTC and retrieves
all preview clips for that hour. Use camera_name='all' to get previews from all cameras.
The tz_name should be a timezone like 'America/New_York' (use commas instead of slashes).""",
)
def preview_hour(
year_month: str,
day: int,
hour: int,
camera_name: str,
tz_name: str,
allowed_cameras: list[str] = Depends(get_allowed_cameras_for_filter),
):
def preview_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: str):
"""Get all mp4 previews relevant for time period given the timezone"""
parts = year_month.split("-")
start_date = (
@@ -130,7 +106,7 @@ def preview_hour(
start_ts = start_date.timestamp()
end_ts = end_date.timestamp()
return preview_ts(camera_name, start_ts, end_ts, allowed_cameras)
return preview_ts(camera_name, start_ts, end_ts)
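
preview_hour converts the requested local hour in tz_name to a UTC epoch range and delegates to preview_ts. A rough equivalent of that conversion, assuming pytz-style timezone handling and the comma-for-slash convention noted in the route description (a sketch, not the exact code from the hunk above):

# Illustrative sketch only: local hour in tz_name -> UTC epoch range.
from datetime import datetime, timedelta
import pytz

def hour_to_utc_range(year_month: str, day: int, hour: int, tz_name: str) -> tuple[float, float]:
    year, month = map(int, year_month.split("-"))
    tz = pytz.timezone(tz_name.replace(",", "/"))  # path segments use commas instead of slashes
    start = tz.localize(datetime(year, month, day, hour)).astimezone(pytz.utc)
    return start.timestamp(), (start + timedelta(hours=1)).timestamp()

print(hour_to_utc_range("2025-01", 15, 9, "America,New_York"))  # 09:00 EST -> 14:00-15:00 UTC
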
@router.get(


@@ -14,7 +14,6 @@ from peewee import Case, DoesNotExist, IntegrityError, fn, operator
from playhouse.shortcuts import model_to_dict
from frigate.api.auth import (
allow_any_authenticated,
get_allowed_cameras_for_filter,
get_current_user,
require_camera_access,
@@ -37,18 +36,14 @@ from frigate.config import FrigateConfig
from frigate.embeddings import EmbeddingsContext
from frigate.models import Recordings, ReviewSegment, UserReviewStatus
from frigate.review.types import SeverityEnum
from frigate.util.time import get_dst_transitions
from frigate.util.builtin import get_tz_modifiers
logger = logging.getLogger(__name__)
router = APIRouter(tags=[Tags.review])
@router.get(
"/review",
response_model=list[ReviewSegmentResponse],
dependencies=[Depends(allow_any_authenticated())],
)
@router.get("/review", response_model=list[ReviewSegmentResponse])
async def review(
params: ReviewQueryParams = Depends(),
current_user: dict = Depends(get_current_user),
@@ -157,11 +152,7 @@ async def review(
return JSONResponse(content=[r for r in review_query])
@router.get(
"/review_ids",
response_model=list[ReviewSegmentResponse],
dependencies=[Depends(allow_any_authenticated())],
)
@router.get("/review_ids", response_model=list[ReviewSegmentResponse])
async def review_ids(request: Request, ids: str):
ids = ids.split(",")
@@ -195,11 +186,7 @@ async def review_ids(request: Request, ids: str):
)
@router.get(
"/review/summary",
response_model=ReviewSummaryResponse,
dependencies=[Depends(allow_any_authenticated())],
)
@router.get("/review/summary", response_model=ReviewSummaryResponse)
async def review_summary(
params: ReviewSummaryQueryParams = Depends(),
current_user: dict = Depends(get_current_user),
@@ -210,6 +197,7 @@ async def review_summary(
user_id = current_user["username"]
hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
cameras = params.cameras
@@ -341,144 +329,94 @@ async def review_summary(
)
clauses.append(reduce(operator.or_, label_clauses))
# Find the time range of available data
time_range_query = (
day_in_seconds = 60 * 60 * 24
last_month_query = (
ReviewSegment.select(
fn.MIN(ReviewSegment.start_time).alias("min_time"),
fn.MAX(ReviewSegment.start_time).alias("max_time"),
fn.strftime(
"%Y-%m-%d",
fn.datetime(
ReviewSegment.start_time,
"unixepoch",
hour_modifier,
minute_modifier,
),
).alias("day"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert),
1,
)
],
0,
)
).alias("total_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection),
1,
)
],
0,
)
).alias("total_detection"),
)
.left_outer_join(
UserReviewStatus,
on=(
(ReviewSegment.id == UserReviewStatus.review_segment)
& (UserReviewStatus.user_id == user_id)
),
)
.where(reduce(operator.and_, clauses) if clauses else True)
.dicts()
.get()
.group_by(
(ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds
)
.order_by(ReviewSegment.start_time.desc())
)
min_time = time_range_query.get("min_time")
max_time = time_range_query.get("max_time")
data = {
"last24Hours": last_24_query,
}
# If no data, return early
if min_time is None or max_time is None:
return JSONResponse(content=data)
# Get DST transition periods
dst_periods = get_dst_transitions(params.timezone, min_time, max_time)
day_in_seconds = 60 * 60 * 24
# Query each DST period separately with the correct offset
for period_start, period_end, period_offset in dst_periods:
# Calculate hour/minute modifiers for this period
hours_offset = int(period_offset / 60 / 60)
minutes_offset = int(period_offset / 60 - hours_offset * 60)
period_hour_modifier = f"{hours_offset} hour"
period_minute_modifier = f"{minutes_offset} minute"
# Build clauses including time range for this period
period_clauses = clauses.copy()
period_clauses.append(
(ReviewSegment.start_time >= period_start)
& (ReviewSegment.start_time <= period_end)
)
period_query = (
ReviewSegment.select(
fn.strftime(
"%Y-%m-%d",
fn.datetime(
ReviewSegment.start_time,
"unixepoch",
period_hour_modifier,
period_minute_modifier,
),
).alias("day"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection)
& (UserReviewStatus.has_been_reviewed == True),
1,
)
],
0,
)
).alias("reviewed_detection"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.alert),
1,
)
],
0,
)
).alias("total_alert"),
fn.SUM(
Case(
None,
[
(
(ReviewSegment.severity == SeverityEnum.detection),
1,
)
],
0,
)
).alias("total_detection"),
)
.left_outer_join(
UserReviewStatus,
on=(
(ReviewSegment.id == UserReviewStatus.review_segment)
& (UserReviewStatus.user_id == user_id)
),
)
.where(reduce(operator.and_, period_clauses))
.group_by(
(ReviewSegment.start_time + period_offset).cast("int") / day_in_seconds
)
.order_by(ReviewSegment.start_time.desc())
)
# Merge results from this period
for e in period_query.dicts().iterator():
day_key = e["day"]
if day_key in data:
# Merge counts if day already exists (edge case at DST boundary)
data[day_key]["reviewed_alert"] += e["reviewed_alert"] or 0
data[day_key]["reviewed_detection"] += e["reviewed_detection"] or 0
data[day_key]["total_alert"] += e["total_alert"] or 0
data[day_key]["total_detection"] += e["total_detection"] or 0
else:
data[day_key] = e
for e in last_month_query.dicts().iterator():
data[e["day"]] = e
return JSONResponse(content=data)
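
Because get_dst_transitions can split a calendar day across two query periods, the merge loop above adds the second period's counts onto the first instead of overwriting them. A small sketch of that merge (the row shape mirrors the aliases in the query; the helper name is illustrative):

# Illustrative sketch only: merging per-period day rows at a DST boundary.
def merge_day_rows(rows: list[dict]) -> dict[str, dict]:
    data: dict[str, dict] = {}
    for e in rows:
        day = e["day"]
        if day in data:
            for key in ("reviewed_alert", "reviewed_detection", "total_alert", "total_detection"):
                data[day][key] += e[key] or 0
        else:
            data[day] = dict(e)
    return data

rows = [
    {"day": "2025-11-02", "reviewed_alert": 1, "reviewed_detection": 0, "total_alert": 2, "total_detection": 3},
    {"day": "2025-11-02", "reviewed_alert": 0, "reviewed_detection": 1, "total_alert": 1, "total_detection": 0},
]
print(merge_day_rows(rows)["2025-11-02"])  # counts from both periods combined
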
@router.post(
"/reviews/viewed",
response_model=GenericResponse,
dependencies=[Depends(allow_any_authenticated())],
)
@router.post("/reviews/viewed", response_model=GenericResponse)
async def set_multiple_reviewed(
request: Request,
body: ReviewModifyMultipleBody,
@@ -577,9 +515,7 @@ def delete_reviews(body: ReviewModifyMultipleBody):
@router.get(
"/review/activity/motion",
response_model=list[ReviewActivityMotionResponse],
dependencies=[Depends(allow_any_authenticated())],
"/review/activity/motion", response_model=list[ReviewActivityMotionResponse]
)
def motion_activity(
params: ReviewActivityMotionQueryParams = Depends(),
@@ -663,11 +599,7 @@ def motion_activity(
return JSONResponse(content=normalized)
@router.get(
"/review/event/{event_id}",
response_model=ReviewSegmentResponse,
dependencies=[Depends(allow_any_authenticated())],
)
@router.get("/review/event/{event_id}", response_model=ReviewSegmentResponse)
async def get_review_from_event(request: Request, event_id: str):
try:
review = ReviewSegment.get(
@@ -682,11 +614,7 @@ async def get_review_from_event(request: Request, event_id: str):
)
@router.get(
"/review/{review_id}",
response_model=ReviewSegmentResponse,
dependencies=[Depends(allow_any_authenticated())],
)
@router.get("/review/{review_id}", response_model=ReviewSegmentResponse)
async def get_review(request: Request, review_id: str):
try:
review = ReviewSegment.get(ReviewSegment.id == review_id)
@@ -699,11 +627,7 @@ async def get_review(request: Request, review_id: str):
)
@router.delete(
"/review/{review_id}/viewed",
response_model=GenericResponse,
dependencies=[Depends(allow_any_authenticated())],
)
@router.delete("/review/{review_id}/viewed", response_model=GenericResponse)
async def set_not_reviewed(
review_id: str,
current_user: dict = Depends(get_current_user),
@@ -741,7 +665,6 @@ async def set_not_reviewed(
@router.post(
"/review/summarize/start/{start_ts}/end/{end_ts}",
dependencies=[Depends(allow_any_authenticated())],
description="Use GenAI to summarize review items over a period of time.",
)
def generate_review_summary(request: Request, start_ts: float, end_ts: float):


@@ -100,10 +100,6 @@ class FrigateApp:
)
if (
config.semantic_search.enabled
or any(
c.objects.genai.enabled or c.review.genai.enabled
for c in config.cameras.values()
)
or config.lpr.enabled
or config.face_recognition.enabled
or len(config.classification.custom) > 0
@@ -492,8 +488,6 @@ class FrigateApp:
}
).execute()
self.config.auth.admin_first_time_login = True
logger.info("********************************************************")
logger.info("********************************************************")
logger.info("*** Auth is enabled, but no users exist. ***")

Some files were not shown because too many files have changed in this diff