Mirror of https://github.com/blakeblackshear/frigate.git (synced 2026-01-17 09:48:48 -05:00)

Compare commits: dependabot/... vs llama.cpp (19 commits)
| SHA1 |
| --- |
| 5486dc581c |
| 1ff6921d33 |
| d9f8e603c9 |
| 594a706347 |
| e5fec56893 |
| f1a19128ed |
| a77b0a7c4b |
| 1c95eb2c39 |
| 26744efb1e |
| aa0b082184 |
| 7fb8d9b050 |
| b8bc98a423 |
| f9e06bb7b7 |
| 7cc16161b3 |
| 08311a6ee2 |
| a08c044144 |
| 5cced22f65 |
| b962c95725 |
| 0cbec25494 |
.github/workflows/pull_request.yml (vendored, 8 changes)

```diff
@@ -19,9 +19,9 @@ jobs:
       - uses: actions/checkout@v6
         with:
           persist-credentials: false
-      - uses: actions/setup-node@v6
+      - uses: actions/setup-node@master
         with:
-          node-version: 20.x
+          node-version: 16.x
       - run: npm install
         working-directory: ./web
       - name: Lint
@@ -35,7 +35,7 @@ jobs:
       - uses: actions/checkout@v6
         with:
           persist-credentials: false
-      - uses: actions/setup-node@v6
+      - uses: actions/setup-node@master
         with:
           node-version: 20.x
       - run: npm install
@@ -78,7 +78,7 @@ jobs:
         uses: actions/checkout@v6
         with:
           persist-credentials: false
-      - uses: actions/setup-node@v6
+      - uses: actions/setup-node@master
         with:
           node-version: 20.x
       - name: Install devcontainer cli
```
Makefile (2 changes)

```diff
@@ -1,7 +1,7 @@
 default_target: local

 COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
-VERSION = 0.17.0
+VERSION = 0.18.0
 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
 GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
 BOARDS= #Initialized empty
```
```diff
@@ -4,14 +4,14 @@

 # Frigate NVR™ - A local NVR with realtime object detection

-<a href="https://hosted.weblate.org/engage/frigate-nvr/-/zh_Hans/">
-  <img src="https://hosted.weblate.org/widget/frigate-nvr/-/zh_Hans/svg-badge.svg" alt="Translation status" />
-</a>

 [English](https://github.com/blakeblackshear/frigate) | \[简体中文\]

 [](https://opensource.org/licenses/MIT)

+<a href="https://hosted.weblate.org/engage/frigate-nvr/-/zh_Hans/">
+  <img src="https://hosted.weblate.org/widget/frigate-nvr/-/zh_Hans/svg-badge.svg" alt="Translation status" />
+</a>

 A complete local network video recorder (NVR) designed for [Home Assistant](https://www.home-assistant.io) with AI object detection. Uses OpenCV and TensorFlow to perform realtime object detection locally for IP cameras.

 A GPU or an AI accelerator (such as the [Google Coral accelerator](https://coral.ai/products/) or [Hailo](https://hailo.ai/)) is highly recommended. They are far more efficient than even today's top-end CPUs and draw very little power.
@@ -38,7 +38,6 @@

 ## License

 This project is licensed under the **MIT License**.

 **Code**: The source code, configuration files, and documentation in this repository are covered by the [MIT License](LICENSE). You are free to use, modify, and distribute the code, provided you retain the original copyright notice.

 **Trademarks**: The "Frigate" name, the "Frigate NVR" brand, and the Frigate logo are **trademarks of Frigate LLC** and are **not** covered by the MIT License.
```
```diff
@@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
 FROM scratch AS go2rtc
 ARG TARGETARCH
 WORKDIR /rootfs/usr/local/go2rtc/bin
-ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.10/go2rtc_linux_${TARGETARCH}" go2rtc
+ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.13/go2rtc_linux_${TARGETARCH}" go2rtc

 FROM wget AS tempio
 ARG TARGETARCH
@@ -237,18 +237,8 @@ ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
 # Set HailoRT to disable logging
 ENV HAILORT_LOGGER_PATH=NONE

-# TensorFlow C++ logging suppression (must be set before import)
-# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
+# TensorFlow error only
 ENV TF_CPP_MIN_LOG_LEVEL=3
-# Suppress verbose logging from TensorFlow C++ code
-ENV TF_CPP_MIN_VLOG_LEVEL=3
-# Disable oneDNN optimization messages ("optimized with oneDNN...")
-ENV TF_ENABLE_ONEDNN_OPTS=0
-# Suppress AutoGraph verbosity during conversion
-ENV AUTOGRAPH_VERBOSITY=0
-# Google Logging (GLOG) suppression for TensorFlow components
-ENV GLOG_minloglevel=3
-ENV GLOG_logtostderr=0

 ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
```
```diff
@@ -14,5 +14,5 @@ nvidia_cusparse_cu12==12.5.1.*; platform_machine == 'x86_64'
 nvidia_nccl_cu12==2.23.4; platform_machine == 'x86_64'
 nvidia_nvjitlink_cu12==12.5.82; platform_machine == 'x86_64'
 onnx==1.16.*; platform_machine == 'x86_64'
-onnxruntime-gpu==1.23.*; platform_machine == 'x86_64'
+onnxruntime-gpu==1.22.*; platform_machine == 'x86_64'
 protobuf==3.20.3; platform_machine == 'x86_64'
```
```diff
@@ -234,7 +234,7 @@ To do this:

 ### Custom go2rtc version

-Frigate currently includes go2rtc v1.9.10, there may be certain cases where you want to run a different version of go2rtc.
+Frigate currently includes go2rtc v1.9.13, there may be certain cases where you want to run a different version of go2rtc.

 To do this:
```
````diff
@@ -238,7 +238,7 @@ go2rtc:
     - rtspx://192.168.1.1:7441/abcdefghijk
 ```

-[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-rtsp)
+[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-rtsp)

 In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect.
````
```diff
@@ -3,7 +3,7 @@ id: object_classification
 title: Object Classification
 ---

-Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object. Classification results are visible in the Tracked Object Details pane in Explore, through the `frigate/tracked_object_details` MQTT topic, in Home Assistant sensors via the official Frigate integration, or through the event endpoints in the HTTP API.
+Object classification allows you to train a custom MobileNetV2 classification model to run on tracked objects (persons, cars, animals, etc.) to identify a finer category or attribute for that object.

 ## Minimum System Requirements
@@ -11,8 +11,6 @@ Object classification models are lightweight and run very fast on CPU. Inference
 Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.

-A CPU with AVX instructions is required for training and inference.
-
 ## Classes

 Classes are the categories your model will learn to distinguish between. Each class represents a distinct visual category that the model will predict.
@@ -33,15 +31,9 @@ For object classification:
   - Example: `cat` → `Leo`, `Charlie`, `None`.

 - **Attribute**:
-  - Added as metadata to the object, visible in the Tracked Object Details pane in Explore, `frigate/events` MQTT messages, and the HTTP API response as `<model_name>: <predicted_value>`.
+  - Added as metadata to the object (visible in /events): `<model_name>: <predicted_value>`.
   - Ideal when multiple attributes can coexist independently.
-  - Example: Detecting if a `person` in a construction yard is wearing a helmet or not, and if they are wearing a yellow vest or not.
-
-:::note
-
-A tracked object can only have a single sub label. If you are using Face Recognition and you configure an object classification model for `person` using the sub label type, your sub label may not be assigned correctly as it depends on which enrichment completes its analysis first. Consider using the `attribute` type instead.
-
-:::
+  - Example: Detecting if a `person` in a construction yard is wearing a helmet or not.

 ## Assignment Requirements
```
````diff
@@ -81,8 +73,6 @@ classification:
       classification_type: sub_label # or: attribute
 ```

-An optional config, `save_attempts`, can be set as a key under the model name. This defines the number of classification attempts to save in the Recent Classifications tab. For object classification models, the default is 200.
-
 ## Training the model

 Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of two steps:
````
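For reference, a minimal sketch of where the removed `save_attempts` option would sit, assuming the classification / custom / model-name nesting implied by the surrounding example; the model name and exact nesting are illustrative, not confirmed by this excerpt:

```yaml
classification:
  custom:
    our_cats: # hypothetical model name
      save_attempts: 200 # default for object classification models, per the removed text
      threshold: 0.8 # starting point suggested under "Improving the Model"
      classification_type: sub_label # or: attribute
```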
````diff
@@ -91,16 +81,12 @@ Creating and training the model is done within the Frigate UI using the `Classif
 Enter a name for your model, select the object label to classify (e.g., `person`, `dog`, `car`), choose the classification type (sub label or attribute), and define your classes. Include a `none` class for objects that don't fit any specific category.

-For example: To classify your two cats, create a model named "Our Cats" and create two classes, "Charlie" and "Leo". Create a third class, "none", for other neighborhood cats that are not your own.
-
 ### Step 2: Assign Training Examples

 The system will automatically generate example images from detected objects matching your selected label. You'll be guided through each class one at a time to select which images represent that class. Any images not assigned to a specific class will automatically be assigned to `none` when you complete the last class. Once all images are processed, training will begin automatically.

 When choosing which objects to classify, start with a small number of visually distinct classes and ensure your training samples match camera viewpoints and distances typical for those objects.

-If examples for some of your classes do not appear in the grid, you can continue configuring the model without them. New images will begin to appear in the Recent Classifications view. When your missing classes are seen, classify them from this view and retrain your model.
-
 ### Improving the Model

 - **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
@@ -108,23 +94,3 @@ If examples for some of your classes do not appear in the grid, you can continue
 - **Preprocessing**: Ensure examples reflect object crops similar to Frigate’s boxes; keep the subject centered.
 - **Labels**: Keep label names short and consistent; include a `none` class if you plan to ignore uncertain predictions for sub labels.
 - **Threshold**: Tune `threshold` per model to reduce false assignments. Start at `0.8` and adjust based on validation.
-
-## Debugging Classification Models
-
-To troubleshoot issues with object classification models, enable debug logging to see detailed information about classification attempts, scores, and consensus calculations.
-
-Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change.
-
-```yaml
-logger:
-  default: info
-  logs:
-    frigate.data_processing.real_time.custom_classification: debug
-```
-
-The debug logs will show:
-
-- Classification probabilities for each attempt
-- Whether scores meet the threshold requirement
-- Consensus calculations and when assignments are made
-- Object classification history and weighted scores
````
```diff
@@ -3,7 +3,7 @@ id: state_classification
 title: State Classification
 ---

-State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region. Classification results are available through the `frigate/<camera_name>/classification/<model_name>` MQTT topic and in Home Assistant sensors via the official Frigate integration.
+State classification allows you to train a custom MobileNetV2 classification model on a fixed region of your camera frame(s) to determine a current state. The model can be configured to run on a schedule and/or when motion is detected in that region.

 ## Minimum System Requirements
```
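The removed sentence above names the MQTT topic that carries results. A minimal sketch of consuming it as a Home Assistant sensor, assuming a hypothetical camera `garage_cam` and model `garage_door`; the topic layout comes from the text, but the payload format is an assumption:

```yaml
# configuration.yaml (Home Assistant), illustrative only
mqtt:
  sensor:
    - name: "Garage door state" # hypothetical sensor name
      state_topic: "frigate/garage_cam/classification/garage_door"
      # If the payload turns out to be JSON rather than a bare class string,
      # a value_template would be needed to extract the predicted class.
```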
````diff
@@ -11,8 +11,6 @@ State classification models are lightweight and run very fast on CPU. Inference
 Training the model does briefly use a high amount of system resources for about 1–3 minutes per training run. On lower-power devices, training may take longer.

-A CPU with AVX instructions is required for training and inference.
-
 ## Classes

 Classes are the different states an area on your camera can be in. Each class represents a distinct visual state that the model will learn to recognize.
@@ -48,8 +46,6 @@ classification:
       crop: [0, 180, 220, 400]
 ```

-An optional config, `save_attempts`, can be set as a key under the model name. This defines the number of classification attempts to save in the Recent Classifications tab. For state classification models, the default is 100.
-
 ## Training the model

 Creating and training the model is done within the Frigate UI using the `Classification` page. The process consists of three steps:
@@ -74,34 +70,3 @@ Once some images are assigned, training will begin automatically.
 - **Data collection**: Use the model's Recent Classifications tab to gather balanced examples across times of day and weather.
 - **When to train**: Focus on cases where the model is entirely incorrect or flips between states when it should not. There's no need to train additional images when the model is already working consistently.
 - **Selecting training images**: Images scoring below 100% due to new conditions (e.g., first snow of the year, seasonal changes) or variations (e.g., objects temporarily in view, insects at night) are good candidates for training, as they represent scenarios different from the default state. Training these lower-scoring images that differ from existing training data helps prevent overfitting. Avoid training large quantities of images that look very similar, especially if they already score 100% as this can lead to overfitting.
-
-## Debugging Classification Models
-
-To troubleshoot issues with state classification models, enable debug logging to see detailed information about classification attempts, scores, and state verification.
-
-Enable debug logs for classification models by adding `frigate.data_processing.real_time.custom_classification: debug` to your `logger` configuration. These logs are verbose, so only keep this enabled when necessary. Restart Frigate after this change.
-
-```yaml
-logger:
-  default: info
-  logs:
-    frigate.data_processing.real_time.custom_classification: debug
-```
-
-The debug logs will show:
-
-- Classification probabilities for each attempt
-- Whether scores meet the threshold requirement
-- State verification progress (consecutive detections needed)
-- When state changes are published
-
-### Recent Classifications
-
-For state classification, images are only added to recent classifications under specific circumstances:
-
-- **First detection**: The first classification attempt for a camera is always saved
-- **State changes**: Images are saved when the detected state differs from the current verified state
-- **Pending verification**: Images are saved when there's a pending state change being verified (requires 3 consecutive identical states)
-- **Low confidence**: Images with scores below 100% are saved even if the state matches the current state (useful for training)
-
-Images are **not** saved when the state is stable (detected state matches current state) **and** the score is 100%. This prevents unnecessary storage of redundant high-confidence classifications.
````
@@ -1,235 +0,0 @@ (page removed in full)

````markdown
---
id: genai
title: Generative AI
---

Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.

Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle, or can optionally be sent earlier after a number of significantly changed frames, for example in use in more real-time notifications. Descriptions can also be regenerated manually via the Frigate UI. Note that if you are manually entering a description for tracked objects prior to its end, this will be overwritten by the generated response.

## Configuration

Generative AI can be enabled for all cameras or only for specific cameras. If GenAI is disabled for a camera, you can still manually generate descriptions for events using the HTTP API. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.

To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.

```yaml
genai:
  provider: gemini
  api_key: "{FRIGATE_GEMINI_API_KEY}"
  model: gemini-2.0-flash

cameras:
  front_camera:
    genai:
      enabled: True # <- enable GenAI for your front camera
      use_snapshot: True
      objects:
        - person
      required_zones:
        - steps
  indoor_camera:
    objects:
      genai:
        enabled: False # <- disable GenAI for your indoor camera
```

By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.

Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.

Generative AI can also be toggled dynamically for a camera via MQTT with the topic `frigate/<camera_name>/object_descriptions/set`. See the [MQTT documentation](/integrations/mqtt/#frigatecamera_nameobjectdescriptionsset).

## Ollama

:::warning

Using Ollama on CPU is not recommended, high inference times make using Generative AI impractical.

:::

[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on a Apple silicon Mac for best performance.

Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.

Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose a `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).

### Supported Models

You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). Note that Frigate will not automatically download the model you specify in your config, you must download the model to your local instance of Ollama first i.e. by running `ollama pull llava:7b` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.

:::note

You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.

:::

#### Ollama Cloud models

Ollama also supports [cloud models](https://ollama.com/cloud), where your local Ollama instance handles requests from Frigate, but model inference is performed in the cloud. Set up Ollama locally, sign in with your Ollama account, and specify the cloud model name in your Frigate config. For more details, see the Ollama cloud model [docs](https://docs.ollama.com/cloud).

### Configuration

```yaml
genai:
  provider: ollama
  base_url: http://localhost:11434
  model: qwen3-vl:4b
```

## Google Gemini

Google Gemini has a free tier allowing [15 queries per minute](https://ai.google.dev/pricing) to the API, which is more than sufficient for standard Frigate usage.

### Supported Models

You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini).

### Get API Key

To start using Gemini, you must first get an API key from [Google AI Studio](https://aistudio.google.com).

1. Accept the Terms of Service
2. Click "Get API Key" from the right hand navigation
3. Click "Create API key in new project"
4. Copy the API key for use in your config

### Configuration

```yaml
genai:
  provider: gemini
  api_key: "{FRIGATE_GEMINI_API_KEY}"
  model: gemini-2.0-flash
```

:::note

To use a different Gemini-compatible API endpoint, set the `GEMINI_BASE_URL` environment variable to your provider's API URL.

:::

## OpenAI

OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route.

### Supported Models

You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models).

### Get API Key

To start using OpenAI, you must first [create an API key](https://platform.openai.com/api-keys) and [configure billing](https://platform.openai.com/settings/organization/billing/overview).

### Configuration

```yaml
genai:
  provider: openai
  api_key: "{FRIGATE_OPENAI_API_KEY}"
  model: gpt-4o
```

:::note

To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.

:::

## Azure OpenAI

Microsoft offers several vision models through Azure OpenAI. A subscription is required.

### Supported Models

You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).

### Create Resource and Get API Key

To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key, model name, and resource URL, which must include the `api-version` parameter (see the example below).

### Configuration

```yaml
genai:
  provider: azure_openai
  base_url: https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview
  model: gpt-5-mini
  api_key: "{FRIGATE_OPENAI_API_KEY}"
```

## Usage and Best Practices

Frigate's thumbnail search excels at identifying specific details about tracked objects – for example, using an "image caption" approach to find a "person wearing a yellow vest," "a white dog running across the lawn," or "a red car on a residential street." To enhance this further, Frigate’s default prompts are designed to ask your AI provider about the intent behind the object's actions, rather than just describing its appearance.

While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate’s default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what’s happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they’re moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation’s context.

### Using GenAI for notifications

Frigate provides an [MQTT topic](/integrations/mqtt), `frigate/tracked_object_update`, that is updated with a JSON payload containing `event_id` and `description` when your AI provider returns a description for a tracked object. This description could be used directly in notifications, such as sending alerts to your phone or making audio announcements. If additional details from the tracked object are needed, you can query the [HTTP API](/integrations/api/event-events-event-id-get) using the `event_id`, eg: `http://frigate_ip:5000/api/events/<event_id>`.

If looking to get notifications earlier than when an object ceases to be tracked, an additional send trigger can be configured of `after_significant_updates`.

```yaml
genai:
  send_triggers:
    tracked_object_end: true # default
    after_significant_updates: 3 # how many updates to a tracked object before we should send an image
```

## Custom Prompts

Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:

```
Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.
```

:::tip

Prompts can use variable replacements `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.

:::

You are also able to define custom prompts in your configuration.

```yaml
genai:
  provider: ollama
  base_url: http://localhost:11434
  model: llava

objects:
  prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
  object_prompts:
    person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
    car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
```

Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.

```yaml
cameras:
  front_door:
    objects:
      genai:
        enabled: True
        use_snapshot: True
        prompt: "Analyze the {label} in these images from the {camera} security camera at the front door. Focus on the actions and potential intent of the {label}."
        object_prompts:
          person: "Examine the person in these images. What are they doing, and how might their actions suggest their purpose (e.g., delivering something, approaching, leaving)? If they are carrying or interacting with a package, include details about its source or destination."
          cat: "Observe the cat in these images. Focus on its movement and intent (e.g., wandering, hunting, interacting with objects). If the cat is near the flower pots or engaging in any specific actions, mention it."
        objects:
          - person
          - cat
        required_zones:
          - steps
```

### Experiment with prompts

Many providers also have a public facing chat interface for their models. Download a couple of different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate.

- OpenAI - [ChatGPT](https://chatgpt.com)
- Gemini - [Google AI Studio](https://aistudio.google.com)
- Ollama - [Open WebUI](https://docs.openwebui.com/)
````
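The configuration above references `{FRIGATE_GEMINI_API_KEY}`. A minimal sketch of supplying that variable through docker-compose, relying on the documented `FRIGATE_`-prefix substitution; the service layout is illustrative:

```yaml
# docker-compose.yml (illustrative)
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable
    environment:
      # Substituted into the config wherever {FRIGATE_GEMINI_API_KEY} appears
      - FRIGATE_GEMINI_API_KEY=your-api-key-here
```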
````diff
@@ -5,7 +5,7 @@ title: Configuring Generative AI

 ## Configuration

-A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
+A Generative AI provider can be configured in the global config, which will make the Generative AI features available for use. There are currently 4 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.

 To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
@@ -41,12 +41,12 @@ If you are trying to use a single model for Frigate and HomeAssistant, it will n
 The following models are recommended:

-| Model             | Notes                                                                 |
-| ----------------- | --------------------------------------------------------------------- |
-| `qwen3-vl`        | Strong visual and situational understanding, higher vram requirement  |
-| `Intern3.5VL`     | Relatively fast with good vision comprehension                        |
-| `gemma3`          | Strong frame-to-frame understanding, slower inference times           |
-| `qwen2.5-vl`      | Fast but capable model with good vision comprehension                 |
+| Model         | Notes                                                                 |
+| ------------- | --------------------------------------------------------------------- |
+| `qwen3-vl`    | Strong visual and situational understanding, higher vram requirement  |
+| `Intern3.5VL` | Relatively fast with good vision comprehension                        |
+| `gemma3`      | Strong frame-to-frame understanding, slower inference times           |
+| `qwen2.5-vl`  | Fast but capable model with good vision comprehension                 |

 :::note
@@ -61,12 +61,46 @@ genai:
   provider: ollama
   base_url: http://localhost:11434
   model: minicpm-v:8b
-  provider_options: # other Ollama client options can be defined
+  provider_options: # other Ollama client options can be defined
     keep_alive: -1
     options:
-      num_ctx: 8192 # make sure the context matches other services that are using ollama
+      num_ctx: 8192 # make sure the context matches other services that are using ollama
 ```

+## llama.cpp
+
+[llama.cpp](https://github.com/ggerganov/llama.cpp) is a C++ implementation of LLaMA that provides a high-performance inference server. Using llama.cpp directly gives you access to all native llama.cpp options and parameters.
+
+:::warning
+
+Using llama.cpp on CPU is not recommended, high inference times make using Generative AI impractical.
+
+:::
+
+It is highly recommended to host the llama.cpp server on a machine with a discrete graphics card, or on an Apple silicon Mac for best performance.
+
+### Supported Models
+
+You must use a vision capable model with Frigate. The llama.cpp server supports various vision models in GGUF format.
+
+### Configuration
+
+```yaml
+genai:
+  provider: llamacpp
+  base_url: http://localhost:8080
+  model: your-model-name
+  provider_options:
+    temperature: 0.7
+    repeat_penalty: 1.05
+    top_p: 0.8
+    top_k: 40
+    min_p: 0.05
+    seed: -1
+```
+
+All llama.cpp native options can be passed through `provider_options`, including `temperature`, `top_k`, `top_p`, `min_p`, `repeat_penalty`, `repeat_last_n`, `seed`, `grammar`, and more. See the [llama.cpp server documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md) for a complete list of available parameters.
+
 ## Google Gemini

 Google Gemini has a free tier allowing [15 queries per minute](https://ai.google.dev/pricing) to the API, which is more than sufficient for standard Frigate usage.
````
```diff
@@ -13,7 +13,7 @@ Object detection and enrichments (like Semantic Search, Face Recognition, and Li
 - **AMD**

-  - ROCm support in the `-rocm` Frigate image is automatically detected for enrichments, but only some enrichment models are available due to ROCm's focus on LLMs and limited stability with certain neural network models. Frigate disables models that perform poorly or are unstable to ensure reliable operation, so only compatible enrichments may be active.
+  - ROCm will automatically be detected and used for enrichments in the `-rocm` Frigate image.

 - **Intel**
```
```diff
@@ -146,16 +146,16 @@ detectors:

 ### EdgeTPU Supported Models

-| Model                   | Notes                                       |
-| ----------------------- | ------------------------------------------- |
-| [Mobiledet](#mobiledet) | Default model                               |
-| [YOLOv9](#yolov9)       | More accurate but slower than default model |
+| Model                                 | Notes                                       |
+| ------------------------------------- | ------------------------------------------- |
+| [MobileNet v2](#ssdlite-mobilenet-v2) | Default model                               |
+| [YOLOv9](#yolo-v9)                    | More accurate but slower than default model |

-#### Mobiledet
+#### SSDLite MobileNet v2

 A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.

-#### YOLOv9
+#### YOLO v9

 [YOLOv9](https://github.com/dbro/frigate-detector-edgetpu-yolo9/releases/download/v1.0/yolov9-s-relu6-best_320_int8_edgetpu.tflite) models that are compiled for Tensorflow Lite and properly quantized are supported, but not included by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. Note that the model may require a custom label file (eg. [use this 17 label file](https://raw.githubusercontent.com/dbro/frigate-detector-edgetpu-yolo9/refs/heads/main/labels-coco17.txt) for the model linked above.)
```
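Following the YOLOv9 paragraph above, a minimal sketch of binding in a custom model and label file; the container-side paths are illustrative, and the `width`/`height` values are assumed from the 320px int8 model name (additional model options may be required for YOLO models, which this excerpt does not confirm):

```yaml
detectors:
  coral:
    type: edgetpu
    device: usb

model:
  path: /config/yolov9-s-relu6-best_320_int8_edgetpu.tflite # bind mounted into the container
  labelmap_path: /config/labels-coco17.txt # the 17 label file linked above
  width: 320 # assumed from the model name
  height: 320
```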
```diff
@@ -139,7 +139,13 @@ record:

 :::tip

-When using `hwaccel_args` globally hardware encoding is used for time lapse generation. The encoder determines its own behavior so the resulting file size may be undesirably large.
+When using `hwaccel_args`, hardware encoding is used for timelapse generation. This setting can be overridden for a specific camera (e.g., when camera resolution exceeds hardware encoder limits); set `cameras.<camera>.record.export.hwaccel_args` with the appropriate settings. Using an unrecognized value or empty string will fall back to software encoding (libx264).
+
+:::
+
+:::tip
+
+The encoder determines its own behavior so the resulting file size may be undesirably large.
 To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantisation parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario.

 :::
```
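A minimal sketch of the per-camera override described in the tip, using the `cameras.<camera>.record.export.hwaccel_args` path quoted above; the camera name and `-qp` value are illustrative:

```yaml
record:
  export:
    timelapse_args: "-vf setpts=0.04*PTS -r 30 -qp 28" # -qp trades quality for file size
    hwaccel_args: auto # global setting

cameras:
  high_res_cam: # hypothetical 4K camera that exceeds the hardware encoder limits
    record:
      export:
        hwaccel_args: "" # empty string falls back to software encoding (libx264)
```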
````diff
@@ -148,19 +154,16 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe
 Apple devices running the Safari browser may fail to playback h.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices.

-## Syncing Recordings With Disk
+## Syncing Media Files With Disk

-In some cases the recordings files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled which will tell Frigate to check the file system and delete any db entries for files which don't exist.
+Media files (event snapshots, event thumbnails, review thumbnails, previews, exports, and recordings) can become orphaned when database entries are deleted but the corresponding files remain on disk.

-```yaml
-record:
-  sync_recordings: True
-```
+Normal operation may leave small numbers of orphaned files until Frigate's scheduled cleanup, but crashes, configuration changes, or upgrades may cause more orphaned files that Frigate does not clean up. This feature checks the file system for media files and removes any that are not referenced in the database.

-This feature is meant to fix variations in files, not completely delete entries in the database. If you delete all of your media, don't use `sync_recordings`, just stop Frigate, delete the `frigate.db` database, and restart.
+The Maintenance pane in the Frigate UI or an API endpoint `POST /api/media/sync` can be used to trigger a media sync. When using the API, a job ID is returned and the operation continues on the server. Status can be checked with the `/api/media/sync/status/{job_id}` endpoint.

 :::warning

-The sync operation uses considerable CPU resources and in most cases is not needed, only enable when necessary.
+This operation uses considerable CPU resources and includes a safety threshold that aborts if more than 50% of files would be deleted. Only run when necessary. If you set `force: true` the safety threshold will be bypassed; do not use `force` unless you are certain the deletions are intended.

 :::
````
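A sketch of driving the sync from the endpoints named above; the host, port, and request body are assumptions (the text mentions a `force` flag, but the full body schema is not shown in this excerpt):

```bash
# Queue a media sync job (202 with job details; 409 if one is already running)
curl -X POST http://frigate.local:5000/api/media/sync \
  -H "Content-Type: application/json" \
  -d '{"force": false}'

# Poll the job using the id returned above
curl http://frigate.local:5000/api/media/sync/status/<job_id>
```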
```diff
@@ -510,8 +510,6 @@ record:
       # Optional: Number of minutes to wait between cleanup runs (default: shown below)
       # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o
       expire_interval: 60
-  # Optional: Two-way sync recordings database with disk on startup and once a day (default: shown below).
-  sync_recordings: False
   # Optional: Continuous retention settings
   continuous:
     # Optional: Number of days to retain recordings regardless of tracked objects or motion (default: shown below)
@@ -534,6 +532,8 @@ record:
     # The -r (framerate) dictates how smooth the output video is.
     # So the args would be -vf setpts=0.02*PTS -r 30 in that case.
     timelapse_args: "-vf setpts=0.04*PTS -r 30"
+    # Optional: Global hardware acceleration settings for timelapse exports. (default: inherit)
+    hwaccel_args: auto
   # Optional: Recording Preview Settings
   preview:
     # Optional: Quality of recording preview (default: shown below).
@@ -749,7 +749,7 @@ classification:
       interval: None

 # Optional: Restream configuration
-# Uses https://github.com/AlexxIT/go2rtc (v1.9.10)
+# Uses https://github.com/AlexxIT/go2rtc (v1.9.13)
 # NOTE: The default go2rtc API port (1984) must be used,
 # changing this port for the integrated go2rtc instance is not supported.
 go2rtc:
@@ -835,6 +835,11 @@ cameras:
     # Optional: camera specific output args (default: inherit)
     # output_args:

+    # Optional: camera specific hwaccel args for timelapse export (default: inherit)
+    # record:
+    #   export:
+    #     hwaccel_args:
+
     # Optional: timeout for highest scoring image before allowing it
     # to be replaced by a newer image. (default: shown below)
     best_image_timeout: 60
```
```diff
@@ -7,7 +7,7 @@ title: Restream

 Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.

-Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.10) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration) for more advanced configurations and features.
+Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.13) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration) for more advanced configurations and features.

 :::note
```
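A minimal sketch of the reduce-connections pattern referenced above: go2rtc connects to the camera once and Frigate consumes the local restream. The camera name and credentials are illustrative, and `preset-rtsp-restream` is assumed to be available as an input preset:

```yaml
go2rtc:
  streams:
    back_yard:
      - rtsp://user:pass@192.168.1.10:554/stream1 # single connection to the camera

cameras:
  back_yard:
    ffmpeg:
      inputs:
        - path: rtsp://127.0.0.1:8554/back_yard # consume the local restream
          input_args: preset-rtsp-restream
          roles:
            - detect
            - record
```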
```diff
@@ -187,7 +187,7 @@ In this configuration:

 ## Advanced Restream Configurations

-The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
+The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:

 NOTE: The output will need to be passed with two curly braces `{{output}}`
```
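The original example is not included in this excerpt; a sketch of an exec source following the stated `{{output}}` convention, where the stream name, input URL, and ffmpeg flags are illustrative:

```yaml
go2rtc:
  streams:
    transcoded_cam:
      - "exec:ffmpeg -hide_banner -i rtsp://192.168.1.10:554/stream1 -c:v libx264 -preset veryfast -an -rtsp_transport tcp -f rtsp {{output}}"
```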
````diff
@@ -11,7 +11,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect

 ## Setup a go2rtc stream

-First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#module-streams), not just rtsp.
+First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#module-streams), not just rtsp.

 :::tip
@@ -47,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
 - Check Video Codec:

   - If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
-  - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#codecs-madness) in go2rtc documentation.
-  - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.10#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
+  - If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#codecs-madness) in go2rtc documentation.
+  - If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.13#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
 ```yaml
 go2rtc:
   streams:
````
```diff
@@ -38,7 +38,3 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht

 ## [Periscope](https://github.com/maksz42/periscope)

 [Periscope](https://github.com/maksz42/periscope) is a lightweight Android app that turns old devices into live viewers for Frigate. It works on Android 2.2 and above, including Android TV. It supports authentication and HTTPS.

-## [Scrypted - Frigate bridge plugin](https://github.com/apocaliss92/scrypted-frigate-bridge)
-
-[Scrypted - Frigate bridge](https://github.com/apocaliss92/scrypted-frigate-bridge) is an plugin that allows to ingest Frigate detections, motion, videoclips on Scrypted as well as provide templates to export rebroadcast configurations on Frigate.
```
@@ -1,60 +0,0 @@ (page removed in full)

````markdown
---
id: dummy-camera
title: Troubleshooting Detection
---

When investigating object detection or tracking problems, it can be helpful to replay an exported video as a temporary "dummy" camera. This lets you reproduce issues locally, iterate on configuration (detections, zones, enrichment settings), and capture logs and clips for analysis.

## When to use

- Replaying an exported clip to reproduce incorrect detections
- Testing configuration changes (model settings, trackers, filters) against a known clip
- Gathering deterministic logs and recordings for debugging or issue reports

## Example Config

Place the clip you want to replay in a location accessible to Frigate (for example `/media/frigate/` or the repository `debug/` folder when developing). Then add a temporary camera to your `config/config.yml` like this:

```yaml
cameras:
  test:
    ffmpeg:
      inputs:
        - path: /media/frigate/car-stopping.mp4
          input_args: -re -stream_loop -1 -fflags +genpts
          roles:
            - detect
    detect:
      enabled: true
    record:
      enabled: false
    snapshots:
      enabled: false
```

- `-re -stream_loop -1` tells `ffmpeg` to play the file in realtime and loop indefinitely, which is useful for long debugging sessions.
- `-fflags +genpts` helps generate presentation timestamps when they are missing in the file.

## Steps

1. Export or copy the clip you want to replay to the Frigate host (e.g., `/media/frigate/` or `debug/clips/`).
2. Add the temporary camera to `config/config.yml` (example above). Use a unique name such as `test` or `replay_camera` so it's easy to remove later.
   - If you're debugging a specific camera, copy the settings from that camera (frame rate, model/enrichment settings, zones, etc.) into the temporary camera so the replay closely matches the original environment. Leave `record` and `snapshots` disabled unless you are specifically debugging recording or snapshot behavior.
3. Restart Frigate.
4. Observe the Debug view in the UI and logs as the clip is replayed. Watch detections, zones, or any feature you're looking to debug, and note any errors in the logs to reproduce the issue.
5. Iterate on camera or enrichment settings (model, fps, zones, filters) and re-check the replay until the behavior is resolved.
6. Remove the temporary camera from your config after debugging to avoid spurious telemetry or recordings.

## Variables to consider in object tracking

- The exported video will not always line up exactly with how it originally ran through Frigate (or even with the last loop). Different frames may be used on replay, which can change detections and tracking.
- Motion detection depends on the frames used; small frame shifts can change motion regions and therefore what gets passed to the detector.
- Object detection is not deterministic: models and post-processing can yield different results across runs, so you may not get identical detections or track IDs every time.

When debugging, treat the replay as a close approximation rather than a byte-for-byte replay. Capture multiple runs, enable recording if helpful, and examine logs and saved event clips to understand variability.

## Troubleshooting

- No video: verify the path is correct and accessible from the Frigate process/container.
- FFmpeg errors: check the log output for ffmpeg-specific flags and adjust `input_args` accordingly for your file/container. You may also need to disable hardware acceleration (`hwaccel_args: ""`) for the dummy camera.
- No detections: confirm the camera `roles` include `detect`, and model/detector configuration is enabled.
````
````diff
@@ -9,20 +9,8 @@ Frigate includes built-in memory profiling using [memray](https://bloomberg.gith
 Memory profiling is controlled via the `FRIGATE_MEMRAY_MODULES` environment variable. Set it to a comma-separated list of module names you want to profile:

-```yaml
-# docker-compose example
-services:
-  frigate:
-    ...
-    environment:
-      - FRIGATE_MEMRAY_MODULES=frigate.embeddings,frigate.capture
-```
-
-```bash
-# docker run example
-docker run -e FRIGATE_MEMRAY_MODULES="frigate.embeddings" \
-  ...
-  --name frigate <frigate_image>
-```
+```bash
+export FRIGATE_MEMRAY_MODULES="frigate.review_segment_manager,frigate.capture"
+```

 ### Module Names
@@ -36,12 +24,11 @@ Frigate processes are named using a module-based naming scheme. Common module na
 - `frigate.output` - Output processing
 - `frigate.audio_manager` - Audio processing
 - `frigate.embeddings` - Embeddings processing
-- `frigate.embeddings_manager` - Embeddings manager

 You can also specify the full process name (including camera-specific identifiers) if you want to profile a specific camera:

 ```bash
-FRIGATE_MEMRAY_MODULES=frigate.capture:front_door
+export FRIGATE_MEMRAY_MODULES="frigate.capture:front_door"
 ```

 When you specify a module name (e.g., `frigate.capture`), all processes with that module prefix will be profiled. For example, `frigate.capture` will profile all camera capture processes.
@@ -68,20 +55,11 @@ After a process exits normally, you'll find HTML reports in `/config/memray_repo
 If a process crashes or you want to generate a report from an existing binary file, you can manually create the HTML report:

-- Run `memray` inside the Frigate container:
-
-  ```bash
-  docker-compose exec frigate memray flamegraph /config/memray_reports/<module_name>.bin
-  # or
-  docker exec -it <container_name_or_id> memray flamegraph /config/memray_reports/<module_name>.bin
-  ```
-
-- You can also copy the `.bin` file to the host and run `memray` locally if you have it installed:
-
-  ```bash
-  docker cp <container_name_or_id>:/config/memray_reports/<module_name>.bin /tmp/
-  memray flamegraph /tmp/<module_name>.bin
-  ```
+```bash
+memray flamegraph /config/memray_reports/<module_name>.bin
+```

 This will generate an HTML file that you can open in your browser.

 ## Understanding the Reports
@@ -132,4 +110,20 @@ The interactive HTML reports allow you to:
 - Check that memray is properly installed (included by default in Frigate)
 - Verify the process actually started and ran (check process logs)

+## Example Usage
+
+```bash
+# Enable profiling for review and capture modules
+export FRIGATE_MEMRAY_MODULES="frigate.review_segment_manager,frigate.capture"
+
+# Start Frigate
+# ... let it run for a while ...
+
+# Check for reports
+ls -lh /config/memray_reports/
+
+# If a process crashed, manually generate report
+memray flamegraph /config/memray_reports/frigate_capture_front_door.bin
+```
+
 For more information about memray and interpreting reports, see the [official memray documentation](https://bloomberg.github.io/memray/).
````
```diff
@@ -28,7 +28,7 @@ const sidebars: SidebarsConfig = {
     {
       type: "link",
       label: "Go2RTC Configuration Reference",
-      href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.10#configuration",
+      href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.13#configuration",
     } as PropSidebarItemLink,
   ],
   Detectors: [
@@ -132,7 +132,6 @@ const sidebars: SidebarsConfig = {
     "troubleshooting/gpu",
     "troubleshooting/edgetpu",
     "troubleshooting/memory",
-    "troubleshooting/dummy-camera",
   ],
   Development: [
     "development/contributing",
```
docs/static/frigate-api.yaml (vendored, 152 changes)

```diff
@@ -17,25 +17,20 @@ paths:
       summary: Authenticate request
       description: |-
         Authenticates the current request based on proxy headers or JWT token.
-        Returns user role and permissions for camera access.
+        This endpoint verifies authentication credentials and manages JWT token refresh.
+        On success, no JSON body is returned; authentication state is communicated via response headers and cookies.
       operationId: auth_auth_get
       responses:
-        "200":
-          description: Successful Response
-          content:
-            application/json:
-              schema: {}
         "202":
-          description: Authentication Accepted
-          content:
-            application/json:
-              schema: {}
+          description: Authentication Accepted (no response body, different headers depending on auth method)
+          headers:
+            remote-user:
+              description: Authenticated username or "viewer" in proxy-only mode
+              schema:
+                type: string
+            remote-role:
+              description: Resolved role (e.g., admin, viewer, or custom)
+              schema:
+                type: string
+            Set-Cookie:
+              description: May include refreshed JWT cookie ("frigate-token") when applicable
+              schema:
+                type: string
         "401":
           description: Authentication Failed
   /profile:
```
@@ -331,6 +326,59 @@ paths:
            application/json:
              schema:
                $ref: "#/components/schemas/HTTPValidationError"
  /media/sync:
    post:
      tags:
        - App
      summary: Start media sync job
      description: |-
        Start an asynchronous media sync job to find and (optionally) remove orphaned media files.
        Returns 202 with job details when queued, or 409 if a job is already running.
      operationId: sync_media_media_sync_post
      requestBody:
        required: true
        content:
          application/json:
      responses:
        "202":
          description: Accepted - Job queued
        "409":
          description: Conflict - Job already running
        "422":
          description: Validation Error

  /media/sync/current:
    get:
      tags:
        - App
      summary: Get current media sync job
      description: |-
        Retrieve the current running media sync job, if any. Returns the job details or null when no job is active.
      operationId: get_media_sync_current_media_sync_current_get
      responses:
        "200":
          description: Successful Response
        "422":
          description: Validation Error

  /media/sync/status/{job_id}:
    get:
      tags:
        - App
      summary: Get media sync job status
      description: |-
        Get status and results for the specified media sync job id. Returns 200 with job details including results, or 404 if the job is not found.
      operationId: get_media_sync_status_media_sync_status__job_id__get
      parameters:
        - name: job_id
          in: path
      responses:
        "200":
          description: Successful Response
        "404":
          description: Not Found - Job not found
        "422":
          description: Validation Error
  /faces/train/{name}/classify:
    post:
      tags:
@@ -616,32 +664,6 @@ paths:
            application/json:
              schema:
                $ref: "#/components/schemas/HTTPValidationError"
  /classification/attributes:
    get:
      tags:
        - Classification
      summary: Get custom classification attributes
      description: |-
        Returns custom classification attributes for a given object type.
        Only includes models with classification_type set to 'attribute'.
        By default returns a flat sorted list of all attribute labels.
        If group_by_model is true, returns attributes grouped by model name.
      operationId: get_custom_attributes_classification_attributes_get
      parameters:
        - name: object_type
          in: query
          schema:
            type: string
        - name: group_by_model
          in: query
          schema:
            type: boolean
            default: false
      responses:
        "200":
          description: Successful Response
        "422":
          description: Validation Error
  /classification/{name}/dataset:
    get:
      tags:
@@ -2938,42 +2960,6 @@ paths:
            application/json:
              schema:
                $ref: "#/components/schemas/HTTPValidationError"
  /events/{event_id}/attributes:
    post:
      tags:
        - Events
      summary: Set custom classification attributes
      description: |-
        Sets an event's custom classification attributes for all attribute-type
        models that apply to the event's object type.
        Returns a success message or an error if the event is not found.
      operationId: set_attributes_events__event_id__attributes_post
      parameters:
        - name: event_id
          in: path
          required: true
          schema:
            type: string
            title: Event Id
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: "#/components/schemas/EventsAttributesBody"
      responses:
        "200":
          description: Successful Response
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/GenericResponse"
        "422":
          description: Validation Error
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/HTTPValidationError"
  /events/{event_id}/description:
    post:
      tags:
@@ -5021,18 +5007,6 @@ components:
      required:
        - subLabel
      title: EventsSubLabelBody
    EventsAttributesBody:
      properties:
        attributes:
          type: object
          title: Attributes
          description: Object with model names as keys and attribute values
          additionalProperties:
            type: string
      type: object
      required:
        - attributes
      title: EventsAttributesBody
    ExportModel:
      properties:
        id:

frigate/api/app.py

@@ -25,15 +25,22 @@ from pydantic import ValidationError

from frigate.api.auth import allow_any_authenticated, allow_public, require_role
from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.request.app_body import AppConfigSetBody, MediaSyncBody
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera.updater import (
    CameraConfigUpdateEnum,
    CameraConfigUpdateTopic,
)
from frigate.ffmpeg_presets import FFMPEG_HWACCEL_VAAPI, _gpu_selector
from frigate.jobs.media_sync import (
    get_current_media_sync_job,
    get_media_sync_job_by_id,
    start_media_sync_job,
)
from frigate.models import Event, Timeline
from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.types import JobStatusTypesEnum
from frigate.util.builtin import (
    clean_camera_user_pass,
    flatten_config_data,
@@ -458,7 +465,15 @@ def config_set(request: Request, body: AppConfigSetBody):

@router.get("/vainfo", dependencies=[Depends(allow_any_authenticated())])
def vainfo():
    vainfo = vainfo_hwaccel()
    # Use LibvaGpuSelector to pick an appropriate libva device (if available)
    selected_gpu = ""
    try:
        selected_gpu = _gpu_selector.get_gpu_arg(FFMPEG_HWACCEL_VAAPI, 0) or ""
    except Exception:
        selected_gpu = ""

    # If selected_gpu is empty, pass None to vainfo_hwaccel to run plain `vainfo`.
    vainfo = vainfo_hwaccel(device_name=selected_gpu or None)
    return JSONResponse(
        content={
            "return_code": vainfo.returncode,
@@ -593,6 +608,98 @@ def restart():
    )


@router.post(
    "/media/sync",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Start media sync job",
    description="""Start an asynchronous media sync job to find and (optionally) remove orphaned media files.
    Returns 202 with job details when queued, or 409 if a job is already running.""",
)
def sync_media(body: MediaSyncBody = Body(...)):
    """Start async media sync job - remove orphaned files.

    Syncs specified media types: event snapshots, event thumbnails, review thumbnails,
    previews, exports, and/or recordings. Job runs in background; use /media/sync/current
    or /media/sync/status/{job_id} to check status.

    Args:
        body: MediaSyncBody with dry_run flag and media_types list.
            media_types can include: 'all', 'event_snapshots', 'event_thumbnails',
            'review_thumbnails', 'previews', 'exports', 'recordings'

    Returns:
        202 Accepted with job_id, or 409 Conflict if job already running.
    """
    job_id = start_media_sync_job(
        dry_run=body.dry_run, media_types=body.media_types, force=body.force
    )

    if job_id is None:
        # A job is already running
        current = get_current_media_sync_job()
        return JSONResponse(
            content={
                "error": "A media sync job is already running",
                "current_job_id": current.id if current else None,
            },
            status_code=409,
        )

    return JSONResponse(
        content={
            "job": {
                "job_type": "media_sync",
                "status": JobStatusTypesEnum.queued,
                "id": job_id,
            }
        },
        status_code=202,
    )


@router.get(
    "/media/sync/current",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Get current media sync job",
    description="""Retrieve the current running media sync job, if any. Returns the job details
    or null when no job is active.""",
)
def get_media_sync_current():
    """Get the current running media sync job, if any."""
    job = get_current_media_sync_job()

    if job is None:
        return JSONResponse(content={"job": None}, status_code=200)

    return JSONResponse(
        content={"job": job.to_dict()},
        status_code=200,
    )


@router.get(
    "/media/sync/status/{job_id}",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Get media sync job status",
    description="""Get status and results for the specified media sync job id. Returns 200 with
    job details including results, or 404 if the job is not found.""",
)
def get_media_sync_status(job_id: str):
    """Get the status of a specific media sync job."""
    job = get_media_sync_job_by_id(job_id)

    if job is None:
        return JSONResponse(
            content={"error": "Job not found"},
            status_code=404,
        )

    return JSONResponse(
        content={"job": job.to_dict()},
        status_code=200,
    )


@router.get("/labels", dependencies=[Depends(allow_any_authenticated())])
def get_labels(camera: str = ""):
    try:
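The `frigate.jobs.media_sync` helpers imported above are not part of this diff. A minimal sketch of what a single-slot registry with that interface might look like; all internals here are assumptions:

```python
# Hypothetical single-slot registry matching the imported names; the real
# frigate.jobs.media_sync internals are not part of this diff.
import threading
import uuid
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class MediaSyncJob:
    id: str
    dry_run: bool
    media_types: list[str]
    status: str = "queued"
    results: dict = field(default_factory=dict)

    def to_dict(self) -> dict:
        return dict(self.__dict__)


_lock = threading.Lock()
_jobs: dict[str, MediaSyncJob] = {}
_current: Optional[MediaSyncJob] = None


def start_media_sync_job(
    dry_run: bool, media_types: list[str], force: bool
) -> Optional[str]:
    """Return a new job id, or None if a job is already active (the 409 path)."""
    global _current
    with _lock:
        if _current is not None and _current.status in ("queued", "running"):
            return None
        job = MediaSyncJob(uuid.uuid4().hex[:12], dry_run, media_types)
        _jobs[job.id] = job
        _current = job
        # A real implementation would hand the job to a background worker here,
        # honoring `force` when safety thresholds would otherwise block deletion.
        return job.id


def get_current_media_sync_job() -> Optional[MediaSyncJob]:
    return _current


def get_media_sync_job_by_id(job_id: str) -> Optional[MediaSyncJob]:
    return _jobs.get(job_id)
```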

frigate/api/auth.py

@@ -143,6 +143,17 @@ def require_admin_by_default():
    return admin_checker


def _is_authenticated(request: Request) -> bool:
    """
    Helper to determine if a request is from an authenticated user.

    Returns True if the request has a valid authenticated user (not anonymous).
    Port 5000 internal requests are considered anonymous despite having admin role.
    """
    username = request.headers.get("remote-user")
    return username is not None and username != "anonymous"


def allow_public():
    """
    Override dependency to allow unauthenticated access to an endpoint.
@@ -162,24 +173,27 @@ def allow_public():

def allow_any_authenticated():
    """
    Override dependency to allow any request that passed through the /auth endpoint.
    Override dependency to allow any authenticated user (bypass admin requirement).

    Allows:
    - Port 5000 internal requests (remote-user: "anonymous", remote-role: "admin")
    - Authenticated users with JWT tokens (remote-user: username)
    - Unauthenticated requests when auth is disabled (remote-user: "viewer")
    - Port 5000 internal requests (have admin role despite anonymous user)
    - Any authenticated user with a real username (not "anonymous")

    Rejects:
    - Requests with no remote-user header (did not pass through /auth endpoint)
    - Port 8971 requests with anonymous user (auth disabled, no proxy auth)

    Example:
        @router.get("/authenticated-endpoint", dependencies=[Depends(allow_any_authenticated())])
    """

    async def auth_checker(request: Request):
        # Ensure a remote-user has been set by the /auth endpoint
        username = request.headers.get("remote-user")
        if username is None:
            # Port 5000 requests have admin role and should be allowed
            role = request.headers.get("remote-role")
            if role == "admin":
                return

        # Otherwise require a real authenticated user (not anonymous)
        if not _is_authenticated(request):
            raise HTTPException(status_code=401, detail="Authentication required")
        return
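The header contract above can be exercised with FastAPI's test client. A sketch with the checker copied inline in simplified form, not an import of Frigate's actual dependency:

```python
# Sketch: exercising the header contract with FastAPI's test client. The
# checker is a simplified inline copy, not an import of Frigate's dependency.
from fastapi import Depends, FastAPI, HTTPException, Request
from fastapi.testclient import TestClient

app = FastAPI()


async def auth_checker(request: Request) -> None:
    username = request.headers.get("remote-user")
    if username is None and request.headers.get("remote-role") == "admin":
        return  # port-5000 style internal request
    if username is None or username == "anonymous":
        raise HTTPException(status_code=401, detail="Authentication required")


@app.get("/demo", dependencies=[Depends(auth_checker)])
def demo() -> dict:
    return {"ok": True}


client = TestClient(app)
assert client.get("/demo", headers={"remote-user": "alice"}).status_code == 200
assert client.get("/demo", headers={"remote-role": "admin"}).status_code == 200
assert client.get("/demo", headers={"remote-user": "anonymous"}).status_code == 401
assert client.get("/demo").status_code == 401
```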
@@ -539,32 +553,7 @@ def resolve_role(
    "/auth",
    dependencies=[Depends(allow_public())],
    summary="Authenticate request",
    description=(
        "Authenticates the current request based on proxy headers or JWT token. "
        "This endpoint verifies authentication credentials and manages JWT token refresh. "
        "On success, no JSON body is returned; authentication state is communicated via response headers and cookies."
    ),
    status_code=202,
    responses={
        202: {
            "description": "Authentication Accepted (no response body)",
            "headers": {
                "remote-user": {
                    "description": 'Authenticated username or "viewer" in proxy-only mode',
                    "schema": {"type": "string"},
                },
                "remote-role": {
                    "description": "Resolved role (e.g., admin, viewer, or custom)",
                    "schema": {"type": "string"},
                },
                "Set-Cookie": {
                    "description": "May include refreshed JWT cookie when applicable",
                    "schema": {"type": "string"},
                },
            },
        },
        401: {"description": "Authentication Failed"},
    },
    description="Authenticates the current request based on proxy headers or JWT token. Returns user role and permissions for camera access.",
)
def auth(request: Request):
    auth_config: AuthConfig = request.app.frigate_config.auth
@@ -592,12 +581,12 @@ def auth(request: Request):
    # if auth is disabled, just apply the proxy header map and return success
    if not auth_config.enabled:
        # pass the user header value from the upstream proxy if a mapping is specified
        # or use viewer if none are specified
        # or use anonymous if none are specified
        user_header = proxy_config.header_map.user
        success_response.headers["remote-user"] = (
            request.headers.get(user_header, default="viewer")
            request.headers.get(user_header, default="anonymous")
            if user_header
            else "viewer"
            else "anonymous"
        )

    # parse header and resolve a valid role
@@ -709,10 +698,10 @@ def auth(request: Request):
    "/profile",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get user profile",
    description="Returns the current authenticated user's profile including username, role, and allowed cameras. This endpoint requires authentication and returns information about the user's permissions.",
    description="Returns the current authenticated user's profile including username, role, and allowed cameras.",
)
def profile(request: Request):
    username = request.headers.get("remote-user", "viewer")
    username = request.headers.get("remote-user", "anonymous")
    role = request.headers.get("remote-role", "viewer")

    all_camera_names = set(request.app.frigate_config.cameras.keys())
@@ -728,7 +717,7 @@ def profile(request: Request):
    "/logout",
    dependencies=[Depends(allow_public())],
    summary="Logout user",
    description="Logs out the current user by clearing the session cookie. After logout, subsequent requests will require re-authentication.",
    description="Logs out the current user by clearing the session cookie.",
)
def logout(request: Request):
    auth_config: AuthConfig = request.app.frigate_config.auth
@@ -744,7 +733,7 @@ limiter = Limiter(key_func=get_remote_addr)
    "/login",
    dependencies=[Depends(allow_public())],
    summary="Login with credentials",
    description='Authenticates a user with username and password. Returns a JWT token as a secure HTTP-only cookie that can be used for subsequent API requests. The JWT token can also be retrieved from the response and used as a Bearer token in the Authorization header.\n\nExample using Bearer token:\n```\ncurl -H "Authorization: Bearer <token_value>" https://frigate_ip:8971/api/profile\n```',
    description="Authenticates a user with username and password. Returns a JWT token as a secure HTTP-only cookie that can be used for subsequent API requests. The token can also be retrieved and used as a Bearer token in the Authorization header.",
)
@limiter.limit(limit_value=rateLimiter.get_limit)
def login(request: Request, body: AppPostLoginBody):
@@ -787,7 +776,7 @@ def login(request: Request, body: AppPostLoginBody):
    "/users",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Get all users",
    description="Returns a list of all users with their usernames and roles. Requires admin role. Each user object contains the username and assigned role.",
    description="Returns a list of all users with their usernames and roles. Requires admin role.",
)
def get_users():
    exports = (
@@ -800,7 +789,7 @@ def get_users():
    "/users",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Create new user",
    description='Creates a new user with the specified username, password, and role. Requires admin role. Password must meet strength requirements: minimum 8 characters, at least one uppercase letter, at least one digit, and at least one special character (!@#$%^&*(),.?":{} |<>).',
    description="Creates a new user with the specified username, password, and role. Requires admin role. Password must meet strength requirements.",
)
def create_user(
    request: Request,
@@ -834,7 +823,7 @@ def create_user(
    "/users/{username}",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Delete user",
    description="Deletes a user by username. The built-in admin user cannot be deleted. Requires admin role. Returns success message or error if user not found.",
    description="Deletes a user by username. The built-in admin user cannot be deleted. Requires admin role.",
)
def delete_user(request: Request, username: str):
    # Prevent deletion of the built-in admin user
@@ -851,7 +840,7 @@ def delete_user(request: Request, username: str):
    "/users/{username}/password",
    dependencies=[Depends(allow_any_authenticated())],
    summary="Update user password",
    description="Updates a user's password. Users can only change their own password unless they have admin role. Requires the current password to verify identity for non-admin users. Password must meet strength requirements: minimum 8 characters, at least one uppercase letter, at least one digit, and at least one special character (!@#$%^&*(),.?\":{} |<>). If user changes their own password, a new JWT cookie is automatically issued.",
    description="Updates a user's password. Users can only change their own password unless they have admin role. Requires the current password to verify identity. Password must meet strength requirements (minimum 8 characters, uppercase letter, digit, and special character).",
)
async def update_password(
    request: Request,
@@ -879,9 +868,13 @@ async def update_password(
    except DoesNotExist:
        return JSONResponse(content={"message": "User not found"}, status_code=404)

    # Require old_password when non-admin user is changing any password
    # Admin users changing passwords do NOT need to provide the current password
    if current_role != "admin":
    # Require old_password when:
    # 1. Non-admin user is changing another user's password (admin only action)
    # 2. Any user is changing their own password
    is_changing_own_password = current_username == username
    is_non_admin = current_role != "admin"

    if is_changing_own_password or is_non_admin:
        if not body.old_password:
            return JSONResponse(
                content={"message": "Current password is required"},
@@ -933,7 +926,7 @@ async def update_password(
    "/users/{username}/role",
    dependencies=[Depends(require_role(["admin"]))],
    summary="Update user role",
    description="Updates a user's role. The built-in admin user's role cannot be modified. Requires admin role. Valid roles are defined in the configuration.",
    description="Updates a user's role. The built-in admin user's role cannot be modified. Requires admin role.",
)
async def update_role(
    request: Request,

frigate/api/classification.py

@@ -31,7 +31,6 @@ from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.config.camera import DetectConfig
from frigate.config.classification import ObjectClassificationType
from frigate.const import CLIPS_DIR, FACE_DIR, MODEL_CACHE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event
@@ -40,7 +39,6 @@ from frigate.util.classification import (
    collect_state_classification_examples,
    get_dataset_image_count,
    read_training_metadata,
    write_training_metadata,
)
from frigate.util.file import get_event_snapshot
@@ -624,59 +622,6 @@ def get_classification_dataset(name: str):
    )


@router.get(
    "/classification/attributes",
    summary="Get custom classification attributes",
    description="""Returns custom classification attributes for a given object type.
    Only includes models with classification_type set to 'attribute'.
    By default returns a flat sorted list of all attribute labels.
    If group_by_model is true, returns attributes grouped by model name.""",
)
def get_custom_attributes(
    request: Request, object_type: str = None, group_by_model: bool = False
):
    models_with_attributes = {}

    for (
        model_key,
        model_config,
    ) in request.app.frigate_config.classification.custom.items():
        if (
            not model_config.enabled
            or not model_config.object_config
            or model_config.object_config.classification_type
            != ObjectClassificationType.attribute
        ):
            continue

        model_objects = getattr(model_config.object_config, "objects", []) or []
        if object_type is not None and object_type not in model_objects:
            continue

        dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(model_key), "dataset")
        if not os.path.exists(dataset_dir):
            continue

        attributes = []
        for category_name in os.listdir(dataset_dir):
            category_dir = os.path.join(dataset_dir, category_name)
            if os.path.isdir(category_dir) and category_name != "none":
                attributes.append(category_name)

        if attributes:
            model_name = model_config.name or model_key
            models_with_attributes[model_name] = sorted(attributes)

    if group_by_model:
        return JSONResponse(content=models_with_attributes)
    else:
        # Flatten to a unique sorted list
        all_attributes = set()
        for attributes in models_with_attributes.values():
            all_attributes.update(attributes)
        return JSONResponse(content=sorted(list(all_attributes)))


@router.get(
    "/classification/{name}/train",
    summary="Get classification train images",
@@ -843,12 +788,6 @@ def rename_classification_category(

    try:
        os.rename(old_folder, new_folder)

        # Mark dataset as ready to train by resetting training metadata
        # This ensures the dataset is marked as changed after renaming
        sanitized_name = sanitize_filename(name)
        write_training_metadata(sanitized_name, 0)

        return JSONResponse(
            content=(
                {

frigate/api/defs/query/events_query_parameters.py

@@ -12,7 +12,6 @@ class EventsQueryParams(BaseModel):
    labels: Optional[str] = "all"
    sub_label: Optional[str] = "all"
    sub_labels: Optional[str] = "all"
    attributes: Optional[str] = "all"
    zone: Optional[str] = "all"
    zones: Optional[str] = "all"
    limit: Optional[int] = 100
@@ -59,8 +58,6 @@ class EventsSearchQueryParams(BaseModel):
    limit: Optional[int] = 50
    cameras: Optional[str] = "all"
    labels: Optional[str] = "all"
    sub_labels: Optional[str] = "all"
    attributes: Optional[str] = "all"
    zones: Optional[str] = "all"
    after: Optional[float] = None
    before: Optional[float] = None
@@ -1,8 +1,7 @@
|
||||
from enum import Enum
|
||||
from typing import Optional, Union
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import BaseModel
|
||||
from pydantic.json_schema import SkipJsonSchema
|
||||
|
||||
|
||||
class Extension(str, Enum):
|
||||
@@ -48,15 +47,3 @@ class MediaMjpegFeedQueryParams(BaseModel):
|
||||
mask: Optional[int] = None
|
||||
motion: Optional[int] = None
|
||||
regions: Optional[int] = None
|
||||
|
||||
|
||||
class MediaRecordingsSummaryQueryParams(BaseModel):
|
||||
timezone: str = "utc"
|
||||
cameras: Optional[str] = "all"
|
||||
|
||||
|
||||
class MediaRecordingsAvailabilityQueryParams(BaseModel):
|
||||
cameras: str = "all"
|
||||
before: Union[float, SkipJsonSchema[None]] = None
|
||||
after: Union[float, SkipJsonSchema[None]] = None
|
||||
scale: int = 30
|
||||
|
||||

21 frigate/api/defs/query/recordings_query_parameters.py Normal file

@@ -0,0 +1,21 @@
from typing import Optional, Union

from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema


class MediaRecordingsSummaryQueryParams(BaseModel):
    timezone: str = "utc"
    cameras: Optional[str] = "all"


class MediaRecordingsAvailabilityQueryParams(BaseModel):
    cameras: str = "all"
    before: Union[float, SkipJsonSchema[None]] = None
    after: Union[float, SkipJsonSchema[None]] = None
    scale: int = 30


class RecordingsDeleteQueryParams(BaseModel):
    keep: Optional[str] = None
    cameras: Optional[str] = "all"
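Because these are plain pydantic models, FastAPI can resolve them from the query string with `Depends()`. A minimal self-contained sketch mirroring `RecordingsDeleteQueryParams`:

```python
# Sketch: a pydantic query-parameter model resolved with Depends(),
# mirroring RecordingsDeleteQueryParams above.
from typing import Optional

from fastapi import Depends, FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel


class RecordingsDeleteQueryParams(BaseModel):
    keep: Optional[str] = None
    cameras: Optional[str] = "all"


app = FastAPI()


@app.delete("/recordings")
def delete_recordings(params: RecordingsDeleteQueryParams = Depends()):
    # FastAPI fills the model's fields from the query string.
    return {"keep": params.keep, "cameras": params.cameras}


client = TestClient(app)
assert client.delete("/recordings?cameras=front_door").json() == {
    "keep": None,
    "cameras": "front_door",
}
```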

frigate/api/defs/request/app_body.py

@@ -1,6 +1,6 @@
from typing import Any, Dict, Optional
from typing import Any, Dict, List, Optional

from pydantic import BaseModel
from pydantic import BaseModel, Field


class AppConfigSetBody(BaseModel):
@@ -27,3 +27,16 @@ class AppPostLoginBody(BaseModel):

class AppPutRoleBody(BaseModel):
    role: str


class MediaSyncBody(BaseModel):
    dry_run: bool = Field(
        default=True, description="If True, only report orphans without deleting them"
    )
    media_types: List[str] = Field(
        default=["all"],
        description="Types of media to sync: 'all', 'event_snapshots', 'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'",
    )
    force: bool = Field(
        default=False, description="If True, bypass safety threshold checks"
    )
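For reference, the defaults above mean an empty `POST /media/sync` body requests a non-destructive scan of everything. A sketch of typical payloads:

```python
# Sketch: payloads accepted by MediaSyncBody. The import path comes from the
# diff above; values are illustrative.
from frigate.api.defs.request.app_body import MediaSyncBody

# Defaults: dry_run=True, media_types=["all"], force=False - report only.
report_only = MediaSyncBody()

# Actually delete orphaned exports and previews, keeping safety checks on.
targeted = MediaSyncBody(dry_run=False, media_types=["exports", "previews"])

# Equivalent JSON request body:
# POST /media/sync
# {"dry_run": false, "media_types": ["exports", "previews"], "force": false}
```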

frigate/api/defs/request/events_body.py

@@ -24,13 +24,6 @@ class EventsLPRBody(BaseModel):
    )


class EventsAttributesBody(BaseModel):
    attributes: List[str] = Field(
        title="Selected classification attributes for the event",
        default_factory=list,
    )


class EventsDescriptionBody(BaseModel):
    description: Union[str, None] = Field(title="The description of the event")

35 frigate/api/defs/request/export_case_body.py Normal file

@@ -0,0 +1,35 @@
from typing import Optional

from pydantic import BaseModel, Field


class ExportCaseCreateBody(BaseModel):
    """Request body for creating a new export case."""

    name: str = Field(max_length=100, description="Friendly name of the export case")
    description: Optional[str] = Field(
        default=None, description="Optional description of the export case"
    )


class ExportCaseUpdateBody(BaseModel):
    """Request body for updating an existing export case."""

    name: Optional[str] = Field(
        default=None,
        max_length=100,
        description="Updated friendly name of the export case",
    )
    description: Optional[str] = Field(
        default=None, description="Updated description of the export case"
    )


class ExportCaseAssignBody(BaseModel):
    """Request body for assigning or unassigning an export to a case."""

    export_case_id: Optional[str] = Field(
        default=None,
        max_length=30,
        description="Case ID to assign to the export, or null to unassign",
    )
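A sketch of typical payloads for these models; names and IDs are placeholders:

```python
# Sketch: typical payloads for the export-case bodies above. Names and IDs are
# placeholders.
from frigate.api.defs.request.export_case_body import (
    ExportCaseAssignBody,
    ExportCaseCreateBody,
    ExportCaseUpdateBody,
)

create = ExportCaseCreateBody(
    name="Front door incident", description="Clips collected for the insurance claim"
)
update = ExportCaseUpdateBody(description="Claim filed; keeping clips until closed")
assign = ExportCaseAssignBody(export_case_id="abc123def456")
unassign = ExportCaseAssignBody(export_case_id=None)  # null unassigns the export
```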

frigate/api/defs/request/export_recordings_body.py

@@ -1,4 +1,4 @@
from typing import Union
from typing import Optional, Union

from pydantic import BaseModel, Field
from pydantic.json_schema import SkipJsonSchema
@@ -18,3 +18,9 @@ class ExportRecordingsBody(BaseModel):
    )
    name: str = Field(title="Friendly name", default=None, max_length=256)
    image_path: Union[str, SkipJsonSchema[None]] = None
    export_case_id: Optional[str] = Field(
        default=None,
        title="Export case ID",
        max_length=30,
        description="ID of the export case to assign this export to",
    )

22 frigate/api/defs/response/export_case_response.py Normal file

@@ -0,0 +1,22 @@
from typing import List, Optional

from pydantic import BaseModel, Field


class ExportCaseModel(BaseModel):
    """Model representing a single export case."""

    id: str = Field(description="Unique identifier for the export case")
    name: str = Field(description="Friendly name of the export case")
    description: Optional[str] = Field(
        default=None, description="Optional description of the export case"
    )
    created_at: float = Field(
        description="Unix timestamp when the export case was created"
    )
    updated_at: float = Field(
        description="Unix timestamp when the export case was last updated"
    )


ExportCasesResponse = List[ExportCaseModel]

frigate/api/defs/response/export_response.py

@@ -15,6 +15,9 @@ class ExportModel(BaseModel):
    in_progress: bool = Field(
        description="Whether the export is currently being processed"
    )
    export_case_id: Optional[str] = Field(
        default=None, description="ID of the export case this export belongs to"
    )


class StartExportResponse(BaseModel):

frigate/api/defs/tags.py

@@ -3,13 +3,14 @@ from enum import Enum

class Tags(Enum):
    app = "App"
    auth = "Auth"
    camera = "Camera"
    preview = "Preview"
    events = "Events"
    export = "Export"
    classification = "Classification"
    logs = "Logs"
    media = "Media"
    notifications = "Notifications"
    preview = "Preview"
    recordings = "Recordings"
    review = "Review"
    export = "Export"
    events = "Events"
    classification = "Classification"
    auth = "Auth"

frigate/api/event.py

@@ -37,7 +37,6 @@ from frigate.api.defs.query.regenerate_query_parameters import (
    RegenerateQueryParameters,
)
from frigate.api.defs.request.events_body import (
    EventsAttributesBody,
    EventsCreateBody,
    EventsDeleteBody,
    EventsDescriptionBody,
@@ -56,7 +55,6 @@ from frigate.api.defs.response.event_response import (
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.comms.event_metadata_updater import EventMetadataTypeEnum
from frigate.config.classification import ObjectClassificationType
from frigate.const import CLIPS_DIR, TRIGGER_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event, ReviewSegment, Timeline, Trigger
@@ -101,8 +99,6 @@ def events(
    if sub_labels == "all" and sub_label != "all":
        sub_labels = sub_label

    attributes = unquote(params.attributes)

    zone = params.zone
    zones = params.zones

@@ -191,17 +187,6 @@ def events(
        sub_label_clause = reduce(operator.or_, sub_label_clauses)
        clauses.append((sub_label_clause))

    if attributes != "all":
        # Custom classification results are stored as data[model_name] = result_value
        filtered_attributes = attributes.split(",")
        attribute_clauses = []

        for attr in filtered_attributes:
            attribute_clauses.append(Event.data.cast("text") % f'*:"{attr}"*')

        attribute_clause = reduce(operator.or_, attribute_clauses)
        clauses.append(attribute_clause)

    if recognized_license_plate != "all":
        filtered_recognized_license_plates = recognized_license_plate.split(",")

@@ -507,8 +492,6 @@ def events_search(
    # Filters
    cameras = params.cameras
    labels = params.labels
    sub_labels = params.sub_labels
    attributes = params.attributes
    zones = params.zones
    after = params.after
    before = params.before
@@ -583,38 +566,6 @@ def events_search(
    if labels != "all":
        event_filters.append((Event.label << labels.split(",")))

    if sub_labels != "all":
        # use matching so joined sub labels are included
        # for example a sub label 'bob' would get events
        # with sub labels 'bob' and 'bob, john'
        sub_label_clauses = []
        filtered_sub_labels = sub_labels.split(",")

        if "None" in filtered_sub_labels:
            filtered_sub_labels.remove("None")
            sub_label_clauses.append((Event.sub_label.is_null()))

        for label in filtered_sub_labels:
            sub_label_clauses.append(
                (Event.sub_label.cast("text") == label)
            )  # include exact matches

            # include this label when part of a list
            sub_label_clauses.append((Event.sub_label.cast("text") % f"*{label},*"))
            sub_label_clauses.append((Event.sub_label.cast("text") % f"*, {label}*"))

        event_filters.append((reduce(operator.or_, sub_label_clauses)))

    if attributes != "all":
        # Custom classification results are stored as data[model_name] = result_value
        filtered_attributes = attributes.split(",")
        attribute_clauses = []

        for attr in filtered_attributes:
            attribute_clauses.append(Event.data.cast("text") % f'*:"{attr}"*')

        event_filters.append(reduce(operator.or_, attribute_clauses))

    if zones != "all":
        zone_clauses = []
        filtered_zones = zones.split(",")
@@ -1400,107 +1351,6 @@ async def set_plate(
    )


@router.post(
    "/events/{event_id}/attributes",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Set custom classification attributes",
    description=(
        "Sets an event's custom classification attributes for all attribute-type "
        "models that apply to the event's object type."
    ),
)
async def set_attributes(
    request: Request,
    event_id: str,
    body: EventsAttributesBody,
):
    try:
        event: Event = Event.get(Event.id == event_id)
        await require_camera_access(event.camera, request=request)
    except DoesNotExist:
        return JSONResponse(
            content=({"success": False, "message": f"Event {event_id} not found."}),
            status_code=404,
        )

    object_type = event.label
    selected_attributes = set(body.attributes or [])
    applied_updates: list[dict[str, str | float | None]] = []

    for (
        model_key,
        model_config,
    ) in request.app.frigate_config.classification.custom.items():
        # Only apply to enabled attribute classifiers that target this object type
        if (
            not model_config.enabled
            or not model_config.object_config
            or model_config.object_config.classification_type
            != ObjectClassificationType.attribute
            or object_type not in (model_config.object_config.objects or [])
        ):
            continue

        # Get available labels from dataset directory
        dataset_dir = os.path.join(CLIPS_DIR, sanitize_filename(model_key), "dataset")
        available_labels = set()

        if os.path.exists(dataset_dir):
            for category_name in os.listdir(dataset_dir):
                category_dir = os.path.join(dataset_dir, category_name)
                if os.path.isdir(category_dir):
                    available_labels.add(category_name)

        if not available_labels:
            logger.warning(
                "No dataset found for custom attribute model %s at %s",
                model_key,
                dataset_dir,
            )
            continue

        # Find all selected attributes that apply to this model
        model_name = model_config.name or model_key
        matching_attrs = selected_attributes & available_labels

        if matching_attrs:
            # Publish updates for each selected attribute
            for attr in matching_attrs:
                request.app.event_metadata_updater.publish(
                    (event_id, model_name, attr, 1.0),
                    EventMetadataTypeEnum.attribute.value,
                )
                applied_updates.append(
                    {"model": model_name, "label": attr, "score": 1.0}
                )
        else:
            # Clear this model's attribute
            request.app.event_metadata_updater.publish(
                (event_id, model_name, None, None),
                EventMetadataTypeEnum.attribute.value,
            )
            applied_updates.append({"model": model_name, "label": None, "score": None})

    if len(applied_updates) == 0:
        return JSONResponse(
            content={
                "success": False,
                "message": "No matching attributes found for this object type.",
            },
            status_code=400,
        )

    return JSONResponse(
        content={
            "success": True,
            "message": f"Updated {len(applied_updates)} attribute(s)",
            "applied": applied_updates,
        },
        status_code=200,
    )


@router.post(
    "/events/{event_id}/description",
    response_model=GenericResponse,
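For reference, the removed attribute filters above rely on peewee's `%` operator, which on SQLite compiles to case-sensitive GLOB matching (wildcard `*`) against the JSON-serialized `data` column. A standalone sketch of how such a clause is built:

```python
# Sketch: how the removed attribute filter builds its WHERE clause. On SQLite,
# peewee's `%` operator compiles to GLOB (case-sensitive, `*` wildcard), so the
# pattern matches events whose JSON `data` contains `:"<attr>"`.
import operator
from functools import reduce

from frigate.models import Event

attrs = ["delivery", "mail_truck"]  # placeholder attribute labels
attribute_clauses = [Event.data.cast("text") % f'*:"{attr}"*' for attr in attrs]
attribute_clause = reduce(operator.or_, attribute_clauses)

# e.g. Event.select().where(attribute_clause)
```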

frigate/api/export.py

@@ -4,10 +4,10 @@ import logging
import random
import string
from pathlib import Path
from typing import List
from typing import List, Optional

import psutil
from fastapi import APIRouter, Depends, Request
from fastapi import APIRouter, Depends, Query, Request
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filepath
from peewee import DoesNotExist
@@ -19,8 +19,17 @@ from frigate.api.auth import (
    require_camera_access,
    require_role,
)
from frigate.api.defs.request.export_case_body import (
    ExportCaseAssignBody,
    ExportCaseCreateBody,
    ExportCaseUpdateBody,
)
from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
from frigate.api.defs.request.export_rename_body import ExportRenameBody
from frigate.api.defs.response.export_case_response import (
    ExportCaseModel,
    ExportCasesResponse,
)
from frigate.api.defs.response.export_response import (
    ExportModel,
    ExportsResponse,
@@ -29,7 +38,7 @@ from frigate.api.defs.response.export_response import (
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import CLIPS_DIR, EXPORT_DIR
from frigate.models import Export, Previews, Recordings
from frigate.models import Export, ExportCase, Previews, Recordings
from frigate.record.export import (
    PlaybackFactorEnum,
    PlaybackSourceEnum,
@@ -52,17 +61,182 @@ router = APIRouter(tags=[Tags.export])
)
def get_exports(
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
    export_case_id: Optional[str] = None,
    cameras: Optional[str] = Query(default="all"),
    start_date: Optional[float] = None,
    end_date: Optional[float] = None,
):
    exports = (
        Export.select()
        .where(Export.camera << allowed_cameras)
        .order_by(Export.date.desc())
        .dicts()
        .iterator()
    )
    query = Export.select().where(Export.camera << allowed_cameras)

    if export_case_id is not None:
        if export_case_id == "unassigned":
            query = query.where(Export.export_case.is_null(True))
        else:
            query = query.where(Export.export_case == export_case_id)

    if cameras and cameras != "all":
        requested = set(cameras.split(","))
        filtered_cameras = list(requested.intersection(allowed_cameras))
        if not filtered_cameras:
            return JSONResponse(content=[])
        query = query.where(Export.camera << filtered_cameras)

    if start_date is not None:
        query = query.where(Export.date >= start_date)

    if end_date is not None:
        query = query.where(Export.date <= end_date)

    exports = query.order_by(Export.date.desc()).dicts().iterator()
    return JSONResponse(content=[e for e in exports])
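The rewrite turns the export listing into a filterable query. Example requests; host, camera, and timestamps are placeholders, and the `/exports` route path is assumed from the existing handler:

```python
# Sketch: exercising the new /exports filters. Host, camera, and timestamps are
# placeholders; the route path is assumed from the existing exports handler.
import requests

BASE = "http://frigate.local:5000/api"

resp = requests.get(
    f"{BASE}/exports",
    params={
        "export_case_id": "unassigned",  # or a concrete case id
        "cameras": "front_door",
        "start_date": 1700000000,
        "end_date": 1700086400,
    },
)
resp.raise_for_status()
print(resp.json())  # list of export dicts, newest first
```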
@router.get(
    "/cases",
    response_model=ExportCasesResponse,
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get export cases",
    description="Gets all export cases from the database.",
)
def get_export_cases():
    cases = (
        ExportCase.select().order_by(ExportCase.created_at.desc()).dicts().iterator()
    )
    return JSONResponse(content=[c for c in cases])


@router.post(
    "/cases",
    response_model=ExportCaseModel,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Create export case",
    description="Creates a new export case.",
)
def create_export_case(body: ExportCaseCreateBody):
    case = ExportCase.create(
        id="".join(random.choices(string.ascii_lowercase + string.digits, k=12)),
        name=body.name,
        description=body.description,
        created_at=Path().stat().st_mtime,
        updated_at=Path().stat().st_mtime,
    )
    return JSONResponse(content=model_to_dict(case))
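One design note: `Path().stat().st_mtime` stamps the case with the modification time of the process's working directory, not the current wall-clock time. If a creation timestamp was the intent, a sketch of the conventional alternative:

```python
# Sketch: stamping creation time with the wall clock. Path().stat().st_mtime
# (used above) reads the mtime of the current working directory instead.
import random
import string
import time


def new_case_fields(name: str, description: str | None) -> dict:
    now = time.time()
    return {
        "id": "".join(random.choices(string.ascii_lowercase + string.digits, k=12)),
        "name": name,
        "description": description,
        "created_at": now,
        "updated_at": now,
    }
```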
@router.get(
    "/cases/{case_id}",
    response_model=ExportCaseModel,
    dependencies=[Depends(allow_any_authenticated())],
    summary="Get a single export case",
    description="Gets a specific export case by ID.",
)
def get_export_case(case_id: str):
    try:
        case = ExportCase.get(ExportCase.id == case_id)
        return JSONResponse(content=model_to_dict(case))
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )


@router.patch(
    "/cases/{case_id}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Update export case",
    description="Updates an existing export case.",
)
def update_export_case(case_id: str, body: ExportCaseUpdateBody):
    try:
        case = ExportCase.get(ExportCase.id == case_id)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )

    if body.name is not None:
        case.name = body.name
    if body.description is not None:
        case.description = body.description

    case.save()

    return JSONResponse(
        content={"success": True, "message": "Successfully updated export case."}
    )


@router.delete(
    "/cases/{case_id}",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Delete export case",
    description="""Deletes an export case.
    Exports that reference this case will have their export_case set to null.
    """,
)
def delete_export_case(case_id: str):
    try:
        case = ExportCase.get(ExportCase.id == case_id)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export case not found"},
            status_code=404,
        )

    # Unassign exports from this case but keep the exports themselves
    Export.update(export_case=None).where(Export.export_case == case).execute()

    case.delete_instance()

    return JSONResponse(
        content={"success": True, "message": "Successfully deleted export case."}
    )


@router.patch(
    "/export/{export_id}/case",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
    summary="Assign export to case",
    description=(
        "Assigns an export to a case, or unassigns it if export_case_id is null."
    ),
)
async def assign_export_case(
    export_id: str,
    body: ExportCaseAssignBody,
    request: Request,
):
    try:
        export: Export = Export.get(Export.id == export_id)
        await require_camera_access(export.camera, request=request)
    except DoesNotExist:
        return JSONResponse(
            content={"success": False, "message": "Export not found."},
            status_code=404,
        )

    if body.export_case_id is not None:
        try:
            ExportCase.get(ExportCase.id == body.export_case_id)
        except DoesNotExist:
            return JSONResponse(
                content={"success": False, "message": "Export case not found."},
                status_code=404,
            )
        export.export_case = body.export_case_id
    else:
        export.export_case = None

    export.save()

    return JSONResponse(
        content={"success": True, "message": "Successfully updated export case."}
    )
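Example requests for the assignment endpoint; IDs and host are placeholders:

```python
# Sketch: assigning and unassigning an export. Host and IDs are placeholders.
import requests

BASE = "http://frigate.local:5000/api"

# Assign export "abc123" to case "xyz789":
requests.patch(f"{BASE}/export/abc123/case", json={"export_case_id": "xyz789"})

# Unassign it again (a null export_case_id removes the association):
requests.patch(f"{BASE}/export/abc123/case", json={"export_case_id": None})
```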
@router.post(
    "/export/{camera_name}/start/{start_time}/end/{end_time}",
    response_model=StartExportResponse,
@@ -93,6 +267,16 @@ def export_recording(
    friendly_name = body.name
    existing_image = sanitize_filepath(body.image_path) if body.image_path else None

    export_case_id = body.export_case_id
    if export_case_id is not None:
        try:
            ExportCase.get(ExportCase.id == export_case_id)
        except DoesNotExist:
            return JSONResponse(
                content={"success": False, "message": "Export case not found"},
                status_code=404,
            )

    # Ensure that existing_image is a valid path
    if existing_image and not existing_image.startswith(CLIPS_DIR):
        return JSONResponse(
@@ -161,6 +345,7 @@ def export_recording(
            if playback_source in PlaybackSourceEnum.__members__.values()
            else PlaybackSourceEnum.recordings
        ),
        export_case_id,
    )
    exporter.start()
    return JSONResponse(

frigate/api/fastapi_app.py

@@ -22,6 +22,7 @@ from frigate.api import (
    media,
    notification,
    preview,
    record,
    review,
)
from frigate.api.auth import get_jwt_secret, limiter, require_admin_by_default
@@ -128,6 +129,7 @@ def create_fastapi_app(
    app.include_router(export.router)
    app.include_router(event.router)
    app.include_router(media.router)
    app.include_router(record.router)
    # App Properties
    app.frigate_config = frigate_config
    app.embeddings = embeddings

frigate/api/media.py

@@ -8,9 +8,8 @@ import os
import subprocess as sp
import time
from datetime import datetime, timedelta, timezone
from functools import reduce
from pathlib import Path as FilePath
from typing import Any, List
from typing import Any
from urllib.parse import unquote

import cv2
@@ -19,12 +18,11 @@ import pytz
from fastapi import APIRouter, Depends, Path, Query, Request, Response
from fastapi.responses import FileResponse, JSONResponse, StreamingResponse
from pathvalidate import sanitize_filename
from peewee import DoesNotExist, fn, operator
from peewee import DoesNotExist, fn
from tzlocal import get_localzone_name

from frigate.api.auth import (
    allow_any_authenticated,
    get_allowed_cameras_for_filter,
    require_camera_access,
)
from frigate.api.defs.query.media_query_parameters import (
@@ -32,8 +30,6 @@ from frigate.api.defs.query.media_query_parameters import (
    MediaEventsSnapshotQueryParams,
    MediaLatestFrameQueryParams,
    MediaMjpegFeedQueryParams,
    MediaRecordingsAvailabilityQueryParams,
    MediaRecordingsSummaryQueryParams,
)
from frigate.api.defs.tags import Tags
from frigate.camera.state import CameraState
@@ -44,13 +40,11 @@ from frigate.const import (
    INSTALL_DIR,
    MAX_SEGMENT_DURATION,
    PREVIEW_FRAME_TYPE,
    RECORD_DIR,
)
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.track.object_processing import TrackedObjectProcessor
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.time import get_dst_transitions

logger = logging.getLogger(__name__)
@@ -397,333 +391,6 @@ async def submit_recording_snapshot_to_plus(
    )


@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
    recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
        "storage"
    ][RECORD_DIR]

    if not recording_stats:
        return JSONResponse({})

    total_mb = recording_stats["total"]

    camera_usages: dict[str, dict] = (
        request.app.storage_maintainer.calculate_camera_usages()
    )

    for camera_name in camera_usages.keys():
        if camera_usages.get(camera_name, {}).get("usage"):
            camera_usages[camera_name]["usage_percent"] = (
                camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
            ) * 100

    return JSONResponse(content=camera_usages)


@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
    request: Request,
    params: MediaRecordingsSummaryQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Returns true/false by day indicating if recordings exist"""

    cameras = params.cameras
    if cameras != "all":
        requested = set(unquote(cameras).split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content={})
        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras

    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera << camera_list)
        .dicts()
        .get()
    )

    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")

    if min_time is None or max_time is None:
        return JSONResponse(content={})

    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)

    days: dict[str, bool] = {}

    for period_start, period_end, period_offset in dst_periods:
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"

        period_query = (
            Recordings.select(
                fn.strftime(
                    "%Y-%m-%d",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("day")
            )
            .where(
                (Recordings.camera << camera_list)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .group_by(
                fn.strftime(
                    "%Y-%m-%d",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                )
            )
            .order_by(Recordings.start_time.desc())
            .namedtuples()
        )

        for g in period_query:
            days[g.day] = True

    return JSONResponse(content=dict(sorted(days.items())))
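The hour/minute split above exists because UTC offsets are not always whole hours. A worked sketch for a +9:30 offset such as Australia/Adelaide outside DST:

```python
# Sketch: converting a UTC offset in seconds into the SQLite datetime()
# modifiers used above. A +9:30 offset shows why the minute part is needed.
period_offset = 9 * 3600 + 30 * 60  # 34200 seconds

hours_offset = int(period_offset / 60 / 60)  # 9
minutes_offset = int(period_offset / 60 - hours_offset * 60)  # 30

print(f"{hours_offset} hour", f"{minutes_offset} minute")
# -> '9 hour' '30 minute', i.e. datetime(start_time, 'unixepoch', '9 hour', '30 minute')
# Negative offsets truncate toward zero, yielding e.g. '-4 hour' '-30 minute'.
```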
@router.get(
    "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
    """Returns hourly summary for recordings of given camera"""

    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera == camera_name)
        .dicts()
        .get()
    )

    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")

    days: dict[str, dict] = {}

    if min_time is None or max_time is None:
        return JSONResponse(content=list(days.values()))

    dst_periods = get_dst_transitions(timezone, min_time, max_time)

    for period_start, period_end, period_offset in dst_periods:
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"

        recording_groups = (
            Recordings.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.SUM(Recordings.duration).alias("duration"),
                fn.SUM(Recordings.motion).alias("motion"),
                fn.SUM(Recordings.objects).alias("objects"),
            )
            .where(
                (Recordings.camera == camera_name)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
            .order_by(Recordings.start_time.desc())
            .namedtuples()
        )

        event_groups = (
            Event.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Event.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.COUNT(Event.id).alias("count"),
            )
            .where(Event.camera == camera_name, Event.has_clip)
            .where(
                (Event.start_time >= period_start) & (Event.start_time <= period_end)
            )
            .group_by((Event.start_time + period_offset).cast("int") / 3600)
            .namedtuples()
        )

        event_map = {g.hour: g.count for g in event_groups}

        for recording_group in recording_groups:
            parts = recording_group.hour.split()
            hour = parts[1]
            day = parts[0]
            events_count = event_map.get(recording_group.hour, 0)
            hour_data = {
                "hour": hour,
                "events": events_count,
                "motion": recording_group.motion,
                "objects": recording_group.objects,
                "duration": round(recording_group.duration),
            }
            if day in days:
                # merge counts if already present (edge-case at DST boundary)
                days[day]["events"] += events_count or 0
                days[day]["hours"].append(hour_data)
            else:
                days[day] = {
                    "events": events_count or 0,
                    "hours": [hour_data],
                    "day": day,
                }

    return JSONResponse(content=list(days.values()))
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
|
||||
async def recordings(
|
||||
camera_name: str,
|
||||
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
|
||||
before: float = datetime.now().timestamp(),
|
||||
):
|
||||
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
|
||||
recordings = (
|
||||
Recordings.select(
|
||||
Recordings.id,
|
||||
Recordings.start_time,
|
||||
Recordings.end_time,
|
||||
Recordings.segment_size,
|
||||
Recordings.motion,
|
||||
Recordings.objects,
|
||||
Recordings.duration,
|
||||
)
|
||||
.where(
|
||||
Recordings.camera == camera_name,
|
||||
Recordings.end_time >= after,
|
||||
Recordings.start_time <= before,
|
||||
)
|
||||
.order_by(Recordings.start_time)
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
return JSONResponse(content=list(recordings))
|
||||
|
||||
|
||||
@router.get(
|
||||
"/recordings/unavailable",
|
||||
response_model=list[dict],
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
async def no_recordings(
|
||||
request: Request,
|
||||
params: MediaRecordingsAvailabilityQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Get time ranges with no recordings."""
|
||||
cameras = params.cameras
|
||||
if cameras != "all":
|
||||
requested = set(unquote(cameras).split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
if not filtered:
|
||||
return JSONResponse(content=[])
|
||||
cameras = ",".join(filtered)
|
||||
else:
|
||||
cameras = allowed_cameras
|
||||
|
||||
before = params.before or datetime.datetime.now().timestamp()
|
||||
after = (
|
||||
params.after
|
||||
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
|
||||
)
|
||||
scale = params.scale
|
||||
|
||||
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
|
||||
if cameras != "all":
|
||||
camera_list = cameras.split(",")
|
||||
clauses.append((Recordings.camera << camera_list))
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
# Get recording start times
|
||||
data: list[Recordings] = (
|
||||
Recordings.select(Recordings.start_time, Recordings.end_time)
|
||||
.where(reduce(operator.and_, clauses))
|
||||
.order_by(Recordings.start_time.asc())
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
# Convert recordings to list of (start, end) tuples
|
||||
recordings = [(r["start_time"], r["end_time"]) for r in data]
|
||||
|
||||
# Iterate through time segments and check if each has any recording
|
||||
no_recording_segments = []
|
||||
current = after
|
||||
current_gap_start = None
|
||||
|
||||
while current < before:
|
||||
segment_end = min(current + scale, before)
|
||||
|
||||
# Check if this segment overlaps with any recording
|
||||
has_recording = any(
|
||||
rec_start < segment_end and rec_end > current
|
||||
for rec_start, rec_end in recordings
|
||||
)
|
||||
|
||||
if not has_recording:
|
||||
# This segment has no recordings
|
||||
if current_gap_start is None:
|
||||
current_gap_start = current # Start a new gap
|
||||
else:
|
||||
# This segment has recordings
|
||||
if current_gap_start is not None:
|
||||
# End the current gap and append it
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(current)}
|
||||
)
|
||||
current_gap_start = None
|
||||
|
||||
current = segment_end
|
||||
|
||||
# Append the last gap if it exists
|
||||
if current_gap_start is not None:
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(before)}
|
||||
)
|
||||
|
||||
return JSONResponse(content=no_recording_segments)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
|
||||
dependencies=[Depends(require_camera_access)],
|
||||
|
||||
479
frigate/api/record.py
Normal file
479
frigate/api/record.py
Normal file
@@ -0,0 +1,479 @@
"""Recording APIs."""

import logging
from datetime import datetime, timedelta
from functools import reduce
from pathlib import Path
from typing import List
from urllib.parse import unquote

from fastapi import APIRouter, Depends, Request
from fastapi import Path as PathParam
from fastapi.responses import JSONResponse
from peewee import fn, operator

from frigate.api.auth import (
    allow_any_authenticated,
    get_allowed_cameras_for_filter,
    require_camera_access,
    require_role,
)
from frigate.api.defs.query.recordings_query_parameters import (
    MediaRecordingsAvailabilityQueryParams,
    MediaRecordingsSummaryQueryParams,
    RecordingsDeleteQueryParams,
)
from frigate.api.defs.response.generic_response import GenericResponse
from frigate.api.defs.tags import Tags
from frigate.const import RECORD_DIR
from frigate.models import Event, Recordings
from frigate.util.time import get_dst_transitions

logger = logging.getLogger(__name__)

router = APIRouter(tags=[Tags.recordings])


@router.get("/recordings/storage", dependencies=[Depends(allow_any_authenticated())])
def get_recordings_storage_usage(request: Request):
    recording_stats = request.app.stats_emitter.get_latest_stats()["service"][
        "storage"
    ][RECORD_DIR]

    if not recording_stats:
        return JSONResponse({})

    total_mb = recording_stats["total"]

    camera_usages: dict[str, dict] = (
        request.app.storage_maintainer.calculate_camera_usages()
    )

    for camera_name in camera_usages.keys():
        if camera_usages.get(camera_name, {}).get("usage"):
            camera_usages[camera_name]["usage_percent"] = (
                camera_usages.get(camera_name, {}).get("usage", 0) / total_mb
            ) * 100

    return JSONResponse(content=camera_usages)


@router.get("/recordings/summary", dependencies=[Depends(allow_any_authenticated())])
def all_recordings_summary(
    request: Request,
    params: MediaRecordingsSummaryQueryParams = Depends(),
    allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
):
    """Returns true/false by day indicating if recordings exist"""

    cameras = params.cameras
    if cameras != "all":
        requested = set(unquote(cameras).split(","))
        filtered = requested.intersection(allowed_cameras)
        if not filtered:
            return JSONResponse(content={})
        camera_list = list(filtered)
    else:
        camera_list = allowed_cameras

    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera << camera_list)
        .dicts()
        .get()
    )

    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")

    if min_time is None or max_time is None:
        return JSONResponse(content={})

    dst_periods = get_dst_transitions(params.timezone, min_time, max_time)

    days: dict[str, bool] = {}

    for period_start, period_end, period_offset in dst_periods:
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"
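        # SQLite's datetime() takes separate hour and minute modifiers, so the
        # UTC offset is split to handle non-whole-hour timezones (e.g. UTC+05:30).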

        period_query = (
            Recordings.select(
                fn.strftime(
                    "%Y-%m-%d",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("day")
            )
            .where(
                (Recordings.camera << camera_list)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .group_by(
                fn.strftime(
                    "%Y-%m-%d",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                )
            )
            .order_by(Recordings.start_time.desc())
            .namedtuples()
        )

        for g in period_query:
            days[g.day] = True

    return JSONResponse(content=dict(sorted(days.items())))


@router.get(
    "/{camera_name}/recordings/summary", dependencies=[Depends(require_camera_access)]
)
async def recordings_summary(camera_name: str, timezone: str = "utc"):
    """Returns hourly summary for recordings of given camera"""

    time_range_query = (
        Recordings.select(
            fn.MIN(Recordings.start_time).alias("min_time"),
            fn.MAX(Recordings.start_time).alias("max_time"),
        )
        .where(Recordings.camera == camera_name)
        .dicts()
        .get()
    )

    min_time = time_range_query.get("min_time")
    max_time = time_range_query.get("max_time")

    days: dict[str, dict] = {}

    if min_time is None or max_time is None:
        return JSONResponse(content=list(days.values()))

    dst_periods = get_dst_transitions(timezone, min_time, max_time)

    for period_start, period_end, period_offset in dst_periods:
        hours_offset = int(period_offset / 60 / 60)
        minutes_offset = int(period_offset / 60 - hours_offset * 60)
        period_hour_modifier = f"{hours_offset} hour"
        period_minute_modifier = f"{minutes_offset} minute"

        recording_groups = (
            Recordings.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Recordings.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.SUM(Recordings.duration).alias("duration"),
                fn.SUM(Recordings.motion).alias("motion"),
                fn.SUM(Recordings.objects).alias("objects"),
            )
            .where(
                (Recordings.camera == camera_name)
                & (Recordings.end_time >= period_start)
                & (Recordings.start_time <= period_end)
            )
            .group_by((Recordings.start_time + period_offset).cast("int") / 3600)
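            # Integer division of the offset-adjusted timestamp buckets rows
            # into local-time hours.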
            .order_by(Recordings.start_time.desc())
            .namedtuples()
        )

        event_groups = (
            Event.select(
                fn.strftime(
                    "%Y-%m-%d %H",
                    fn.datetime(
                        Event.start_time,
                        "unixepoch",
                        period_hour_modifier,
                        period_minute_modifier,
                    ),
                ).alias("hour"),
                fn.COUNT(Event.id).alias("count"),
            )
            .where(Event.camera == camera_name, Event.has_clip)
            .where(
                (Event.start_time >= period_start) & (Event.start_time <= period_end)
            )
            .group_by((Event.start_time + period_offset).cast("int") / 3600)
            .namedtuples()
        )

        event_map = {g.hour: g.count for g in event_groups}

        for recording_group in recording_groups:
            parts = recording_group.hour.split()
            hour = parts[1]
            day = parts[0]
            events_count = event_map.get(recording_group.hour, 0)
            hour_data = {
                "hour": hour,
                "events": events_count,
                "motion": recording_group.motion,
                "objects": recording_group.objects,
                "duration": round(recording_group.duration),
            }
            if day in days:
                # merge counts if already present (edge-case at DST boundary)
                days[day]["events"] += events_count or 0
                days[day]["hours"].append(hour_data)
            else:
                days[day] = {
                    "events": events_count or 0,
                    "hours": [hour_data],
                    "day": day,
                }

    return JSONResponse(content=list(days.values()))

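# NOTE: the default 'after'/'before' values below are evaluated once at import
# time, so they only serve as a fallback when the query parameters are omitted.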
@router.get("/{camera_name}/recordings", dependencies=[Depends(require_camera_access)])
|
||||
async def recordings(
|
||||
camera_name: str,
|
||||
after: float = (datetime.now() - timedelta(hours=1)).timestamp(),
|
||||
before: float = datetime.now().timestamp(),
|
||||
):
|
||||
"""Return specific camera recordings between the given 'after'/'end' times. If not provided the last hour will be used"""
|
||||
recordings = (
|
||||
Recordings.select(
|
||||
Recordings.id,
|
||||
Recordings.start_time,
|
||||
Recordings.end_time,
|
||||
Recordings.segment_size,
|
||||
Recordings.motion,
|
||||
Recordings.objects,
|
||||
Recordings.duration,
|
||||
)
|
||||
.where(
|
||||
Recordings.camera == camera_name,
|
||||
Recordings.end_time >= after,
|
||||
Recordings.start_time <= before,
|
||||
)
|
||||
.order_by(Recordings.start_time)
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
return JSONResponse(content=list(recordings))
|
||||
|
||||
|
||||
@router.get(
|
||||
"/recordings/unavailable",
|
||||
response_model=list[dict],
|
||||
dependencies=[Depends(allow_any_authenticated())],
|
||||
)
|
||||
async def no_recordings(
|
||||
request: Request,
|
||||
params: MediaRecordingsAvailabilityQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Get time ranges with no recordings."""
|
||||
cameras = params.cameras
|
||||
if cameras != "all":
|
||||
requested = set(unquote(cameras).split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
if not filtered:
|
||||
return JSONResponse(content=[])
|
||||
cameras = ",".join(filtered)
|
||||
else:
|
||||
cameras = allowed_cameras
|
||||
|
||||
before = params.before or datetime.datetime.now().timestamp()
|
||||
after = (
|
||||
params.after
|
||||
or (datetime.datetime.now() - datetime.timedelta(hours=1)).timestamp()
|
||||
)
|
||||
scale = params.scale
|
||||
|
||||
clauses = [(Recordings.end_time >= after) & (Recordings.start_time <= before)]
|
||||
if cameras != "all":
|
||||
camera_list = cameras.split(",")
|
||||
clauses.append((Recordings.camera << camera_list))
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
# Get recording start times
|
||||
data: list[Recordings] = (
|
||||
Recordings.select(Recordings.start_time, Recordings.end_time)
|
||||
.where(reduce(operator.and_, clauses))
|
||||
.order_by(Recordings.start_time.asc())
|
||||
.dicts()
|
||||
.iterator()
|
||||
)
|
||||
|
||||
# Convert recordings to list of (start, end) tuples
|
||||
recordings = [(r["start_time"], r["end_time"]) for r in data]
|
||||
|
||||
# Iterate through time segments and check if each has any recording
|
||||
no_recording_segments = []
|
||||
current = after
|
||||
current_gap_start = None
|
||||
|
||||
while current < before:
|
||||
segment_end = min(current + scale, before)
|
||||
|
||||
# Check if this segment overlaps with any recording
|
||||
has_recording = any(
|
||||
rec_start < segment_end and rec_end > current
|
||||
for rec_start, rec_end in recordings
|
||||
)
|
||||
|
||||
if not has_recording:
|
||||
# This segment has no recordings
|
||||
if current_gap_start is None:
|
||||
current_gap_start = current # Start a new gap
|
||||
else:
|
||||
# This segment has recordings
|
||||
if current_gap_start is not None:
|
||||
# End the current gap and append it
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(current)}
|
||||
)
|
||||
current_gap_start = None
|
||||
|
||||
current = segment_end
|
||||
|
||||
# Append the last gap if it exists
|
||||
if current_gap_start is not None:
|
||||
no_recording_segments.append(
|
||||
{"start_time": int(current_gap_start), "end_time": int(before)}
|
||||
)
|
||||
|
||||
return JSONResponse(content=no_recording_segments)
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/recordings/start/{start}/end/{end}",
|
||||
response_model=GenericResponse,
|
||||
dependencies=[Depends(require_role(["admin"]))],
|
||||
summary="Delete recordings",
|
||||
description="""Deletes recordings within the specified time range.
|
||||
Recordings can be filtered by cameras and kept based on motion, objects, or audio attributes.
|
||||
""",
|
||||
)
|
||||
async def delete_recordings(
|
||||
start: float = PathParam(..., description="Start timestamp (unix)"),
|
||||
end: float = PathParam(..., description="End timestamp (unix)"),
|
||||
params: RecordingsDeleteQueryParams = Depends(),
|
||||
allowed_cameras: List[str] = Depends(get_allowed_cameras_for_filter),
|
||||
):
|
||||
"""Delete recordings in the specified time range."""
|
||||
if start >= end:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Start time must be less than end time.",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
cameras = params.cameras
|
||||
|
||||
if cameras != "all":
|
||||
requested = set(cameras.split(","))
|
||||
filtered = requested.intersection(allowed_cameras)
|
||||
|
||||
if not filtered:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "No valid cameras found in the request.",
|
||||
},
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
camera_list = list(filtered)
|
||||
else:
|
||||
camera_list = allowed_cameras
|
||||
|
||||
# Parse keep parameter
|
||||
keep_set = set()
|
||||
|
||||
if params.keep:
|
||||
keep_set = set(params.keep.split(","))
|
||||
|
||||
# Build query to find overlapping recordings
|
||||
clauses = [
|
||||
(
|
||||
Recordings.start_time.between(start, end)
|
||||
| Recordings.end_time.between(start, end)
|
||||
| ((start > Recordings.start_time) & (end < Recordings.end_time))
|
||||
),
|
||||
(Recordings.camera << camera_list),
|
||||
]
|
||||
|
||||
keep_clauses = []
|
||||
|
||||
if "motion" in keep_set:
|
||||
keep_clauses.append(Recordings.motion.is_null(False) & (Recordings.motion > 0))
|
||||
|
||||
if "object" in keep_set:
|
||||
keep_clauses.append(
|
||||
Recordings.objects.is_null(False) & (Recordings.objects > 0)
|
||||
)
|
||||
|
||||
if "audio" in keep_set:
|
||||
keep_clauses.append(Recordings.dBFS.is_null(False))
|
||||
|
||||
if keep_clauses:
|
||||
keep_condition = reduce(operator.or_, keep_clauses)
|
||||
clauses.append(~keep_condition)
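        # Negate the combined keep condition so only segments matching none of
        # the requested keep attributes are deleted.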

    recordings_to_delete = (
        Recordings.select(Recordings.id, Recordings.path)
        .where(reduce(operator.and_, clauses))
        .dicts()
        .iterator()
    )

    recording_ids = []
    deleted_count = 0
    error_count = 0

    for recording in recordings_to_delete:
        recording_ids.append(recording["id"])

        try:
            Path(recording["path"]).unlink(missing_ok=True)
            deleted_count += 1
        except Exception as e:
            logger.error(f"Failed to delete recording file {recording['path']}: {e}")
            error_count += 1

    if recording_ids:
        max_deletes = 100000
        recording_ids_list = list(recording_ids)

        for i in range(0, len(recording_ids_list), max_deletes):
            Recordings.delete().where(
                Recordings.id << recording_ids_list[i : i + max_deletes]
            ).execute()
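            # Database rows are removed in batches so a single query never
            # carries an enormous IN list.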

    message = f"Successfully deleted {deleted_count} recording(s)."

    if error_count > 0:
        message += f" {error_count} file deletion error(s) occurred."

    return JSONResponse(
        content={"success": True, "message": message},
        status_code=200,
    )

@@ -100,10 +100,6 @@ class FrigateApp:
            )
        if (
            config.semantic_search.enabled
            or any(
                c.objects.genai.enabled or c.review.genai.enabled
                for c in config.cameras.values()
            )
            or config.lpr.enabled
            or config.face_recognition.enabled
            or len(config.classification.custom) > 0

@@ -19,6 +19,8 @@ class CameraMetrics:
    process_pid: Synchronized
    capture_process_pid: Synchronized
    ffmpeg_pid: Synchronized
    reconnects_last_hour: Synchronized
    stalls_last_hour: Synchronized

    def __init__(self, manager: SyncManager):
        self.camera_fps = manager.Value("d", 0)
@@ -35,6 +37,8 @@ class CameraMetrics:
        self.process_pid = manager.Value("i", 0)
        self.capture_process_pid = manager.Value("i", 0)
        self.ffmpeg_pid = manager.Value("i", 0)
        self.reconnects_last_hour = manager.Value("i", 0)
        self.stalls_last_hour = manager.Value("i", 0)


class PTZMetrics:

@@ -28,6 +28,7 @@ from frigate.const import (
    UPDATE_CAMERA_ACTIVITY,
    UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
    UPDATE_EVENT_DESCRIPTION,
    UPDATE_JOB_STATE,
    UPDATE_MODEL_STATE,
    UPDATE_REVIEW_DESCRIPTION,
    UPSERT_REVIEW_SEGMENT,
@@ -60,6 +61,7 @@ class Dispatcher:
        self.camera_activity = CameraActivityManager(config, self.publish)
        self.audio_activity = AudioActivityManager(config, self.publish)
        self.model_state: dict[str, ModelStatusTypesEnum] = {}
        self.job_state: dict[str, dict[str, Any]] = {}  # {job_type: job_data}
        self.embeddings_reindex: dict[str, Any] = {}
        self.birdseye_layout: dict[str, Any] = {}
        self.audio_transcription_state: str = "idle"
@@ -180,6 +182,19 @@ class Dispatcher:
        def handle_model_state() -> None:
            self.publish("model_state", json.dumps(self.model_state.copy()))

        def handle_update_job_state() -> None:
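            # Cache the latest payload per job type so handle_job_state can
            # replay current state to clients that subscribe later.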
            if payload and isinstance(payload, dict):
                job_type = payload.get("job_type")
                if job_type:
                    self.job_state[job_type] = payload
                    self.publish(
                        "job_state",
                        json.dumps(self.job_state),
                    )

        def handle_job_state() -> None:
            self.publish("job_state", json.dumps(self.job_state.copy()))

        def handle_update_audio_transcription_state() -> None:
            if payload:
                self.audio_transcription_state = payload
@@ -277,6 +292,7 @@ class Dispatcher:
            UPDATE_EVENT_DESCRIPTION: handle_update_event_description,
            UPDATE_REVIEW_DESCRIPTION: handle_update_review_description,
            UPDATE_MODEL_STATE: handle_update_model_state,
            UPDATE_JOB_STATE: handle_update_job_state,
            UPDATE_EMBEDDINGS_REINDEX_PROGRESS: handle_update_embeddings_reindex_progress,
            UPDATE_BIRDSEYE_LAYOUT: handle_update_birdseye_layout,
            UPDATE_AUDIO_TRANSCRIPTION_STATE: handle_update_audio_transcription_state,
@@ -284,6 +300,7 @@ class Dispatcher:
            "restart": handle_restart,
            "embeddingsReindexProgress": handle_embeddings_reindex_progress,
            "modelState": handle_model_state,
            "jobState": handle_job_state,
            "audioTranscriptionState": handle_audio_transcription_state,
            "birdseyeLayout": handle_birdseye_layout,
            "onConnect": handle_on_connect,

@@ -225,8 +225,7 @@ class MqttClient(Communicator):
            "birdseye_mode",
            "review_alerts",
            "review_detections",
            "object_descriptions",
            "review_descriptions",
            "genai",
        ]

        for name in self.config.cameras.keys():

@@ -14,6 +14,7 @@ class GenAIProviderEnum(str, Enum):
    azure_openai = "azure_openai"
    gemini = "gemini"
    ollama = "ollama"
    llamacpp = "llamacpp"


class GenAIConfig(FrigateBaseModel):

@@ -1,5 +1,5 @@
from enum import Enum
from typing import Optional
from typing import Optional, Union

from pydantic import Field

@@ -70,13 +70,13 @@ class RecordExportConfig(FrigateBaseModel):
    timelapse_args: str = Field(
        default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args"
    )
    hwaccel_args: Union[str, list[str]] = Field(
        default="auto", title="Export-specific FFmpeg hardware acceleration arguments."
    )


class RecordConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Enable record on all cameras.")
    sync_recordings: bool = Field(
        default=False, title="Sync recordings with disk on startup and once a day."
    )
    expire_interval: int = Field(
        default=60,
        title="Number of minutes to wait between cleanup runs.",

@@ -523,6 +523,14 @@ class FrigateConfig(FrigateBaseModel):
            if camera_config.ffmpeg.hwaccel_args == "auto":
                camera_config.ffmpeg.hwaccel_args = self.ffmpeg.hwaccel_args

            # Resolve export hwaccel_args: camera export -> camera ffmpeg -> global ffmpeg
            # This allows per-camera override for exports (e.g., when camera resolution
            # exceeds hardware encoder limits)
            if camera_config.record.export.hwaccel_args == "auto":
                camera_config.record.export.hwaccel_args = (
                    camera_config.ffmpeg.hwaccel_args
                )

            for input in camera_config.ffmpeg.inputs:
                need_detect_dimensions = "detect" in input.roles and (
                    camera_config.detect.height is None
@@ -77,9 +77,6 @@ FFMPEG_HWACCEL_RKMPP = "preset-rkmpp"
FFMPEG_HWACCEL_AMF = "preset-amd-amf"
FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]

# RKNN constants
SUPPORTED_RK_SOCS = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]

# Regex constants

REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"
@@ -122,6 +119,7 @@ UPDATE_REVIEW_DESCRIPTION = "update_review_description"
UPDATE_MODEL_STATE = "update_model_state"
UPDATE_EMBEDDINGS_REINDEX_PROGRESS = "handle_embeddings_reindex_progress"
UPDATE_BIRDSEYE_LAYOUT = "update_birdseye_layout"
UPDATE_JOB_STATE = "update_job_state"
NOTIFICATION_TEST = "notification_test"

# IO Nice Values

@@ -374,9 +374,6 @@ class LicensePlateProcessingMixin:
                        combined_plate = re.sub(
                            pattern, replacement, combined_plate
                        )
                        logger.debug(
                            f"{camera}: Processing replace rule: '{pattern}' -> '{replacement}', result: '{combined_plate}'"
                        )
                    except re.error as e:
                        logger.warning(
                            f"{camera}: Invalid regex in replace_rules '{pattern}': {e}"
@@ -384,7 +381,7 @@ class LicensePlateProcessingMixin:

                if combined_plate != original_combined:
                    logger.debug(
                        f"{camera}: All rules applied: '{original_combined}' -> '{combined_plate}'"
                        f"{camera}: Rules applied: '{original_combined}' -> '{combined_plate}'"
                    )

        # Compute the combined area for qualifying boxes

@@ -131,9 +131,8 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):
                },
            )

            # Embed the description if semantic search is enabled
            if self.config.semantic_search.enabled:
                self.embeddings.embed_description(event_id, transcription)
            # Embed the description
            self.embeddings.embed_description(event_id, transcription)

        except DoesNotExist:
            logger.debug("No recording found for audio transcription post-processing")

@@ -131,8 +131,6 @@ class ObjectDescriptionProcessor(PostProcessorApi):
            )
        ):
            self._process_genai_description(event, camera_config, thumbnail)
        else:
            self.cleanup_event(event.id)

    def __regenerate_description(self, event_id: str, source: str, force: bool) -> None:
        """Regenerate the description for an event."""
@@ -206,17 +204,6 @@ class ObjectDescriptionProcessor(PostProcessorApi):
            )
            return None

    def cleanup_event(self, event_id: str) -> None:
        """Clean up tracked event data to prevent memory leaks.

        This should be called when an event ends, regardless of whether
        genai processing is triggered.
        """
        if event_id in self.tracked_events:
            del self.tracked_events[event_id]
        if event_id in self.early_request_sent:
            del self.early_request_sent[event_id]

    def _read_and_crop_snapshot(self, event: Event) -> bytes | None:
        """Read, decode, and crop the snapshot image."""

@@ -312,8 +299,9 @@ class ObjectDescriptionProcessor(PostProcessorApi):
                ),
            ).start()

        # Clean up tracked events and early request state
        self.cleanup_event(event.id)
        # Delete tracked events based on the event_id
        if event.id in self.tracked_events:
            del self.tracked_events[event.id]

    def _genai_embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
        """Embed the description for an event."""

@@ -311,7 +311,6 @@ class ReviewDescriptionProcessor(PostProcessorApi):
                start_ts,
                end_ts,
                events_with_context,
                self.config.review.genai.preferred_language,
                self.config.review.genai.debug_save_thumbnails,
            )
        else:
@@ -13,7 +13,7 @@ from frigate.comms.event_metadata_updater import (
)
from frigate.config import FrigateConfig
from frigate.const import MODEL_CACHE_DIR
from frigate.log import suppress_stderr_during
from frigate.log import redirect_output_to_logger
from frigate.util.object import calculate_region

from ..types import DataProcessorMetrics
@@ -80,14 +80,13 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
        except Exception as e:
            logger.error(f"Failed to download {path}: {e}")

    @redirect_output_to_logger(logger, logging.DEBUG)
    def __build_detector(self) -> None:
        # Suppress TFLite delegate creation messages that bypass Python logging
        with suppress_stderr_during("tflite_interpreter_init"):
            self.interpreter = Interpreter(
                model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
                num_threads=2,
            )
            self.interpreter.allocate_tensors()
        self.interpreter = Interpreter(
            model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
            num_threads=2,
        )
        self.interpreter.allocate_tensors()
        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()


@@ -21,7 +21,7 @@ from frigate.config.classification import (
    ObjectClassificationType,
)
from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR
from frigate.log import suppress_stderr_during
from frigate.log import redirect_output_to_logger
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels
from frigate.util.object import box_overlaps, calculate_region
@@ -52,7 +52,7 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
        self.requestor = requestor
        self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
        self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
        self.interpreter: Interpreter = None
        self.interpreter: Interpreter | None = None
        self.tensor_input_details: dict[str, Any] | None = None
        self.tensor_output_details: dict[str, Any] | None = None
        self.labelmap: dict[int, str] = {}
@@ -72,12 +72,8 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
        self.last_run = datetime.datetime.now().timestamp()
        self.__build_detector()

    @redirect_output_to_logger(logger, logging.DEBUG)
    def __build_detector(self) -> None:
        try:
            from tflite_runtime.interpreter import Interpreter
        except ModuleNotFoundError:
            from tensorflow.lite.python.interpreter import Interpreter

        model_path = os.path.join(self.model_dir, "model.tflite")
        labelmap_path = os.path.join(self.model_dir, "labelmap.txt")

@@ -88,13 +84,11 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
            self.labelmap = {}
            return

        # Suppress TFLite delegate creation messages that bypass Python logging
        with suppress_stderr_during("tflite_interpreter_init"):
            self.interpreter = Interpreter(
                model_path=model_path,
                num_threads=2,
            )
            self.interpreter.allocate_tensors()
        self.interpreter = Interpreter(
            model_path=model_path,
            num_threads=2,
        )
        self.interpreter.allocate_tensors()
        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()
        self.labelmap = load_labels(labelmap_path, prefill=0)
@@ -230,34 +224,28 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
        if not should_run:
            return

        x, y, x2, y2 = calculate_region(
            frame.shape,
            crop[0],
            crop[1],
            crop[2],
            crop[3],
            224,
            1.0,
        )

        rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
        height, width = rgb.shape[:2]
        frame = rgb[
            y:y2,
            x:x2,
        ]

        # Convert normalized crop coordinates to pixel values
        x1 = int(camera_config.crop[0] * width)
        y1 = int(camera_config.crop[1] * height)
        x2 = int(camera_config.crop[2] * width)
        y2 = int(camera_config.crop[3] * height)

        # Clip coordinates to frame boundaries
        x1 = max(0, min(x1, width))
        y1 = max(0, min(y1, height))
        x2 = max(0, min(x2, width))
        y2 = max(0, min(y2, height))

        if x2 <= x1 or y2 <= y1:
            logger.warning(
                f"Invalid crop coordinates for {camera}: [{x1}, {y1}, {x2}, {y2}]"
            )
            return

        frame = rgb[y1:y2, x1:x2]

        try:
            resized_frame = cv2.resize(frame, (224, 224))
        except Exception:
            logger.warning("Failed to resize image for state classification")
            return
        if frame.shape != (224, 224):
            try:
                resized_frame = cv2.resize(frame, (224, 224))
            except Exception:
                logger.warning("Failed to resize image for state classification")
                return

        if self.interpreter is None:
            # When interpreter is None, always save (score is 0.0, which is < 1.0)
@@ -357,7 +345,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        self.model_config = model_config
        self.model_dir = os.path.join(MODEL_CACHE_DIR, self.model_config.name)
        self.train_dir = os.path.join(CLIPS_DIR, self.model_config.name, "train")
        self.interpreter: Interpreter = None
        self.interpreter: Interpreter | None = None
        self.sub_label_publisher = sub_label_publisher
        self.requestor = requestor
        self.tensor_input_details: dict[str, Any] | None = None
@@ -378,6 +366,7 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):

        self.__build_detector()

    @redirect_output_to_logger(logger, logging.DEBUG)
    def __build_detector(self) -> None:
        model_path = os.path.join(self.model_dir, "model.tflite")
        labelmap_path = os.path.join(self.model_dir, "labelmap.txt")
@@ -389,13 +378,11 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
            self.labelmap = {}
            return

        # Suppress TFLite delegate creation messages that bypass Python logging
        with suppress_stderr_during("tflite_interpreter_init"):
            self.interpreter = Interpreter(
                model_path=model_path,
                num_threads=2,
            )
            self.interpreter.allocate_tensors()
        self.interpreter = Interpreter(
            model_path=model_path,
            num_threads=2,
        )
        self.interpreter.allocate_tensors()
        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()
        self.labelmap = load_labels(labelmap_path, prefill=0)
@@ -521,13 +508,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
                0.0,
                max_files=save_attempts,
            )

            # Still track history even when model doesn't exist to respect MAX_OBJECT_CLASSIFICATIONS
            # Add an entry with "unknown" label so the history limit is enforced
            if object_id not in self.classification_history:
                self.classification_history[object_id] = []

            self.classification_history[object_id].append(("unknown", 0.0, now))
            return

        input = np.expand_dims(resized_crop, axis=0)
@@ -669,5 +649,5 @@ def write_classification_attempt(

        if len(files) > max_files:
            os.unlink(os.path.join(folder, files[-1]))
    except (FileNotFoundError, OSError):
    except FileNotFoundError:
        pass

@@ -131,7 +131,6 @@ class ONNXModelRunner(BaseModelRunner):

        return model_type in [
            EnrichmentModelTypeEnum.paddleocr.value,
            EnrichmentModelTypeEnum.yolov9_license_plate.value,
            EnrichmentModelTypeEnum.jina_v1.value,
            EnrichmentModelTypeEnum.jina_v2.value,
            EnrichmentModelTypeEnum.facenet.value,
@@ -170,7 +169,6 @@ class CudaGraphRunner(BaseModelRunner):

        return model_type not in [
            ModelTypeEnum.yolonas.value,
            ModelTypeEnum.dfine.value,
            EnrichmentModelTypeEnum.paddleocr.value,
            EnrichmentModelTypeEnum.jina_v1.value,
            EnrichmentModelTypeEnum.jina_v2.value,
@@ -5,7 +5,7 @@ from typing_extensions import Literal

from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.log import suppress_stderr_during
from frigate.log import redirect_output_to_logger

from ..detector_utils import tflite_detect_raw, tflite_init

@@ -28,13 +28,12 @@ class CpuDetectorConfig(BaseDetectorConfig):
class CpuTfl(DetectionApi):
    type_key = DETECTOR_KEY

    @redirect_output_to_logger(logger, logging.DEBUG)
    def __init__(self, detector_config: CpuDetectorConfig):
        # Suppress TFLite delegate creation messages that bypass Python logging
        with suppress_stderr_during("tflite_interpreter_init"):
            interpreter = Interpreter(
                model_path=detector_config.model.path,
                num_threads=detector_config.num_threads or 3,
            )
        interpreter = Interpreter(
            model_path=detector_config.model.path,
            num_threads=detector_config.num_threads or 3,
        )

        tflite_init(self, interpreter)


@@ -8,7 +8,7 @@ import cv2
import numpy as np
from pydantic import Field

from frigate.const import MODEL_CACHE_DIR, SUPPORTED_RK_SOCS
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detection_runners import RKNNModelRunner
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
@@ -19,6 +19,8 @@ logger = logging.getLogger(__name__)

DETECTOR_KEY = "rknn"

supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]

supported_models = {
    ModelTypeEnum.yologeneric: "^frigate-fp16-yolov9-[cemst]$",
    ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$",
@@ -80,9 +82,9 @@ class Rknn(DetectionApi):
        except FileNotFoundError:
            raise Exception("Make sure to run docker in privileged mode.")

        if soc not in SUPPORTED_RK_SOCS:
        if soc not in supported_socs:
            raise Exception(
                f"Your SoC is not supported. Your SoC is: {soc}. Currently these SoCs are supported: {SUPPORTED_RK_SOCS}."
                f"Your SoC is not supported. Your SoC is: {soc}. Currently these SoCs are supported: {supported_socs}."
            )

        return soc

@@ -522,8 +522,6 @@ class EmbeddingMaintainer(threading.Thread):
                    )
                elif isinstance(processor, ObjectDescriptionProcessor):
                    if not updated_db:
                        # Still need to cleanup tracked events even if not processing
                        processor.cleanup_event(event_id)
                        continue

                    processor.process_data(

@@ -8,7 +8,7 @@ import numpy as np
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_runners import get_optimized_runner
from frigate.embeddings.types import EnrichmentModelTypeEnum
from frigate.log import suppress_stderr_during
from frigate.log import redirect_output_to_logger
from frigate.util.downloader import ModelDownloader

from ...config import FaceRecognitionConfig
@@ -57,18 +57,17 @@ class FaceNetEmbedding(BaseEmbedding):
            self._load_model_and_utils()
            logger.debug(f"models are already downloaded for {self.model_name}")

    @redirect_output_to_logger(logger, logging.DEBUG)
    def _load_model_and_utils(self):
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()

            # Suppress TFLite delegate creation messages that bypass Python logging
            with suppress_stderr_during("tflite_interpreter_init"):
                self.runner = Interpreter(
                    model_path=os.path.join(MODEL_CACHE_DIR, "facedet/facenet.tflite"),
                    num_threads=2,
                )
                self.runner.allocate_tensors()
            self.runner = Interpreter(
                model_path=os.path.join(MODEL_CACHE_DIR, "facedet/facenet.tflite"),
                num_threads=2,
            )
            self.runner.allocate_tensors()
            self.tensor_input_details = self.runner.get_input_details()
            self.tensor_output_details = self.runner.get_output_details()


@@ -186,9 +186,6 @@ class JinaV1ImageEmbedding(BaseEmbedding):
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
            # Avoid lazy loading in worker threads: block until downloads complete
            # and load the model on the main thread during initialization.
            self._load_model_and_utils()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(

@@ -65,9 +65,6 @@ class JinaV2Embedding(BaseEmbedding):
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
            # Avoid lazy loading in worker threads: block until downloads complete
            # and load the model on the main thread during initialization.
            self._load_model_and_utils()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(

@@ -34,7 +34,7 @@ from frigate.data_processing.real_time.audio_transcription import (
    AudioTranscriptionRealTimeProcessor,
)
from frigate.ffmpeg_presets import parse_preset_input
from frigate.log import LogPipe, suppress_stderr_during
from frigate.log import LogPipe, redirect_output_to_logger
from frigate.object_detection.base import load_labels
from frigate.util.builtin import get_ffmpeg_arg_list
from frigate.util.process import FrigateProcess
@@ -367,17 +367,17 @@ class AudioEventMaintainer(threading.Thread):


class AudioTfl:
    @redirect_output_to_logger(logger, logging.DEBUG)
    def __init__(self, stop_event: threading.Event, num_threads=2):
        self.stop_event = stop_event
        self.num_threads = num_threads
        self.labels = load_labels("/audio-labelmap.txt", prefill=521)
        # Suppress TFLite delegate creation messages that bypass Python logging
        with suppress_stderr_during("tflite_interpreter_init"):
            self.interpreter = Interpreter(
                model_path="/cpu_audio_model.tflite",
                num_threads=self.num_threads,
            )
            self.interpreter.allocate_tensors()
        self.interpreter = Interpreter(
            model_path="/cpu_audio_model.tflite",
            num_threads=self.num_threads,
        )

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

@@ -46,7 +46,7 @@ def should_update_state(prev_event: Event, current_event: Event) -> bool:
    if prev_event["sub_label"] != current_event["sub_label"]:
        return True

    if set(prev_event["current_zones"]) != set(current_event["current_zones"]):
    if len(prev_event["current_zones"]) < len(current_event["current_zones"]):
        return True

    return False

@@ -153,7 +153,7 @@ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
    FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {3} {1} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {2}",
    "preset-intel-qsv-h264": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
    "preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v main -level:v 4.1 -async_depth:v 1 {2}",
    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
    FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -hwaccel cuda -hwaccel_device {3} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
    "preset-jetson-h264": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
    "preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile main {2}",
    FFMPEG_HWACCEL_RKMPP: "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",

@@ -178,7 +178,6 @@ Each line represents a detection state, not necessarily unique individuals. Pare
        start_ts: float,
        end_ts: float,
        events: list[dict[str, Any]],
        preferred_language: str | None,
        debug_save: bool,
    ) -> str | None:
        """Generate a summary of review item descriptions over a period of time."""
@@ -233,9 +232,6 @@ Guidelines:
        for event in events:
            timeline_summary_prompt += f"\n{event}\n"

        if preferred_language:
            timeline_summary_prompt += f"\nProvide your answer in {preferred_language}"

        if debug_save:
            with open(
                os.path.join(

101 frigate/genai/llama_cpp.py Normal file
@@ -0,0 +1,101 @@
"""llama.cpp Provider for Frigate AI."""

import base64
import logging
from typing import Any, Optional

import requests

from frigate.config import GenAIProviderEnum
from frigate.genai import GenAIClient, register_genai_provider

logger = logging.getLogger(__name__)


@register_genai_provider(GenAIProviderEnum.llamacpp)
class LlamaCppClient(GenAIClient):
    """Generative AI client for Frigate using llama.cpp server."""

    LOCAL_OPTIMIZED_OPTIONS = {
        "temperature": 0.7,
        "repeat_penalty": 1.05,
        "top_p": 0.8,
    }
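    # Defaults tuned for local models; values from provider_options in the
    # config override these when the dicts are merged in _init_provider().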

    provider: str  # base_url
    provider_options: dict[str, Any]

    def _init_provider(self):
        """Initialize the client."""
        self.provider_options = {
            **self.LOCAL_OPTIMIZED_OPTIONS,
            **self.genai_config.provider_options,
        }
        return (
            self.genai_config.base_url.rstrip("/")
            if self.genai_config.base_url
            else None
        )

    def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
        """Submit a request to llama.cpp server."""
        if self.provider is None:
            logger.warning(
                "llama.cpp provider has not been initialized, a description will not be generated. Check your llama.cpp configuration."
            )
            return None

        try:
            content = []
            for image in images:
                encoded_image = base64.b64encode(image).decode("utf-8")
                content.append(
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{encoded_image}",
                        },
                    }
                )
            content.append(
                {
                    "type": "text",
                    "text": prompt,
                }
            )

            # Build request payload with llama.cpp native options
            payload = {
                "messages": [
                    {
                        "role": "user",
                        "content": content,
                    },
                ],
                **self.provider_options,
            }

            response = requests.post(
                f"{self.provider}/v1/chat/completions",
                json=payload,
                timeout=self.timeout,
            )
            response.raise_for_status()
            result = response.json()

            if (
                result is not None
                and "choices" in result
                and len(result["choices"]) > 0
            ):
                choice = result["choices"][0]
                if "message" in choice and "content" in choice["message"]:
                    return choice["message"]["content"].strip()
            return None
        except Exception as e:
            logger.warning("llama.cpp returned an error: %s", str(e))
            return None

    def get_context_size(self) -> int:
        """Get the context window size for llama.cpp."""
        return self.genai_config.provider_options.get("context_size", 4096)
0 frigate/jobs/__init__.py Normal file
21 frigate/jobs/job.py Normal file
@@ -0,0 +1,21 @@
"""Generic base class for long-running background jobs."""

import uuid
from dataclasses import asdict, dataclass, field
from typing import Any, Optional


@dataclass
class Job:
    """Base class for long-running background jobs."""

    id: str = field(default_factory=lambda: str(uuid.uuid4())[:12])
    job_type: str = ""  # Must be set by subclasses
    status: str = "queued"  # queued, running, success, failed, cancelled
    results: Optional[dict[str, Any]] = None
    start_time: Optional[float] = None
    end_time: Optional[float] = None
    error_message: Optional[str] = None

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for WebSocket transmission."""
        return asdict(self)
70 frigate/jobs/manager.py Normal file
@@ -0,0 +1,70 @@
"""Generic job management for long-running background tasks."""

import threading
from typing import Optional

from frigate.jobs.job import Job
from frigate.types import JobStatusTypesEnum

# Global state and locks for enforcing single concurrent job per job type
_job_locks: dict[str, threading.Lock] = {}
_current_jobs: dict[str, Optional[Job]] = {}
# Keep completed jobs for retrieval, keyed by (job_type, job_id)
_completed_jobs: dict[tuple[str, str], Job] = {}


def _get_lock(job_type: str) -> threading.Lock:
    """Get or create a lock for the specified job type."""
    if job_type not in _job_locks:
        _job_locks[job_type] = threading.Lock()
    return _job_locks[job_type]


def set_current_job(job: Job) -> None:
    """Set the current job for a given job type."""
    lock = _get_lock(job.job_type)
    with lock:
        # Store the previous job if it was completed
        old_job = _current_jobs.get(job.job_type)
        if old_job and old_job.status in (
            JobStatusTypesEnum.success,
            JobStatusTypesEnum.failed,
            JobStatusTypesEnum.cancelled,
        ):
            _completed_jobs[(job.job_type, old_job.id)] = old_job
        _current_jobs[job.job_type] = job


def clear_current_job(job_type: str, job_id: Optional[str] = None) -> None:
    """Clear the current job for a given job type, optionally checking the ID."""
    lock = _get_lock(job_type)
    with lock:
        if job_type in _current_jobs:
            current = _current_jobs[job_type]
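            # Clear when no job_id was given, or when it matches the stored job.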
            if current is None or (job_id is None or current.id == job_id):
                _current_jobs[job_type] = None


def get_current_job(job_type: str) -> Optional[Job]:
    """Get the current running/queued job for a given job type, if any."""
    lock = _get_lock(job_type)
    with lock:
        return _current_jobs.get(job_type)


def get_job_by_id(job_type: str, job_id: str) -> Optional[Job]:
    """Get job by ID. Checks current job first, then completed jobs."""
    lock = _get_lock(job_type)
    with lock:
        # Check if it's the current job
        current = _current_jobs.get(job_type)
        if current and current.id == job_id:
            return current
        # Check if it's a completed job
        return _completed_jobs.get((job_type, job_id))


def job_is_running(job_type: str) -> bool:
    """Check if a job of the given type is currently running or queued."""
    job = get_current_job(job_type)
    return job is not None and job.status in ("queued", "running")
135 frigate/jobs/media_sync.py Normal file
@@ -0,0 +1,135 @@
"""Media sync job management with background execution."""

import logging
import threading
from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional

from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import UPDATE_JOB_STATE
from frigate.jobs.job import Job
from frigate.jobs.manager import (
    get_current_job,
    get_job_by_id,
    job_is_running,
    set_current_job,
)
from frigate.types import JobStatusTypesEnum
from frigate.util.media import sync_all_media

logger = logging.getLogger(__name__)


@dataclass
class MediaSyncJob(Job):
    """In-memory job state for media sync operations."""

    job_type: str = "media_sync"
    dry_run: bool = False
    media_types: list[str] = field(default_factory=lambda: ["all"])
    force: bool = False


class MediaSyncRunner(threading.Thread):
    """Thread-based runner for media sync jobs."""

    def __init__(self, job: MediaSyncJob) -> None:
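        # Daemon thread, so an in-flight sync cannot block process shutdown.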
|
||||
super().__init__(daemon=True, name="media_sync")
|
||||
self.job = job
|
||||
self.requestor = InterProcessRequestor()
|
||||
|
||||
def run(self) -> None:
|
||||
"""Execute the media sync job and broadcast status updates."""
|
||||
try:
|
||||
# Update job status to running
|
||||
self.job.status = JobStatusTypesEnum.running
|
||||
self.job.start_time = datetime.now().timestamp()
|
||||
self._broadcast_status()
|
||||
|
||||
# Execute sync with provided parameters
|
||||
logger.debug(
|
||||
f"Starting media sync job {self.job.id}: "
|
||||
f"media_types={self.job.media_types}, "
|
||||
f"dry_run={self.job.dry_run}, "
|
||||
f"force={self.job.force}"
|
||||
)
|
||||
|
||||
results = sync_all_media(
|
||||
dry_run=self.job.dry_run,
|
||||
media_types=self.job.media_types,
|
||||
force=self.job.force,
|
||||
)
|
||||
|
||||
# Store results and mark as complete
|
||||
self.job.results = results.to_dict()
|
||||
self.job.status = JobStatusTypesEnum.success
|
||||
self.job.end_time = datetime.now().timestamp()
|
||||
|
||||
logger.debug(f"Media sync job {self.job.id} completed successfully")
|
||||
self._broadcast_status()
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Media sync job {self.job.id} failed: {e}", exc_info=True)
|
||||
self.job.status = JobStatusTypesEnum.failed
|
||||
self.job.error_message = str(e)
|
||||
self.job.end_time = datetime.now().timestamp()
|
||||
self._broadcast_status()
|
||||
|
||||
finally:
|
||||
if self.requestor:
|
||||
self.requestor.stop()
|
||||
|
||||
def _broadcast_status(self) -> None:
|
||||
"""Broadcast job status update via IPC to all WebSocket subscribers."""
|
||||
try:
|
||||
self.requestor.send_data(
|
||||
UPDATE_JOB_STATE,
|
||||
self.job.to_dict(),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to broadcast media sync status: {e}")
|
||||
|
||||
|
||||
def start_media_sync_job(
|
||||
dry_run: bool = False,
|
||||
media_types: Optional[list[str]] = None,
|
||||
force: bool = False,
|
||||
) -> Optional[str]:
|
||||
"""Start a new media sync job if none is currently running.
|
||||
|
||||
Returns job ID on success, None if job already running.
|
||||
"""
|
||||
# Check if a job is already running
|
||||
if job_is_running("media_sync"):
|
||||
current = get_current_job("media_sync")
|
||||
logger.warning(
|
||||
f"Media sync job {current.id} is already running. Rejecting new request."
|
||||
)
|
||||
return None
|
||||
|
||||
# Create and start new job
|
||||
job = MediaSyncJob(
|
||||
dry_run=dry_run,
|
||||
media_types=media_types or ["all"],
|
||||
force=force,
|
||||
)
|
||||
|
||||
logger.debug(f"Creating new media sync job: {job.id}")
|
||||
set_current_job(job)
|
||||
|
||||
# Start the background runner
|
||||
runner = MediaSyncRunner(job)
|
||||
runner.start()
|
||||
|
||||
return job.id
|
||||
|
||||
|
||||
def get_current_media_sync_job() -> Optional[MediaSyncJob]:
|
||||
"""Get the current running/queued media sync job, if any."""
|
||||
return get_current_job("media_sync")
|
||||
|
||||
|
||||
def get_media_sync_job_by_id(job_id: str) -> Optional[MediaSyncJob]:
|
||||
"""Get media sync job by ID. Currently only tracks the current job."""
|
||||
return get_job_by_id("media_sync", job_id)
|
||||
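# Illustrative usage sketch (not part of the diff above): how these helpers are
# expected to compose, inferred from their docstrings. The handling of the
# None return is an assumption based on start_media_sync_job's contract.
job_id = start_media_sync_job(dry_run=True, media_types=["recordings"])
if job_id is None:
    print("a media sync job is already running")
else:
    job = get_media_sync_job_by_id(job_id)
    print(job.status if job else "job not found")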
@@ -80,15 +80,10 @@ def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None:
    log_levels = {
        "absl": LogLevel.error,
        "httpx": LogLevel.error,
        "h5py": LogLevel.error,
        "keras": LogLevel.error,
        "matplotlib": LogLevel.error,
        "tensorflow": LogLevel.error,
        "tensorflow.python": LogLevel.error,
        "werkzeug": LogLevel.error,
        "ws4py": LogLevel.error,
        "PIL": LogLevel.warning,
        "numba": LogLevel.warning,
        **log_levels,
    }
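# Hedged sketch of the merge above: because **log_levels is spread last,
# caller-supplied overrides win over the built-in noisy-library defaults.
# The logger name below is illustrative, not from the diff.
apply_log_levels("info", {"peewee": LogLevel.debug})  # "peewee" still debug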
@@ -323,31 +318,3 @@ def suppress_os_output(func: Callable) -> Callable:
        return result

    return wrapper


@contextmanager
def suppress_stderr_during(operation_name: str) -> Generator[None, None, None]:
    """
    Context manager to suppress stderr output during a specific operation.

    Useful for silencing LLVM debug output, CUDA messages, and other native
    library logging that cannot be controlled via Python logging or environment
    variables. Completely redirects file descriptor 2 (stderr) to /dev/null.

    Usage:
        with suppress_stderr_during("model_conversion"):
            converter = tf.lite.TFLiteConverter.from_keras_model(model)
            tflite_model = converter.convert()

    Args:
        operation_name: Name of the operation for debugging purposes
    """
    original_stderr_fd = os.dup(2)
    devnull = os.open(os.devnull, os.O_WRONLY)
    try:
        os.dup2(devnull, 2)
        yield
    finally:
        os.dup2(original_stderr_fd, 2)
        os.close(devnull)
        os.close(original_stderr_fd)
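# Note on the helper removed above: it uses os.dup2 on file descriptor 2
# because native (C/C++) libraries write to the OS-level fd directly and
# bypass Python's sys.stderr. A Python-level redirect, by contrast, only
# captures Python writes (minimal sketch, assuming a POSIX environment):
import contextlib, os
with open(os.devnull, "w") as devnull, contextlib.redirect_stderr(devnull):
    pass  # captures Python-level stderr only; native output still escapes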
@@ -80,6 +80,14 @@ class Recordings(Model):
    regions = IntegerField(null=True)


class ExportCase(Model):
    id = CharField(null=False, primary_key=True, max_length=30)
    name = CharField(index=True, max_length=100)
    description = TextField(null=True)
    created_at = DateTimeField()
    updated_at = DateTimeField()


class Export(Model):
    id = CharField(null=False, primary_key=True, max_length=30)
    camera = CharField(index=True, max_length=20)
@@ -88,6 +96,12 @@ class Export(Model):
    video_path = CharField(unique=True)
    thumb_path = CharField(unique=True)
    in_progress = BooleanField()
    export_case = ForeignKeyField(
        ExportCase,
        null=True,
        backref="exports",
        column_name="export_case_id",
    )


class ReviewSegment(Model):
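# Hedged sketch of the new relationship above, using standard peewee API:
# exports attached to a case are reachable through the "exports" backref
# declared on Export.export_case. The case id value is illustrative.
case = ExportCase.get_or_none(ExportCase.id == "case_abc123")
if case is not None:
    for export in case.exports:
        print(export.id, export.video_path)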
@@ -13,9 +13,8 @@ from playhouse.sqlite_ext import SqliteExtDatabase
from frigate.config import CameraConfig, FrigateConfig, RetainModeEnum
from frigate.const import CACHE_DIR, CLIPS_DIR, MAX_WAL_SIZE, RECORD_DIR
from frigate.models import Previews, Recordings, ReviewSegment, UserReviewStatus
from frigate.record.util import remove_empty_directories, sync_recordings
from frigate.util.builtin import clear_and_unlink
from frigate.util.time import get_tomorrow_at_time
from frigate.util.media import remove_empty_directories

logger = logging.getLogger(__name__)

@@ -119,7 +118,6 @@ class RecordingCleanup(threading.Thread):
                Recordings.path,
                Recordings.objects,
                Recordings.motion,
                Recordings.dBFS,
            )
            .where(
                (Recordings.camera == config.name)
@@ -127,7 +125,6 @@ class RecordingCleanup(threading.Thread):
                (
                    (Recordings.end_time < continuous_expire_date)
                    & (Recordings.motion == 0)
                    & (Recordings.dBFS == 0)
                )
                | (Recordings.end_time < motion_expire_date)
            )
@@ -187,7 +184,6 @@ class RecordingCleanup(threading.Thread):
                    mode == RetainModeEnum.motion
                    and recording.motion == 0
                    and recording.objects == 0
                    and recording.dBFS == 0
                )
                or (mode == RetainModeEnum.active_objects and recording.objects == 0)
            ):
@@ -350,11 +346,6 @@ class RecordingCleanup(threading.Thread):
        logger.debug("End expire recordings.")

    def run(self) -> None:
        # on startup sync recordings with disk if enabled
        if self.config.record.sync_recordings:
            sync_recordings(limited=False)
            next_sync = get_tomorrow_at_time(3)

        # Expire tmp clips every minute, recordings and clean directories every hour.
        for counter in itertools.cycle(range(self.config.record.expire_interval)):
            if self.stop_event.wait(60):
@@ -363,14 +354,6 @@ class RecordingCleanup(threading.Thread):

            self.clean_tmp_previews()

            if (
                self.config.record.sync_recordings
                and datetime.datetime.now().astimezone(datetime.timezone.utc)
                > next_sync
            ):
                sync_recordings(limited=True)
                next_sync = get_tomorrow_at_time(3)

            if counter == 0:
                self.clean_tmp_clips()
                self.expire_recordings()
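# Hedged reading of the expiry predicate above: under RetainModeEnum.motion a
# segment expires only when motion, objects, and dBFS are all zero; under
# RetainModeEnum.active_objects it expires whenever no objects were tracked.
# Illustrative standalone check using the field names from the hunk:
def should_expire(mode, recording) -> bool:
    return (
        mode == RetainModeEnum.motion
        and recording.motion == 0
        and recording.objects == 0
        and recording.dBFS == 0
    ) or (mode == RetainModeEnum.active_objects and recording.objects == 0)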
@@ -64,6 +64,7 @@ class RecordingExporter(threading.Thread):
        end_time: int,
        playback_factor: PlaybackFactorEnum,
        playback_source: PlaybackSourceEnum,
        export_case_id: Optional[str] = None,
    ) -> None:
        super().__init__()
        self.config = config
@@ -75,6 +76,7 @@ class RecordingExporter(threading.Thread):
        self.end_time = end_time
        self.playback_factor = playback_factor
        self.playback_source = playback_source
        self.export_case_id = export_case_id

        # ensure export thumb dir
        Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True)
@@ -226,7 +228,7 @@ class RecordingExporter(threading.Thread):
        ffmpeg_cmd = (
            parse_preset_hardware_acceleration_encode(
                self.config.ffmpeg.ffmpeg_path,
                self.config.ffmpeg.hwaccel_args,
                self.config.cameras[self.camera].record.export.hwaccel_args,
                f"-an {ffmpeg_input}",
                f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart",
                EncodeTypeEnum.timelapse,
@@ -317,7 +319,7 @@ class RecordingExporter(threading.Thread):
        ffmpeg_cmd = (
            parse_preset_hardware_acceleration_encode(
                self.config.ffmpeg.ffmpeg_path,
                self.config.ffmpeg.hwaccel_args,
                self.config.cameras[self.camera].record.export.hwaccel_args,
                f"{TIMELAPSE_DATA_INPUT_ARGS} {ffmpeg_input}",
                f"{self.config.cameras[self.camera].record.export.timelapse_args} -movflags +faststart {video_path}",
                EncodeTypeEnum.timelapse,
@@ -348,17 +350,20 @@ class RecordingExporter(threading.Thread):
        video_path = f"{EXPORT_DIR}/{self.camera}_{filename_start_datetime}-{filename_end_datetime}_{cleaned_export_id}.mp4"
        thumb_path = self.save_thumbnail(self.export_id)

        Export.insert(
            {
                Export.id: self.export_id,
                Export.camera: self.camera,
                Export.name: export_name,
                Export.date: self.start_time,
                Export.video_path: video_path,
                Export.thumb_path: thumb_path,
                Export.in_progress: True,
            }
        ).execute()
        export_values = {
            Export.id: self.export_id,
            Export.camera: self.camera,
            Export.name: export_name,
            Export.date: self.start_time,
            Export.video_path: video_path,
            Export.thumb_path: thumb_path,
            Export.in_progress: True,
        }

        if self.export_case_id is not None:
            export_values[Export.export_case] = self.export_case_id

        Export.insert(export_values).execute()

        try:
            if self.playback_source == PlaybackSourceEnum.recordings:
@@ -67,7 +67,7 @@ class SegmentInfo:
        if (
            not keep
            and retain_mode == RetainModeEnum.motion
            and (self.motion_count > 0 or self.average_dBFS != 0)
            and (self.motion_count > 0 or self.average_dBFS > 0)
        ):
            keep = True
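# Note on the one-line change above: dBFS readings are typically <= 0
# (0 dBFS is full scale), and a value of 0 appears to mean "no audio
# measured" here — that reading is an assumption worth verifying. The two
# predicates differ only for negative readings, e.g. with average_dBFS = -35.0:
#     motion_count > 0 or average_dBFS != 0   -> True  (old check keeps segment)
#     motion_count > 0 or average_dBFS > 0    -> False (new check drops it)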
@@ -1,147 +0,0 @@
"""Recordings Utilities."""

import datetime
import logging
import os

from peewee import DatabaseError, chunked

from frigate.const import RECORD_DIR
from frigate.models import Recordings, RecordingsToDelete

logger = logging.getLogger(__name__)


def remove_empty_directories(directory: str) -> None:
    # list all directories recursively and sort them by path,
    # longest first
    paths = sorted(
        [x[0] for x in os.walk(directory)],
        key=lambda p: len(str(p)),
        reverse=True,
    )
    for path in paths:
        # don't delete the parent
        if path == directory:
            continue
        if len(os.listdir(path)) == 0:
            os.rmdir(path)


def sync_recordings(limited: bool) -> None:
    """Check the db for stale recordings entries that don't exist in the filesystem."""

    def delete_db_entries_without_file(check_timestamp: float) -> bool:
        """Delete db entries where file was deleted outside of frigate."""

        if limited:
            recordings = Recordings.select(Recordings.id, Recordings.path).where(
                Recordings.start_time >= check_timestamp
            )
        else:
            # get all recordings in the db
            recordings = Recordings.select(Recordings.id, Recordings.path)

        # Use pagination to process records in chunks
        page_size = 1000
        num_pages = (recordings.count() + page_size - 1) // page_size
        recordings_to_delete = set()

        for page in range(num_pages):
            for recording in recordings.paginate(page, page_size):
                if not os.path.exists(recording.path):
                    recordings_to_delete.add(recording.id)

        if len(recordings_to_delete) == 0:
            return True

        logger.info(
            f"Deleting {len(recordings_to_delete)} recording DB entries with missing files"
        )

        # convert back to list of dictionaries for insertion
        recordings_to_delete = [
            {"id": recording_id} for recording_id in recordings_to_delete
        ]

        if float(len(recordings_to_delete)) / max(1, recordings.count()) > 0.5:
            logger.warning(
                f"Deleting {(len(recordings_to_delete) / max(1, recordings.count()) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..."
            )
            return False

        # create a temporary table for deletion
        RecordingsToDelete.create_table(temporary=True)

        # insert ids to the temporary table
        max_inserts = 1000
        for batch in chunked(recordings_to_delete, max_inserts):
            RecordingsToDelete.insert_many(batch).execute()

        try:
            # delete records in the main table that exist in the temporary table
            query = Recordings.delete().where(
                Recordings.id.in_(RecordingsToDelete.select(RecordingsToDelete.id))
            )
            query.execute()
        except DatabaseError as e:
            logger.error(f"Database error during recordings db cleanup: {e}")

        return True

    def delete_files_without_db_entry(files_on_disk: list[str]):
        """Delete files where file is not inside frigate db."""
        files_to_delete = []

        for file in files_on_disk:
            if not Recordings.select().where(Recordings.path == file).exists():
                files_to_delete.append(file)

        if len(files_to_delete) == 0:
            return True

        logger.info(
            f"Deleting {len(files_to_delete)} recordings files with missing DB entries"
        )

        if float(len(files_to_delete)) / max(1, len(files_on_disk)) > 0.5:
            logger.debug(
                f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..."
            )
            return False

        for file in files_to_delete:
            os.unlink(file)

        return True

    logger.debug("Start sync recordings.")

    # start checking on the hour 36 hours ago
    check_point = datetime.datetime.now().replace(
        minute=0, second=0, microsecond=0
    ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36)
    db_success = delete_db_entries_without_file(check_point.timestamp())

    # only try to cleanup files if db cleanup was successful
    if db_success:
        if limited:
            # get recording files from last 36 hours
            hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}"
            files_on_disk = {
                os.path.join(root, file)
                for root, _, files in os.walk(RECORD_DIR)
                for file in files
                if root > hour_check
            }
        else:
            # get all recordings files on disk and put them in a set
            files_on_disk = {
                os.path.join(root, file)
                for root, _, files in os.walk(RECORD_DIR)
                for file in files
            }

        delete_files_without_db_entry(files_on_disk)

    logger.debug("End sync recordings.")
@@ -22,6 +22,7 @@ from frigate.util.services import (
    get_bandwidth_stats,
    get_cpu_stats,
    get_fs_type,
    get_hailo_temps,
    get_intel_gpu_stats,
    get_jetson_stats,
    get_nvidia_gpu_stats,
@@ -91,9 +92,80 @@ def get_temperatures() -> dict[str, float]:
        if temp is not None:
            temps[apex] = temp

    # Get temperatures for Hailo devices
    temps.update(get_hailo_temps())

    return temps


def get_detector_temperature(
    detector_type: str,
    detector_index_by_type: dict[str, int],
) -> Optional[float]:
    """Get temperature for a specific detector based on its type."""
    if detector_type == "edgetpu":
        # Get temperatures for all attached Corals
        base = "/sys/class/apex/"
        if os.path.isdir(base):
            apex_devices = sorted(os.listdir(base))
            index = detector_index_by_type.get("edgetpu", 0)
            if index < len(apex_devices):
                apex_name = apex_devices[index]
                temp = read_temperature(os.path.join(base, apex_name, "temp"))
                if temp is not None:
                    return temp
    elif detector_type == "hailo8l":
        # Get temperatures for Hailo devices
        hailo_temps = get_hailo_temps()
        if hailo_temps:
            hailo_device_names = sorted(hailo_temps.keys())
            index = detector_index_by_type.get("hailo8l", 0)
            if index < len(hailo_device_names):
                device_name = hailo_device_names[index]
                return hailo_temps[device_name]
    elif detector_type == "rknn":
        # Rockchip temperatures are handled by the GPU / NPU stats,
        # as there are no detector-specific temperatures
        pass

    return None
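# Hedged example of the index contract above: with two Coral detectors
# configured, the sorted /sys/class/apex entries (e.g. apex_0, apex_1) are
# matched positionally, so the second "edgetpu" detector reads apex_1.
# Device names are illustrative.
temp0 = get_detector_temperature("edgetpu", {"edgetpu": 0})  # apex_0's temp
temp1 = get_detector_temperature("edgetpu", {"edgetpu": 1})  # apex_1's temp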
def get_detector_stats(
|
||||
stats_tracking: StatsTrackingTypes,
|
||||
) -> dict[str, dict[str, Any]]:
|
||||
"""Get stats for all detectors, including temperatures based on detector type."""
|
||||
detector_stats: dict[str, dict[str, Any]] = {}
|
||||
detector_type_indices: dict[str, int] = {}
|
||||
|
||||
for name, detector in stats_tracking["detectors"].items():
|
||||
pid = detector.detect_process.pid if detector.detect_process else None
|
||||
detector_type = detector.detector_config.type
|
||||
|
||||
# Keep track of the index for each detector type to match temperatures correctly
|
||||
current_index = detector_type_indices.get(detector_type, 0)
|
||||
detector_type_indices[detector_type] = current_index + 1
|
||||
|
||||
detector_stat = {
|
||||
"inference_speed": round(detector.avg_inference_speed.value * 1000, 2), # type: ignore[attr-defined]
|
||||
# issue https://github.com/python/typeshed/issues/8799
|
||||
# from mypy 0.981 onwards
|
||||
"detection_start": detector.detection_start.value, # type: ignore[attr-defined]
|
||||
# issue https://github.com/python/typeshed/issues/8799
|
||||
# from mypy 0.981 onwards
|
||||
"pid": pid,
|
||||
}
|
||||
|
||||
temp = get_detector_temperature(detector_type, {detector_type: current_index})
|
||||
|
||||
if temp is not None:
|
||||
detector_stat["temperature"] = round(temp, 1)
|
||||
|
||||
detector_stats[name] = detector_stat
|
||||
|
||||
return detector_stats
|
||||
|
||||
|
||||
def get_processing_stats(
|
||||
config: FrigateConfig, stats: dict[str, str], hwaccel_errors: list[str]
|
||||
) -> None:
|
||||
@@ -174,6 +246,7 @@ async def set_gpu_stats(
|
||||
"mem": str(round(float(nvidia_usage[i]["mem"]), 2)) + "%",
|
||||
"enc": str(round(float(nvidia_usage[i]["enc"]), 2)) + "%",
|
||||
"dec": str(round(float(nvidia_usage[i]["dec"]), 2)) + "%",
|
||||
"temp": str(nvidia_usage[i]["temp"]),
|
||||
}
|
||||
|
||||
else:
|
||||
@@ -279,6 +352,32 @@ def stats_snapshot(
|
||||
if camera_stats.capture_process_pid.value
|
||||
else None
|
||||
)
|
||||
# Calculate connection quality based on current state
|
||||
# This is computed at stats-collection time so offline cameras
|
||||
# correctly show as unusable rather than excellent
|
||||
expected_fps = config.cameras[name].detect.fps
|
||||
current_fps = camera_stats.camera_fps.value
|
||||
reconnects = camera_stats.reconnects_last_hour.value
|
||||
stalls = camera_stats.stalls_last_hour.value
|
||||
|
||||
if current_fps < 0.1:
|
||||
quality_str = "unusable"
|
||||
elif reconnects == 0 and current_fps >= 0.9 * expected_fps and stalls < 5:
|
||||
quality_str = "excellent"
|
||||
elif reconnects <= 2 and current_fps >= 0.6 * expected_fps:
|
||||
quality_str = "fair"
|
||||
elif reconnects > 10 or current_fps < 1.0 or stalls > 100:
|
||||
quality_str = "unusable"
|
||||
else:
|
||||
quality_str = "poor"
|
||||
|
||||
connection_quality = {
|
||||
"connection_quality": quality_str,
|
||||
"expected_fps": expected_fps,
|
||||
"reconnects_last_hour": reconnects,
|
||||
"stalls_last_hour": stalls,
|
||||
}
|
||||
|
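# Worked example of the thresholds above (values illustrative), mirroring the
# branch logic for clarity only — the real computation lives in the diff:
def rate_connection(expected_fps, current_fps, reconnects, stalls) -> str:
    if current_fps < 0.1:
        return "unusable"
    if reconnects == 0 and current_fps >= 0.9 * expected_fps and stalls < 5:
        return "excellent"
    if reconnects <= 2 and current_fps >= 0.6 * expected_fps:
        return "fair"
    if reconnects > 10 or current_fps < 1.0 or stalls > 100:
        return "unusable"
    return "poor"

assert rate_connection(5, 4.6, 0, 2) == "excellent"  # 4.6 >= 0.9 * 5, few stalls
assert rate_connection(5, 3.2, 2, 20) == "fair"      # 3.2 >= 0.6 * 5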
        stats["cameras"][name] = {
            "camera_fps": round(camera_stats.camera_fps.value, 2),
            "process_fps": round(camera_stats.process_fps.value, 2),
@@ -290,20 +389,10 @@ def stats_snapshot(
            "ffmpeg_pid": ffmpeg_pid,
            "audio_rms": round(camera_stats.audio_rms.value, 4),
            "audio_dBFS": round(camera_stats.audio_dBFS.value, 4),
            **connection_quality,
        }

    stats["detectors"] = {}
    for name, detector in stats_tracking["detectors"].items():
        pid = detector.detect_process.pid if detector.detect_process else None
        stats["detectors"][name] = {
            "inference_speed": round(detector.avg_inference_speed.value * 1000, 2),  # type: ignore[attr-defined]
            # issue https://github.com/python/typeshed/issues/8799
            # from mypy 0.981 onwards
            "detection_start": detector.detection_start.value,  # type: ignore[attr-defined]
            # issue https://github.com/python/typeshed/issues/8799
            # from mypy 0.981 onwards
            "pid": pid,
        }
    stats["detectors"] = get_detector_stats(stats_tracking)
    stats["camera_fps"] = round(total_camera_fps, 2)
    stats["process_fps"] = round(total_process_fps, 2)
    stats["skipped_fps"] = round(total_skipped_fps, 2)
@@ -389,7 +478,6 @@ def stats_snapshot(
        "version": VERSION,
        "latest_version": stats_tracking["latest_frigate_version"],
        "storage": {},
        "temperatures": get_temperatures(),
        "last_updated": int(time.time()),
    }
@@ -86,11 +86,11 @@ class TimelineProcessor(threading.Thread):
        event_data: dict[Any, Any],
    ) -> bool:
        """Handle object detection."""
        save = False
        camera_config = self.config.cameras[camera]
        event_id = event_data["id"]

        # Base timeline entry data that all entries will share
        base_entry = {
        timeline_entry = {
            Timeline.timestamp: event_data["frame_time"],
            Timeline.camera: camera,
            Timeline.source: "tracked_object",
@@ -123,64 +123,40 @@ class TimelineProcessor(threading.Thread):
            e[Timeline.data]["sub_label"] = event_data["sub_label"]

        if event_type == EventStateEnum.start:
            timeline_entry = base_entry.copy()
            timeline_entry[Timeline.class_type] = "visible"
            self.insert_or_save(timeline_entry, prev_event_data, event_data)
            save = True
        elif event_type == EventStateEnum.update:
            # Check all conditions and create timeline entries for each change
            entries_to_save = []

            # Check for zone changes
            prev_zones = set(prev_event_data["current_zones"])
            current_zones = set(event_data["current_zones"])
            zones_changed = prev_zones != current_zones

            # Only save "entered_zone" events when the object is actually IN zones
            if (
                zones_changed
                len(prev_event_data["current_zones"]) < len(event_data["current_zones"])
                and not event_data["stationary"]
                and len(current_zones) > 0
            ):
                zone_entry = base_entry.copy()
                zone_entry[Timeline.class_type] = "entered_zone"
                zone_entry[Timeline.data] = base_entry[Timeline.data].copy()
                zone_entry[Timeline.data]["zones"] = event_data["current_zones"]
                entries_to_save.append(zone_entry)

            # Check for stationary status change
            if prev_event_data["stationary"] != event_data["stationary"]:
                stationary_entry = base_entry.copy()
                stationary_entry[Timeline.class_type] = (
                timeline_entry[Timeline.class_type] = "entered_zone"
                timeline_entry[Timeline.data]["zones"] = event_data["current_zones"]
                save = True
            elif prev_event_data["stationary"] != event_data["stationary"]:
                timeline_entry[Timeline.class_type] = (
                    "stationary" if event_data["stationary"] else "active"
                )
                stationary_entry[Timeline.data] = base_entry[Timeline.data].copy()
                entries_to_save.append(stationary_entry)

            # Check for new attributes
            if prev_event_data["attributes"] == {} and event_data["attributes"] != {}:
                attribute_entry = base_entry.copy()
                attribute_entry[Timeline.class_type] = "attribute"
                attribute_entry[Timeline.data] = base_entry[Timeline.data].copy()
                attribute_entry[Timeline.data]["attribute"] = list(
                save = True
            elif prev_event_data["attributes"] == {} and event_data["attributes"] != {}:
                timeline_entry[Timeline.class_type] = "attribute"
                timeline_entry[Timeline.data]["attribute"] = list(
                    event_data["attributes"].keys()
                )[0]

                if len(event_data["current_attributes"]) > 0:
                    attribute_entry[Timeline.data]["attribute_box"] = to_relative_box(
                    timeline_entry[Timeline.data]["attribute_box"] = to_relative_box(
                        camera_config.detect.width,
                        camera_config.detect.height,
                        event_data["current_attributes"][0]["box"],
                    )

                entries_to_save.append(attribute_entry)

        # Save all entries
        for entry in entries_to_save:
            self.insert_or_save(entry, prev_event_data, event_data)

            save = True
        elif event_type == EventStateEnum.end:
            timeline_entry = base_entry.copy()
            timeline_entry[Timeline.class_type] = "gone"
            save = True

        if save:
            self.insert_or_save(timeline_entry, prev_event_data, event_data)

    def handle_api_entry(
@@ -26,6 +26,15 @@ class ModelStatusTypesEnum(str, Enum):
    failed = "failed"


class JobStatusTypesEnum(str, Enum):
    pending = "pending"
    queued = "queued"
    running = "running"
    success = "success"
    failed = "failed"
    cancelled = "cancelled"
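# Hedged sketch: because JobStatusTypesEnum subclasses str, members compare
# equal to their string values and serialize directly in JSON payloads.
import json
status = JobStatusTypesEnum.running
assert status == "running"
print(json.dumps({"status": status}))  # {"status": "running"}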
class TrackedObjectUpdateTypesEnum(str, Enum):
    description = "description"
    face = "face"

@@ -19,10 +19,9 @@ from frigate.const import (
    PROCESS_PRIORITY_LOW,
    UPDATE_MODEL_STATE,
)
from frigate.log import redirect_output_to_logger, suppress_stderr_during
from frigate.log import redirect_output_to_logger
from frigate.models import Event, Recordings, ReviewSegment
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
from frigate.util.file import get_event_thumbnail_bytes
from frigate.util.image import get_image_from_recording
from frigate.util.process import FrigateProcess
@@ -122,10 +121,6 @@ def get_dataset_image_count(model_name: str) -> int:

class ClassificationTrainingProcess(FrigateProcess):
    def __init__(self, model_name: str) -> None:
        self.BASE_WEIGHT_URL = os.environ.get(
            "TF_KERAS_MOBILENET_V2_WEIGHTS_URL",
            "",
        )
        super().__init__(
            stop_event=None,
            priority=PROCESS_PRIORITY_LOW,
@@ -184,23 +179,11 @@ class ClassificationTrainingProcess(FrigateProcess):
            )
            return False

        weights_path = "imagenet"
        # Download MobileNetV2 weights if not present
        if self.BASE_WEIGHT_URL:
            weights_path = os.path.join(
                MODEL_CACHE_DIR, "MobileNet", "mobilenet_v2_weights.h5"
            )
            if not os.path.exists(weights_path):
                logger.info("Downloading MobileNet V2 weights file")
                ModelDownloader.download_from_url(
                    self.BASE_WEIGHT_URL, weights_path
                )

        # Start with imagenet base model with 35% of channels in each layer
        base_model = MobileNetV2(
            input_shape=(224, 224, 3),
            include_top=False,
            weights=weights_path,
            weights="imagenet",
            alpha=0.35,
        )
        base_model.trainable = False  # Freeze pre-trained layers
@@ -250,20 +233,15 @@ class ClassificationTrainingProcess(FrigateProcess):
        logger.debug(f"Converting {self.model_name} to TFLite...")

        # convert model to tflite
        # Suppress stderr during conversion to avoid LLVM debug output
        # (fully_quantize, inference_type, MLIR optimization messages, etc)
        with suppress_stderr_during("tflite_conversion"):
            converter = tf.lite.TFLiteConverter.from_keras_model(model)
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.representative_dataset = (
                self.__generate_representative_dataset_factory(dataset_dir)
            )
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS_INT8
            ]
            converter.inference_input_type = tf.uint8
            converter.inference_output_type = tf.uint8
            tflite_model = converter.convert()
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = (
            self.__generate_representative_dataset_factory(dataset_dir)
        )
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.uint8
        converter.inference_output_type = tf.uint8
        tflite_model = converter.convert()

        # write model
        model_path = os.path.join(model_dir, "model.tflite")
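# Hedged sketch (not part of the diff): loading the quantized artifact written
# at model_path with the TFLite interpreter. uint8 input/output follows from
# the converter settings above; the input shape and tf/np imports are assumed
# from the surrounding module.
interp = tf.lite.Interpreter(model_path=model_path)
interp.allocate_tensors()
inp = interp.get_input_details()[0]
out = interp.get_output_details()[0]
interp.set_tensor(inp["index"], np.zeros(inp["shape"], dtype=np.uint8))
interp.invoke()
scores = interp.get_tensor(out["index"])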
@@ -360,6 +338,8 @@ def collect_state_classification_examples(
        cameras: Dict mapping camera names to normalized crop coordinates [x1, y1, x2, y2] (0-1)
    """
    dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
    temp_dir = os.path.join(dataset_dir, "temp")
    os.makedirs(temp_dir, exist_ok=True)

    # Step 1: Get review items for the cameras
    camera_names = list(cameras.keys())
@@ -374,10 +354,6 @@ def collect_state_classification_examples(
        logger.warning(f"No review items found for cameras: {camera_names}")
        return

    # The temp directory is only created when there are review_items.
    temp_dir = os.path.join(dataset_dir, "temp")
    os.makedirs(temp_dir, exist_ok=True)

    # Step 2: Create balanced timestamp selection (100 samples)
    timestamps = _select_balanced_timestamps(review_items, target_count=100)

@@ -506,10 +482,6 @@ def _extract_keyframes(
    """
    Extract keyframes from recordings at specified timestamps and crop to specified regions.

    This implementation batches work by running multiple ffmpeg snapshot commands
    concurrently, which significantly reduces total runtime compared to
    processing each timestamp serially.

    Args:
        ffmpeg_path: Path to ffmpeg binary
        timestamps: List of timestamp dicts from _select_balanced_timestamps
@@ -519,21 +491,15 @@ def _extract_keyframes(
    Returns:
        List of paths to successfully extracted and cropped keyframe images
    """
    from concurrent.futures import ThreadPoolExecutor, as_completed
    keyframe_paths = []

    if not timestamps:
        return []

    # Limit the number of concurrent ffmpeg processes so we don't overload the host.
    max_workers = min(5, len(timestamps))

    def _process_timestamp(idx: int, ts_info: dict) -> tuple[int, str | None]:
    for idx, ts_info in enumerate(timestamps):
        camera = ts_info["camera"]
        timestamp = ts_info["timestamp"]

        if camera not in camera_crops:
            logger.warning(f"No crop coordinates for camera {camera}")
            return idx, None
            continue

        norm_x1, norm_y1, norm_x2, norm_y2 = camera_crops[camera]

@@ -550,7 +516,7 @@ def _extract_keyframes(
                .get()
            )
        except Exception:
            return idx, None
            continue

        relative_time = timestamp - recording.start_time

@@ -564,57 +530,38 @@ def _extract_keyframes(
                height=None,
            )

            if not image_data:
                return idx, None
            if image_data:
                nparr = np.frombuffer(image_data, np.uint8)
                img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

            nparr = np.frombuffer(image_data, np.uint8)
            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
                if img is not None:
                    height, width = img.shape[:2]

            if img is None:
                return idx, None
                    x1 = int(norm_x1 * width)
                    y1 = int(norm_y1 * height)
                    x2 = int(norm_x2 * width)
                    y2 = int(norm_y2 * height)

            height, width = img.shape[:2]
                    x1_clipped = max(0, min(x1, width))
                    y1_clipped = max(0, min(y1, height))
                    x2_clipped = max(0, min(x2, width))
                    y2_clipped = max(0, min(y2, height))

            x1 = int(norm_x1 * width)
            y1 = int(norm_y1 * height)
            x2 = int(norm_x2 * width)
            y2 = int(norm_y2 * height)
                    if x2_clipped > x1_clipped and y2_clipped > y1_clipped:
                        cropped = img[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
                        resized = cv2.resize(cropped, (224, 224))

            x1_clipped = max(0, min(x1, width))
            y1_clipped = max(0, min(y1, height))
            x2_clipped = max(0, min(x2, width))
            y2_clipped = max(0, min(y2, height))
                        output_path = os.path.join(output_dir, f"frame_{idx:04d}.jpg")
                        cv2.imwrite(output_path, resized)
                        keyframe_paths.append(output_path)

            if x2_clipped <= x1_clipped or y2_clipped <= y1_clipped:
                return idx, None

            cropped = img[y1_clipped:y2_clipped, x1_clipped:x2_clipped]
            resized = cv2.resize(cropped, (224, 224))

            output_path = os.path.join(output_dir, f"frame_{idx:04d}.jpg")
            cv2.imwrite(output_path, resized)
            return idx, output_path
        except Exception as e:
            logger.debug(
                f"Failed to extract frame from {recording.path} at {relative_time}s: {e}"
            )
            return idx, None
            continue

    keyframes_with_index: list[tuple[int, str]] = []

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_idx = {
            executor.submit(_process_timestamp, idx, ts_info): idx
            for idx, ts_info in enumerate(timestamps)
        }

        for future in as_completed(future_to_idx):
            _, path = future.result()
            if path:
                keyframes_with_index.append((future_to_idx[future], path))

    keyframes_with_index.sort(key=lambda item: item[0])
    return [path for _, path in keyframes_with_index]
    return keyframe_paths


def _select_distinct_images(
@@ -13,7 +13,7 @@ from frigate.util.services import get_video_properties

logger = logging.getLogger(__name__)

CURRENT_CONFIG_VERSION = "0.17-0"
CURRENT_CONFIG_VERSION = "0.18-0"
DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, "config.yml")


@@ -98,6 +98,13 @@ def migrate_frigate_config(config_file: str):
            yaml.dump(new_config, f)
        previous_version = "0.17-0"

    if previous_version < "0.18-0":
        logger.info(f"Migrating frigate config from {previous_version} to 0.18-0...")
        new_config = migrate_018_0(config)
        with open(config_file, "w") as f:
            yaml.dump(new_config, f)
        previous_version = "0.18-0"

    logger.info("Finished frigate config migration...")


@@ -427,6 +434,27 @@ def migrate_017_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]
    return new_config


def migrate_018_0(config: dict[str, dict[str, Any]]) -> dict[str, dict[str, Any]]:
    """Handle migrating frigate config to 0.18-0"""
    new_config = config.copy()

    # Remove deprecated sync_recordings from global record config
    if new_config.get("record", {}).get("sync_recordings") is not None:
        del new_config["record"]["sync_recordings"]

    # Remove deprecated sync_recordings from camera-specific record configs
    for name, camera in config.get("cameras", {}).items():
        camera_config: dict[str, dict[str, Any]] = camera.copy()

        if camera_config.get("record", {}).get("sync_recordings") is not None:
            del camera_config["record"]["sync_recordings"]

        new_config["cameras"][name] = camera_config

    new_config["version"] = "0.18-0"
    return new_config
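# Hedged illustration of migrate_018_0's effect on a config dict (YAML keys
# shown as Python; camera name illustrative): the deprecated sync_recordings
# flags are dropped at both the global and camera level, and the version
# stamp is bumped.
before = {
    "record": {"enabled": True, "sync_recordings": True},
    "cameras": {"front": {"record": {"sync_recordings": False}}},
}
after = migrate_018_0(before)
assert "sync_recordings" not in after["record"]
assert "sync_recordings" not in after["cameras"]["front"]["record"]
assert after["version"] == "0.18-0"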
def get_relative_coordinates(
    mask: Optional[Union[str, list]], frame_shape: tuple[int, int]
) -> Union[str, list]:
788 frigate/util/media.py (new file)
@@ -0,0 +1,788 @@
"""Recordings Utilities."""

import datetime
import logging
import os
from dataclasses import dataclass, field

from peewee import DatabaseError, chunked

from frigate.const import CLIPS_DIR, EXPORT_DIR, RECORD_DIR, THUMB_DIR
from frigate.models import (
    Event,
    Export,
    Previews,
    Recordings,
    RecordingsToDelete,
    ReviewSegment,
)

logger = logging.getLogger(__name__)


# Safety threshold - abort if more than 50% of files would be deleted
SAFETY_THRESHOLD = 0.5
@dataclass
class SyncResult:
    """Result of a sync operation."""

    media_type: str
    files_checked: int = 0
    orphans_found: int = 0
    orphans_deleted: int = 0
    orphan_paths: list[str] = field(default_factory=list)
    aborted: bool = False
    error: str | None = None

    def to_dict(self) -> dict:
        return {
            "media_type": self.media_type,
            "files_checked": self.files_checked,
            "orphans_found": self.orphans_found,
            "orphans_deleted": self.orphans_deleted,
            "aborted": self.aborted,
            "error": self.error,
        }
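# Hedged usage sketch: every sync_* helper below returns a SyncResult, so a
# dry run can be inspected before anything is deleted. Field access follows
# the dataclass definition above.
res = sync_recordings(limited=False, dry_run=True)
print(res.media_type, res.orphans_found, res.aborted)
print(res.to_dict())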
def remove_empty_directories(directory: str) -> None:
    # list all directories recursively and sort them by path,
    # longest first
    paths = sorted(
        [x[0] for x in os.walk(directory)],
        key=lambda p: len(str(p)),
        reverse=True,
    )
    for path in paths:
        # don't delete the parent
        if path == directory:
            continue
        if len(os.listdir(path)) == 0:
            os.rmdir(path)


def sync_recordings(
    limited: bool = False, dry_run: bool = False, force: bool = False
) -> SyncResult:
    """Sync recordings between the database and disk using the SyncResult format."""

    result = SyncResult(media_type="recordings")

    try:
        logger.debug("Start sync recordings.")

        # start checking on the hour 36 hours ago
        check_point = datetime.datetime.now().replace(
            minute=0, second=0, microsecond=0
        ).astimezone(datetime.timezone.utc) - datetime.timedelta(hours=36)

        # Gather DB recordings to inspect
        if limited:
            recordings_query = Recordings.select(Recordings.id, Recordings.path).where(
                Recordings.start_time >= check_point.timestamp()
            )
        else:
            recordings_query = Recordings.select(Recordings.id, Recordings.path)

        recordings_count = recordings_query.count()
        page_size = 1000
        num_pages = (recordings_count + page_size - 1) // page_size
        recordings_to_delete: list[dict] = []

        for page in range(num_pages):
            for recording in recordings_query.paginate(page, page_size):
                if not os.path.exists(recording.path):
                    recordings_to_delete.append(
                        {"id": recording.id, "path": recording.path}
                    )

        result.orphans_found += len(recordings_to_delete)
        result.orphan_paths.extend(
            [
                recording["path"]
                for recording in recordings_to_delete
                if recording.get("path")
            ]
        )

        if (
            recordings_count
            and len(recordings_to_delete) / recordings_count > SAFETY_THRESHOLD
        ):
            if force:
                logger.warning(
                    f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries (force=True, bypassing safety threshold)"
                )
            else:
                logger.warning(
                    f"Deleting {(len(recordings_to_delete) / max(1, recordings_count) * 100):.2f}% of recordings DB entries, could be due to configuration error. Aborting..."
                )
                result.aborted = True
                return result

        if recordings_to_delete and not dry_run:
            logger.info(
                f"Deleting {len(recordings_to_delete)} recording DB entries with missing files"
            )

            RecordingsToDelete.create_table(temporary=True)

            max_inserts = 1000
            for batch in chunked(recordings_to_delete, max_inserts):
                RecordingsToDelete.insert_many(batch).execute()

            try:
                deleted = (
                    Recordings.delete()
                    .where(
                        Recordings.id.in_(
                            RecordingsToDelete.select(RecordingsToDelete.id)
                        )
                    )
                    .execute()
                )
                result.orphans_deleted += int(deleted)
            except DatabaseError as e:
                logger.error(f"Database error during recordings db cleanup: {e}")
                result.error = str(e)
                result.aborted = True
                return result

        if result.aborted:
            logger.warning("Recording DB sync aborted; skipping file cleanup.")
            return result

        # Only try to clean up files if db cleanup was successful or dry_run
        if limited:
            # get recording files from last 36 hours
            hour_check = f"{RECORD_DIR}/{check_point.strftime('%Y-%m-%d/%H')}"
            files_on_disk = {
                os.path.join(root, file)
                for root, _, files in os.walk(RECORD_DIR)
                for file in files
                if root > hour_check
            }
        else:
            # get all recordings files on disk and put them in a set
            files_on_disk = {
                os.path.join(root, file)
                for root, _, files in os.walk(RECORD_DIR)
                for file in files
            }

        result.files_checked = len(files_on_disk)

        files_to_delete: list[str] = []
        for file in files_on_disk:
            if not Recordings.select().where(Recordings.path == file).exists():
                files_to_delete.append(file)

        result.orphans_found += len(files_to_delete)
        result.orphan_paths.extend(files_to_delete)

        if (
            files_on_disk
            and len(files_to_delete) / len(files_on_disk) > SAFETY_THRESHOLD
        ):
            if force:
                logger.warning(
                    f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files (force=True, bypassing safety threshold)"
                )
            else:
                logger.warning(
                    f"Deleting {(len(files_to_delete) / max(1, len(files_on_disk)) * 100):.2f}% of recordings files, could be due to configuration error. Aborting..."
                )
                result.aborted = True
                return result

        if dry_run:
            logger.info(
                f"Recordings sync (dry run): Found {len(files_to_delete)} orphaned files"
            )
            return result

        # Delete orphans
        logger.info(f"Deleting {len(files_to_delete)} orphaned recordings files")
        for file in files_to_delete:
            try:
                os.unlink(file)
                result.orphans_deleted += 1
            except OSError as e:
                logger.error(f"Failed to delete {file}: {e}")

        logger.debug("End sync recordings.")

    except Exception as e:
        logger.error(f"Error syncing recordings: {e}")
        result.error = str(e)

    return result
def sync_event_snapshots(dry_run: bool = False, force: bool = False) -> SyncResult:
    """Sync event snapshots - delete files not referenced by any event.

    Event snapshots are stored at: CLIPS_DIR/{camera}-{event_id}.jpg
    Also checks for clean variants: {camera}-{event_id}-clean.webp and -clean.png
    """
    result = SyncResult(media_type="event_snapshots")

    try:
        # Get all event IDs with snapshots from DB
        events_with_snapshots = set(
            f"{e.camera}-{e.id}"
            for e in Event.select(Event.id, Event.camera).where(
                Event.has_snapshot == True
            )
        )

        # Find snapshot files on disk (directly in CLIPS_DIR, not subdirectories)
        snapshot_files: list[tuple[str, str]] = []  # (full_path, base_name)
        if os.path.isdir(CLIPS_DIR):
            for file in os.listdir(CLIPS_DIR):
                file_path = os.path.join(CLIPS_DIR, file)
                if os.path.isfile(file_path) and file.endswith(
                    (".jpg", "-clean.webp", "-clean.png")
                ):
                    # Extract base name (camera-event_id) from filename
                    base_name = file
                    for suffix in ["-clean.webp", "-clean.png", ".jpg"]:
                        if file.endswith(suffix):
                            base_name = file[: -len(suffix)]
                            break
                    snapshot_files.append((file_path, base_name))

        result.files_checked = len(snapshot_files)

        # Find orphans
        orphans: list[str] = []
        for file_path, base_name in snapshot_files:
            if base_name not in events_with_snapshots:
                orphans.append(file_path)

        result.orphans_found = len(orphans)
        result.orphan_paths = orphans

        if len(orphans) == 0:
            return result

        # Safety check
        if (
            result.files_checked > 0
            and len(orphans) / result.files_checked > SAFETY_THRESHOLD
        ):
            if force:
                logger.warning(
                    f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
                )
            else:
                logger.warning(
                    f"Event snapshots sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
                    "Aborting due to safety threshold."
                )
                result.aborted = True
                return result

        if dry_run:
            logger.info(
                f"Event snapshots sync (dry run): Found {len(orphans)} orphaned files"
            )
            return result

        # Delete orphans
        logger.info(f"Deleting {len(orphans)} orphaned event snapshot files")
        for file_path in orphans:
            try:
                os.unlink(file_path)
                result.orphans_deleted += 1
            except OSError as e:
                logger.error(f"Failed to delete {file_path}: {e}")

    except Exception as e:
        logger.error(f"Error syncing event snapshots: {e}")
        result.error = str(e)

    return result
def sync_event_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult:
    """Sync event thumbnails - delete files not referenced by any event.

    Event thumbnails are stored at: THUMB_DIR/{camera}/{event_id}.webp
    Only events without an inline thumbnail (thumbnail field is None/empty) use files.
    """
    result = SyncResult(media_type="event_thumbnails")

    try:
        # Get all events that use file-based thumbnails
        # Events with the thumbnail field populated don't need files
        events_with_file_thumbs = set(
            (e.camera, e.id)
            for e in Event.select(Event.id, Event.camera, Event.thumbnail).where(
                (Event.thumbnail.is_null(True)) | (Event.thumbnail == "")
            )
        )

        # Find thumbnail files on disk
        thumbnail_files: list[
            tuple[str, str, str]
        ] = []  # (full_path, camera, event_id)
        if os.path.isdir(THUMB_DIR):
            for camera_dir in os.listdir(THUMB_DIR):
                camera_path = os.path.join(THUMB_DIR, camera_dir)
                if not os.path.isdir(camera_path):
                    continue
                for file in os.listdir(camera_path):
                    if file.endswith(".webp"):
                        event_id = file[:-5]  # Remove .webp
                        file_path = os.path.join(camera_path, file)
                        thumbnail_files.append((file_path, camera_dir, event_id))

        result.files_checked = len(thumbnail_files)

        # Find orphans - files where the event doesn't exist or the event has an inline thumbnail
        orphans: list[str] = []
        for file_path, camera, event_id in thumbnail_files:
            if (camera, event_id) not in events_with_file_thumbs:
                # Check whether the event exists at all
                event_exists = Event.select().where(Event.id == event_id).exists()
                if not event_exists:
                    orphans.append(file_path)
                else:
                    # If the event exists with an inline thumbnail, the file is also orphaned
                    event = Event.get_or_none(Event.id == event_id)
                    if event and event.thumbnail:
                        orphans.append(file_path)

        result.orphans_found = len(orphans)
        result.orphan_paths = orphans

        if len(orphans) == 0:
            return result

        # Safety check
        if (
            result.files_checked > 0
            and len(orphans) / result.files_checked > SAFETY_THRESHOLD
        ):
            if force:
                logger.warning(
                    f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
                )
            else:
                logger.warning(
                    f"Event thumbnails sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
                    "Aborting due to safety threshold."
                )
                result.aborted = True
                return result

        if dry_run:
            logger.info(
                f"Event thumbnails sync (dry run): Found {len(orphans)} orphaned files"
            )
            return result

        # Delete orphans
        logger.info(f"Deleting {len(orphans)} orphaned event thumbnail files")
        for file_path in orphans:
            try:
                os.unlink(file_path)
                result.orphans_deleted += 1
            except OSError as e:
                logger.error(f"Failed to delete {file_path}: {e}")

    except Exception as e:
        logger.error(f"Error syncing event thumbnails: {e}")
        result.error = str(e)

    return result
def sync_review_thumbnails(dry_run: bool = False, force: bool = False) -> SyncResult:
    """Sync review segment thumbnails - delete files not referenced by any review segment.

    Review thumbnails are stored at: CLIPS_DIR/review/thumb-{camera}-{review_id}.webp
    The full path is stored in ReviewSegment.thumb_path
    """
    result = SyncResult(media_type="review_thumbnails")

    try:
        # Get all thumb paths from DB
        review_thumb_paths = set(
            r.thumb_path
            for r in ReviewSegment.select(ReviewSegment.thumb_path)
            if r.thumb_path
        )

        # Find review thumbnail files on disk
        review_dir = os.path.join(CLIPS_DIR, "review")
        thumbnail_files: list[str] = []
        if os.path.isdir(review_dir):
            for file in os.listdir(review_dir):
                if file.startswith("thumb-") and file.endswith(".webp"):
                    file_path = os.path.join(review_dir, file)
                    thumbnail_files.append(file_path)

        result.files_checked = len(thumbnail_files)

        # Find orphans
        orphans: list[str] = []
        for file_path in thumbnail_files:
            if file_path not in review_thumb_paths:
                orphans.append(file_path)

        result.orphans_found = len(orphans)
        result.orphan_paths = orphans

        if len(orphans) == 0:
            return result

        # Safety check
        if (
            result.files_checked > 0
            and len(orphans) / result.files_checked > SAFETY_THRESHOLD
        ):
            if force:
                logger.warning(
                    f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
                )
            else:
                logger.warning(
                    f"Review thumbnails sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
                    "Aborting due to safety threshold."
                )
                result.aborted = True
                return result

        if dry_run:
            logger.info(
                f"Review thumbnails sync (dry run): Found {len(orphans)} orphaned files"
            )
            return result

        # Delete orphans
        logger.info(f"Deleting {len(orphans)} orphaned review thumbnail files")
        for file_path in orphans:
            try:
                os.unlink(file_path)
                result.orphans_deleted += 1
            except OSError as e:
                logger.error(f"Failed to delete {file_path}: {e}")

    except Exception as e:
        logger.error(f"Error syncing review thumbnails: {e}")
        result.error = str(e)

    return result
def sync_previews(dry_run: bool = False, force: bool = False) -> SyncResult:
    """Sync preview files - delete files not referenced by any preview record.

    Previews are stored at: CLIPS_DIR/previews/{camera}/*.mp4
    The full path is stored in Previews.path
    """
    result = SyncResult(media_type="previews")

    try:
        # Get all preview paths from DB
        preview_paths = set(p.path for p in Previews.select(Previews.path) if p.path)

        # Find preview files on disk
        previews_dir = os.path.join(CLIPS_DIR, "previews")
        preview_files: list[str] = []
        if os.path.isdir(previews_dir):
            for camera_dir in os.listdir(previews_dir):
                camera_path = os.path.join(previews_dir, camera_dir)
                if not os.path.isdir(camera_path):
                    continue
                for file in os.listdir(camera_path):
                    if file.endswith(".mp4"):
                        file_path = os.path.join(camera_path, file)
                        preview_files.append(file_path)

        result.files_checked = len(preview_files)

        # Find orphans
        orphans: list[str] = []
        for file_path in preview_files:
            if file_path not in preview_paths:
                orphans.append(file_path)

        result.orphans_found = len(orphans)
        result.orphan_paths = orphans

        if len(orphans) == 0:
            return result

        # Safety check
        if (
            result.files_checked > 0
            and len(orphans) / result.files_checked > SAFETY_THRESHOLD
        ):
            if force:
                logger.warning(
                    f"Previews sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
                )
            else:
                logger.warning(
                    f"Previews sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
                    "Aborting due to safety threshold."
                )
                result.aborted = True
                return result

        if dry_run:
            logger.info(f"Previews sync (dry run): Found {len(orphans)} orphaned files")
            return result

        # Delete orphans
        logger.info(f"Deleting {len(orphans)} orphaned preview files")
        for file_path in orphans:
            try:
                os.unlink(file_path)
                result.orphans_deleted += 1
            except OSError as e:
                logger.error(f"Failed to delete {file_path}: {e}")

    except Exception as e:
        logger.error(f"Error syncing previews: {e}")
        result.error = str(e)

    return result
def sync_exports(dry_run: bool = False, force: bool = False) -> SyncResult:
    """Sync export files - delete files not referenced by any export record.

    Export videos are stored at: EXPORT_DIR/*.mp4
    Export thumbnails are stored at: CLIPS_DIR/export/*.jpg
    The paths are stored in Export.video_path and Export.thumb_path
    """
    result = SyncResult(media_type="exports")

    try:
        # Get all export paths from DB
        export_video_paths = set()
        export_thumb_paths = set()
        for e in Export.select(Export.video_path, Export.thumb_path):
            if e.video_path:
                export_video_paths.add(e.video_path)
            if e.thumb_path:
                export_thumb_paths.add(e.thumb_path)

        # Find export video files on disk
        export_files: list[str] = []
        if os.path.isdir(EXPORT_DIR):
            for file in os.listdir(EXPORT_DIR):
                if file.endswith(".mp4"):
                    file_path = os.path.join(EXPORT_DIR, file)
                    export_files.append(file_path)

        # Find export thumbnail files on disk
        export_thumb_dir = os.path.join(CLIPS_DIR, "export")
        thumb_files: list[str] = []
        if os.path.isdir(export_thumb_dir):
            for file in os.listdir(export_thumb_dir):
                if file.endswith(".jpg"):
                    file_path = os.path.join(export_thumb_dir, file)
                    thumb_files.append(file_path)

        result.files_checked = len(export_files) + len(thumb_files)

        # Find orphans
        orphans: list[str] = []
        for file_path in export_files:
            if file_path not in export_video_paths:
                orphans.append(file_path)
        for file_path in thumb_files:
            if file_path not in export_thumb_paths:
                orphans.append(file_path)

        result.orphans_found = len(orphans)
        result.orphan_paths = orphans

        if len(orphans) == 0:
            return result

        # Safety check
        if (
            result.files_checked > 0
            and len(orphans) / result.files_checked > SAFETY_THRESHOLD
        ):
            if force:
                logger.warning(
                    f"Exports sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files (force=True, bypassing safety threshold)."
                )
            else:
                logger.warning(
                    f"Exports sync: Would delete {len(orphans)}/{result.files_checked} "
                    f"({len(orphans) / result.files_checked * 100:.2f}%) files. "
                    "Aborting due to safety threshold."
                )
                result.aborted = True
                return result

        if dry_run:
            logger.info(f"Exports sync (dry run): Found {len(orphans)} orphaned files")
            return result

        # Delete orphans
        logger.info(f"Deleting {len(orphans)} orphaned export files")
        for file_path in orphans:
            try:
                os.unlink(file_path)
                result.orphans_deleted += 1
            except OSError as e:
                logger.error(f"Failed to delete {file_path}: {e}")

    except Exception as e:
        logger.error(f"Error syncing exports: {e}")
        result.error = str(e)

    return result

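Each sync helper shares the same guard: if the orphans exceed SAFETY_THRESHOLD as a fraction of files checked, the run aborts unless force is set. A minimal sketch of that check in isolation; the 0.5 value is an assumption taken from the UI copy later in this diff, which describes the threshold as "more than 50% of the files":

def would_abort(orphans: int, files_checked: int, force: bool = False) -> bool:
    SAFETY_THRESHOLD = 0.5  # assumed value, mirroring the "more than 50%" UI text
    if files_checked == 0:
        return False
    over_threshold = orphans / files_checked > SAFETY_THRESHOLD
    return over_threshold and not force
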
@dataclass
class MediaSyncResults:
    """Combined results from all media sync operations."""

    event_snapshots: SyncResult | None = None
    event_thumbnails: SyncResult | None = None
    review_thumbnails: SyncResult | None = None
    previews: SyncResult | None = None
    exports: SyncResult | None = None
    recordings: SyncResult | None = None

    @property
    def total_files_checked(self) -> int:
        total = 0
        for result in [
            self.event_snapshots,
            self.event_thumbnails,
            self.review_thumbnails,
            self.previews,
            self.exports,
            self.recordings,
        ]:
            if result:
                total += result.files_checked
        return total

    @property
    def total_orphans_found(self) -> int:
        total = 0
        for result in [
            self.event_snapshots,
            self.event_thumbnails,
            self.review_thumbnails,
            self.previews,
            self.exports,
            self.recordings,
        ]:
            if result:
                total += result.orphans_found
        return total

    @property
    def total_orphans_deleted(self) -> int:
        total = 0
        for result in [
            self.event_snapshots,
            self.event_thumbnails,
            self.review_thumbnails,
            self.previews,
            self.exports,
            self.recordings,
        ]:
            if result:
                total += result.orphans_deleted
        return total

    def to_dict(self) -> dict:
        """Convert results to dictionary for API response."""
        results = {}
        for name, result in [
            ("event_snapshots", self.event_snapshots),
            ("event_thumbnails", self.event_thumbnails),
            ("review_thumbnails", self.review_thumbnails),
            ("previews", self.previews),
            ("exports", self.exports),
            ("recordings", self.recordings),
        ]:
            if result:
                results[name] = {
                    "files_checked": result.files_checked,
                    "orphans_found": result.orphans_found,
                    "orphans_deleted": result.orphans_deleted,
                    "aborted": result.aborted,
                    "error": result.error,
                }
        results["totals"] = {
            "files_checked": self.total_files_checked,
            "orphans_found": self.total_orphans_found,
            "orphans_deleted": self.total_orphans_deleted,
        }
        return results

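Read straight from to_dict above, a response for a run that only synced exports would take this shape (the counts are illustrative):

{
    "exports": {
        "files_checked": 42,
        "orphans_found": 3,
        "orphans_deleted": 3,
        "aborted": False,
        "error": None,
    },
    "totals": {"files_checked": 42, "orphans_found": 3, "orphans_deleted": 3},
}
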
def sync_all_media(
    dry_run: bool = False, media_types: list[str] = ["all"], force: bool = False
) -> MediaSyncResults:
    """Sync specified media types with the database.

    Args:
        dry_run: If True, only report orphans without deleting them.
        media_types: List of media types to sync. Can include: 'all', 'event_snapshots',
            'event_thumbnails', 'review_thumbnails', 'previews', 'exports', 'recordings'
        force: If True, bypass safety threshold checks.

    Returns:
        MediaSyncResults with details of each sync operation.
    """
    logger.debug(
        f"Starting media sync (dry_run={dry_run}, media_types={media_types}, force={force})"
    )

    results = MediaSyncResults()

    # Determine which media types to sync
    sync_all = "all" in media_types

    if sync_all or "event_snapshots" in media_types:
        results.event_snapshots = sync_event_snapshots(dry_run=dry_run, force=force)

    if sync_all or "event_thumbnails" in media_types:
        results.event_thumbnails = sync_event_thumbnails(dry_run=dry_run, force=force)

    if sync_all or "review_thumbnails" in media_types:
        results.review_thumbnails = sync_review_thumbnails(dry_run=dry_run, force=force)

    if sync_all or "previews" in media_types:
        results.previews = sync_previews(dry_run=dry_run, force=force)

    if sync_all or "exports" in media_types:
        results.exports = sync_exports(dry_run=dry_run, force=force)

    if sync_all or "recordings" in media_types:
        results.recordings = sync_recordings(dry_run=dry_run, force=force)

    logger.info(
        f"Media sync complete: checked {results.total_files_checked} files, "
        f"found {results.total_orphans_found} orphans, "
        f"deleted {results.total_orphans_deleted}"
    )

    return results

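A dry run is the safe way to exercise this end to end: every helper returns before deleting anything, so only the orphan counts are populated. Minimal usage sketch based on the signatures above:

results = sync_all_media(dry_run=True, media_types=["exports", "previews"])
print(results.to_dict()["totals"])  # orphans_deleted stays 0 on a dry run
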
@@ -65,15 +65,10 @@ class FrigateProcess(BaseProcess):
        logging.basicConfig(handlers=[], force=True)
        logging.getLogger().addHandler(QueueHandler(self.__log_queue))

        # Always apply base log level suppressions for noisy third-party libraries
        # even if no specific logConfig is provided
        if logConfig:
            frigate.log.apply_log_levels(
                logConfig.default.value.upper(), logConfig.logs
            )
        else:
            # Apply default INFO level with standard library suppressions
            frigate.log.apply_log_levels("INFO", {})

        self._setup_memray()

@@ -8,7 +8,6 @@ import time
from pathlib import Path
from typing import Optional

from frigate.const import SUPPORTED_RK_SOCS
from frigate.util.file import FileLock

logger = logging.getLogger(__name__)
@@ -69,20 +68,9 @@ def is_rknn_compatible(model_path: str, model_type: str | None = None) -> bool:
        True if the model is RKNN-compatible, False otherwise
    """
    soc = get_soc_type()

    if soc is None:
        return False

    # Check if the SoC is actually a supported RK device
    # This prevents false positives on non-RK devices (e.g., macOS Docker)
    # where /proc/device-tree/compatible might exist but contain non-RK content
    if soc not in SUPPORTED_RK_SOCS:
        logger.debug(
            f"SoC '{soc}' is not a supported RK device for RKNN conversion. "
            f"Supported SoCs: {SUPPORTED_RK_SOCS}"
        )
        return False

    if not model_type:
        model_type = get_rknn_model_type(model_path)

@@ -417,12 +417,12 @@ def get_openvino_npu_stats() -> Optional[dict[str, str]]:
        else:
            usage = 0.0

        return {"npu": f"{round(usage, 2)}", "mem": "-"}
        return {"npu": f"{round(usage, 2)}", "mem": "-%"}
    except (FileNotFoundError, PermissionError, ValueError):
        return None

def get_rockchip_gpu_stats() -> Optional[dict[str, str]]:
def get_rockchip_gpu_stats() -> Optional[dict[str, str | float]]:
    """Get GPU stats using rk."""
    try:
        with open("/sys/kernel/debug/rkrga/load", "r") as f:
@@ -440,7 +440,16 @@ def get_rockchip_gpu_stats() -> Optional[dict[str, str]]:
            return None

        average_load = f"{round(sum(load_values) / len(load_values), 2)}%"
        return {"gpu": average_load, "mem": "-"}
        stats: dict[str, str | float] = {"gpu": average_load, "mem": "-%"}

        try:
            with open("/sys/class/thermal/thermal_zone5/temp", "r") as f:
                line = f.readline().strip()
                stats["temp"] = round(int(line) / 1000, 1)
        except (FileNotFoundError, OSError, ValueError):
            pass

        return stats

def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]:
@@ -463,13 +472,25 @@ def get_rockchip_npu_stats() -> Optional[dict[str, float | str]]:

        percentages = [int(load) for load in core_loads]
        mean = round(sum(percentages) / len(percentages), 2)
        return {"npu": mean, "mem": "-"}
        stats: dict[str, float | str] = {"npu": mean, "mem": "-%"}

        try:
            with open("/sys/class/thermal/thermal_zone6/temp", "r") as f:
                line = f.readline().strip()
                stats["temp"] = round(int(line) / 1000, 1)
        except (FileNotFoundError, OSError, ValueError):
            pass

        return stats

def try_get_info(f, h, default="N/A"):
|
||||
def try_get_info(f, h, default="N/A", sensor=None):
|
||||
try:
|
||||
if h:
|
||||
v = f(h)
|
||||
if sensor is not None:
|
||||
v = f(h, sensor)
|
||||
else:
|
||||
v = f(h)
|
||||
else:
|
||||
v = f()
|
||||
except nvml.NVMLError_NotSupported:
|
||||
@@ -498,6 +519,9 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
            util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
            enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle)
            dec = try_get_info(nvml.nvmlDeviceGetDecoderUtilization, handle)
            temp = try_get_info(
                nvml.nvmlDeviceGetTemperature, handle, default=None, sensor=0
            )
            pstate = try_get_info(nvml.nvmlDeviceGetPowerState, handle, default=None)

            if util != "N/A":
@@ -510,6 +534,11 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
            else:
                gpu_mem_util = -1

            if temp != "N/A" and temp is not None:
                temp = float(temp)
            else:
                temp = None

            if enc != "N/A":
                enc_util = enc[0]
            else:
@@ -527,6 +556,7 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
                "enc": enc_util,
                "dec": dec_util,
                "pstate": pstate or "unknown",
                "temp": temp,
            }
        except Exception:
            pass
@@ -549,6 +579,53 @@ def get_jetson_stats() -> Optional[dict[int, dict]]:
    return results

def get_hailo_temps() -> dict[str, float]:
    """Get temperatures for Hailo devices."""
    try:
        from hailo_platform import Device
    except ModuleNotFoundError:
        return {}

    temps = {}

    try:
        device_ids = Device.scan()
        for i, device_id in enumerate(device_ids):
            try:
                with Device(device_id) as device:
                    temp_info = device.control.get_chip_temperature()

                    # Get board name and normalise it
                    identity = device.control.identify()
                    board_name = None
                    for line in str(identity).split("\n"):
                        if line.startswith("Board Name:"):
                            board_name = (
                                line.split(":", 1)[1].strip().lower().replace("-", "")
                            )
                            break

                    if not board_name:
                        board_name = f"hailo{i}"

                    # Use indexed name if multiple devices, otherwise just the board name
                    device_name = (
                        f"{board_name}-{i}" if len(device_ids) > 1 else board_name
                    )

                    # ts1_temperature is also available, but appeared to be the same as ts0 in testing.
                    temps[device_name] = round(temp_info.ts0_temperature, 1)
            except Exception as e:
                logger.debug(
                    f"Failed to get temperature for Hailo device {device_id}: {e}"
                )
                continue
    except Exception as e:
        logger.debug(f"Failed to scan for Hailo devices: {e}")

    return temps

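Because the import is guarded and every failure path degrades to an empty dict, callers can use this helper unconditionally. A minimal usage sketch; the printed device name follows the naming scheme in the function above and is illustrative:

temps = get_hailo_temps()
for name, celsius in temps.items():
    print(f"{name}: {celsius} C")  # e.g. "hailo8-0: 47.3 C" on a multi-device host
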
def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedProcess:
    """Run ffprobe on stream."""
    clean_path = escape_special_characters(path)
@@ -584,12 +661,17 @@ def ffprobe_stream(ffmpeg, path: str, detailed: bool = False) -> sp.CompletedPro

def vainfo_hwaccel(device_name: Optional[str] = None) -> sp.CompletedProcess:
    """Run vainfo."""
    ffprobe_cmd = (
        ["vainfo"]
        if not device_name
        else ["vainfo", "--display", "drm", "--device", f"/dev/dri/{device_name}"]
    )
    return sp.run(ffprobe_cmd, capture_output=True)
    if not device_name:
        cmd = ["vainfo"]
    else:
        if os.path.isabs(device_name) and device_name.startswith("/dev/dri/"):
            device_path = device_name
        else:
            device_path = f"/dev/dri/{device_name}"

        cmd = ["vainfo", "--display", "drm", "--device", device_path]

    return sp.run(cmd, capture_output=True)


def get_nvidia_driver_info() -> dict[str, Any]:

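The rewritten vainfo_hwaccel accepts either a bare render node name or a full /dev/dri path. A hedged usage sketch (the device names are examples; what exists depends on the host):

vainfo_hwaccel()                        # system default display
vainfo_hwaccel("renderD128")            # expands to /dev/dri/renderD128
vainfo_hwaccel("/dev/dri/renderD129")   # absolute /dev/dri paths now pass through as-is
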
121
frigate/video.py
@@ -3,6 +3,7 @@ import queue
import subprocess as sp
import threading
import time
from collections import deque
from datetime import datetime, timedelta, timezone
from multiprocessing import Queue, Value
from multiprocessing.synchronize import Event as MpEvent
@@ -115,6 +116,7 @@ def capture_frames(
    frame_rate.start()
    skipped_eps = EventsPerSecond()
    skipped_eps.start()

    config_subscriber = CameraConfigUpdateSubscriber(
        None, {config.name: config}, [CameraConfigUpdateEnum.enabled]
    )
@@ -179,6 +181,9 @@ class CameraWatchdog(threading.Thread):
        camera_fps,
        skipped_fps,
        ffmpeg_pid,
        stalls,
        reconnects,
        detection_frame,
        stop_event,
    ):
        threading.Thread.__init__(self)
@@ -199,6 +204,10 @@ class CameraWatchdog(threading.Thread):
        self.frame_index = 0
        self.stop_event = stop_event
        self.sleeptime = self.config.ffmpeg.retry_interval
        self.reconnect_timestamps = deque()
        self.stalls = stalls
        self.reconnects = reconnects
        self.detection_frame = detection_frame

        self.config_subscriber = CameraConfigUpdateSubscriber(
            None,
@@ -213,6 +222,35 @@ class CameraWatchdog(threading.Thread):
        self.latest_invalid_segment_time: float = 0
        self.latest_cache_segment_time: float = 0

        # Stall tracking (based on last processed frame)
        self._stall_timestamps: deque[float] = deque()
        self._stall_active: bool = False

        # Status caching to reduce message volume
        self._last_detect_status: str | None = None
        self._last_record_status: str | None = None
        self._last_status_update_time: float = 0.0

    def _send_detect_status(self, status: str, now: float) -> None:
        """Send detect status only if changed or retry_interval has elapsed."""
        if (
            status != self._last_detect_status
            or (now - self._last_status_update_time) >= self.sleeptime
        ):
            self.requestor.send_data(f"{self.config.name}/status/detect", status)
            self._last_detect_status = status
            self._last_status_update_time = now

    def _send_record_status(self, status: str, now: float) -> None:
        """Send record status only if changed or retry_interval has elapsed."""
        if (
            status != self._last_record_status
            or (now - self._last_status_update_time) >= self.sleeptime
        ):
            self.requestor.send_data(f"{self.config.name}/status/record", status)
            self._last_record_status = status
            self._last_status_update_time = now

    def _update_enabled_state(self) -> bool:
        """Fetch the latest config and update enabled state."""
        self.config_subscriber.check_for_updates()
@@ -239,6 +277,14 @@ class CameraWatchdog(threading.Thread):
        else:
            self.ffmpeg_detect_process.wait()

        # Update reconnects
        now = datetime.now().timestamp()
        self.reconnect_timestamps.append(now)
        while self.reconnect_timestamps and self.reconnect_timestamps[0] < now - 3600:
            self.reconnect_timestamps.popleft()
        if self.reconnects:
            self.reconnects.value = len(self.reconnect_timestamps)

        # Wait for old capture thread to fully exit before starting a new one
        if self.capture_thread is not None and self.capture_thread.is_alive():
            self.logger.info("Waiting for capture thread to exit...")
@@ -261,7 +307,10 @@ class CameraWatchdog(threading.Thread):
        self.start_all_ffmpeg()

        time.sleep(self.sleeptime)
        while not self.stop_event.wait(self.sleeptime):
        last_restart_time = datetime.now().timestamp()

        # 1 second watchdog loop
        while not self.stop_event.wait(1):
            enabled = self._update_enabled_state()
            if enabled != self.was_enabled:
                if enabled:
@@ -277,12 +326,9 @@ class CameraWatchdog(threading.Thread):
                    self.stop_all_ffmpeg()

                    # update camera status
                    self.requestor.send_data(
                        f"{self.config.name}/status/detect", "disabled"
                    )
                    self.requestor.send_data(
                        f"{self.config.name}/status/record", "disabled"
                    )
                    now = datetime.now().timestamp()
                    self._send_detect_status("disabled", now)
                    self._send_record_status("disabled", now)
                self.was_enabled = enabled
                continue

@@ -321,36 +367,44 @@ class CameraWatchdog(threading.Thread):

            now = datetime.now().timestamp()

            # Check if enough time has passed to allow ffmpeg restart (backoff pacing)
            time_since_last_restart = now - last_restart_time
            can_restart = time_since_last_restart >= self.sleeptime

            if not self.capture_thread.is_alive():
                self.requestor.send_data(f"{self.config.name}/status/detect", "offline")
                self._send_detect_status("offline", now)
                self.camera_fps.value = 0
                self.logger.error(
                    f"Ffmpeg process crashed unexpectedly for {self.config.name}."
                )
                self.reset_capture_thread(terminate=False)
                if can_restart:
                    self.reset_capture_thread(terminate=False)
                    last_restart_time = now
            elif self.camera_fps.value >= (self.config.detect.fps + 10):
                self.fps_overflow_count += 1

                if self.fps_overflow_count == 3:
                    self.requestor.send_data(
                        f"{self.config.name}/status/detect", "offline"
                    )
                    self._send_detect_status("offline", now)
                    self.fps_overflow_count = 0
                    self.camera_fps.value = 0
                    self.logger.info(
                        f"{self.config.name} exceeded fps limit. Exiting ffmpeg..."
                    )
                    self.reset_capture_thread(drain_output=False)
                    if can_restart:
                        self.reset_capture_thread(drain_output=False)
                        last_restart_time = now
            elif now - self.capture_thread.current_frame.value > 20:
                self.requestor.send_data(f"{self.config.name}/status/detect", "offline")
                self._send_detect_status("offline", now)
                self.camera_fps.value = 0
                self.logger.info(
                    f"No frames received from {self.config.name} in 20 seconds. Exiting ffmpeg..."
                )
                self.reset_capture_thread()
                if can_restart:
                    self.reset_capture_thread()
                    last_restart_time = now
            else:
                # process is running normally
                self.requestor.send_data(f"{self.config.name}/status/detect", "online")
                self._send_detect_status("online", now)
                self.fps_overflow_count = 0

            for p in self.ffmpeg_other_processes:
@@ -421,9 +475,7 @@ class CameraWatchdog(threading.Thread):

                    continue
                else:
                    self.requestor.send_data(
                        f"{self.config.name}/status/record", "online"
                    )
                    self._send_record_status("online", now)
                    p["latest_segment_time"] = self.latest_cache_segment_time

                if poll is None:
@@ -439,6 +491,34 @@ class CameraWatchdog(threading.Thread):
                        p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]
                    )

            # Update stall metrics based on last processed frame timestamp
            now = datetime.now().timestamp()
            processed_ts = (
                float(self.detection_frame.value) if self.detection_frame else 0.0
            )
            if processed_ts > 0:
                delta = now - processed_ts
                observed_fps = (
                    self.camera_fps.value
                    if self.camera_fps.value > 0
                    else self.config.detect.fps
                )
                interval = 1.0 / max(observed_fps, 0.1)
                stall_threshold = max(2.0 * interval, 2.0)

                if delta > stall_threshold:
                    if not self._stall_active:
                        self._stall_timestamps.append(now)
                        self._stall_active = True
                else:
                    self._stall_active = False

                while self._stall_timestamps and self._stall_timestamps[0] < now - 3600:
                    self._stall_timestamps.popleft()

                if self.stalls:
                    self.stalls.value = len(self._stall_timestamps)

        self.stop_all_ffmpeg()
        self.logpipe.close()
        self.config_subscriber.stop()
@@ -576,6 +656,9 @@ class CameraCapture(FrigateProcess):
            self.camera_metrics.camera_fps,
            self.camera_metrics.skipped_fps,
            self.camera_metrics.ffmpeg_pid,
            self.camera_metrics.stalls_last_hour,
            self.camera_metrics.reconnects_last_hour,
            self.camera_metrics.detection_frame,
            self.stop_event,
        )
        camera_watchdog.start()

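The stall detector above derives its threshold from the observed frame interval but never lets it dip below two seconds. A standalone sketch of just that computation, using the same expressions as the diff:

def stall_threshold_seconds(observed_fps: float, configured_fps: float) -> float:
    fps = observed_fps if observed_fps > 0 else configured_fps
    interval = 1.0 / max(fps, 0.1)
    return max(2.0 * interval, 2.0)

stall_threshold_seconds(5.0, 5.0)   # 2.0 -- capped at the floor for typical detect fps
stall_threshold_seconds(0.0, 0.2)   # 10.0 -- slow feeds get a proportionally longer window
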
50
migrations/033_create_export_case_table.py
Normal file
@@ -0,0 +1,50 @@
"""Peewee migrations -- 033_create_export_case_table.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['model_name']            # Return model in current state by name

    > migrator.sql(sql)                             # Run custom SQL
    > migrator.python(func, *args, **kwargs)        # Run python code
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.drop_index(model, *col_names)
    > migrator.add_not_null(model, *field_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)

"""

import peewee as pw

SQL = pw.SQL


def migrate(migrator, database, fake=False, **kwargs):
    migrator.sql(
        """
        CREATE TABLE IF NOT EXISTS "exportcase" (
            "id" VARCHAR(30) NOT NULL PRIMARY KEY,
            "name" VARCHAR(100) NOT NULL,
            "description" TEXT NULL,
            "created_at" DATETIME NOT NULL,
            "updated_at" DATETIME NOT NULL
        )
        """
    )
    migrator.sql(
        'CREATE INDEX IF NOT EXISTS "exportcase_name" ON "exportcase" ("name")'
    )
    migrator.sql(
        'CREATE INDEX IF NOT EXISTS "exportcase_created_at" ON "exportcase" ("created_at")'
    )


def rollback(migrator, database, fake=False, **kwargs):
    pass
40
migrations/034_add_export_case_to_exports.py
Normal file
@@ -0,0 +1,40 @@
"""Peewee migrations -- 034_add_export_case_to_exports.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['model_name']            # Return model in current state by name

    > migrator.sql(sql)                             # Run custom SQL
    > migrator.python(func, *args, **kwargs)        # Run python code
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.drop_index(model, *col_names)
    > migrator.add_not_null(model, *field_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)

"""

import peewee as pw

SQL = pw.SQL


def migrate(migrator, database, fake=False, **kwargs):
    # Add nullable export_case_id column to export table
    migrator.sql('ALTER TABLE "export" ADD COLUMN "export_case_id" VARCHAR(30) NULL')

    # Index for faster case-based queries
    migrator.sql(
        'CREATE INDEX IF NOT EXISTS "export_export_case_id" ON "export" ("export_case_id")'
    )


def rollback(migrator, database, fake=False, **kwargs):
    pass
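Together, migrations 033 and 034 let an export row point at a case through the nullable export_case_id column. A minimal sketch of the resulting relationship, querying sqlite3 directly; the database path is an assumption for illustration, and Frigate itself goes through its peewee models rather than raw SQL:

import sqlite3

conn = sqlite3.connect("frigate.db")  # assumed path, for illustration only
rows = conn.execute(
    'SELECT e."id", e."video_path", c."name" '
    'FROM "export" e LEFT JOIN "exportcase" c ON e."export_case_id" = c."id"'
).fetchall()
for export_id, video_path, case_name in rows:
    print(export_id, video_path, case_name or "uncategorized")
conn.close()
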
@@ -48,6 +48,10 @@
    "name": {
      "placeholder": "Name the Export"
    },
    "case": {
      "label": "Case",
      "placeholder": "Select a case"
    },
    "select": "Select",
    "export": "Export",
    "selectOrExport": "Select or Export",

@@ -38,10 +38,6 @@
      "label": "Sub Labels",
      "all": "All Sub Labels"
    },
    "attributes": {
      "label": "Classification Attributes",
      "all": "All Attributes"
    },
    "score": "Score",
    "estimatedSpeed": "Estimated Speed ({{unit}})",
    "features": {

@@ -324,9 +324,6 @@
    "enabled": {
      "label": "Enable record on all cameras."
    },
    "sync_recordings": {
      "label": "Sync recordings with disk on startup and once a day."
    },
    "expire_interval": {
      "label": "Number of minutes to wait between cleanup runs."
    },
@@ -758,4 +755,4 @@
      "label": "Keep track of original state of camera."
    }
  }
}
}

@@ -4,9 +4,6 @@
    "enabled": {
      "label": "Enable record on all cameras."
    },
    "sync_recordings": {
      "label": "Sync recordings with disk on startup and once a day."
    },
    "expire_interval": {
      "label": "Number of minutes to wait between cleanup runs."
    },
@@ -90,4 +87,4 @@
      "label": "Keep track of original state of recording."
    }
  }
}
}

@@ -1,9 +1,7 @@
{
  "documentTitle": "Classification Models - Frigate",
  "details": {
    "scoreInfo": "Score represents the average classification confidence across all detections of this object.",
    "none": "None",
    "unknown": "Unknown"
    "scoreInfo": "Score represents the average classification confidence across all detections of this object."
  },
  "button": {
    "deleteClassificationAttempts": "Delete Classification Images",
@@ -74,7 +72,7 @@
    },
    "renameCategory": {
      "title": "Rename Class",
      "desc": "Enter a new name for {{name}}. You will be required to retrain the model for the name change to take effect."
      "desc": "Enter a new name for {{name}}. You will be required to retrain the model for the name change to take affect."
    },
    "description": {
      "invalidName": "Invalid name. Names can only include letters, numbers, spaces, apostrophes, underscores, and hyphens."
@@ -85,6 +83,7 @@
      "aria": "Select Recent Classifications"
    },
    "categories": "Classes",
    "none": "None",
    "createCategory": {
      "new": "Create New Class"
    },
@@ -139,7 +138,6 @@
    "nameOnlyNumbers": "Model name cannot contain only numbers",
    "classRequired": "At least 1 class is required",
    "classesUnique": "Class names must be unique",
    "noneNotAllowed": "The class 'none' is not allowed",
    "stateRequiresTwoClasses": "State models require at least 2 classes",
    "objectLabelRequired": "Please select an object label",
    "objectTypeRequired": "Please select a classification type"

@@ -104,14 +104,12 @@
    "regenerate": "A new description has been requested from {{provider}}. Depending on the speed of your provider, the new description may take some time to regenerate.",
    "updatedSublabel": "Successfully updated sub label.",
    "updatedLPR": "Successfully updated license plate.",
    "updatedAttributes": "Successfully updated attributes.",
    "audioTranscription": "Successfully requested audio transcription. Depending on the speed of your Frigate server, the transcription may take some time to complete."
  },
  "error": {
    "regenerate": "Failed to call {{provider}} for a new description: {{errorMessage}}",
    "updatedSublabelFailed": "Failed to update sub label: {{errorMessage}}",
    "updatedLPRFailed": "Failed to update license plate: {{errorMessage}}",
    "updatedAttributesFailed": "Failed to update attributes: {{errorMessage}}",
    "audioTranscription": "Failed to request audio transcription: {{errorMessage}}"
  }
}
@@ -127,10 +125,6 @@
      "desc": "Enter a new license plate value for this {{label}}",
      "descNoLabel": "Enter a new license plate value for this tracked object"
    },
    "editAttributes": {
      "title": "Edit attributes",
      "desc": "Select classification attributes for this {{label}}"
    },
    "snapshotScore": {
      "label": "Snapshot Score"
    },
@@ -142,7 +136,6 @@
      "label": "Score"
    },
    "recognizedLicensePlate": "Recognized License Plate",
    "attributes": "Classification Attributes",
    "estimatedSpeed": "Estimated Speed",
    "objects": "Objects",
    "camera": "Camera",

@@ -2,6 +2,10 @@
  "documentTitle": "Export - Frigate",
  "search": "Search",
  "noExports": "No exports found",
  "headings": {
    "cases": "Cases",
    "uncategorizedExports": "Uncategorized Exports"
  },
  "deleteExport": "Delete Export",
  "deleteExport.desc": "Are you sure you want to delete {{exportName}}?",
  "editExport": {
@@ -13,11 +17,21 @@
    "shareExport": "Share export",
    "downloadVideo": "Download video",
    "editName": "Edit name",
    "deleteExport": "Delete export"
    "deleteExport": "Delete export",
    "assignToCase": "Add to case"
  },
  "toast": {
    "error": {
      "renameExportFailed": "Failed to rename export: {{errorMessage}}"
      "renameExportFailed": "Failed to rename export: {{errorMessage}}",
      "assignCaseFailed": "Failed to update case assignment: {{errorMessage}}"
    }
  },
  "caseDialog": {
    "title": "Add to case",
    "description": "Choose an existing case or create a new one.",
    "selectLabel": "Case",
    "newCaseOption": "Create new case",
    "nameLabel": "Case name",
    "descriptionLabel": "Description"
  }
}

@@ -16,7 +16,6 @@
  "labels": "Labels",
  "zones": "Zones",
  "sub_labels": "Sub Labels",
  "attributes": "Attributes",
  "search_type": "Search Type",
  "time_range": "Time Range",
  "before": "Before",

@@ -679,7 +679,7 @@
      "desc": "Manage this Frigate instance's user accounts."
    },
    "addUser": "Add User",
    "updatePassword": "Reset Password",
    "updatePassword": "Update Password",
    "toast": {
      "success": {
        "createUser": "User {{user}} created successfully",
@@ -700,7 +700,7 @@
      "role": "Role",
      "noUsers": "No users found.",
      "changeRole": "Change user role",
      "password": "Reset Password",
      "password": "Password",
      "deleteUser": "Delete user"
    },
    "dialog": {
@@ -1065,5 +1065,53 @@
        "deleteTriggerFailed": "Failed to delete trigger: {{errorMessage}}"
      }
    }
  },
  "maintenance": {
    "title": "Maintenance",
    "sync": {
      "title": "Media Sync",
      "desc": "Frigate will periodically clean up media on a regular schedule according to your retention configuration. It is normal to see a few orphaned files as Frigate runs. Use this feature to remove orphaned media files from disk that are no longer referenced in the database.",
      "started": "Media sync started.",
      "alreadyRunning": "A sync job is already running",
      "error": "Failed to start sync",
      "currentStatus": "Status",
      "jobId": "Job ID",
      "startTime": "Start Time",
      "endTime": "End Time",
      "statusLabel": "Status",
      "results": "Results",
      "errorLabel": "Error",
      "mediaTypes": "Media Types",
      "allMedia": "All Media",
      "dryRun": "Dry Run",
      "dryRunEnabled": "No files will be deleted",
      "dryRunDisabled": "Files will be deleted",
      "force": "Force",
      "forceDesc": "Bypass safety threshold and complete sync even if more than 50% of the files would be deleted.",
      "running": "Sync Running...",
      "start": "Start Sync",
      "inProgress": "Sync is in progress. This page is disabled.",
      "status": {
        "queued": "Queued",
        "running": "Running",
        "completed": "Completed",
        "failed": "Failed",
        "notRunning": "Not Running"
      },
      "resultsFields": {
        "filesChecked": "Files Checked",
        "orphansFound": "Orphans Found",
        "orphansDeleted": "Orphans Deleted",
        "aborted": "Aborted. Deletion would exceed safety threshold.",
        "error": "Error",
        "totals": "Totals"
      },
      "event_snapshots": "Tracked Object Snapshots",
      "event_thumbnails": "Tracked Object Thumbnails",
      "review_thumbnails": "Review Thumbnails",
      "previews": "Previews",
      "exports": "Exports",
      "recordings": "Recordings"
    }
  }
}

@@ -51,6 +51,7 @@
    "gpuMemory": "GPU Memory",
    "gpuEncoder": "GPU Encoder",
    "gpuDecoder": "GPU Decoder",
    "gpuTemperature": "GPU Temperature",
    "gpuInfo": {
      "vainfoOutput": {
        "title": "Vainfo Output",
@@ -77,6 +78,7 @@
    },
    "npuUsage": "NPU Usage",
    "npuMemory": "NPU Memory",
    "npuTemperature": "NPU Temperature",
    "intelGpuWarning": {
      "title": "Intel GPU Stats Warning",
      "message": "GPU stats unavailable",
@@ -151,6 +153,17 @@
      "cameraDetectionsPerSecond": "{{camName}} detections per second",
      "cameraSkippedDetectionsPerSecond": "{{camName}} skipped detections per second"
    },
    "connectionQuality": {
      "title": "Connection Quality",
      "excellent": "Excellent",
      "fair": "Fair",
      "poor": "Poor",
      "unusable": "Unusable",
      "fps": "FPS",
      "expectedFps": "Expected FPS",
      "reconnectsLastHour": "Reconnects (last hour)",
      "stallsLastHour": "Stalls (last hour)"
    },
    "toast": {
      "success": {
        "copyToClipboard": "Copied probe data to clipboard."
@@ -192,10 +205,7 @@
    "review_description_events_per_second": "Review Description",
    "object_description": "Object Description",
    "object_description_speed": "Object Description Speed",
    "object_description_events_per_second": "Object Description",
    "classification": "{{name}} Classification",
    "classification_speed": "{{name}} Classification Speed",
    "classification_events_per_second": "{{name}} Classification Events Per Second"
    "object_description_events_per_second": "Object Description"
  }
}

@@ -14,7 +14,6 @@ import ProtectedRoute from "@/components/auth/ProtectedRoute";
import { AuthProvider } from "@/context/auth-context";
import useSWR from "swr";
import { FrigateConfig } from "./types/frigateConfig";
import ActivityIndicator from "@/components/indicators/activity-indicator";

const Live = lazy(() => import("@/pages/Live"));
const Events = lazy(() => import("@/pages/Events"));
@@ -51,13 +50,6 @@ function DefaultAppView() {
  const { data: config } = useSWR<FrigateConfig>("config", {
    revalidateOnFocus: false,
  });

  // Compute required roles for main routes, ensuring we have config first
  // to prevent race condition where custom roles are temporarily unavailable
  const mainRouteRoles = config?.auth?.roles
    ? Object.keys(config.auth.roles)
    : undefined;

  return (
    <div className="size-full overflow-hidden">
      {isDesktop && <Sidebar />}
@@ -76,11 +68,13 @@ function DefaultAppView() {
        <Routes>
          <Route
            element={
              mainRouteRoles ? (
                <ProtectedRoute requiredRoles={mainRouteRoles} />
              ) : (
                <ActivityIndicator className="absolute left-1/2 top-1/2 -translate-x-1/2 -translate-y-1/2" />
              )
              <ProtectedRoute
                requiredRoles={
                  config?.auth.roles
                    ? Object.keys(config.auth.roles)
                    : ["admin", "viewer"]
                }
              />
            }
          >
            <Route index element={<Live />} />
@@ -11,6 +11,7 @@ import {
  TrackedObjectUpdateReturnType,
  TriggerStatus,
  FrigateAudioDetections,
  Job,
} from "@/types/ws";
import { FrigateStats } from "@/types/stats";
import { createContainer } from "react-tracked";
@@ -651,3 +652,40 @@ export function useTriggers(): { payload: TriggerStatus } {
      : { name: "", camera: "", event_id: "", type: "", score: 0 };
  return { payload: useDeepMemo(parsed) };
}

export function useJobStatus(
  jobType: string,
  revalidateOnFocus: boolean = true,
): { payload: Job | null } {
  const {
    value: { payload },
    send: sendCommand,
  } = useWs("job_state", "jobState");

  const jobData = useDeepMemo(
    payload && typeof payload === "string" ? JSON.parse(payload) : {},
  );
  const currentJob = jobData[jobType] || null;

  useEffect(() => {
    let listener: (() => void) | undefined;
    if (revalidateOnFocus) {
      sendCommand("jobState");
      listener = () => {
        if (document.visibilityState === "visible") {
          sendCommand("jobState");
        }
      };
      addEventListener("visibilitychange", listener);
    }

    return () => {
      if (listener) {
        removeEventListener("visibilitychange", listener);
      }
    };
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [revalidateOnFocus]);

  return { payload: currentJob as Job | null };
}
Some files were not shown because too many files have changed in this diff.