Mirror of https://github.com/blakeblackshear/frigate.git, synced 2025-12-24 05:58:30 -05:00

Compare commits: dependabot...master (30 commits)
| SHA1 |
| --- |
| 1d5c2466a8 |
| 0a293aebab |
| 1de7519d1a |
| c3f596327e |
| 90344540b3 |
| 7167cf57c5 |
| e47e82f4be |
| a43d294bd1 |
| 9f95a5f31f |
| 592c245dcd |
| 914ff4f1e5 |
| 9589c5fc24 |
| 3620ef27db |
| 5cf2ae0121 |
| 17d2bc240a |
| 6fd7f862f5 |
| 5d038b5c75 |
| c5fe354552 |
| 5dc8a85f2f |
| 0302db1c43 |
| a4764563a5 |
| 942a61ddfb |
| 4d582062fb |
| e0a8445bac |
| 2a271c0f5e |
| 925bf78811 |
| 59102794e8 |
| 20e5e3bdc0 |
| b94ebda9e5 |
| 8cdaef307a |
Makefile (2 changed lines)
````diff
@@ -1,7 +1,7 @@
 default_target: local

 COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
-VERSION = 0.16.2
+VERSION = 0.16.3
 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
 GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
 BOARDS= #Initialized empty
````
````diff
@@ -12,7 +12,7 @@
 A complete and local NVR designed for [Home Assistant](https://www.home-assistant.io) with AI object detection. Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras.

-Use of a GPU or AI accelerator such as a [Google Coral](https://coral.ai/products/) or [Hailo](https://hailo.ai/) is highly recommended. AI accelerators will outperform even the best CPUs with very little overhead.
+Use of a GPU, Integrated GPU, or AI accelerator such as a [Hailo](https://hailo.ai/) is highly recommended. Dedicated hardware will outperform even the best CPUs with very little overhead.

 - Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration)
 - Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary
````
````diff
@@ -5,21 +5,27 @@ set -euxo pipefail
 SQLITE3_VERSION="3.46.1"
 PYSQLITE3_VERSION="0.5.3"

+# Install libsqlite3-dev if not present (needed for some base images like NVIDIA TensorRT)
+if ! dpkg -l | grep -q libsqlite3-dev; then
+    echo "Installing libsqlite3-dev for compilation..."
+    apt-get update && apt-get install -y libsqlite3-dev && rm -rf /var/lib/apt/lists/*
+fi
+
 # Fetch the pre-built sqlite amalgamation instead of building from source
 if [[ ! -d "sqlite" ]]; then
     mkdir sqlite
     cd sqlite

     # Download the pre-built amalgamation from sqlite.org
     # For SQLite 3.46.1, the amalgamation version is 3460100
     SQLITE_AMALGAMATION_VERSION="3460100"

     wget https://www.sqlite.org/2024/sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}.zip -O sqlite-amalgamation.zip
     unzip sqlite-amalgamation.zip
     mv sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}/* .
     rmdir sqlite-amalgamation-${SQLITE_AMALGAMATION_VERSION}
     rm sqlite-amalgamation.zip

     cd ../
 fi
````
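The comment in the hunk above notes that SQLite 3.46.1 corresponds to amalgamation version 3460100. That follows SQLite's documented encoding of a release X.Y.Z as a seven-digit number; a small Python sketch of the mapping:

```python
def sqlite_amalgamation_version(release: str) -> str:
    """Encode an X.Y.Z SQLite release as the 7-digit amalgamation version string."""
    x, y, z = (int(part) for part in release.split("."))
    return f"{x}{y:02d}{z:02d}00"

# Matches the value hardcoded in the script above.
assert sqlite_amalgamation_version("3.46.1") == "3460100"
```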
````diff
@@ -8,6 +8,7 @@ fastapi == 0.115.*
 uvicorn == 0.30.*
 slowapi == 0.1.*
 joserfc == 1.0.*
+cryptography == 44.0.*
 pathvalidate == 3.2.*
 markupsafe == 3.0.*
 python-multipart == 0.0.12
````
````diff
@@ -1,2 +1 @@
 scikit-build == 0.18.*
-nvidia-pyindex
````
````diff
@@ -112,7 +112,7 @@ RUN apt-get update \
     && apt-get install -y protobuf-compiler libprotobuf-dev \
     && rm -rf /var/lib/apt/lists/*
 RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \
-    pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt
+    pip3 wheel --wheel-dir=/trt-model-wheels --no-deps -r /requirements-tensorrt-models.txt

 FROM wget AS jetson-ffmpeg
 ARG DEBIAN_FRONTEND
````
````diff
@@ -145,7 +145,8 @@ COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
     pip3 uninstall -y onnxruntime \
-    && pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \
+    && pip3 install -U /deps/trt-wheels/*.whl \
+    && pip3 install -U /deps/trt-model-wheels/*.whl \
     && ldconfig

 WORKDIR /opt/frigate/
````
````diff
@@ -164,13 +164,35 @@ According to [this discussion](https://github.com/blakeblackshear/frigate/issues
 Cameras connected via a Reolink NVR can be connected with the http stream, use `channel[0..15]` in the stream url for the additional channels.
 The setup of main stream can be also done via RTSP, but isn't always reliable on all hardware versions. The example configuration is working with the oldest HW version RLN16-410 device with multiple types of cameras.

+<details>
+<summary>Example Config</summary>
+
+:::tip
+
+Reolink's latest cameras support two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
+
+NOTE: The RTSP stream can not be prefixed with `ffmpeg:`, as go2rtc needs to handle the stream to support two way audio.
+
+Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
+
+:::
+
 ```yaml
 go2rtc:
   streams:
+    # example for connecting to a standard Reolink camera
     your_reolink_camera:
       - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
     your_reolink_camera_sub:
       - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
+    # example for connecting to a Reolink camera that supports two way talk
+    your_reolink_camera_twt:
+      - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
+      - "rtsp://username:password@reolink_ip/Preview_01_sub"
+    your_reolink_camera_twt_sub:
+      - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
+      - "rtsp://username:password@reolink_ip/Preview_01_sub"
+    # example for connecting to a Reolink NVR
     your_reolink_camera_via_nvr:
       - "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_main.bcs&user=username&password=password" # channel numbers are 0-15
       - "ffmpeg:your_reolink_camera_via_nvr#audio=aac"
````
````diff
@@ -201,25 +223,16 @@ cameras:
       roles:
         - detect
 ```

 #### Reolink Doorbell

-The reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability, a secondary rtsp stream can be added that will be using for the two way audio only.
-
-Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
-
 ```yaml
 go2rtc:
   streams:
     your_reolink_doorbell:
       - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
       - rtsp://reolink_ip/Preview_01_sub
     your_reolink_doorbell_sub:
       - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
 ```
+</details>

 ### Unifi Protect Cameras

 :::note

 Unifi G5s cameras and newer need a Unifi Protect server to enable the rtsps stream; it's not possible to enable it in standalone mode.

 :::

 Unifi protect cameras require the rtspx stream to be used with go2rtc.
 To utilize a Unifi protect camera, modify the rtsps link to begin with rtspx.
 Additionally, remove the "?enableSrtp" from the end of the Unifi link.
````
````diff
@@ -245,6 +258,10 @@ ffmpeg:
 TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded footage. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`.

+### Wyze Wireless Cameras
+
+Some community members have found better performance on Wyze cameras by using an alternative firmware known as [Thingino](https://thingino.com/).
+
 ## USB Cameras (aka Webcams)

 To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's [FFmpeg Device](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg-device) support:
````
````diff
@@ -277,5 +294,3 @@ cameras:
         width: 1024
         height: 576
 ```
-
-
````
````diff
@@ -98,6 +98,7 @@ This list of working and non-working PTZ cameras is based on user feedback.
 | Amcrest IP4M-S2112EW-AI | ✅ | ❌ | FOV relative movement not supported. |
 | Amcrest IP5M-1190EW | ✅ | ❌ | ONVIF Port: 80. FOV relative movement not supported. |
+| Annke CZ504 | ✅ | ✅ | Annke support provides specific firmware ([V5.7.1 build 250227](https://github.com/pierrepinon/annke_cz504/raw/refs/heads/main/digicap_V5-7-1_build_250227.dav)) to fix issue with ONVIF "TranslationSpaceFov" |
 | Axis Q-6155E | ✅ | ❌ | ONVIF service port: 80; Camera does not support MoveStatus. |
 | Ctronics PTZ | ✅ | ❌ | |
 | Dahua | ✅ | ✅ | Some low-end Dahuas (lite series, among others) have been reported to not support autotracking |
 | Dahua DH-SD2A500HB | ✅ | ❌ | |
````
````diff
@@ -158,6 +158,8 @@ Start with the [Usage](#usage) section and re-read the [Model Requirements](#mod
 Accuracy is definitely going to be improved with higher quality cameras / streams. It is important to look at the DORI (Detection Observation Recognition Identification) range of your camera, if that specification is posted. This specification explains the distance from the camera at which a person can be detected, observed, recognized, and identified. The identification range is the most relevant here, and the distance listed by the camera is the furthest that face recognition will realistically work.

+Some users have also noted that setting the stream in camera firmware to a constant bit rate (CBR) leads to better image clarity than with a variable bit rate (VBR).
+
 ### Why can't I bulk upload photos?

 It is important to methodically add photos to the library; bulk importing photos (especially from a general photo library) will lead to over-fitting in that particular scenario and hurt recognition performance.
````
````diff
@@ -18,10 +18,10 @@ genai:
   enabled: True
   provider: gemini
   api_key: "{FRIGATE_GEMINI_API_KEY}"
-  model: gemini-1.5-flash
+  model: gemini-2.0-flash

 cameras:
   front_camera:
     genai:
       enabled: True # <- enable GenAI for your front camera
       use_snapshot: True
````
````diff
@@ -30,7 +30,7 @@ cameras:
       required_zones:
         - steps
   indoor_camera:
     genai:
       enabled: False # <- disable GenAI for your indoor camera
 ```
````
````diff
@@ -78,7 +78,7 @@ Google Gemini has a free tier allowing [15 queries per minute](https://ai.google
 ### Supported Models

-You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini). At the time of writing, this includes `gemini-1.5-pro` and `gemini-1.5-flash`.
+You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini).

 ### Get API Key
````
````diff
@@ -96,7 +96,7 @@ genai:
   enabled: True
   provider: gemini
   api_key: "{FRIGATE_GEMINI_API_KEY}"
-  model: gemini-1.5-flash
+  model: gemini-2.0-flash
 ```

 :::note
````
````diff
@@ -111,7 +111,7 @@ OpenAI does not have a free tier for their API. With the release of gpt-4o, pric
 ### Supported Models

-You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
+You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models).

 ### Get API Key
````
````diff
@@ -139,11 +139,11 @@ Microsoft offers several vision models through Azure OpenAI. A subscription is r
 ### Supported Models

-You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
+You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).

 ### Create Resource and Get API Key

-To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key and resource URL, which must include the `api-version` parameter (see the example below). The model field is not required in your configuration as the model is part of the deployment name you chose when deploying the resource.
+To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key, model name, and resource URL, which must include the `api-version` parameter (see the example below).

 ### Configuration
````
````diff
@@ -151,7 +151,8 @@ To start using Azure OpenAI, you must first [create a resource](https://learn.mi
 genai:
   enabled: True
   provider: azure_openai
-  base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
+  base_url: https://instance.cognitiveservices.azure.com/openai/responses?api-version=2025-04-01-preview
+  model: gpt-5-mini
   api_key: "{FRIGATE_OPENAI_API_KEY}"
 ```
````
````diff
@@ -202,7 +203,7 @@ genai:
     car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
 ```

 Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.

 ```yaml
 cameras:
````
````diff
@@ -30,8 +30,7 @@ In the default mode, Frigate's LPR needs to first detect a `car` or `motorcycle`
 ## Minimum System Requirements

-License plate recognition works by running AI models locally on your system. The models are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.
-
+License plate recognition works by running AI models locally on your system. The YOLOv9 plate detector model and the OCR models ([PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)) are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.
 ## Configuration

 License plate recognition is disabled by default. Enable it in your config file:
````
````diff
@@ -15,7 +15,7 @@ The jsmpeg live view will use more browser and client GPU resources. Using go2rt
 | ------ | ------------------------------------- | ---------- | ---------------------------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 | jsmpeg | same as `detect -> fps`, capped at 10 | 720p | no | no | Resolution is configurable, but go2rtc is recommended if you want higher resolutions and better frame rates. jsmpeg is Frigate's default without go2rtc configured. |
 | mse | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only. This is Frigate's default when go2rtc is configured. |
-| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration, doesn't support h.265. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. |
+| webrtc | native | native | yes (depends on audio codec) | yes | Requires extra configuration. Frigate attempts to use WebRTC when MSE fails or when using a camera's two-way talk feature. |

 ### Camera Settings Recommendations
````
````diff
@@ -127,7 +127,8 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req
 ```

 - For access through Tailscale, the Frigate system's Tailscale IP must be added as a WebRTC candidate. Tailscale IPs all start with `100.`, and are reserved within the `100.64.0.0/10` CIDR block.
-- Note that WebRTC does not support H.265.
+
+- Note that some browsers may not support H.265 (HEVC). You can check your browser's current version for H.265 compatibility [here](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness).

 :::tip
````
````diff
@@ -174,7 +175,7 @@ For devices that support two way talk, Frigate can be configured to use the feat
 - Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
 - For the Home Assistant Frigate card, [follow the docs](http://card.camera/#/usage/2-way-audio) for the correct source.

-To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
+To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-cameras)

 ### Streaming options on camera group dashboards
````
````diff
@@ -395,7 +395,7 @@ After placing the downloaded onnx model in your config/model_cache folder, you c
 detectors:
   ov:
     type: openvino
-    device: GPU
+    device: CPU

 model:
   model_type: dfine
````
````diff
@@ -431,10 +431,10 @@ When using Docker Compose:
 ```yaml
 services:
   frigate:
-    ---
-    devices:
-      - /dev/dri
-      - /dev/kfd
+    ...
+    devices:
+      - /dev/dri
+      - /dev/kfd
 ```

 For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed).
````
````diff
@@ -462,9 +462,9 @@ When using Docker Compose:
 ```yaml
 services:
   frigate:
-
-    environment:
-      HSA_OVERRIDE_GFX_VERSION: "10.0.0"
+    ...
+    environment:
+      HSA_OVERRIDE_GFX_VERSION: "10.0.0"
 ```

 Figuring out what version you need can be complicated as you can't tell the chipset name and driver from the AMD brand name.
````
````diff
@@ -988,7 +988,7 @@ COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
 WORKDIR /dfine
 RUN git clone https://github.com/Peterande/D-FINE.git .
 RUN uv pip install --system -r requirements.txt
-RUN uv pip install --system onnx onnxruntime onnxsim
+RUN uv pip install --system onnx onnxruntime onnxsim onnxscript
 # Create output directory and download checkpoint
 RUN mkdir -p output
 ARG MODEL_SIZE
````
````diff
@@ -1002,19 +1002,19 @@ COPY --from=build /dfine/output/dfine_${MODEL_SIZE}_obj2coco.onnx /dfine-${MODEL
 EOF
 ```

-### Download RF-DETR Model
+### Downloading RF-DETR Model

 RF-DETR can be exported as ONNX by running the command below. You can copy and paste the whole thing to your terminal and execute, setting `MODEL_SIZE=Nano` in the first line to `Nano`, `Small`, or `Medium`.

 ```sh
-docker build . --build-arg MODEL_SIZE=Nano --output . -f- <<'EOF'
+docker build . --build-arg MODEL_SIZE=Nano --rm --output . -f- <<'EOF'
 FROM python:3.11 AS build
 RUN apt-get update && apt-get install --no-install-recommends -y libgl1 && rm -rf /var/lib/apt/lists/*
 COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
 WORKDIR /rfdetr
-RUN uv pip install --system rfdetr onnx onnxruntime onnxsim onnx-graphsurgeon
+RUN uv pip install --system rfdetr[onnxexport] torch==2.8.0 onnx==1.19.1 onnxscript
 ARG MODEL_SIZE
-RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export()"
+RUN python3 -c "from rfdetr import RFDETR${MODEL_SIZE}; x = RFDETR${MODEL_SIZE}(resolution=320); x.export(simplify=True)"
 FROM scratch
 ARG MODEL_SIZE
 COPY --from=build /rfdetr/output/inference_model.onnx /rfdetr-${MODEL_SIZE}.onnx
````
````diff
@@ -1062,7 +1062,7 @@ COPY --from=ghcr.io/astral-sh/uv:0.8.0 /uv /bin/
 WORKDIR /yolov9
 ADD https://github.com/WongKinYiu/yolov9.git .
 RUN uv pip install --system -r requirements.txt
-RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier>=0.4.1
+RUN uv pip install --system onnx==1.18.0 onnxruntime onnx-simplifier>=0.4.1 onnxscript
 ARG MODEL_SIZE
 ARG IMG_SIZE
 ADD https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-${MODEL_SIZE}-converted.pt yolov9-${MODEL_SIZE}.pt
````
````diff
@@ -11,7 +11,7 @@ This adds features including the ability to deep link directly into the app.
 In order to install Frigate as a PWA, the following requirements must be met:

-- Frigate must be accessed via a secure context (localhost, secure https, etc.)
+- Frigate must be accessed via a secure context (localhost, secure https, VPN, etc.)
 - On Android, Firefox, Chrome, Edge, Opera, and Samsung Internet Browser all support installing PWAs.
 - On iOS 16.4 and later, PWAs can be installed from the Share menu in Safari, Chrome, Edge, Firefox, and Orion.

@@ -22,3 +22,7 @@ Installation varies slightly based on the device that is being used:
 - Desktop: Use the install button typically found in the right edge of the address bar
 - Android: Use the `Install as App` button in the more options menu for Chrome, and the `Add app to Home screen` button for Firefox
 - iOS: Use the `Add to Homescreen` button in the share menu
+
+## Usage
+
+Once set up, the Frigate app can be used wherever it has access to Frigate. This means it can be set up as local-only, VPN-only, or fully accessible depending on your needs.
````
````diff
@@ -18,7 +18,7 @@ Here are some of the cameras I recommend:
 - <a href="https://amzn.to/4fwoNWA" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T549M-ALED-S3</a> (affiliate link)
 - <a href="https://amzn.to/3YXpcMw" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T54IR-AS</a> (affiliate link)
 - <a href="https://amzn.to/3AvBHoY" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-AI-V3</a> (affiliate link)
-- <a href="https://amzn.to/4ltOpaC" target="_blank" rel="nofollow noopener sponsored">HIKVISION DS-2CD2387G2P-LSU/SL ColorVu 8MP Panoramic Turret IP Camera</a> (affiliate link)
+- <a href="https://www.bhphotovideo.com/c/product/1705511-REG/hikvision_colorvu_ds_2cd2387g2p_lsu_sl_8mp_network.html" target="_blank" rel="nofollow noopener">HIKVISION DS-2CD2387G2P-LSU/SL ColorVu 8MP Panoramic Turret IP Camera</a> (affiliate link)

 I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
````
````diff
@@ -36,9 +36,11 @@ If the EQ13 is out of stock, the link below may take you to a suggested alternat
 :::

-| Name | Coral Inference Speed | Coral Compatibility | Notes |
-| --- | --- | --- | --- |
-| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
+| Name | Capabilities | Notes |
+| --- | --- | --- |
+| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | Can run object detection on several 1080p cameras with low-medium activity | Dual gigabit NICs for easy isolated camera network. |
+| Intel 1120p ([Amazon](https://www.amazon.com/Beelink-i3-1220P-Computer-Display-Gigabit/dp/B0DDCKT9YP)) | Can handle a large number of 1080p cameras with high activity | |
+| Intel 125H ([Amazon](https://www.amazon.com/MINISFORUM-Pro-125H-Barebone-Computer-HDMI2-1/dp/B0FH21FSZM)) | Can handle a significant number of 1080p cameras with high activity | Includes NPU for more efficient detection in 0.17+ |

 ## Detectors
````
````diff
@@ -104,10 +106,16 @@ In real-world deployments, even with multiple cameras running concurrently, Frig
 ### Google Coral TPU

+:::warning
+
+The Coral is no longer recommended for new Frigate installations, except in deployments with particularly low power requirements or hardware incapable of utilizing alternative AI accelerators for object detection. Instead, we suggest using one of the numerous other supported object detectors. Frigate will continue to provide support for the Coral TPU for as long as practicably possible given it's still one of the most power-efficient devices for executing object detection models.
+
+:::
+
 Frigate supports both the USB and M.2 versions of the Google Coral.

 - The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions.
-- The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai
+- The PCIe and M.2 versions require installation of a driver on the host. https://github.com/jnicolson/gasket-builder should be used.

 A single Coral can handle many cameras using the default model and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed.
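The `1000/10=100` rule of thumb from the Coral paragraph above, as a tiny Python sketch (the 10 ms inference speed is just the example figure; use whatever Frigate reports for your device):

```python
def max_detection_fps(inference_speed_ms: float) -> float:
    """Upper bound on detection frames per second for one accelerator."""
    return 1000 / inference_speed_ms

# With an inference speed of 10, one Coral tops out around 100 fps.
print(max_detection_fps(10))  # 100.0
```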
````diff
@@ -94,6 +94,10 @@ $ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576
 The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.

+## Extra Steps for Specific Hardware
+
+The following sections contain additional setup steps that are only required if you are using specific hardware. If you are not using any of these hardware types, you can skip to the [Docker](#docker) installation section.
+
 ### Raspberry Pi 3/4

 By default, the Raspberry Pi limits the amount of memory available to the GPU. In order to use ffmpeg hardware acceleration, you must increase the available memory by setting `gpu_mem` to the maximum recommended value in `config.txt` as described in the [official docs](https://www.raspberrypi.org/documentation/computers/config_txt.html#memory-options).
````
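The hunk context above quotes the docs' one-liner for estimating shm usage (width × height × 1.5 bytes per YUV420 frame, 20 frames, plus a fixed overhead). A Python sketch that generalizes it to several cameras; the constants come straight from that snippet:

```python
def shm_mb_for_camera(width: int, height: int, frames: int = 20, overhead: int = 270480) -> float:
    """Estimate the shared memory one camera's detect stream needs, in MB."""
    return (width * height * 1.5 * frames + overhead) / 1048576

# Matches the docs' one-liner for a single 1280x720 detect stream:
print(f"{shm_mb_for_camera(1280, 720):.2f}MB")  # 26.63MB

# Sum the per-camera figures to size shm_size for a multi-camera setup.
print(f"{sum(shm_mb_for_camera(1280, 720) for _ in range(3)):.2f}MB")  # 79.88MB
```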
````diff
@@ -106,14 +110,107 @@ The Hailo-8 and Hailo-8L AI accelerators are available in both M.2 and HAT form
 #### Installation

-For Raspberry Pi 5 users with the AI Kit, installation is straightforward. Simply follow this [guide](https://www.raspberrypi.com/documentation/accessories/ai-kit.html#ai-kit-installation) to install the driver and software.
-
-For other installations, follow these steps for installation:
-
-1. Install the driver from the [Hailo GitHub repository](https://github.com/hailo-ai/hailort-drivers). A convenient script for Linux is available to clone the repository, build the driver, and install it.
-2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/dev/docker/hailo8l/user_installation.sh).
-3. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
-4. Run the script with `./user_installation.sh`
+:::warning
+
+The Raspberry Pi kernel includes an older version of the Hailo driver that is incompatible with Frigate. You **must** follow the installation steps below to install the correct driver version, and you **must** disable the built-in kernel driver as described in step 1.
+
+:::
+
+1. **Disable the built-in Hailo driver (Raspberry Pi only)**:
+
+   :::note
+
+   If you are **not** using a Raspberry Pi, skip this step and proceed directly to step 2.
+
+   :::
+
+   If you are using a Raspberry Pi, you need to blacklist the built-in kernel Hailo driver to prevent conflicts. First, check if the driver is currently loaded:
+
+   ```bash
+   lsmod | grep hailo
+   ```
+
+   If it shows `hailo_pci`, unload it:
+
+   ```bash
+   sudo rmmod hailo_pci
+   ```
+
+   Now blacklist the driver to prevent it from loading on boot:
+
+   ```bash
+   echo "blacklist hailo_pci" | sudo tee /etc/modprobe.d/blacklist-hailo_pci.conf
+   ```
+
+   Update initramfs to ensure the blacklist takes effect:
+
+   ```bash
+   sudo update-initramfs -u
+   ```
+
+   Reboot your Raspberry Pi:
+
+   ```bash
+   sudo reboot
+   ```
+
+   After rebooting, verify the built-in driver is not loaded:
+
+   ```bash
+   lsmod | grep hailo
+   ```
+
+   This command should return no results. If it still shows `hailo_pci`, the blacklist did not take effect properly and you may need to check for other Hailo packages installed via apt that are loading the driver.
+
+2. **Run the installation script**:
+
+   Download the installation script:
+
+   ```bash
+   wget https://raw.githubusercontent.com/blakeblackshear/frigate/dev/docker/hailo8l/user_installation.sh
+   ```
+
+   Make it executable:
+
+   ```bash
+   sudo chmod +x user_installation.sh
+   ```
+
+   Run the script:
+
+   ```bash
+   ./user_installation.sh
+   ```
+
+   The script will:
+
+   - Install necessary build dependencies
+   - Clone and build the Hailo driver from the official repository
+   - Install the driver
+   - Download and install the required firmware
+   - Set up udev rules
+
+3. **Reboot your system**:
+
+   After the script completes successfully, reboot to load the firmware:
+
+   ```bash
+   sudo reboot
+   ```
+
+4. **Verify the installation**:
+
+   After rebooting, verify that the Hailo device is available:
+
+   ```bash
+   ls -l /dev/hailo0
+   ```
+
+   You should see the device listed. You can also verify the driver is loaded:
+
+   ```bash
+   lsmod | grep hailo_pci
+   ```

 #### Setup
````
````diff
@@ -200,7 +297,7 @@ services:
     shm_size: "512mb" # update for your cameras based on calculation above
     devices:
       - /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
-      - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
+      - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder
       - /dev/video11:/dev/video11 # For Raspberry Pi 4B
       - /dev/dri/renderD128:/dev/dri/renderD128 # For intel hwaccel, needs to be updated for your hardware
     volumes:
````
````diff
@@ -5,7 +5,7 @@ title: Updating
 # Updating Frigate

-The current stable version of Frigate is **0.16.1**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.1).
+The current stable version of Frigate is **0.16.3**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.16.3).

 Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.
````
````diff
@@ -33,21 +33,21 @@ If you’re running Frigate via Docker (recommended method), follow these steps:
 2. **Update and Pull the Latest Image**:

    - If using Docker Compose:
-     - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.1` instead of `0.15.2`). For example:
+     - Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.16.3` instead of `0.15.2`). For example:
       ```yaml
       services:
         frigate:
-          image: ghcr.io/blakeblackshear/frigate:0.16.1
+          image: ghcr.io/blakeblackshear/frigate:0.16.3
       ```
     - Then pull the image:
       ```bash
-      docker pull ghcr.io/blakeblackshear/frigate:0.16.1
+      docker pull ghcr.io/blakeblackshear/frigate:0.16.3
       ```
     - **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
    - If using `docker run`:
-     - Pull the image with the appropriate tag (e.g., `0.16.1`, `0.16.1-tensorrt`, or `stable`):
+     - Pull the image with the appropriate tag (e.g., `0.16.3`, `0.16.3-tensorrt`, or `stable`):
       ```bash
-      docker pull ghcr.io/blakeblackshear/frigate:0.16.1
+      docker pull ghcr.io/blakeblackshear/frigate:0.16.3
       ```

 3. **Start the Container**:
````
````diff
@@ -202,7 +202,7 @@ services:
     ...
     devices:
       - /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions
-      - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
+      - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://github.com/jnicolson/gasket-builder
     ...
 ```
````
````diff
@@ -161,7 +161,14 @@ Message published for updates to tracked object metadata, for example:
 ### `frigate/reviews`

-Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, it will publish a, `update` message with the same id. When the review activity has ended a final `end` message is published.
+Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated.
+
+An `update` with the same ID will be published when:
+- The severity changes from `detection` to `alert`
+- Additional objects are detected
+- An object is recognized via face, lpr, etc.
+
+When the review activity has ended a final `end` message is published.

 ```json
 {
````
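A minimal subscriber for this topic might look like the sketch below. It assumes paho-mqtt 2.x and an unauthenticated broker on localhost, and it assumes the lifecycle stage described above arrives in a `type` field of the JSON payload (the exact field name should be checked against the JSON example in the docs):

```python
import json
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    review = json.loads(msg.payload)
    # Lifecycle per the docs above: "new" -> zero or more "update" -> "end".
    if review.get("type") == "end":  # field name assumed
        print("review item finished:", review)

client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)  # paho-mqtt 2.x API
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("frigate/reviews")
client.loop_forever()
```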
````diff
@@ -42,6 +42,7 @@ Misidentified objects should have a correct label added. For example, if a perso
 | `w` | Add box |
 | `d` | Toggle difficult |
 | `s` | Switch to the next label |
+| `Shift + s` | Switch to the previous label |
 | `tab` | Select next largest box |
 | `del` | Delete current box |
 | `esc` | Deselect/Cancel |
````
````diff
@@ -68,8 +68,7 @@ The USB Coral can become stuck and need to be restarted, this can happen for a n
 The most common reason for the PCIe Coral not being detected is that the driver has not been installed. This process varies based on which OS and kernel are being run.

-- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
-- For some newer Linux distros (for example, Ubuntu 22.04+), https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
+- In most cases https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.

 ## Attempting to load TPU as pci & Fatal Python error: Illegal instruction
````
````diff
@@ -447,8 +447,14 @@ def create_user(
     return JSONResponse(content={"username": body.username})


-@router.delete("/users/{username}")
-def delete_user(username: str):
+@router.delete("/users/{username}", dependencies=[Depends(require_role(["admin"]))])
+def delete_user(request: Request, username: str):
+    # Prevent deletion of the built-in admin user
+    if username == "admin":
+        return JSONResponse(
+            content={"message": "Cannot delete admin user"}, status_code=403
+        )
+
     User.delete_by_id(username)
     return JSONResponse(content={"success": True})
````
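In the hunk above, `require_role(["admin"])` is used as a FastAPI dependency so the role check runs before the endpoint body. A generic sketch of that pattern (illustrative only, not Frigate's actual implementation; how the caller's role ends up on `request.state` is an assumption about upstream auth middleware):

```python
from fastapi import Depends, FastAPI, HTTPException, Request

def require_role(allowed_roles: list[str]):
    def checker(request: Request):
        # Assumes earlier auth middleware stored the caller's role on request.state.
        role = getattr(request.state, "role", None)
        if role not in allowed_roles:
            raise HTTPException(status_code=403, detail="Forbidden")
    return checker

app = FastAPI()

@app.delete("/users/{username}", dependencies=[Depends(require_role(["admin"]))])
def delete_user(username: str):
    # Only reached when the dependency did not raise.
    return {"success": True}
```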
````diff
@@ -8,6 +8,7 @@ from pathlib import Path
 import psutil
 from fastapi import APIRouter, Depends, Request
 from fastapi.responses import JSONResponse
+from pathvalidate import sanitize_filepath
 from peewee import DoesNotExist
 from playhouse.shortcuts import model_to_dict

@@ -15,7 +16,7 @@ from frigate.api.auth import require_role
 from frigate.api.defs.request.export_recordings_body import ExportRecordingsBody
 from frigate.api.defs.request.export_rename_body import ExportRenameBody
 from frigate.api.defs.tags import Tags
-from frigate.const import EXPORT_DIR
+from frigate.const import CLIPS_DIR, EXPORT_DIR
 from frigate.models import Export, Previews, Recordings
 from frigate.record.export import (
     PlaybackFactorEnum,

@@ -54,7 +55,14 @@ def export_recording(
     playback_factor = body.playback
     playback_source = body.source
     friendly_name = body.name
-    existing_image = body.image_path
+    existing_image = sanitize_filepath(body.image_path) if body.image_path else None
+
+    # Ensure that existing_image is a valid path
+    if existing_image and not existing_image.startswith(CLIPS_DIR):
+        return JSONResponse(
+            content=({"success": False, "message": "Invalid image path"}),
+            status_code=400,
+        )

     if playback_source == "recordings":
         recordings_count = (
````
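The fix above follows the usual path-traversal guard: sanitize the user-supplied path, then require it to remain under a trusted directory. The same idea using only the standard library (a generic sketch, not Frigate's code; `CLIPS_DIR` here is a stand-in value):

```python
import os.path

CLIPS_DIR = "/media/frigate/clips"  # stand-in for the trusted base directory

def is_safe_clip_path(user_path: str) -> bool:
    """Reject any path that escapes CLIPS_DIR once symlinks and '..' resolve."""
    resolved = os.path.realpath(user_path)
    return os.path.commonpath([resolved, CLIPS_DIR]) == CLIPS_DIR

print(is_safe_clip_path("/media/frigate/clips/front/snap.jpg"))    # True
print(is_safe_clip_path("/media/frigate/clips/../../etc/passwd"))  # False
```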
````diff
@@ -6,7 +6,7 @@ import {
   useState,
 } from "react";
 import Hls from "hls.js";
-import { isAndroid, isDesktop, isMobile } from "react-device-detect";
+import { isDesktop, isMobile } from "react-device-detect";
 import { TransformComponent, TransformWrapper } from "react-zoom-pan-pinch";
 import VideoControls from "./VideoControls";
 import { VideoResolutionType } from "@/types/live";

@@ -21,7 +21,7 @@ import { ASPECT_VERTICAL_LAYOUT, RecordingPlayerError } from "@/types/record";
 import { useTranslation } from "react-i18next";

 // Android native hls does not seek correctly
-const USE_NATIVE_HLS = !isAndroid;
+const USE_NATIVE_HLS = false;
 const HLS_MIME_TYPE = "application/vnd.apple.mpegurl" as const;
 const unsupportedErrorCodes = [
   MediaError.MEDIA_ERR_SRC_NOT_SUPPORTED,
````
````diff
@@ -2,7 +2,10 @@ import { Recording } from "@/types/record";
 import { DynamicPlayback } from "@/types/playback";
 import { PreviewController } from "../PreviewPlayer";
 import { TimeRange, ObjectLifecycleSequence } from "@/types/timeline";
-import { calculateInpointOffset } from "@/utils/videoUtil";
+import {
+  calculateInpointOffset,
+  calculateSeekPosition,
+} from "@/utils/videoUtil";

 type PlayerMode = "playback" | "scrubbing";
````
````diff
@@ -68,38 +71,20 @@ export class DynamicVideoController {
       return;
     }

-    if (
-      this.recordings.length == 0 ||
-      time < this.recordings[0].start_time ||
-      time > this.recordings[this.recordings.length - 1].end_time
-    ) {
-      this.setNoRecording(true);
-      return;
-    }
-
     if (this.playerMode != "playback") {
       this.playerMode = "playback";
     }

-    let seekSeconds = 0;
-    (this.recordings || []).every((segment) => {
-      // if the next segment is past the desired time, stop calculating
-      if (segment.start_time > time) {
-        return false;
-      }
-
-      if (segment.end_time < time) {
-        seekSeconds += segment.end_time - segment.start_time;
-        return true;
-      }
-
-      seekSeconds +=
-        segment.end_time - segment.start_time - (segment.end_time - time);
-      return true;
-    });
-
-    // adjust for HLS inpoint offset
-    seekSeconds -= this.inpointOffset;
+    const seekSeconds = calculateSeekPosition(
+      time,
+      this.recordings,
+      this.inpointOffset,
+    );
+
+    if (seekSeconds === undefined) {
+      this.setNoRecording(true);
+      return;
+    }

     if (seekSeconds != 0) {
       this.playerController.currentTime = seekSeconds;
````
````diff
@@ -13,7 +13,10 @@ import { VideoResolutionType } from "@/types/live";
 import axios from "axios";
 import { cn } from "@/lib/utils";
 import { useTranslation } from "react-i18next";
-import { calculateInpointOffset } from "@/utils/videoUtil";
+import {
+  calculateInpointOffset,
+  calculateSeekPosition,
+} from "@/utils/videoUtil";
 import { isFirefox } from "react-device-detect";

 /**
````
````diff
@@ -99,10 +102,10 @@ export default function DynamicVideoPlayer({
   const [isLoading, setIsLoading] = useState(false);
   const [isBuffering, setIsBuffering] = useState(false);
   const [loadingTimeout, setLoadingTimeout] = useState<NodeJS.Timeout>();
-  const [source, setSource] = useState<HlsSource>({
-    playlist: `${apiHost}vod/${camera}/start/${timeRange.after}/end/${timeRange.before}/master.m3u8`,
-    startPosition: startTimestamp ? timeRange.after - startTimestamp : 0,
-  });
+
+  // Don't set source until recordings load - we need accurate startPosition
+  // to avoid hls.js clamping to video end when startPosition exceeds duration
+  const [source, setSource] = useState<HlsSource | undefined>(undefined);

   // start at correct time
````
````diff
@@ -174,7 +177,7 @@
   );

   useEffect(() => {
-    if (!controller || !recordings?.length) {
+    if (!recordings?.length) {
       if (recordings?.length == 0) {
         setNoRecording(true);
       }
````
````diff
@@ -182,10 +185,6 @@
       return;
     }

-    if (playerRef.current) {
-      playerRef.current.autoplay = !isScrubbing;
-    }
-
     let startPosition = undefined;

     if (startTimestamp) {
````
````diff
@@ -193,14 +192,12 @@
         recordingParams.after,
         (recordings || [])[0],
       );
-      const idealStartPosition = Math.max(
-        0,
-        startTimestamp - timeRange.after - inpointOffset,
-      );
-
-      if (idealStartPosition >= recordings[0].start_time - timeRange.after) {
-        startPosition = idealStartPosition;
-      }
+      startPosition = calculateSeekPosition(
+        startTimestamp,
+        recordings,
+        inpointOffset,
+      );
     }

     setSource({

@@ -208,6 +205,18 @@
       startPosition,
     });

+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [recordings]);
+
+  useEffect(() => {
+    if (!controller || !recordings?.length) {
+      return;
+    }
+
+    if (playerRef.current) {
+      playerRef.current.autoplay = !isScrubbing;
+    }
+
     setLoadingTimeout(setTimeout(() => setIsLoading(true), 1000));

     controller.newPlayback({
````
````diff
@@ -215,7 +224,7 @@
       timeRange,
     });

-    // we only want this to change when recordings update
+    // we only want this to change when controller or recordings update
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [controller, recordings]);
````
````diff
@@ -253,38 +262,40 @@

   return (
     <>
-      <HlsVideoPlayer
-        videoRef={playerRef}
-        containerRef={containerRef}
-        visible={!(isScrubbing || isLoading)}
-        currentSource={source}
-        hotKeys={hotKeys}
-        supportsFullscreen={supportsFullscreen}
-        fullscreen={fullscreen}
-        inpointOffset={inpointOffset}
-        onTimeUpdate={onTimeUpdate}
-        onPlayerLoaded={onPlayerLoaded}
-        onClipEnded={onValidateClipEnd}
-        onPlaying={() => {
-          if (isScrubbing) {
-            playerRef.current?.pause();
-          }
-
-          if (loadingTimeout) {
-            clearTimeout(loadingTimeout);
-          }
-
-          setNoRecording(false);
-        }}
-        setFullResolution={setFullResolution}
-        onUploadFrame={onUploadFrameToPlus}
-        toggleFullscreen={toggleFullscreen}
-        onError={(error) => {
-          if (error == "stalled" && !isScrubbing) {
-            setIsBuffering(true);
-          }
-        }}
-      />
+      {source && (
+        <HlsVideoPlayer
+          videoRef={playerRef}
+          containerRef={containerRef}
+          visible={!(isScrubbing || isLoading)}
+          currentSource={source}
+          hotKeys={hotKeys}
+          supportsFullscreen={supportsFullscreen}
+          fullscreen={fullscreen}
+          inpointOffset={inpointOffset}
+          onTimeUpdate={onTimeUpdate}
+          onPlayerLoaded={onPlayerLoaded}
+          onClipEnded={onValidateClipEnd}
+          onPlaying={() => {
+            if (isScrubbing) {
+              playerRef.current?.pause();
+            }
+
+            if (loadingTimeout) {
+              clearTimeout(loadingTimeout);
+            }
+
+            setNoRecording(false);
+          }}
+          setFullResolution={setFullResolution}
+          onUploadFrame={onUploadFrameToPlus}
+          toggleFullscreen={toggleFullscreen}
+          onError={(error) => {
+            if (error == "stalled" && !isScrubbing) {
+              setIsBuffering(true);
+            }
+          }}
+        />
+      )}
       <PreviewPlayer
         className={cn(
           className,
````
````diff
@@ -24,3 +24,57 @@ export function calculateInpointOffset(

   return 0;
 }
+
+/**
+ * Calculates the video player time (in seconds) for a given timestamp
+ * by iterating through recording segments and summing their durations.
+ * This accounts for the fact that the video is a concatenation of segments,
+ * not a single continuous stream.
+ *
+ * @param timestamp - The target timestamp to seek to
+ * @param recordings - Array of recording segments
+ * @param inpointOffset - HLS inpoint offset to subtract from the result
+ * @returns The calculated seek position in seconds, or undefined if timestamp is out of range
+ */
+export function calculateSeekPosition(
+  timestamp: number,
+  recordings: Recording[],
+  inpointOffset: number = 0,
+): number | undefined {
+  if (!recordings || recordings.length === 0) {
+    return undefined;
+  }
+
+  // Check if timestamp is within the recordings range
+  if (
+    timestamp < recordings[0].start_time ||
+    timestamp > recordings[recordings.length - 1].end_time
+  ) {
+    return undefined;
+  }
+
+  let seekSeconds = 0;
+
+  (recordings || []).every((segment) => {
+    // if the next segment is past the desired time, stop calculating
+    if (segment.start_time > timestamp) {
+      return false;
+    }
+
+    if (segment.end_time < timestamp) {
+      // Add the full duration of this segment
+      seekSeconds += segment.end_time - segment.start_time;
+      return true;
+    }
+
+    // We're in this segment - calculate position within it
+    seekSeconds +=
+      segment.end_time - segment.start_time - (segment.end_time - timestamp);
+    return true;
+  });
+
+  // Adjust for HLS inpoint offset
+  seekSeconds -= inpointOffset;
+
+  return seekSeconds >= 0 ? seekSeconds : undefined;
+}
````
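The arithmetic in `calculateSeekPosition` is easier to follow with concrete numbers. A Python transcription with made-up segment times (a sketch for illustration, not part of the codebase):

```python
# Two recording segments of wall-clock time: 60s, then a gap, then another 60s.
segments = [(1000.0, 1060.0), (1090.0, 1150.0)]

def seek_position(timestamp, segments, inpoint_offset=0.0):
    """Sum full durations of earlier segments, then the offset into the hit segment."""
    if not segments or timestamp < segments[0][0] or timestamp > segments[-1][1]:
        return None  # out of range -> "no recording"
    seek = 0.0
    for start, end in segments:
        if start > timestamp:
            break  # next segment is past the desired time
        if end < timestamp:
            seek += end - start  # full duration of an earlier segment
        else:
            seek += timestamp - start  # partial position inside this segment
    seek -= inpoint_offset
    return seek if seek >= 0 else None

# 1100.0 falls 10s into the second segment: 60s (first segment) + 10s = 70s.
print(seek_position(1100.0, segments))  # 70.0
```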