Miscellaneous fixes (0.17 beta) (#21350)

* Fix genai callbacks in MQTT

* Clean up the cursor pointer for classification cards

* Cleanup

* Handle unknown SoCs in the RKNN converter by restricting conversion to known SoCs

* Don't allow "none" as a classification class name

* Change the internal-port user to admin and default unspecified usernames to viewer

* Keep port 5000 as the anonymous user

* Suppress TensorFlow logging during classification training

* Always apply base log level suppressions for noisy third-party libraries even if no specific logConfig is provided

* Remove the decorator and specifically suppress TFLite delegate creation messages

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Commit e636449d56 (parent 6a0e31dcf9)
Nicolas Mowen, committed by GitHub, 2025-12-18 15:12:10 -07:00
20 changed files with 156 additions and 67 deletions

---

@@ -237,8 +237,18 @@ ENV PYTHONWARNINGS="ignore:::numpy.core.getlimits"
# Set HailoRT to disable logging
ENV HAILORT_LOGGER_PATH=NONE
-# TensorFlow error only
+# TensorFlow C++ logging suppression (must be set before import)
+# TF_CPP_MIN_LOG_LEVEL: 0=all, 1=INFO+, 2=WARNING+, 3=ERROR+ (we use 3 for errors only)
ENV TF_CPP_MIN_LOG_LEVEL=3
+# Suppress verbose logging from TensorFlow C++ code
+ENV TF_CPP_MIN_VLOG_LEVEL=3
+# Disable oneDNN optimization messages ("optimized with oneDNN...")
+ENV TF_ENABLE_ONEDNN_OPTS=0
+# Suppress AutoGraph verbosity during conversion
+ENV AUTOGRAPH_VERBOSITY=0
+# Google Logging (GLOG) suppression for TensorFlow components
+ENV GLOG_minloglevel=3
+ENV GLOG_logtostderr=0
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"

---

@@ -25,7 +25,7 @@ paths:
description: Authentication Accepted (no response body, different headers depending on auth method)
headers:
remote-user:
-description: Authenticated username or "anonymous" in proxy-only mode
+description: Authenticated username or "viewer" in proxy-only mode
schema:
type: string
remote-role:

---

@@ -167,7 +167,7 @@ def allow_any_authenticated():
Allows:
- Port 5000 internal requests (remote-user: "anonymous", remote-role: "admin")
- Authenticated users with JWT tokens (remote-user: username)
-- Unauthenticated requests when auth is disabled (remote-user: "anonymous")
+- Unauthenticated requests when auth is disabled (remote-user: "viewer")
Rejects:
- Requests with no remote-user header (did not pass through /auth endpoint)
@@ -550,7 +550,7 @@ def resolve_role(
"description": "Authentication Accepted (no response body)",
"headers": {
"remote-user": {
"description": 'Authenticated username or "anonymous" in proxy-only mode',
"description": 'Authenticated username or "viewer" in proxy-only mode',
"schema": {"type": "string"},
},
"remote-role": {
@@ -592,12 +592,12 @@ def auth(request: Request):
# if auth is disabled, just apply the proxy header map and return success
if not auth_config.enabled:
# pass the user header value from the upstream proxy if a mapping is specified
-# or use anonymous if none are specified
+# or use viewer if none are specified
user_header = proxy_config.header_map.user
success_response.headers["remote-user"] = (
-request.headers.get(user_header, default="anonymous")
+request.headers.get(user_header, default="viewer")
if user_header
-else "anonymous"
+else "viewer"
)
# parse header and resolve a valid role
@@ -712,7 +712,7 @@ def auth(request: Request):
description="Returns the current authenticated user's profile including username, role, and allowed cameras. This endpoint requires authentication and returns information about the user's permissions.",
)
def profile(request: Request):
-username = request.headers.get("remote-user", "anonymous")
+username = request.headers.get("remote-user", "viewer")
role = request.headers.get("remote-role", "viewer")
all_camera_names = set(request.app.frigate_config.cameras.keys())

---

@@ -225,7 +225,8 @@ class MqttClient(Communicator):
"birdseye_mode",
"review_alerts",
"review_detections",
"genai",
"object_descriptions",
"review_descriptions",
]
for name in self.config.cameras.keys():
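
Each name in this list becomes a per-camera MQTT command topic, so integrations that previously toggled `genai` now target the two split topics. A small sketch of the assumed topic shape, based on Frigate's documented `<topic_prefix>/<camera>/<setting>/set` convention (hedged here, not taken from this diff):

```python
# Hypothetical helper illustrating the per-camera command-topic shape.
def command_topic(prefix: str, camera: str, setting: str) -> str:
    return f"{prefix}/{camera}/{setting}/set"

assert command_topic("frigate", "front_door", "object_descriptions") == (
    "frigate/front_door/object_descriptions/set"
)
```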

---

@@ -77,6 +77,9 @@ FFMPEG_HWACCEL_RKMPP = "preset-rkmpp"
FFMPEG_HWACCEL_AMF = "preset-amd-amf"
FFMPEG_HVC1_ARGS = ["-tag:v", "hvc1"]
+# RKNN constants
+SUPPORTED_RK_SOCS = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
# Regex constants
REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"

---

@@ -13,7 +13,7 @@ from frigate.comms.event_metadata_updater import (
)
from frigate.config import FrigateConfig
from frigate.const import MODEL_CACHE_DIR
-from frigate.log import redirect_output_to_logger
+from frigate.log import suppress_stderr_during
from frigate.util.object import calculate_region
from ..types import DataProcessorMetrics
@@ -80,8 +80,9 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
except Exception as e:
logger.error(f"Failed to download {path}: {e}")
-@redirect_output_to_logger(logger, logging.DEBUG)
def __build_detector(self) -> None:
+# Suppress TFLite delegate creation messages that bypass Python logging
+with suppress_stderr_during("tflite_interpreter_init"):
self.interpreter = Interpreter(
model_path=os.path.join(MODEL_CACHE_DIR, "bird/bird.tflite"),
num_threads=2,

---

@@ -21,7 +21,7 @@ from frigate.config.classification import (
ObjectClassificationType,
)
from frigate.const import CLIPS_DIR, MODEL_CACHE_DIR
-from frigate.log import redirect_output_to_logger
+from frigate.log import suppress_stderr_during
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import EventsPerSecond, InferenceSpeed, load_labels
from frigate.util.object import box_overlaps, calculate_region
@@ -72,7 +72,6 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
self.last_run = datetime.datetime.now().timestamp()
self.__build_detector()
-@redirect_output_to_logger(logger, logging.DEBUG)
def __build_detector(self) -> None:
try:
from tflite_runtime.interpreter import Interpreter
@@ -89,6 +88,8 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
self.labelmap = {}
return
+# Suppress TFLite delegate creation messages that bypass Python logging
+with suppress_stderr_during("tflite_interpreter_init"):
self.interpreter = Interpreter(
model_path=model_path,
num_threads=2,
@@ -377,7 +378,6 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
self.__build_detector()
-@redirect_output_to_logger(logger, logging.DEBUG)
def __build_detector(self) -> None:
model_path = os.path.join(self.model_dir, "model.tflite")
labelmap_path = os.path.join(self.model_dir, "labelmap.txt")
@@ -389,6 +389,8 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
self.labelmap = {}
return
+# Suppress TFLite delegate creation messages that bypass Python logging
+with suppress_stderr_during("tflite_interpreter_init"):
self.interpreter = Interpreter(
model_path=model_path,
num_threads=2,

---

@@ -5,7 +5,7 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
-from frigate.log import redirect_output_to_logger
+from frigate.log import suppress_stderr_during
from ..detector_utils import tflite_detect_raw, tflite_init
@@ -28,8 +28,9 @@ class CpuDetectorConfig(BaseDetectorConfig):
class CpuTfl(DetectionApi):
type_key = DETECTOR_KEY
-@redirect_output_to_logger(logger, logging.DEBUG)
def __init__(self, detector_config: CpuDetectorConfig):
+# Suppress TFLite delegate creation messages that bypass Python logging
+with suppress_stderr_during("tflite_interpreter_init"):
interpreter = Interpreter(
model_path=detector_config.model.path,
num_threads=detector_config.num_threads or 3,

---

@@ -8,7 +8,7 @@ import cv2
import numpy as np
from pydantic import Field
-from frigate.const import MODEL_CACHE_DIR
+from frigate.const import MODEL_CACHE_DIR, SUPPORTED_RK_SOCS
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detection_runners import RKNNModelRunner
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
@@ -19,8 +19,6 @@ logger = logging.getLogger(__name__)
DETECTOR_KEY = "rknn"
-supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
supported_models = {
ModelTypeEnum.yologeneric: "^frigate-fp16-yolov9-[cemst]$",
ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$",
@@ -82,9 +80,9 @@ class Rknn(DetectionApi):
except FileNotFoundError:
raise Exception("Make sure to run docker in privileged mode.")
-if soc not in supported_socs:
+if soc not in SUPPORTED_RK_SOCS:
raise Exception(
f"Your SoC is not supported. Your SoC is: {soc}. Currently these SoCs are supported: {supported_socs}."
f"Your SoC is not supported. Your SoC is: {soc}. Currently these SoCs are supported: {SUPPORTED_RK_SOCS}."
)
return soc

---

@@ -8,7 +8,7 @@ import numpy as np
from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_runners import get_optimized_runner
from frigate.embeddings.types import EnrichmentModelTypeEnum
-from frigate.log import redirect_output_to_logger
+from frigate.log import suppress_stderr_during
from frigate.util.downloader import ModelDownloader
from ...config import FaceRecognitionConfig
@@ -57,12 +57,13 @@ class FaceNetEmbedding(BaseEmbedding):
self._load_model_and_utils()
logger.debug(f"models are already downloaded for {self.model_name}")
-@redirect_output_to_logger(logger, logging.DEBUG)
def _load_model_and_utils(self):
if self.runner is None:
if self.downloader:
self.downloader.wait_for_download()
+# Suppress TFLite delegate creation messages that bypass Python logging
+with suppress_stderr_during("tflite_interpreter_init"):
self.runner = Interpreter(
model_path=os.path.join(MODEL_CACHE_DIR, "facedet/facenet.tflite"),
num_threads=2,

---

@@ -34,7 +34,7 @@ from frigate.data_processing.real_time.audio_transcription import (
AudioTranscriptionRealTimeProcessor,
)
from frigate.ffmpeg_presets import parse_preset_input
-from frigate.log import LogPipe, redirect_output_to_logger
+from frigate.log import LogPipe, suppress_stderr_during
from frigate.object_detection.base import load_labels
from frigate.util.builtin import get_ffmpeg_arg_list
from frigate.util.process import FrigateProcess
@@ -367,16 +367,16 @@ class AudioEventMaintainer(threading.Thread):
class AudioTfl:
-@redirect_output_to_logger(logger, logging.DEBUG)
def __init__(self, stop_event: threading.Event, num_threads=2):
self.stop_event = stop_event
self.num_threads = num_threads
self.labels = load_labels("/audio-labelmap.txt", prefill=521)
+# Suppress TFLite delegate creation messages that bypass Python logging
+with suppress_stderr_during("tflite_interpreter_init"):
self.interpreter = Interpreter(
model_path="/cpu_audio_model.tflite",
num_threads=self.num_threads,
)
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()

---

@@ -80,10 +80,15 @@ def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None:
log_levels = {
"absl": LogLevel.error,
"httpx": LogLevel.error,
"h5py": LogLevel.error,
"keras": LogLevel.error,
"matplotlib": LogLevel.error,
"tensorflow": LogLevel.error,
"tensorflow.python": LogLevel.error,
"werkzeug": LogLevel.error,
"ws4py": LogLevel.error,
"PIL": LogLevel.warning,
"numba": LogLevel.warning,
**log_levels,
}
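
Note on the merge order above: because `**log_levels` is unpacked last, any user-configured level wins over these baseline suppressions, and the base entries only fill gaps. A tiny sketch of that dict-merge precedence:

```python
# Later unpacking wins: user settings override the baseline suppressions.
base = {"tensorflow": "error", "ws4py": "error"}
user = {"tensorflow": "debug"}

merged = {**base, **user}
assert merged == {"tensorflow": "debug", "ws4py": "error"}
```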
@@ -318,3 +323,31 @@ def suppress_os_output(func: Callable) -> Callable:
return result
return wrapper
+@contextmanager
+def suppress_stderr_during(operation_name: str) -> Generator[None, None, None]:
+    """
+    Context manager to suppress stderr output during a specific operation.
+
+    Useful for silencing LLVM debug output, CUDA messages, and other native
+    library logging that cannot be controlled via Python logging or environment
+    variables. Completely redirects file descriptor 2 (stderr) to /dev/null.
+
+    Usage:
+        with suppress_stderr_during("model_conversion"):
+            converter = tf.lite.TFLiteConverter.from_keras_model(model)
+            tflite_model = converter.convert()
+
+    Args:
+        operation_name: Name of the operation for debugging purposes
+    """
+    original_stderr_fd = os.dup(2)
+    devnull = os.open(os.devnull, os.O_WRONLY)
+    try:
+        os.dup2(devnull, 2)
+        yield
+    finally:
+        os.dup2(original_stderr_fd, 2)
+        os.close(devnull)
+        os.close(original_stderr_fd)
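
One design note for reviewers: `contextlib.redirect_stderr` only swaps the Python-level `sys.stderr` object, while TFLite delegate and LLVM messages are written by native code straight to file descriptor 2, so only the `os.dup2` approach above catches them. A small demonstration of the difference, assuming a POSIX system:

```python
import contextlib
import io
import os

buf = io.StringIO()
with contextlib.redirect_stderr(buf):
    os.write(2, b"bypasses sys.stderr\n")  # still reaches the real stderr

# Redirecting fd 2 itself, as suppress_stderr_during does, catches it:
saved = os.dup(2)
devnull = os.open(os.devnull, os.O_WRONLY)
try:
    os.dup2(devnull, 2)
    os.write(2, b"silenced\n")  # goes to /dev/null
finally:
    os.dup2(saved, 2)
    os.close(devnull)
    os.close(saved)
```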

---

@@ -19,7 +19,7 @@ from frigate.const import (
PROCESS_PRIORITY_LOW,
UPDATE_MODEL_STATE,
)
-from frigate.log import redirect_output_to_logger
+from frigate.log import redirect_output_to_logger, suppress_stderr_during
from frigate.models import Event, Recordings, ReviewSegment
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
@@ -250,12 +250,17 @@ class ClassificationTrainingProcess(FrigateProcess):
logger.debug(f"Converting {self.model_name} to TFLite...")
# convert model to tflite
+# Suppress stderr during conversion to avoid LLVM debug output
+# (fully_quantize, inference_type, MLIR optimization messages, etc)
+with suppress_stderr_during("tflite_conversion"):
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = (
self.__generate_representative_dataset_factory(dataset_dir)
)
-converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+converter.target_spec.supported_ops = [
+tf.lite.OpsSet.TFLITE_BUILTINS_INT8
+]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model = converter.convert()
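
The `__generate_representative_dataset_factory` referenced above isn't shown in this hunk; for full-integer quantization, TFLite requires a callable that yields calibration samples matching the model's input signature. A hypothetical minimal version for context (function name, shape, and preprocessing are assumptions, not taken from this PR):

```python
import numpy as np

def make_representative_dataset(num_samples: int = 100, size: int = 224):
    # TFLite invokes the returned generator to calibrate INT8 value ranges.
    # Real code would yield preprocessed training images rather than noise.
    def gen():
        for _ in range(num_samples):
            yield [np.random.rand(1, size, size, 3).astype(np.float32)]
    return gen

# converter.representative_dataset = make_representative_dataset()
```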

---

@@ -65,10 +65,15 @@ class FrigateProcess(BaseProcess):
logging.basicConfig(handlers=[], force=True)
logging.getLogger().addHandler(QueueHandler(self.__log_queue))
+# Always apply base log level suppressions for noisy third-party libraries
+# even if no specific logConfig is provided
if logConfig:
frigate.log.apply_log_levels(
logConfig.default.value.upper(), logConfig.logs
)
+else:
+# Apply default INFO level with standard library suppressions
+frigate.log.apply_log_levels("INFO", {})
self._setup_memray()

---

@@ -8,6 +8,7 @@ import time
from pathlib import Path
from typing import Optional
+from frigate.const import SUPPORTED_RK_SOCS
from frigate.util.file import FileLock
logger = logging.getLogger(__name__)
@@ -68,9 +69,20 @@ def is_rknn_compatible(model_path: str, model_type: str | None = None) -> bool:
True if the model is RKNN-compatible, False otherwise
"""
soc = get_soc_type()
if soc is None:
return False
+# Check if the SoC is actually a supported RK device
+# This prevents false positives on non-RK devices (e.g., macOS Docker)
+# where /proc/device-tree/compatible might exist but contain non-RK content
+if soc not in SUPPORTED_RK_SOCS:
+logger.debug(
+f"SoC '{soc}' is not a supported RK device for RKNN conversion. "
+f"Supported SoCs: {SUPPORTED_RK_SOCS}"
+)
+return False
if not model_type:
model_type = get_rknn_model_type(model_path)
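
`get_soc_type()` itself isn't shown here. On Rockchip boards the device tree exposes identifiers like `rockchip,rk3588` in `/proc/device-tree/compatible`, but that file also exists on other ARM platforms, which is exactly the false positive the new check guards against. A hypothetical sketch of that kind of probe (not the actual implementation):

```python
def read_rk_soc() -> str | None:
    # /proc/device-tree/compatible holds NUL-separated identifier strings,
    # e.g. b"rockchip,rk3588-evb\x00rockchip,rk3588\x00" on an RK3588 board.
    try:
        with open("/proc/device-tree/compatible", "rb") as f:
            entries = f.read().split(b"\x00")
    except FileNotFoundError:
        return None  # not a device-tree platform
    soc = None
    for entry in entries:
        text = entry.decode(errors="ignore")
        if text.startswith("rockchip,"):
            soc = text.split(",", 1)[1]  # SoC entry comes last, e.g. "rk3588"
    return soc
```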

---

@@ -139,6 +139,7 @@
"nameOnlyNumbers": "Model name cannot contain only numbers",
"classRequired": "At least 1 class is required",
"classesUnique": "Class names must be unique",
"noneNotAllowed": "The class 'none' is not allowed",
"stateRequiresTwoClasses": "State models require at least 2 classes",
"objectLabelRequired": "Please select an object label",
"objectTypeRequired": "Please select a classification type"

---

@@ -40,6 +40,7 @@ type ClassificationCardProps = {
data: ClassificationItemData;
threshold?: ClassificationThreshold;
selected: boolean;
+clickable: boolean;
i18nLibrary: string;
showArea?: boolean;
count?: number;
@@ -56,6 +57,7 @@ export const ClassificationCard = forwardRef<
data,
threshold,
selected,
+clickable,
i18nLibrary,
showArea = true,
count,
@@ -101,11 +103,12 @@ export const ClassificationCard = forwardRef<
<div
ref={ref}
className={cn(
"relative flex size-full cursor-pointer flex-col overflow-hidden rounded-lg outline outline-[3px]",
"relative flex size-full flex-col overflow-hidden rounded-lg outline outline-[3px]",
className,
selected
? "shadow-selected outline-selected"
: "outline-transparent duration-500",
clickable && "cursor-pointer",
)}
onClick={(e) => {
const isMeta = e.metaKey || e.ctrlKey;
@@ -289,6 +292,7 @@ export function GroupedClassificationCard({
data={bestItem}
threshold={threshold}
selected={selectedItems.includes(bestItem.filename)}
+clickable={true}
i18nLibrary={i18nLibrary}
count={group.length}
onClick={(_, meta) => {
@@ -413,6 +417,7 @@ export function GroupedClassificationCard({
data={data}
threshold={threshold}
selected={false}
+clickable={false}
i18nLibrary={i18nLibrary}
onClick={() => {}}
>

---

@@ -94,7 +94,14 @@ export default function Step1NameAndDefine({
objectLabel: z.string().optional(),
objectType: z.enum(["sub_label", "attribute"]).optional(),
classes: z
-.array(z.string())
+.array(
+z
+.string()
+.refine(
+(val) => val.trim().toLowerCase() !== "none",
+t("wizard.step1.errors.noneNotAllowed"),
+),
+)
.min(1, t("wizard.step1.errors.classRequired"))
.refine(
(classes) => {
@@ -467,6 +474,7 @@ export default function Step1NameAndDefine({
)}
</div>
</FormControl>
+<FormMessage />
</FormItem>
)}
/>

---

@@ -1026,6 +1026,7 @@ function FaceGrid({
filepath: `clips/faces/${pageToggle}/${image}`,
}}
selected={selectedFaces.includes(image)}
+clickable={selectedFaces.length > 0}
i18nLibrary="views/faceLibrary"
onClick={(data, meta) => onClickFaces([data.filename], meta)}
>

---

@@ -804,6 +804,7 @@ function DatasetGrid({
name: "",
}}
showArea={false}
+clickable={selectedImages.length > 0}
selected={selectedImages.includes(image)}
i18nLibrary="views/classificationModel"
onClick={(data, _) => onClickImages([data.filename], true)}
@@ -962,6 +963,7 @@ function StateTrainGrid({
data={data}
threshold={threshold}
selected={selectedImages.includes(data.filename)}
+clickable={selectedImages.length > 0}
i18nLibrary="views/classificationModel"
showArea={false}
onClick={(data, meta) => onClickImages([data.filename], meta)}