From 60052e5f9f27e7bba76232bddd4cb5aaab1b4d41 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Fri, 19 Dec 2025 18:59:26 -0600
Subject: [PATCH] Miscellaneous Fixes (0.17 beta) (#21355)

* remove footer messages and add update topic to motion tuner view

  restart after changing values is no longer required

* add cache key and activity indicator for loading classification wizard images
* always mark model as untrained when a class name is changed
* clarify object classification docs
* add debug logs for individual LPR replace_rules
* update memray docs
* memray tweaks
* don't fail audio transcription when semantic search is not enabled
* fix incorrect mismatch for object vs. sub label
* check if the video is currently playing when deciding to seek due to misalignment
* refactor timeline event handling to allow multiple timeline entries per update
* check if zones have actually changed (not just the count) for event state updates
* show event icon on mobile
* move div inside conditional

---------

Co-authored-by: Nicolas Mowen
---
 .../object_classification.md               |  2 +
 docs/docs/troubleshooting/memory.md        | 46 ++++++++------
 frigate/api/classification.py              |  7 +++
 .../common/license_plate/mixin.py          |  5 +-
 .../post/audio_transcription.py            |  5 +-
 frigate/events/maintainer.py               |  2 +-
 frigate/timeline.py                        | 62 +++++++++++++------
 .../components/card/ClassificationCard.tsx | 52 +++++++++-------
 .../wizard/Step3ChooseExamples.tsx         | 16 ++++-
 web/src/views/recording/RecordingView.tsx  |  5 +-
 web/src/views/settings/MotionTunerView.tsx | 28 ++-------
 11 files changed, 139 insertions(+), 91 deletions(-)

diff --git a/docs/docs/configuration/custom_classification/object_classification.md b/docs/docs/configuration/custom_classification/object_classification.md
index 52056a007..70fd1fbbd 100644
--- a/docs/docs/configuration/custom_classification/object_classification.md
+++ b/docs/docs/configuration/custom_classification/object_classification.md
@@ -95,6 +95,8 @@ The system will automatically generate example images from detected objects matc
 
 When choosing which objects to classify, start with a small number of visually distinct classes and ensure your training samples match camera viewpoints and distances typical for those objects.
 
+If examples for some of your classes do not appear in the grid, you can continue configuring the model without them. New images will begin to appear in the Recent Classifications view. Once your missing classes are detected, classify them from that view and retrain the model.
+
 ### Improving the Model
 
 - **Problem framing**: Keep classes visually distinct and relevant to the chosen object types.
diff --git a/docs/docs/troubleshooting/memory.md b/docs/docs/troubleshooting/memory.md
index b8ef5367d..338037c7a 100644
--- a/docs/docs/troubleshooting/memory.md
+++ b/docs/docs/troubleshooting/memory.md
@@ -9,8 +9,20 @@ Frigate includes built-in memory profiling using [memray](https://bloomberg.gith
 
 Memory profiling is controlled via the `FRIGATE_MEMRAY_MODULES` environment variable. Set it to a comma-separated list of module names you want to profile:
 
+```yaml
+# docker-compose example
+services:
+  frigate:
+    ...
+    environment:
+      - FRIGATE_MEMRAY_MODULES=frigate.embeddings,frigate.capture
+```
+
 ```bash
-export FRIGATE_MEMRAY_MODULES="frigate.review_segment_manager,frigate.capture"
+# docker run example
+docker run -e FRIGATE_MEMRAY_MODULES="frigate.embeddings" \
+  ...
+  --name frigate
 ```
 
 ### Module Names
@@ -24,11 +36,12 @@ Frigate processes are named using a module-based naming scheme. Common module na
 - `frigate.output` - Output processing
 - `frigate.audio_manager` - Audio processing
 - `frigate.embeddings` - Embeddings processing
+- `frigate.embeddings_manager` - Embeddings manager
 
 You can also specify the full process name (including camera-specific identifiers) if you want to profile a specific camera:
 
 ```bash
-export FRIGATE_MEMRAY_MODULES="frigate.capture:front_door"
+FRIGATE_MEMRAY_MODULES=frigate.capture:front_door
 ```
 
 When you specify a module name (e.g., `frigate.capture`), all processes with that module prefix will be profiled. For example, `frigate.capture` will profile all camera capture processes.
@@ -55,11 +68,20 @@ After a process exits normally, you'll find HTML reports in `/config/memray_repo
 
 If a process crashes or you want to generate a report from an existing binary file, you can manually create the HTML report:
 
+- Run `memray` inside the Frigate container:
+
 ```bash
-memray flamegraph /config/memray_reports/<process_name>.bin
+docker-compose exec frigate memray flamegraph /config/memray_reports/<process_name>.bin
+# or
+docker exec -it <container_name> memray flamegraph /config/memray_reports/<process_name>.bin
 ```
 
-This will generate an HTML file that you can open in your browser.
+- You can also copy the `.bin` file to the host and run `memray` locally if you have it installed:
+
+```bash
+docker cp <container_name>:/config/memray_reports/<process_name>.bin /tmp/
+memray flamegraph /tmp/<process_name>.bin
+```
 
 ## Understanding the Reports
@@ -110,20 +132,4 @@ The interactive HTML reports allow you to:
 - Check that memray is properly installed (included by default in Frigate)
 - Verify the process actually started and ran (check process logs)
 
-## Example Usage
-
-```bash
-# Enable profiling for review and capture modules
-export FRIGATE_MEMRAY_MODULES="frigate.review_segment_manager,frigate.capture"
-
-# Start Frigate
-# ... let it run for a while ...
-
-# Check for reports
-ls -lh /config/memray_reports/
-
-# If a process crashed, manually generate report
-memray flamegraph /config/memray_reports/frigate_capture_front_door.bin
-```
-
 For more information about memray and interpreting reports, see the [official memray documentation](https://bloomberg.github.io/memray/).
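The updated doc states that a bare module name profiles every process whose name starts with that prefix, while `module:camera` targets one specific process. A minimal sketch of that matching rule — the function and variable names here are illustrative, not Frigate's internals:

```python
import os


def should_profile(process_name: str) -> bool:
    """Return True when FRIGATE_MEMRAY_MODULES selects this process."""
    modules = [
        m.strip()
        for m in os.environ.get("FRIGATE_MEMRAY_MODULES", "").split(",")
        if m.strip()
    ]
    # "frigate.capture" matches every capture process;
    # "frigate.capture:front_door" matches only that camera's process.
    return any(
        process_name == m or process_name.startswith(m + ":") for m in modules
    )


os.environ["FRIGATE_MEMRAY_MODULES"] = "frigate.capture,frigate.embeddings"
print(should_profile("frigate.capture:front_door"))  # True
print(should_profile("frigate.output"))              # False
```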
diff --git a/frigate/api/classification.py b/frigate/api/classification.py
index 18e590ce1..f60cfd3c3 100644
--- a/frigate/api/classification.py
+++ b/frigate/api/classification.py
@@ -40,6 +40,7 @@ from frigate.util.classification import (
     collect_state_classification_examples,
     get_dataset_image_count,
     read_training_metadata,
+    write_training_metadata,
 )
 from frigate.util.file import get_event_snapshot
 
@@ -842,6 +843,12 @@ def rename_classification_category(
     try:
         os.rename(old_folder, new_folder)
+
+        # Mark dataset as ready to train by resetting training metadata
+        # This ensures the dataset is marked as changed after renaming
+        sanitized_name = sanitize_filename(name)
+        write_training_metadata(sanitized_name, 0)
+
         return JSONResponse(
             content=(
                 {
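The rename endpoint resets the stored training metadata so that the next comparison between the dataset and its last-trained snapshot reports the model as untrained. A minimal sketch of that idea — the file layout, helper names, and the meaning of the second argument are assumptions for illustration, not Frigate's actual implementation:

```python
import json
import os

METADATA_DIR = "model_metadata"  # assumed location, for illustration only


def write_training_metadata(model_name: str, trained_image_count: int) -> None:
    """Record how many dataset images the model was last trained on."""
    os.makedirs(METADATA_DIR, exist_ok=True)
    path = os.path.join(METADATA_DIR, f"{model_name}.json")
    with open(path, "w") as f:
        json.dump({"trained_image_count": trained_image_count}, f)


def needs_training(model_name: str, current_image_count: int) -> bool:
    """A model is 'untrained' when the dataset no longer matches the snapshot."""
    path = os.path.join(METADATA_DIR, f"{model_name}.json")
    try:
        with open(path) as f:
            last = json.load(f)["trained_image_count"]
    except (FileNotFoundError, KeyError, json.JSONDecodeError):
        return True
    return current_image_count != last


# Resetting the count to 0 (as the endpoint does) guarantees a mismatch
# with any non-empty dataset, so the UI will offer retraining.
write_training_metadata("my_model", 0)
print(needs_training("my_model", 12))  # True
```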
= event_data["sub_label"] if event_type == EventStateEnum.start: + timeline_entry = base_entry.copy() timeline_entry[Timeline.class_type] = "visible" - save = True + self.insert_or_save(timeline_entry, prev_event_data, event_data) elif event_type == EventStateEnum.update: + # Check all conditions and create timeline entries for each change + entries_to_save = [] + + # Check for zone changes + prev_zones = set(prev_event_data["current_zones"]) + current_zones = set(event_data["current_zones"]) + zones_changed = prev_zones != current_zones + + # Only save "entered_zone" events when the object is actually IN zones if ( - len(prev_event_data["current_zones"]) < len(event_data["current_zones"]) + zones_changed and not event_data["stationary"] + and len(current_zones) > 0 ): - timeline_entry[Timeline.class_type] = "entered_zone" - timeline_entry[Timeline.data]["zones"] = event_data["current_zones"] - save = True - elif prev_event_data["stationary"] != event_data["stationary"]: - timeline_entry[Timeline.class_type] = ( + zone_entry = base_entry.copy() + zone_entry[Timeline.class_type] = "entered_zone" + zone_entry[Timeline.data] = base_entry[Timeline.data].copy() + zone_entry[Timeline.data]["zones"] = event_data["current_zones"] + entries_to_save.append(zone_entry) + + # Check for stationary status change + if prev_event_data["stationary"] != event_data["stationary"]: + stationary_entry = base_entry.copy() + stationary_entry[Timeline.class_type] = ( "stationary" if event_data["stationary"] else "active" ) - save = True - elif prev_event_data["attributes"] == {} and event_data["attributes"] != {}: - timeline_entry[Timeline.class_type] = "attribute" - timeline_entry[Timeline.data]["attribute"] = list( + stationary_entry[Timeline.data] = base_entry[Timeline.data].copy() + entries_to_save.append(stationary_entry) + + # Check for new attributes + if prev_event_data["attributes"] == {} and event_data["attributes"] != {}: + attribute_entry = base_entry.copy() + attribute_entry[Timeline.class_type] = "attribute" + attribute_entry[Timeline.data] = base_entry[Timeline.data].copy() + attribute_entry[Timeline.data]["attribute"] = list( event_data["attributes"].keys() )[0] if len(event_data["current_attributes"]) > 0: - timeline_entry[Timeline.data]["attribute_box"] = to_relative_box( + attribute_entry[Timeline.data]["attribute_box"] = to_relative_box( camera_config.detect.width, camera_config.detect.height, event_data["current_attributes"][0]["box"], ) - save = True - elif event_type == EventStateEnum.end: - timeline_entry[Timeline.class_type] = "gone" - save = True + entries_to_save.append(attribute_entry) - if save: + # Save all entries + for entry in entries_to_save: + self.insert_or_save(entry, prev_event_data, event_data) + + elif event_type == EventStateEnum.end: + timeline_entry = base_entry.copy() + timeline_entry[Timeline.class_type] = "gone" self.insert_or_save(timeline_entry, prev_event_data, event_data) def handle_api_entry( diff --git a/web/src/components/card/ClassificationCard.tsx b/web/src/components/card/ClassificationCard.tsx index bf91d89c2..a8ed12f24 100644 --- a/web/src/components/card/ClassificationCard.tsx +++ b/web/src/components/card/ClassificationCard.tsx @@ -233,7 +233,7 @@ export function GroupedClassificationCard({ }); if (!best) { - return group.at(-1); + best = group.at(-1)!; } const bestTyped: ClassificationItemData = best; @@ -377,30 +377,34 @@ export function GroupedClassificationCard({ )} - {isDesktop && ( -
diff --git a/frigate/data_processing/post/audio_transcription.py b/frigate/data_processing/post/audio_transcription.py
index b7b6cb021..558ab433e 100644
--- a/frigate/data_processing/post/audio_transcription.py
+++ b/frigate/data_processing/post/audio_transcription.py
@@ -131,8 +131,9 @@ class AudioTranscriptionPostProcessor(PostProcessorApi):
                 },
             )
 
-            # Embed the description
-            self.embeddings.embed_description(event_id, transcription)
+            # Embed the description if semantic search is enabled
+            if self.config.semantic_search.enabled:
+                self.embeddings.embed_description(event_id, transcription)
 
         except DoesNotExist:
             logger.debug("No recording found for audio transcription post-processing")
diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py
index 2b0fc4193..0d1a1b025 100644
--- a/frigate/events/maintainer.py
+++ b/frigate/events/maintainer.py
@@ -46,7 +46,7 @@ def should_update_state(prev_event: Event, current_event: Event) -> bool:
     if prev_event["sub_label"] != current_event["sub_label"]:
         return True
 
-    if len(prev_event["current_zones"]) < len(current_event["current_zones"]):
+    if set(prev_event["current_zones"]) != set(current_event["current_zones"]):
         return True
 
     return False
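The old length comparison only fires when the zone count grows, so it misses an object moving between two zones (count unchanged) or leaving all zones (count shrinks). A quick illustration of the difference, with made-up zone lists:

```python
prev_zones = ["driveway"]
current_zones = ["porch"]  # object moved between zones; count is unchanged

# Old check: never fires here, so the state update is dropped.
print(len(prev_zones) < len(current_zones))  # False

# New check: fires on any membership change, including swaps and exits.
print(set(prev_zones) != set(current_zones))  # True
print(set(["porch"]) != set([]))              # True (object left all zones)
```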
diff --git a/frigate/timeline.py b/frigate/timeline.py
index a2d59b88e..cf2f5e8c7 100644
--- a/frigate/timeline.py
+++ b/frigate/timeline.py
@@ -86,11 +86,11 @@ class TimelineProcessor(threading.Thread):
         event_data: dict[Any, Any],
     ) -> bool:
         """Handle object detection."""
-        save = False
         camera_config = self.config.cameras[camera]
         event_id = event_data["id"]
 
-        timeline_entry = {
+        # Base timeline entry data that all entries will share
+        base_entry = {
             Timeline.timestamp: event_data["frame_time"],
             Timeline.camera: camera,
             Timeline.source: "tracked_object",
@@ -123,40 +123,64 @@ class TimelineProcessor(threading.Thread):
                 e[Timeline.data]["sub_label"] = event_data["sub_label"]
 
         if event_type == EventStateEnum.start:
+            timeline_entry = base_entry.copy()
             timeline_entry[Timeline.class_type] = "visible"
-            save = True
+            self.insert_or_save(timeline_entry, prev_event_data, event_data)
         elif event_type == EventStateEnum.update:
+            # Check all conditions and create timeline entries for each change
+            entries_to_save = []
+
+            # Check for zone changes
+            prev_zones = set(prev_event_data["current_zones"])
+            current_zones = set(event_data["current_zones"])
+            zones_changed = prev_zones != current_zones
+
+            # Only save "entered_zone" events when the object is actually IN zones
             if (
-                len(prev_event_data["current_zones"]) < len(event_data["current_zones"])
+                zones_changed
                 and not event_data["stationary"]
+                and len(current_zones) > 0
             ):
-                timeline_entry[Timeline.class_type] = "entered_zone"
-                timeline_entry[Timeline.data]["zones"] = event_data["current_zones"]
-                save = True
-            elif prev_event_data["stationary"] != event_data["stationary"]:
-                timeline_entry[Timeline.class_type] = (
+                zone_entry = base_entry.copy()
+                zone_entry[Timeline.class_type] = "entered_zone"
+                zone_entry[Timeline.data] = base_entry[Timeline.data].copy()
+                zone_entry[Timeline.data]["zones"] = event_data["current_zones"]
+                entries_to_save.append(zone_entry)
+
+            # Check for stationary status change
+            if prev_event_data["stationary"] != event_data["stationary"]:
+                stationary_entry = base_entry.copy()
+                stationary_entry[Timeline.class_type] = (
                     "stationary" if event_data["stationary"] else "active"
                 )
-                save = True
-            elif prev_event_data["attributes"] == {} and event_data["attributes"] != {}:
-                timeline_entry[Timeline.class_type] = "attribute"
-                timeline_entry[Timeline.data]["attribute"] = list(
+                stationary_entry[Timeline.data] = base_entry[Timeline.data].copy()
+                entries_to_save.append(stationary_entry)
+
+            # Check for new attributes
+            if prev_event_data["attributes"] == {} and event_data["attributes"] != {}:
+                attribute_entry = base_entry.copy()
+                attribute_entry[Timeline.class_type] = "attribute"
+                attribute_entry[Timeline.data] = base_entry[Timeline.data].copy()
+                attribute_entry[Timeline.data]["attribute"] = list(
                     event_data["attributes"].keys()
                 )[0]
                 if len(event_data["current_attributes"]) > 0:
-                    timeline_entry[Timeline.data]["attribute_box"] = to_relative_box(
+                    attribute_entry[Timeline.data]["attribute_box"] = to_relative_box(
                         camera_config.detect.width,
                         camera_config.detect.height,
                         event_data["current_attributes"][0]["box"],
                     )
-                save = True
-        elif event_type == EventStateEnum.end:
-            timeline_entry[Timeline.class_type] = "gone"
-            save = True
+                entries_to_save.append(attribute_entry)
 
-        if save:
+            # Save all entries
+            for entry in entries_to_save:
+                self.insert_or_save(entry, prev_event_data, event_data)
+
+        elif event_type == EventStateEnum.end:
+            timeline_entry = base_entry.copy()
+            timeline_entry[Timeline.class_type] = "gone"
             self.insert_or_save(timeline_entry, prev_event_data, event_data)
 
     def handle_api_entry(
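Each per-change entry above gets `base_entry.copy()` plus its own copy of the nested `data` dict. A plain `dict.copy()` is shallow, so without the second copy all entries would share, and overwrite, one `data` mapping. A minimal demonstration with plain dicts:

```python
base_entry = {"class_type": None, "data": {"label": "car"}}

# Shallow copies share the nested "data" dict.
a = base_entry.copy()
b = base_entry.copy()
a["data"]["zones"] = ["driveway"]
print("zones" in b["data"])  # True -- the zone leaked into the other entry

# Copying the nested dict as well keeps each timeline entry independent.
c = base_entry.copy()
c["data"] = base_entry["data"].copy()
c["data"]["attribute"] = "license_plate"
print("attribute" in base_entry["data"])  # False
```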
- - - {t("details.item.button.viewInExplore", { - ns: "views/explore", - })} - - -
diff --git a/web/src/components/classification/wizard/Step3ChooseExamples.tsx b/web/src/components/classification/wizard/Step3ChooseExamples.tsx
index ec45f4b4d..e3dd04afc 100644
--- a/web/src/components/classification/wizard/Step3ChooseExamples.tsx
+++ b/web/src/components/classification/wizard/Step3ChooseExamples.tsx
@@ -45,6 +45,12 @@ export default function Step3ChooseExamples({
   const [isProcessing, setIsProcessing] = useState(false);
   const [currentClassIndex, setCurrentClassIndex] = useState(0);
   const [selectedImages, setSelectedImages] = useState<Set<string>>(new Set());
+  const [cacheKey, setCacheKey] = useState(Date.now());
+  const [loadedImages, setLoadedImages] = useState<Set<string>>(new Set());
+
+  const handleImageLoad = useCallback((imageName: string) => {
+    setLoadedImages((prev) => new Set(prev).add(imageName));
+  }, []);
 
   const { data: trainImages, mutate: refreshTrainImages } = useSWR(
     hasGenerated ? `classification/${step1Data.modelName}/train` : null,
@@ -332,6 +338,8 @@ export default function Step3ChooseExamples({
 
       setHasGenerated(true);
       toast.success(t("wizard.step3.generateSuccess"));
+      // Update cache key to force image reload
+      setCacheKey(Date.now());
       await refreshTrainImages();
     } catch (error) {
       const axiosError = error as {
@@ -565,10 +573,16 @@ export default function Step3ChooseExamples({
                     )}
                     onClick={() => toggleImageSelection(imageName)}
                   >
+                    {!loadedImages.has(imageName) && (
+                      <div ...>
+                        <ActivityIndicator />
+                      </div>
+                    )}
                     <img
-                      src={`...`}
+                      src={`...?cache=${cacheKey}`}
                       alt={`Example ${imageName}`}
+                      onLoad={() => handleImageLoad(imageName)}
                     />
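The wizard forces the browser to refetch freshly generated examples by baking a changing cache key into each image URL: same path plus a changed query string means a different cache entry. The idea in a few lines of Python — the URL shape and parameter name are assumptions for illustration:

```python
import time


def image_url(base: str, image_name: str, cache_key: int) -> str:
    # Appending a changing query parameter busts the browser cache,
    # so a regenerated image replaces the stale thumbnail.
    return f"{base}/clips/{image_name}?cache={cache_key}"


cache_key = int(time.time() * 1000)  # equivalent of JS Date.now()
print(image_url("http://frigate.local:5000", "example-0.webp", cache_key))
```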
diff --git a/web/src/views/recording/RecordingView.tsx b/web/src/views/recording/RecordingView.tsx
index 5b4d5328c..839186d8e 100644
--- a/web/src/views/recording/RecordingView.tsx
+++ b/web/src/views/recording/RecordingView.tsx
@@ -309,7 +309,10 @@ export function RecordingView({
       currentTimeRange.after <= currentTime &&
       currentTimeRange.before >= currentTime
     ) {
-      mainControllerRef.current?.seekToTimestamp(currentTime, true);
+      mainControllerRef.current?.seekToTimestamp(
+        currentTime,
+        mainControllerRef.current.isPlaying(),
+      );
     } else {
       updateSelectedSegment(currentTime, true);
     }
diff --git a/web/src/views/settings/MotionTunerView.tsx b/web/src/views/settings/MotionTunerView.tsx
index 4bcd9cdc5..25d5f1469 100644
--- a/web/src/views/settings/MotionTunerView.tsx
+++ b/web/src/views/settings/MotionTunerView.tsx
@@ -4,7 +4,7 @@ import useSWR from "swr";
 import axios from "axios";
 import ActivityIndicator from "@/components/indicators/activity-indicator";
 import AutoUpdatingCameraImage from "@/components/camera/AutoUpdatingCameraImage";
-import { useCallback, useContext, useEffect, useMemo, useState } from "react";
+import { useCallback, useEffect, useMemo, useState } from "react";
 import { Slider } from "@/components/ui/slider";
 import { Label } from "@/components/ui/label";
 import {
@@ -20,7 +20,6 @@ import { toast } from "sonner";
 import { Separator } from "@/components/ui/separator";
 import { Link } from "react-router-dom";
 import { LuExternalLink } from "react-icons/lu";
-import { StatusBarMessagesContext } from "@/context/statusbar-provider";
 import { Trans, useTranslation } from "react-i18next";
 import { useDocDomain } from "@/hooks/use-doc-domain";
 import { cn } from "@/lib/utils";
@@ -48,8 +47,6 @@ export default function MotionTunerView({
   const [changedValue, setChangedValue] = useState(false);
   const [isLoading, setIsLoading] = useState(false);
 
-  const { addMessage, removeMessage } = useContext(StatusBarMessagesContext)!;
-
   const { send: sendMotionThreshold } = useMotionThreshold(selectedCamera);
   const { send: sendMotionContourArea } = useMotionContourArea(selectedCamera);
   const { send: sendImproveContrast } = useImproveContrast(selectedCamera);
@@ -119,7 +116,10 @@ export default function MotionTunerView({
     axios
       .put(
        `config/set?cameras.${selectedCamera}.motion.threshold=${motionSettings.threshold}&cameras.${selectedCamera}.motion.contour_area=${motionSettings.contour_area}&cameras.${selectedCamera}.motion.improve_contrast=${motionSettings.improve_contrast ? "True" : "False"}`,
-        { requires_restart: 0 },
+        {
+          requires_restart: 0,
+          update_topic: `config/cameras/${selectedCamera}/motion`,
+        },
       )
       .then((res) => {
         if (res.status === 200) {
@@ -164,23 +164,7 @@ export default function MotionTunerView({
   const onCancel = useCallback(() => {
     setMotionSettings(origMotionSettings);
     setChangedValue(false);
-    removeMessage("motion_tuner", `motion_tuner_${selectedCamera}`);
-  }, [origMotionSettings, removeMessage, selectedCamera]);
-
-  useEffect(() => {
-    if (changedValue) {
-      addMessage(
-        "motion_tuner",
-        t("motionDetectionTuner.unsavedChanges", { camera: selectedCamera }),
-        undefined,
-        `motion_tuner_${selectedCamera}`,
-      );
-    } else {
-      removeMessage("motion_tuner", `motion_tuner_${selectedCamera}`);
-    }
-    // we know that these deps are correct
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [changedValue, selectedCamera]);
+  }, [origMotionSettings]);
 
   useEffect(() => {
     document.title = t("documentTitle.motionTuner");
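With `update_topic` included in the `config/set` body and `requires_restart` set to 0, the backend applies the new motion settings at runtime and publishes them on the camera's config topic, so the tuner view picks them up live instead of requiring a restart. A sketch of the same call from a script — the endpoint shape is taken from the diff above, but the host, port, and payload handling are assumptions:

```python
import requests

FRIGATE = "http://frigate.local:5000/api"  # assumed base URL
camera = "front_door"

# Query parameters mirror the config paths the tuner view writes.
params = {
    f"cameras.{camera}.motion.threshold": 30,
    f"cameras.{camera}.motion.contour_area": 10,
    f"cameras.{camera}.motion.improve_contrast": "True",
}

# requires_restart=0 applies the change at runtime; update_topic tells
# Frigate which config topic to broadcast the new values on.
body = {
    "requires_restart": 0,
    "update_topic": f"config/cameras/{camera}/motion",
}

resp = requests.put(f"{FRIGATE}/config/set", params=params, json=body, timeout=10)
print(resp.status_code)
```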