Compare commits

...

2 Commits

Author SHA1 Message Date
Josh Hawkins
b4520d9e2f Miscellaneous fixes (0.17 beta) (#21826)
* revert other changes

* fix verified icon overlay in debug view list

* Add classification object debug logs

* Formatting

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2026-01-29 12:42:06 -07:00
Josh Hawkins
3b6814fbc9 Revert "Miscellaneous fixes (0.17 beta) (#21764)" (#21825)
This reverts commit 50ac5a1483.
2026-01-29 11:30:21 -07:00
9 changed files with 95 additions and 120 deletions

View File

@@ -419,14 +419,21 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        """
        if object_id not in self.classification_history:
            self.classification_history[object_id] = []
            logger.debug(f"Created new classification history for {object_id}")
        self.classification_history[object_id].append(
            (current_label, current_score, current_time)
        )
        history = self.classification_history[object_id]
        logger.debug(
            f"History for {object_id}: {len(history)} entries, latest=({current_label}, {current_score})"
        )
        if len(history) < 3:
            logger.debug(
                f"History for {object_id} has {len(history)} entries, need at least 3"
            )
            return None, 0.0
        label_counts = {}
@@ -445,14 +452,27 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        best_count = label_counts[best_label]
        consensus_threshold = total_attempts * 0.6
        logger.debug(
            f"Consensus calc for {object_id}: label_counts={label_counts}, "
            f"best_label={best_label}, best_count={best_count}, "
            f"total={total_attempts}, threshold={consensus_threshold}"
        )
        if best_count < consensus_threshold:
            logger.debug(
                f"No consensus for {object_id}: {best_count} < {consensus_threshold}"
            )
            return None, 0.0
        avg_score = sum(label_scores[best_label]) / len(label_scores[best_label])
        if best_label == "none":
            logger.debug(f"Filtering 'none' label for {object_id}")
            return None, 0.0
        logger.debug(
            f"Consensus reached for {object_id}: {best_label} with avg_score={avg_score}"
        )
        return best_label, avg_score
    def process_frame(self, obj_data, frame):
@@ -560,17 +580,30 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
        )
        if score < self.model_config.threshold:
            logger.debug(f"Score {score} is less than threshold.")
            logger.debug(
                f"{self.model_config.name}: Score {score} < threshold {self.model_config.threshold} for {object_id}, skipping"
            )
            return
        sub_label = self.labelmap[best_id]
        logger.debug(
            f"{self.model_config.name}: Object {object_id} (label={obj_data['label']}) passed threshold with sub_label={sub_label}, score={score}"
        )
        consensus_label, consensus_score = self.get_weighted_score(
            object_id, sub_label, score, now
        )
        logger.debug(
            f"{self.model_config.name}: get_weighted_score returned consensus_label={consensus_label}, consensus_score={consensus_score} for {object_id}"
        )
        if consensus_label is not None:
            camera = obj_data["camera"]
            logger.info(
                f"{self.model_config.name}: Publishing sub_label={consensus_label} for {obj_data['label']} object {object_id} on {camera}"
            )
            if (
                self.model_config.object_config.classification_type

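For context, the debug lines added above trace a weighted-consensus check over recent classification attempts. A minimal standalone sketch of that logic, assuming the same (label, score, timestamp) history tuples, 3-entry minimum, 0.6 consensus ratio, and "none" filtering shown in the diff; the function name and the defaultdict bookkeeping are illustrative, not the project's API:

from collections import defaultdict
from typing import Optional


def weighted_consensus(
    history: list[tuple[str, float, float]],
) -> tuple[Optional[str], float]:
    """Return (label, avg_score) once 60% of attempts agree, else (None, 0.0)."""
    if len(history) < 3:
        # Not enough attempts yet to trust any label.
        return None, 0.0

    label_counts: dict[str, int] = defaultdict(int)
    label_scores: dict[str, list[float]] = defaultdict(list)
    for label, score, _timestamp in history:
        label_counts[label] += 1
        label_scores[label].append(score)

    total_attempts = len(history)
    best_label = max(label_counts, key=label_counts.get)
    if label_counts[best_label] < total_attempts * 0.6:
        # No single label dominates the history yet.
        return None, 0.0

    avg_score = sum(label_scores[best_label]) / len(label_scores[best_label])
    if best_label == "none":
        # "none" is never published as a sub_label.
        return None, 0.0
    return best_label, avg_score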
View File

@@ -140,12 +140,7 @@ Each line represents a detection state, not necessarily unique individuals. Pare
            ) as f:
                f.write(context_prompt)
        json_schema = {
            "name": "review_metadata",
            "schema": ReviewMetadata.model_json_schema(),
            "strict": True,
        }
        response = self._send(context_prompt, thumbnails, json_schema=json_schema)
        response = self._send(context_prompt, thumbnails)
        if debug_save and response:
            with open(
@@ -157,8 +152,6 @@ Each line represents a detection state, not necessarily unique individuals. Pare
                f.write(response)
        if response:
            # With JSON schema, response should already be valid JSON
            # But keep regex cleanup as fallback for providers without schema support
            clean_json = re.sub(
                r"\n?```$", "", re.sub(r"^```[a-zA-Z0-9]*\n?", "", response)
            )
@@ -291,16 +284,8 @@ Guidelines:
        """Initialize the client."""
        return None
    def _send(
        self, prompt: str, images: list[bytes], json_schema: Optional[dict] = None
    ) -> Optional[str]:
        """Submit a request to the provider.
        Args:
            prompt: The text prompt to send
            images: List of image bytes to include
            json_schema: Optional JSON schema for structured output (provider-specific support)
        """
    def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
        """Submit a request to the provider."""
        return None
    def get_context_size(self) -> int:

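The regex cleanup retained above strips Markdown code fences that some providers wrap around their JSON output. A minimal sketch of that fallback using the same two re.sub patterns from the diff; the sample response string and its keys are made up for illustration:

import json
import re


def strip_code_fences(response: str) -> str:
    # Drop a leading ```json (or bare ```) line and a trailing ``` line.
    return re.sub(r"\n?```$", "", re.sub(r"^```[a-zA-Z0-9]*\n?", "", response))


# Illustrative provider output wrapped in a fenced block.
raw = '```json\n{"title": "Person at door", "severity": "detection"}\n```'
print(json.loads(strip_code_fences(raw)))  # parses cleanly once the fences are removed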
View File

@@ -41,46 +41,29 @@ class OpenAIClient(GenAIClient):
            azure_endpoint=azure_endpoint,
        )
    def _send(
        self, prompt: str, images: list[bytes], json_schema: Optional[dict] = None
    ) -> Optional[str]:
    def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
        """Submit a request to Azure OpenAI."""
        encoded_images = [base64.b64encode(image).decode("utf-8") for image in images]
        request_params = {
            "model": self.genai_config.model,
            "messages": [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": prompt}]
                    + [
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{image}",
                                "detail": "low",
                            },
                        }
                        for image in encoded_images
                    ],
                },
            ],
            "timeout": self.timeout,
        }
        if json_schema:
            request_params["response_format"] = {
                "type": "json_schema",
                "json_schema": {
                    "name": json_schema.get("name", "response"),
                    "schema": json_schema.get("schema", {}),
                    "strict": json_schema.get("strict", True),
                },
            }
        try:
            result = self.provider.chat.completions.create(
                **request_params,
                model=self.genai_config.model,
                messages=[
                    {
                        "role": "user",
                        "content": [{"type": "text", "text": prompt}]
                        + [
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:image/jpeg;base64,{image}",
                                    "detail": "low",
                                },
                            }
                            for image in encoded_images
                        ],
                    },
                ],
                timeout=self.timeout,
                **self.genai_config.runtime_options,
            )
        except Exception as e:

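Both OpenAI-compatible clients in this change build the image portion of the chat message the same way: each JPEG is base64-encoded and sent as a low-detail data URL. A small sketch of that payload construction, assuming raw JPEG bytes as input; the helper name is not from the codebase:

import base64


def build_image_parts(images: list[bytes]) -> list[dict]:
    """Turn raw JPEG bytes into chat-completion image_url content parts."""
    parts = []
    for image in images:
        encoded = base64.b64encode(image).decode("utf-8")
        parts.append(
            {
                "type": "image_url",
                "image_url": {
                    # "low" detail keeps token usage down for small thumbnails.
                    "url": f"data:image/jpeg;base64,{encoded}",
                    "detail": "low",
                },
            }
        )
    return parts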
View File

@@ -41,9 +41,7 @@ class GeminiClient(GenAIClient):
            http_options=types.HttpOptions(**http_options_dict),
        )
    def _send(
        self, prompt: str, images: list[bytes], json_schema: Optional[dict] = None
    ) -> Optional[str]:
    def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
        """Submit a request to Gemini."""
        contents = [
            types.Part.from_bytes(data=img, mime_type="image/jpeg") for img in images
@@ -53,12 +51,6 @@ class GeminiClient(GenAIClient):
        generation_config_dict = {"candidate_count": 1}
        generation_config_dict.update(self.genai_config.runtime_options)
        if json_schema and "schema" in json_schema:
            generation_config_dict["response_mime_type"] = "application/json"
            generation_config_dict["response_schema"] = types.Schema(
                json_schema=json_schema["schema"]
            )
        response = self.provider.models.generate_content(
            model=self.genai_config.model,
            contents=contents,

View File

@@ -50,9 +50,7 @@ class OllamaClient(GenAIClient):
            logger.warning("Error initializing Ollama: %s", str(e))
            return None
    def _send(
        self, prompt: str, images: list[bytes], json_schema: Optional[dict] = None
    ) -> Optional[str]:
    def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
        """Submit a request to Ollama"""
        if self.provider is None:
            logger.warning(
@@ -64,10 +62,6 @@ class OllamaClient(GenAIClient):
            **self.provider_options,
            **self.genai_config.runtime_options,
        }
        if json_schema and "schema" in json_schema:
            ollama_options["format"] = json_schema["schema"]
        result = self.provider.generate(
            self.genai_config.model,
            prompt,

View File

@@ -31,9 +31,7 @@ class OpenAIClient(GenAIClient):
        }
        return OpenAI(api_key=self.genai_config.api_key, **provider_opts)
    def _send(
        self, prompt: str, images: list[bytes], json_schema: Optional[dict] = None
    ) -> Optional[str]:
    def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
        """Submit a request to OpenAI."""
        encoded_images = [base64.b64encode(image).decode("utf-8") for image in images]
        messages_content = []
@@ -53,31 +51,16 @@
                "text": prompt,
            }
        )
        request_params = {
            "model": self.genai_config.model,
            "messages": [
                {
                    "role": "user",
                    "content": messages_content,
                },
            ],
            "timeout": self.timeout,
        }
        if json_schema:
            request_params["response_format"] = {
                "type": "json_schema",
                "json_schema": {
                    "name": json_schema.get("name", "response"),
                    "schema": json_schema.get("schema", {}),
                    "strict": json_schema.get("strict", True),
                },
            }
        try:
            result = self.provider.chat.completions.create(
                **request_params,
                model=self.genai_config.model,
                messages=[
                    {
                        "role": "user",
                        "content": messages_content,
                    },
                ],
                timeout=self.timeout,
                **self.genai_config.runtime_options,
            )
            if (

View File

@@ -12,7 +12,7 @@ import { useCameraPreviews } from "@/hooks/use-camera-previews";
import { baseUrl } from "@/api/baseUrl";
import { VideoPreview } from "../preview/ScrubbablePreview";
import { useApiHost } from "@/api";
import { isSafari } from "react-device-detect";
import { isDesktop, isSafari } from "react-device-detect";
import { useUserPersistence } from "@/hooks/use-user-persistence";
import { Skeleton } from "../ui/skeleton";
import { Button } from "../ui/button";
@@ -87,6 +87,7 @@ export function AnimatedEventCard({
  }, [visibilityListener]);
  const [isLoaded, setIsLoaded] = useState(false);
  const [isHovered, setIsHovered] = useState(false);
  // interaction
@@ -133,27 +134,31 @@ export function AnimatedEventCard({
    <Tooltip>
      <TooltipTrigger asChild>
        <div
          className="group relative h-24 flex-shrink-0 overflow-hidden rounded md:rounded-lg 4k:h-32"
          className="relative h-24 flex-shrink-0 overflow-hidden rounded md:rounded-lg 4k:h-32"
          style={{
            aspectRatio: alertVideos ? aspectRatio : undefined,
          }}
          onMouseEnter={isDesktop ? () => setIsHovered(true) : undefined}
          onMouseLeave={isDesktop ? () => setIsHovered(false) : undefined}
        >
          <Tooltip>
            <TooltipTrigger asChild>
              <Button
                className="pointer-events-none absolute left-2 top-1 z-40 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500 opacity-0 transition-opacity group-hover:pointer-events-auto group-hover:opacity-100"
                size="xs"
                aria-label={t("markAsReviewed")}
                onClick={async () => {
                  await axios.post(`reviews/viewed`, { ids: [event.id] });
                  updateEvents();
                }}
              >
                <FaCircleCheck className="size-3 text-white" />
              </Button>
            </TooltipTrigger>
            <TooltipContent>{t("markAsReviewed")}</TooltipContent>
          </Tooltip>
          {isHovered && (
            <Tooltip>
              <TooltipTrigger asChild>
                <Button
                  className="absolute left-2 top-1 z-40 bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
                  size="xs"
                  aria-label={t("markAsReviewed")}
                  onClick={async () => {
                    await axios.post(`reviews/viewed`, { ids: [event.id] });
                    updateEvents();
                  }}
                >
                  <FaCircleCheck className="size-3 text-white" />
                </Button>
              </TooltipTrigger>
              <TooltipContent>{t("markAsReviewed")}</TooltipContent>
            </Tooltip>
          )}
          {previews != undefined && alertVideosLoaded && (
            <div
              className="size-full cursor-pointer"

View File

@@ -173,9 +173,9 @@ function getVerifiedIcon(
  const simpleLabel = label.substring(0, label.lastIndexOf("-"));
  return (
    <div key={label} className="flex items-center">
    <div key={label} className="relative flex items-center">
      {getIconForLabel(simpleLabel, type, className)}
      <FaCheckCircle className="absolute size-2 translate-x-[80%] translate-y-3/4" />
      <FaCheckCircle className="absolute -bottom-0.5 -right-0.5 size-2" />
    </div>
  );
}
@@ -188,9 +188,9 @@ function getRecognizedPlateIcon(
  const simpleLabel = label.substring(0, label.lastIndexOf("-"));
  return (
    <div key={label} className="flex items-center">
    <div key={label} className="relative inline-flex items-center">
      {getIconForLabel(simpleLabel, type, className)}
      <LuScanBarcode className="absolute size-2.5 translate-x-[50%] translate-y-3/4" />
      <LuScanBarcode className="absolute -bottom-0.5 -right-0.5 size-2" />
    </div>
  );
}

View File

@@ -391,7 +391,7 @@ function ObjectList({ cameraConfig, objects }: ObjectListProps) {
  );
  return (
    <div className="scrollbar-container flex w-full flex-col overflow-y-auto">
    <div className="scrollbar-container relative flex w-full flex-col overflow-y-auto">
      {objects && objects.length > 0 ? (
        objects.map((obj: ObjectType) => {
          return (