#!/usr/bin/env -S uv run python
"""
WebSocket client for the FR-in-the-Cloud inference server (app/main.py).

Start the server first:
    sanic app.main --host=0.0.0.0 --port=42067 --single-process

Then run the client (server defaults to ws://localhost:42067):
    ./client.py                                        # Ghost PC camera via RTSP (default)
    ./client.py 0                                      # local webcam
    ./client.py data/10people.jpg                      # static image, loops until 'q'
    ./client.py path/to/video.mp4                      # video file
    ./client.py path/to/video.mp4 --output out.mp4     # save output
    ./client.py --server ws://host:42067                # custom server
"""
import json
import os
from pathlib import Path
from typing import Annotated, Optional

import cv2
import numpy as np
import supervision as sv
import typer
import websockets.sync.client

# File extensions treated as static images: the same frame is re-sent in a loop
# (see main) instead of being read from a VideoCapture.
IMAGE_EXTENSIONS = {".jpg", ".jpeg", ".png", ".bmp", ".webp", ".tiff"}

# Typer CLI application; `main` below is registered as its single command.
app = typer.Typer()


def encode_frame(frame: np.ndarray) -> bytes:
    """Encode a BGR frame as JPEG bytes for transmission to the server.

    Args:
        frame: Image in OpenCV's BGR ndarray layout.

    Returns:
        The JPEG-compressed frame as raw bytes.

    Raises:
        ValueError: if OpenCV fails to encode the frame (e.g. empty or
            malformed input) — previously the failure flag was ignored and
            a bogus payload could be sent silently.
    """
    ok, buf = cv2.imencode(".jpg", frame)
    if not ok:
        raise ValueError("cv2.imencode failed to encode frame as JPEG")
    return buf.tobytes()


def annotate_frame(frame: np.ndarray, detections: list[dict]) -> np.ndarray:
    """Draw bounding boxes and identity labels onto *frame*.

    Each detection dict supplies "bbox" (xyxy coordinates), "identity"
    (falsy values render as '?'), and "confidence". The frame is returned
    unchanged when there is nothing to draw.
    """
    if not detections:
        return frame

    boxes = np.array([det["bbox"] for det in detections], dtype=np.float32)
    captions = [
        f"{det['identity'] or '?'} ({det['confidence']:.2f})" for det in detections
    ]
    dets = sv.Detections(xyxy=boxes)

    box_drawer = sv.BoxAnnotator(color_lookup=sv.ColorLookup.INDEX)
    label_drawer = sv.LabelAnnotator(color_lookup=sv.ColorLookup.INDEX)
    frame = box_drawer.annotate(frame, dets)
    frame = label_drawer.annotate(frame, dets, captions)
    return frame


@app.command()
def main(
    source: Annotated[str, typer.Argument(help="Webcam index, video file, image, or stream URL")] = "rtsp://192.168.168.105:8554/cam",
    server: Annotated[str, typer.Option(help="WebSocket server URL")] = "ws://localhost:42067",
    output: Annotated[Optional[Path], typer.Option(help="Path to save annotated output video")] = None,
) -> None:
    """Send video frames to the FR-in-the-Cloud server and display annotated results.

    Exits on 'q' in the display window, end of a video file, or stream failure.
    """
    is_image = any(source.lower().endswith(ext) for ext in IMAGE_EXTENSIONS)
    is_webcam = source.isdigit()
    is_stream = source.startswith("rtsp://") or source.startswith("http")
    # Live sources (webcam / network stream) need their internal buffer drained
    # each iteration so we always annotate the freshest frame. Video files must
    # NOT be drained, or we would silently drop ~4 of every 5 frames.
    is_live = is_webcam or is_stream

    if is_image:
        static_frame = cv2.imread(source)
        if static_frame is None:
            typer.echo(f"Error: cannot load image '{source}'", err=True)
            raise typer.Exit(1)
        # Static image has no native frame rate; 30 fps is used for any output video.
        fps, width, height = 30.0, static_frame.shape[1], static_frame.shape[0]
    else:
        # Low-latency FFmpeg options for RTSP/HTTP capture (harmless otherwise).
        os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;tcp|fflags;nobuffer|flags;low_delay"
        # Bug fix: the FFmpeg backend cannot open local camera devices, so
        # `./client.py 0` failed when CAP_FFMPEG was forced unconditionally.
        # Let OpenCV auto-select the backend for webcam indices.
        if is_webcam:
            cap = cv2.VideoCapture(int(source))
        else:
            cap = cv2.VideoCapture(source, cv2.CAP_FFMPEG)
        if is_stream:
            cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        if not cap.isOpened():
            typer.echo(f"Error: cannot open source '{source}'", err=True)
            raise typer.Exit(1)
        fps = cap.get(cv2.CAP_PROP_FPS) or 30.0  # some backends report 0
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    writer = None
    if output is not None:
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        writer = cv2.VideoWriter(str(output), fourcc, fps, (width, height))

    try:
        with websockets.sync.client.connect(server) as ws:
            typer.echo(f"Connected to {server}")
            while True:
                if is_image:
                    frame = static_frame
                else:
                    if is_live:
                        # Drain buffered frames to get the latest one.
                        for _ in range(5):
                            if not cap.grab():
                                break
                        ret, frame = cap.retrieve()
                    else:
                        # Video file: read sequentially, dropping nothing.
                        ret, frame = cap.read()
                    if not ret:
                        break

                ws.send(encode_frame(frame))
                response = json.loads(ws.recv())
                annotated = annotate_frame(frame.copy(), response["detections"])

                cv2.imshow("FR-in-the-Cloud", annotated)
                if writer:
                    writer.write(annotated)
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break
    finally:
        if not is_image:
            cap.release()
        if writer:
            writer.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    app()
