from pathlib import Path

import torch
from dotenv import load_dotenv

# Directory containing this file; used below to resolve the bundled
# model/embedding files under ``data/``.
APP_ROOT = Path(__file__).parent


class Defaults:
    """Default configuration values for the application.

    Every UPPERCASE class attribute is treated as a configuration key;
    :meth:`dump` collects them into a plain dict (e.g. for feeding into a
    Sanic ``Config``). Subclasses may override individual settings and
    still dump the full, merged set.
    """

    # Expected input frame dimensions (pixels)
    FRAME_WIDTH = 1280
    FRAME_HEIGHT = 720

    # Path to JSON file mapping names to pre-computed face embeddings
    GALLERY_EMBEDDING_PATH = APP_ROOT / "data" / "klass_vit_embeddings.json"
    # ONNX model for face detection (SCRFD)
    FD_ONNX_PATH = APP_ROOT / "data" / "models" / "scrfd_s.onnx"
    # ONNX model for face recognition embeddings
    FR_ONNX_PATH = APP_ROOT / "data" / "models" / "ms1mv3_vit_run_4.onnx"
    # Use fp16 variant of the face detection model
    USE_HALF_PRECISION = True

    # Minimum score from SCRFD to consider a detection positive
    DETECTOR_THRESHOLD = 0.7
    # Maximum number of faces to keep after NMS
    DETECT_NUM_FACES = 20

    # Device for torch operations (auto-selects GPU if available)
    TORCH_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

    DEBUG = False
    PORT = 42067

    # Use facial landmarks from SCRFD for affine alignment before FR embedding
    USE_LANDMARK_ALIGNMENT = True

    # Minimum bbox width/height in pixels to pass to the tracker
    TRACK_MIN_FACE_SIZE = 30
    # Identity resolution strategy: embedding, voting, weighted_voting
    TRACK_AGGREGATION_STRATEGY = "embedding"
    # Number of gallery candidates considered per frame (for voting strategies)
    TRACK_VOTING_TOP_K = 1
    # Sliding window size for identity resolution history
    TRACK_MAX_HISTORY = 30

    @classmethod
    def dump(cls) -> dict:
        """Return all uppercase class attributes as a dict for Sanic Config.

        Uses ``dir(cls)`` + ``getattr`` rather than ``vars(cls)`` so that
        attributes inherited from a base class are included — ``vars`` only
        sees names defined directly on ``cls``, which would silently drop
        inherited settings when a subclass overrides just a few of them.
        For ``Defaults`` itself the result is unchanged (``object`` defines
        no uppercase attributes).
        """
        return {k: getattr(cls, k) for k in dir(cls) if k.isupper()}


# NOTE(review): ``load_dotenv`` is re-exported, presumably so callers can do
# ``from <this module> import load_dotenv`` — confirm against usage sites.
__all__ = ["Defaults", "load_dotenv"]
