#!/usr/bin/env python3
"""Low-friction test ladder for Milipol demo integration.

Runs layered validation for:
- unit
- component
- integration-mock
- scenario
- full-demo
"""

from __future__ import annotations

import argparse
import asyncio
import os
import ssl
import subprocess
import urllib.error
import urllib.request
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable

import websockets


# Repository root: the parent of this script's own directory (parents[1]).
ROOT = Path(__file__).resolve().parents[1]


@dataclass(frozen=True)
class PytestSuite:
    """One pytest invocation: a named group of test files run from a workdir."""

    # Human-readable label printed before the suite runs.
    name: str
    # Directory the pytest subprocess is launched from (its cwd).
    workdir: Path
    # Value assigned to PYTHONPATH for the subprocess, relative to workdir.
    pythonpath: str
    # Test file paths (relative to workdir) forwarded to pytest.
    tests: tuple[str, ...]


# Test-ladder registry: maps each layer name to the suites it runs, in order.
# Layers get progressively broader: unit -> component -> integration-mock ->
# scenario.  ("full-demo" is not a key here; it iterates these layers.)
SUITES_BY_LAYER: dict[str, tuple[PytestSuite, ...]] = {
    "unit": (
        PytestSuite(
            name="edge-proxy unit",
            workdir=ROOT / "edge-proxy",
            pythonpath=".",
            tests=("tests/test_waypoints.py", "tests/test_fr_loop.py"),
        ),
        PytestSuite(
            name="orchestrator unit",
            workdir=ROOT / "orchestrator",
            pythonpath="src",
            tests=(
                "src/planner/tests/test_dag_parser.py",
                "src/planner/tests/test_verifier.py",
                "src/tests/test_state_machine.py",
            ),
        ),
    ),
    "component": (
        PytestSuite(
            name="edge-proxy component",
            workdir=ROOT / "edge-proxy",
            pythonpath=".",
            tests=("tests/test_server_mock.py", "tests/test_rosbridge_backend.py"),
        ),
        PytestSuite(
            name="orchestrator component",
            workdir=ROOT / "orchestrator",
            pythonpath="src",
            tests=(
                "src/executor/tests/test_edge_proxy_integration.py",
                "src/tests/test_state_machine_executor_integration.py",
            ),
        ),
    ),
    "integration-mock": (
        PytestSuite(
            name="edge-proxy integration-mock",
            workdir=ROOT / "edge-proxy",
            pythonpath=".",
            tests=("tests/test_server_rosbridge_integration.py",),
        ),
        PytestSuite(
            name="orchestrator integration-mock",
            workdir=ROOT / "orchestrator",
            pythonpath="src",
            tests=("src/tests/test_dynamic_replanning_integration.py",),
        ),
    ),
    # Scenario layer pins a single end-to-end test node id.
    "scenario": (
        PytestSuite(
            name="demo scenario (edge-proxy)",
            workdir=ROOT / "edge-proxy",
            pythonpath=".",
            tests=(
                "tests/test_server_rosbridge_integration.py::test_rosbridge_demo_scenario_three_scene_waypoints",
            ),
        ),
    ),
}


def _http_ok(url: str, timeout_sec: float) -> tuple[bool, str]:
    try:
        with urllib.request.urlopen(url, timeout=timeout_sec) as resp:
            if 200 <= resp.status < 300:
                return True, f"{resp.status}"
            return False, f"HTTP {resp.status}"
    except urllib.error.URLError as exc:
        return False, str(exc.reason)
    except Exception as exc:  # pragma: no cover - defensive
        return False, str(exc)


async def _ws_ok(url: str, timeout_sec: float) -> tuple[bool, str]:
    """Attempt a websocket handshake against *url*.

    Returns ``(ok, detail)``.  Certificate verification is disabled for
    wss:// URLs (self-signed certs on the demo rig).
    """
    try:
        if url.lower().startswith("wss://"):
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
        else:
            ctx = None
        async with websockets.connect(url, open_timeout=timeout_sec, ssl=ctx):
            return True, "connected"
    except Exception as exc:
        return False, str(exc)


def run_preflight(args: argparse.Namespace) -> int:
    """Verify connectivity to orchestrator, edge-proxy, and rosbridge.

    Prints a PASS/FAIL table and returns 0 when everything is reachable,
    1 otherwise.
    """
    orch = _http_ok(args.orchestrator_health, args.timeout_sec)
    edge = _http_ok(args.edge_health, args.timeout_sec)
    bridge = asyncio.run(_ws_ok(args.rosbridge_url, args.timeout_sec))

    results: list[tuple[str, bool, str]] = [
        ("orchestrator_health", *orch),
        ("edge_proxy_health", *edge),
        ("rosbridge_ws", *bridge),
    ]

    print("\nPreflight")
    print("=" * 72)
    for name, ok, detail in results:
        print(f"{name:24s} {'PASS' if ok else 'FAIL':5s} {detail}")

    if all(ok for _, ok, _ in results):
        print("\nPreflight passed.")
        return 0

    print("\nPreflight failed. Fix connectivity before running full pipeline tests.")
    return 1


def _run_pytest_suite(suite: PytestSuite, pytest_args: Iterable[str]) -> int:
    """Run one suite's pytest command in its workdir; return pytest's exit code."""
    child_env = dict(os.environ)
    # Each suite pins PYTHONPATH so its package layout resolves in isolation.
    child_env["PYTHONPATH"] = suite.pythonpath

    command = ["pytest", "-q", *suite.tests, *pytest_args]
    print(f"\n[{suite.name}]")
    print(f"workdir={suite.workdir}")
    print("cmd=" + " ".join(command))
    return subprocess.run(command, cwd=suite.workdir, env=child_env).returncode


def run_layer(layer: str, pytest_args: Iterable[str]) -> int:
    """Run every suite registered under *layer*, stopping at the first failure."""
    for suite in SUITES_BY_LAYER[layer]:
        code = _run_pytest_suite(suite, pytest_args)
        if code:
            return code
    return 0


def run_live_llm_smoke(args: argparse.Namespace) -> int:
    """Run one real planner call against the configured LLM endpoint.

    Returns 2 when no API key env var is set, otherwise the exit code of
    the ``live_plan_demo.py`` subprocess.
    """
    env = os.environ.copy()
    key_names = ("Z_AI_API_KEY", "MODELAPI_KEY", "MODEL_API_KEY", "OPENAI_API_KEY")
    if not any(bool(env.get(name)) for name in key_names):
        print(
            "Missing API key env var. Set one of: "
            "Z_AI_API_KEY, MODELAPI_KEY, MODEL_API_KEY, OPENAI_API_KEY"
        )
        return 2

    cmd = [
        "python3",
        "scripts/live_plan_demo.py",
        "--endpoint",
        args.live_llm_endpoint,
        "--model",
        args.live_llm_model,
        "--intent",
        args.live_llm_intent,
        "--timeout-s",
        str(args.live_llm_timeout_sec),
    ]
    # Optional flags are appended only when requested on our own CLI.
    for flag, enabled in (
        ("--use-openai-sdk", args.live_llm_use_openai_sdk),
        ("--no-response-format", args.live_llm_no_response_format),
    ):
        if enabled:
            cmd.append(flag)

    print("\n[live-llm orchestrator smoke]")
    print(f"workdir={ROOT / 'orchestrator'}")
    print("cmd=" + " ".join(cmd))
    return subprocess.run(cmd, cwd=ROOT / "orchestrator", env=env).returncode


def run_full_demo(args: argparse.Namespace) -> int:
    """Run preflight (unless skipped), then every mock layer in ladder order.

    With ``--with-live-llm``, a real planner smoke call runs just before
    the scenario layer.  Returns the first nonzero exit code, else 0.
    """
    if not args.skip_preflight:
        preflight_rc = run_preflight(args)
        if preflight_rc != 0:
            return preflight_rc

    for layer in ("unit", "component", "integration-mock", "scenario"):
        # Live-LLM smoke runs immediately before the scenario layer.
        if layer == "scenario" and args.with_live_llm:
            smoke_rc = run_live_llm_smoke(args)
            if smoke_rc != 0:
                return smoke_rc
        print(f"\nRunning layer: {layer}")
        layer_rc = run_layer(layer, args.pytest_args)
        if layer_rc != 0:
            return layer_rc

    print("\nFull-demo test ladder passed.")
    print("Next manual step: run live robot dry-run (dashboard + orchestrator + edge-proxy + ros2 stack).")
    return 0


def parse_args() -> argparse.Namespace:
    """Parse CLI options for the test-ladder runner.

    Extra pytest arguments may be given after a literal ``--``; the
    separator itself is stripped so pytest never receives a stray ``--``
    token (``argparse.REMAINDER`` keeps it in the captured list).
    """
    parser = argparse.ArgumentParser(description="Milipol demo test ladder runner")
    parser.add_argument(
        "--mode",
        choices=("preflight", "unit", "component", "integration-mock", "live-llm", "scenario", "full-demo"),
        default="component",
        help="Test ladder mode to run",
    )
    parser.add_argument(
        "--orchestrator-health",
        default=os.getenv("ORCHESTRATOR_HEALTH_URL", "http://127.0.0.1:8000/health"),
    )
    parser.add_argument(
        "--edge-health",
        default=os.getenv("EDGE_PROXY_HEALTH_URL", "http://127.0.0.1:8080/health"),
    )
    parser.add_argument(
        "--rosbridge-url",
        default=os.getenv("ROSBRIDGE_URL", "wss://127.0.0.1:9090"),
    )
    parser.add_argument(
        "--timeout-sec",
        type=float,
        default=2.0,
        help="Timeout for preflight checks",
    )
    parser.add_argument(
        "--skip-preflight",
        action="store_true",
        help="Skip connectivity checks (only for offline/unit work)",
    )
    parser.add_argument(
        "--with-live-llm",
        action="store_true",
        help="Run a real orchestrator planner call against configured LLM endpoint in full-demo mode",
    )
    parser.add_argument(
        "--live-llm-endpoint",
        default=os.getenv(
            "LIVE_LLM_ENDPOINT",
            os.getenv("ORCHESTRATOR_LLM_ENDPOINT", "https://modelapi.klass.dev/v1/chat/completions"),
        ),
    )
    parser.add_argument(
        "--live-llm-model",
        default=os.getenv("LIVE_LLM_MODEL", os.getenv("ORCHESTRATOR_LLM_MODEL", "Qwen3-Next-80B-A3B-FP8")),
    )
    parser.add_argument(
        "--live-llm-intent",
        default="Navigate to office_scene, scan area, verify chemical container.",
    )
    parser.add_argument(
        "--live-llm-timeout-sec",
        type=int,
        default=45,
    )
    parser.add_argument(
        "--live-llm-use-openai-sdk",
        action="store_true",
        help="Use openai-python SDK for live-llm mode",
    )
    parser.add_argument(
        "--live-llm-no-response-format",
        action="store_true",
        help="Disable response_format=json_object for live-llm mode",
    )
    parser.add_argument(
        "pytest_args",
        nargs=argparse.REMAINDER,
        help="Extra args forwarded to pytest (prefix with --)",
    )
    args = parser.parse_args()
    # argparse.REMAINDER can keep the leading "--" separator in the captured
    # list; drop it so it is not forwarded to pytest as an argument.
    if args.pytest_args and args.pytest_args[0] == "--":
        args.pytest_args = args.pytest_args[1:]
    return args


def main() -> int:
    """CLI entry point: dispatch on --mode and return a process exit code."""
    args = parse_args()
    mode = args.mode

    if mode == "preflight":
        return run_preflight(args)
    if mode == "live-llm":
        return run_live_llm_smoke(args)
    if mode == "full-demo":
        return run_full_demo(args)

    # Of the plain layer modes, only "scenario" talks to live services,
    # so gate it behind the connectivity checks unless explicitly skipped.
    if mode == "scenario" and not args.skip_preflight:
        preflight_rc = run_preflight(args)
        if preflight_rc != 0:
            return preflight_rc

    return run_layer(mode, args.pytest_args)


if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    raise SystemExit(main())
