from __future__ import annotations

import argparse
import asyncio
from dataclasses import dataclass
from http import HTTPStatus
import json
import logging
import os
from typing import Any, Dict, Optional, Set, Tuple

import websockets
from websockets.server import WebSocketServerProtocol

from executor.action_executor import ActionExecutor, ExecutionReport
from planner.llm_client import LLMClient, LLMConfig
from planner.planner import PlanContext, PlanRequest, PlannerService
from state_machine import InvalidTransition, OrchestratorStateMachine, OrchestratorState, TransitionRecord
from edge_proxy.client import EdgeProxyClient, ClientConfig
from edge_proxy.messages import EventLogMessage, FRDetectionsMessage, NavStatusMessage
from services.fr_analyzer import FRTemporalAnalyzer


@dataclass
class ServerConfig:
    """Static configuration for the orchestrator WebSocket server."""

    host: str = "0.0.0.0"  # bind address for the WebSocket server
    port: int = 8000  # bind port
    health_path: str = "/health"  # plain-HTTP health-check path (see _process_request)
    ws_path: str = "/ws"  # primary client WebSocket path
    dashboard_ws_path: str = "/dashboard"  # dashboard WebSocket path
    ping_interval: int = 20  # seconds between WebSocket keepalive pings
    ping_timeout: int = 20  # seconds to wait for a pong before dropping the socket
    enable_planner: bool = False  # build an LLM-backed PlannerService at startup
    enable_executor: bool = True  # build an ActionExecutor at startup
    enable_edge_proxy: bool = False  # connect to the robot-side Edge Proxy
    edge_proxy_host: str = "localhost"
    # Edge Proxy WS server default is 8080 (robot-side).
    edge_proxy_port: int = 8080
    # LLM settings forwarded verbatim to LLMConfig (see _build_default_planner).
    llm_endpoint: str = "https://modelapi.klass.dev/v1/chat/completions"
    llm_model: str = "Qwen3-Next-80B-A3B-FP8"
    llm_api_key: Optional[str] = None  # sent as a Bearer token when set
    llm_timeout_s: int = 30
    llm_include_response_format: bool = True
    llm_accept_language: Optional[str] = None
    llm_max_tokens: Optional[int] = None
    llm_top_p: Optional[float] = None
    llm_top_k: Optional[int] = None
    llm_use_openai_sdk: bool = False
    llm_stream: bool = False


class OrchestratorServer:
    def __init__(
        self,
        config: ServerConfig,
        state_machine: Optional[OrchestratorStateMachine] = None,
        planner: Optional[PlannerService] = None,
        executor: Optional[ActionExecutor] = None,
    ) -> None:
        """Wire up the server and its optional subsystems.

        Args:
            config: Server configuration.
            state_machine: Injected state machine; a fresh one by default.
            planner: Injected planner; defaults to an LLM-backed planner when
                ``config.enable_planner`` is set, else ``None``.
            executor: Injected executor; defaults to ``ActionExecutor()`` when
                ``config.enable_executor`` is set, else ``None``.
        """
        self.config = config
        self.state_machine = state_machine or OrchestratorStateMachine()
        self.planner = planner if planner is not None else self._build_default_planner()
        self.executor = executor if executor is not None else (ActionExecutor() if config.enable_executor else None)
        self.logger = logging.getLogger("orchestrator.server")

        # Edge Proxy client integration
        self._edge_client: Optional[EdgeProxyClient] = None
        self._edge_client_connect_task: Optional[asyncio.Task[None]] = None
        # Plan cached from a user_confirmed message for the executor listener.
        self._pending_plan: Optional[Dict[str, Any]] = None

        # FR temporal analyzer — watches detection stream for consistent faces
        self._fr_analyzer = FRTemporalAnalyzer()

        # Dashboard WebSocket connections for confirmation broadcasts
        self._dashboard_clients: Set[WebSocketServerProtocol] = set()
        # Dedupe keys of navigation failures already reported to the dashboard.
        self._reported_nav_failures: Set[str] = set()
        # LLM timeout for nav-failure reasoning; clamped to at least 1 second.
        self._nav_reasoning_timeout_s = max(
            float(os.getenv("ORCHESTRATOR_NAV_REASONING_TIMEOUT_S", "8")),
            1.0,
        )

        # Set up state machine listeners
        if self.executor:
            self._setup_executor_listener()
            self._setup_edge_proxy_client()
            self.executor.register_broadcast_callback(self.broadcast_to_dashboard)

        # Set up listener for dashboard confirmation flow
        self._setup_confirmation_listener()

    def _build_default_planner(self) -> Optional[PlannerService]:
        if not self.config.enable_planner:
            return None

        llm_config = LLMConfig(
            endpoint=self.config.llm_endpoint,
            model=self.config.llm_model,
            api_key=self.config.llm_api_key,
            timeout_s=self.config.llm_timeout_s,
            include_response_format=self.config.llm_include_response_format,
            accept_language=self.config.llm_accept_language,
            max_tokens=self.config.llm_max_tokens,
            top_p=self.config.llm_top_p,
            top_k=self.config.llm_top_k,
            use_openai_sdk=self.config.llm_use_openai_sdk,
            stream=self.config.llm_stream,
        )
        return PlannerService(llm_client=LLMClient(config=llm_config))

    def _setup_executor_listener(self) -> None:
        """Set up state machine listener to trigger executor on EXECUTING_ACTIONS."""
        def on_state_transition(record: TransitionRecord) -> None:
            # Check if we're transitioning to EXECUTING_ACTIONS
            if record.next == OrchestratorState.EXECUTING_ACTIONS:
                self.logger.info(
                    "State transitioned to EXECUTING_ACTIONS, triggering executor"
                )
                # Get plan from payload or pending plan
                plan = None
                if record.payload:
                    plan = record.payload.get("plan")
                if plan is None:
                    plan = self._pending_plan
                if plan:
                    # Execute plan asynchronously
                    asyncio.create_task(self._execute_plan_and_transition(plan))
                else:
                    self.logger.warning("No plan to execute, transitioning to SPEAKING")
                    # No plan to execute, immediately transition to SPEAKING
                    asyncio.create_task(self._send_actions_complete())

        self.state_machine.add_listener(on_state_transition)

    def _setup_confirmation_listener(self) -> None:
        """Set up state machine listener to broadcast plans on AWAITING_CONFIRM."""
        def on_state_transition(record: TransitionRecord) -> None:
            # Check if we're transitioning to AWAITING_CONFIRM
            if record.next == OrchestratorState.AWAITING_CONFIRM:
                self.logger.info(
                    "State transitioned to AWAITING_CONFIRM, broadcasting plan to dashboard"
                )
                # Broadcast plan to all connected dashboard clients
                asyncio.create_task(self._broadcast_confirmation_request(record))

        self.state_machine.add_listener(on_state_transition)

    def _setup_edge_proxy_client(self) -> None:
        """Set up Edge Proxy client if enabled."""
        if not self.config.enable_edge_proxy:
            return

        if not self.executor:
            self.logger.warning("Edge Proxy enabled but executor not available")
            return

        try:
            edge_config = ClientConfig(
                host=self.config.edge_proxy_host,
                port=self.config.edge_proxy_port,
                ws_path="/edge",
            )
            self._edge_client = EdgeProxyClient(config=edge_config, logger=self.logger)
            self.executor.register_edge_proxy_client(self._edge_client)

            # Wire FR temporal analyzer to the edge proxy detection stream.
            self._fr_analyzer.on_consistent_detection(self._on_consistent_fr_detection)
            self._edge_client.on_fr_detections(self._on_fr_detections)
            self._edge_client.on_nav_status(self._on_nav_status)
            self._edge_client.on_event_log(self._on_edge_event_log)

            self.logger.info(
                "Edge Proxy client configured for %s:%s",
                self.config.edge_proxy_host,
                self.config.edge_proxy_port,
            )
        except Exception as exc:
            self.logger.error("Failed to set up Edge Proxy client: %s", exc)

    async def _on_fr_detections(self, msg: FRDetectionsMessage) -> None:
        """Forward FR detections from the edge proxy to the temporal analyzer."""
        # The analyzer accumulates sightings over time and invokes
        # _on_consistent_fr_detection once a face has been seen consistently.
        await self._fr_analyzer.feed(msg.detections, msg.timestamp)

    async def _on_nav_status(self, msg: NavStatusMessage) -> None:
        """Handle live nav_status updates from edge proxy."""
        # Live updates only feed the failure-reasoning pipeline here;
        # replayed nav events (see _on_edge_event_log) additionally update
        # executor navigation state.
        await self._handle_navigation_failure_reasoning(
            msg,
            source="live_nav_status",
            replay=False,
        )

    async def _on_edge_event_log(self, msg: EventLogMessage) -> None:
        """Handle replay-safe edge event_log messages."""
        await self.broadcast_to_dashboard(
            {
                "type": "edge_event",
                "event_id": msg.event_id,
                "event_type": msg.event_type,
                "request_id": msg.request_id,
                "status": msg.status,
                "timestamp": msg.timestamp,
                "replay": msg.replay,
                "payload": msg.payload,
            }
        )

        if msg.event_type == "scan_area_capture":
            await self.broadcast_to_dashboard(
                {
                    "type": "edge_artifact",
                    "event_id": msg.event_id,
                    "request_id": msg.request_id,
                    "status": msg.status,
                    "timestamp": msg.timestamp,
                    "replay": msg.replay,
                    "artifact": msg.payload,
                }
            )
            return

        if msg.event_type != "nav_status":
            return

        payload = msg.payload if isinstance(msg.payload, dict) else {}
        nav_msg = NavStatusMessage(
            request_id=msg.request_id,
            status=msg.status,
            destination=str(payload.get("destination", "")),
            progress=float(payload.get("progress", 0.0)),
            reason=payload.get("reason"),
            error_code=payload.get("error_code"),
        )

        # Replayed nav events should still update executor navigation state.
        if msg.replay and self.executor is not None:
            try:
                await self.executor._handle_nav_status(nav_msg)
            except Exception as exc:
                self.logger.warning("Failed applying replayed nav status to executor: %s", exc)

        await self._handle_navigation_failure_reasoning(
            nav_msg,
            source="replay_event_log" if msg.replay else "event_log",
            replay=msg.replay,
        )

    async def _handle_navigation_failure_reasoning(
        self,
        *,
        source: str,
        replay: bool,
        msg: NavStatusMessage = None,  # placeholder, replaced below
    ) -> None:
        raise NotImplementedError

    def _is_localization_failure(self, msg: NavStatusMessage) -> bool:
        if (msg.error_code or "").upper() == "NAV_LOCALIZATION_LOST":
            return True
        reason = (msg.reason or "").lower()
        if "localization" in reason:
            return True
        if "rosbridge_disconnected" in reason:
            return True
        return False

    async def _generate_navigation_failure_reasoning(
        self,
        *,
        destination: str,
        reason: Optional[str],
        error_code: Optional[str],
        source: str,
        replay: bool,
    ) -> str:
        """Produce an operator-facing explanation of a navigation failure.

        Asks the configured chat-completions endpoint for a short analysis;
        falls back to a deterministic template when endpoint/model are unset,
        the call fails or times out, or the response is empty.
        """
        endpoint = self.config.llm_endpoint
        model = self.config.llm_model
        if not endpoint or not model:
            return self._fallback_navigation_failure_reasoning(
                destination=destination,
                reason=reason,
                error_code=error_code,
                source=source,
                replay=replay,
            )

        def _call_llm() -> str:
            # Imported lazily so the server can run without `requests`
            # installed when reasoning is never triggered.
            import requests

            payload = {
                "model": model,
                "messages": [
                    {
                        "role": "system",
                        "content": (
                            "You are the navigation reasoning module for a field robot. "
                            "Explain localization failures for operators in one short paragraph."
                        ),
                    },
                    {
                        "role": "user",
                        "content": (
                            "Navigation failure details:\n"
                            f"- destination: {destination or 'unknown'}\n"
                            f"- reason: {reason or 'unknown'}\n"
                            f"- error_code: {error_code or 'unknown'}\n"
                            f"- source: {source}\n"
                            f"- replayed_after_reconnect: {replay}\n"
                            "Provide likely root cause and immediate next checks."
                        ),
                    },
                ],
                "temperature": 0.2,
                "max_tokens": 180,
            }
            headers = {"Content-Type": "application/json"}
            if self.config.llm_api_key:
                headers["Authorization"] = f"Bearer {self.config.llm_api_key}"

            response = requests.post(
                endpoint,
                json=payload,
                headers=headers,
                timeout=self._nav_reasoning_timeout_s,
            )
            response.raise_for_status()
            body = response.json()
            # OpenAI-style response shape: choices[0].message.content.
            content = (
                body.get("choices", [{}])[0]
                .get("message", {})
                .get("content", "")
            )
            if isinstance(content, str):
                return content.strip()
            return str(content).strip()

        try:
            # Outer timeout is slightly longer than the HTTP timeout so the
            # requests call normally times out first with a clearer error.
            reasoning = await asyncio.wait_for(
                asyncio.to_thread(_call_llm),
                timeout=self._nav_reasoning_timeout_s + 1.0,
            )
            if reasoning:
                return reasoning
        except Exception as exc:
            self.logger.warning("Navigation-failure LLM reasoning unavailable: %s", exc)

        return self._fallback_navigation_failure_reasoning(
            destination=destination,
            reason=reason,
            error_code=error_code,
            source=source,
            replay=replay,
        )

    def _fallback_navigation_failure_reasoning(
        self,
        *,
        destination: str,
        reason: Optional[str],
        error_code: Optional[str],
        source: str,
        replay: bool,
    ) -> str:
        replay_text = " after reconnect replay" if replay else ""
        return (
            f"Navigation to '{destination or 'unknown'}' failed{replay_text} with "
            f"{error_code or 'unknown_error'} ({reason or 'no reason provided'}). "
            "Likely localization/pose freshness issue; check rosbridge connection, "
            "robot pose updates, and waypoint_handler feedback before retrying."
        )

    async def _on_consistent_fr_detection(
        self, identity: str, confidence: float, duration: float
    ) -> None:
        """Called when the FR analyzer detects a consistent face."""
        self.logger.info(
            "Consistent detection: %s for %.1fs (confidence: %.2f)",
            identity,
            duration,
            confidence,
        )
        await self.broadcast_to_dashboard(
            {
                "type": "reasoning",
                "data": {
                    "intent": "person_detected",
                    "reasoning": (
                        f"Detected {identity} consistently for {duration:.0f}s"
                    ),
                    "planSummary": "",
                },
            }
        )

    async def _execute_plan_and_transition(self, plan: Dict[str, Any]) -> None:
        """Execute plan and transition state based on result.

        Runs the plan on the executor; if execution raises a dynamic-replan
        signal and a planner is available, refines the plan once and runs the
        refined plan. Emits actions_complete / actions_failed to the state
        machine according to the final report.

        Args:
            plan: Plan dictionary to execute.
        """
        if not self.executor:
            self.logger.error("Executor not available for plan execution")
            await self._send_actions_failed("executor_not_available")
            return

        replan_signal: Optional[Dict[str, Any]] = None

        def _on_replan_requested(signal: Dict[str, Any]) -> None:
            nonlocal replan_signal
            # Only honor the first replan request; cancel the current run so
            # the refined plan can take over.
            if replan_signal is None:
                replan_signal = signal
                self.logger.info(
                    "Dynamic replanning requested at step=%s reasons=%s",
                    signal.get("step_id"),
                    signal.get("reasons"),
                )
                self.executor.cancel("dynamic_replan_requested")

        try:
            self.logger.info("Executing plan: %s", plan.get("plan_id", "unknown"))
            self.executor.reset_cancel()
            report = await self.executor.execute_plan(
                plan,
                on_replan_requested=_on_replan_requested,
            )

            # If scan triggered replan and planner is enabled, refine and continue.
            if replan_signal is not None and self.planner is not None:
                await self.broadcast_to_dashboard(
                    {
                        "type": "plan_update",
                        "phase": "dynamic_replan",
                        "trigger": replan_signal,
                    }
                )
                try:
                    # NOTE(review): refinement uses an empty/default context —
                    # confirm the original request context is not needed here.
                    base_context = _build_context({})
                    refined_graph = await asyncio.to_thread(
                        self.planner.refine_plan,
                        plan.get("intent", ""),
                        base_context,
                        {
                            "trigger": replan_signal,
                            "previous_plan_id": plan.get("plan_id"),
                            "previous_errors": report.errors,
                        },
                    )
                    self.logger.info(
                        "Dynamic replan generated: %s",
                        refined_graph.plan.get("plan_id", "unknown"),
                    )
                    # Re-run without a replan callback: one refinement cycle
                    # per plan execution.
                    self.executor.reset_cancel()
                    report = await self.executor.execute_plan(refined_graph.plan)
                except Exception as exc:
                    self.logger.error("Dynamic replan failed: %s", exc)
                    await self._send_actions_failed(f"dynamic_replan_failed: {exc}")
                    return

            if report.success:
                self.logger.info("Plan execution completed successfully")
                await self._send_actions_complete()
            else:
                self.logger.warning("Plan execution failed: %s", report.errors)
                await self._send_actions_failed(report.errors[0] if report.errors else "unknown_error")
        except Exception as exc:
            self.logger.error("Plan execution error: %s", exc)
            await self._send_actions_failed(str(exc))
    async def _send_actions_complete(self) -> None:
        """Send actions_complete event to state machine."""
        try:
            self.state_machine.handle_event("actions_complete")
        except InvalidTransition as exc:
            self.logger.warning("Invalid transition on actions_complete: %s", exc)

    async def _send_actions_failed(self, error: str) -> None:
        """Send actions_failed event to state machine.

        Args:
            error: Error message describing the failure.
        """
        try:
            self.state_machine.handle_event("actions_failed", {"error": error})
        except InvalidTransition as exc:
            self.logger.warning("Invalid transition on actions_failed: %s", exc)

    async def _broadcast_confirmation_request(self, record: TransitionRecord) -> None:
        """Broadcast plan confirmation request to all connected dashboard clients.

        Args:
            record: Transition record containing the plan in its payload.
        """
        if not self._dashboard_clients:
            self.logger.warning("No dashboard clients connected to send confirmation request")
            return

        # Extract plan from transition record payload
        plan = None
        if record.payload:
            plan = record.payload.get("plan")
        if plan is None:
            plan = self._pending_plan
        if plan is None:
            self.logger.warning("No plan available for confirmation request")
            return

        # Create confirmation request message
        message = {
            "type": "user_confirmation",
            "plan": plan,
            "timestamp": record.timestamp,
        }

        # Broadcast to all dashboard clients
        disconnected = set()
        for client in self._dashboard_clients:
            try:
                await client.send(json.dumps(message))
                self.logger.debug("Sent confirmation request to dashboard client %s", client.remote_address)
            except Exception as exc:
                self.logger.warning("Failed to send confirmation request to %s: %s", client.remote_address, exc)
                disconnected.add(client)

        # Remove disconnected clients
        self._dashboard_clients -= disconnected
        if disconnected:
            self.logger.info("Removed %d disconnected dashboard clients", len(disconnected))

    async def broadcast_to_dashboard(self, message: Dict[str, Any]) -> None:
        """Broadcast a message to all connected dashboard WebSocket clients.

        Serializes the message to JSON and sends it to every client in
        ``_dashboard_clients``. Removes any clients that fail to receive.

        Args:
            message: Dictionary to serialize and broadcast.
        """
        if not self._dashboard_clients:
            self.logger.debug("broadcast_to_dashboard: no clients connected")
            return

        disconnected = set()
        for client in self._dashboard_clients:
            try:
                await client.send(json.dumps(message))
                self.logger.debug(
                    "broadcast_to_dashboard: sent %s to %s",
                    message.get("type"),
                    client.remote_address,
                )
            except Exception as exc:
                self.logger.warning(
                    "broadcast_to_dashboard: failed to send to %s: %s",
                    client.remote_address,
                    exc,
                )
                disconnected.add(client)

        self._dashboard_clients -= disconnected
        if disconnected:
            self.logger.info(
                "broadcast_to_dashboard: removed %d disconnected clients",
                len(disconnected),
            )

    async def serve(self) -> None:
        """Run the WebSocket server until the surrounding task is cancelled.

        Starts the Edge Proxy connection in the background (if configured),
        serves WebSocket and health-check traffic forever, and performs
        best-effort Edge Proxy cleanup on shutdown.
        """
        self.logger.info(
            "Starting orchestrator server on %s:%s", self.config.host, self.config.port
        )
        if self._edge_client is not None and self._edge_client_connect_task is None:
            # Start connecting in the background so server startup isn't blocked.
            self._edge_client_connect_task = asyncio.create_task(self._edge_client.connect())
            self._edge_client_connect_task.add_done_callback(self._on_edge_client_connect_done)

        try:
            async with websockets.serve(
                self._handler,
                self.config.host,
                self.config.port,
                ping_interval=self.config.ping_interval,
                ping_timeout=self.config.ping_timeout,
                process_request=self._process_request,
            ):
                # Block forever; only cancellation of this coroutine exits.
                await asyncio.Future()
        finally:
            # Best-effort cleanup on shutdown/cancel.
            if self._edge_client is not None:
                try:
                    await self._edge_client.disconnect()
                except Exception as exc:
                    self.logger.warning("Edge Proxy disconnect failed: %s", exc)

            if self._edge_client_connect_task is not None and not self._edge_client_connect_task.done():
                self._edge_client_connect_task.cancel()
                try:
                    await self._edge_client_connect_task
                except asyncio.CancelledError:
                    pass
                except Exception as exc:
                    self.logger.warning("Edge Proxy connect task error during shutdown: %s", exc)

    def _on_edge_client_connect_done(self, task: asyncio.Task[None]) -> None:
        try:
            task.result()
        except asyncio.CancelledError:
            return
        except Exception as exc:
            # If reconnect is enabled (default), connect() usually won't raise.
            self.logger.error("Edge Proxy connect task failed: %s", exc)

    async def _handler(self, websocket: WebSocketServerProtocol, path: str) -> None:
        """Per-connection WebSocket handler, routed by request path.

        "/" and ``ws_path`` serve primary clients (request/response JSON);
        ``dashboard_ws_path`` serves dashboards (initial state push plus
        confirmation commands); any other path is closed with policy code
        1008. NOTE(review): the ``(websocket, path)`` signature is the legacy
        websockets API — confirm against the pinned websockets version.
        """
        # Handle main WebSocket connections
        if path in ("/", self.config.ws_path):
            self.logger.info("Client connected from %s", websocket.remote_address)
            try:
                async for message in websocket:
                    response = await self._handle_message(message)
                    if response is not None:
                        await websocket.send(json.dumps(response))
            except Exception as exc:
                self.logger.warning("Connection error: %s", exc)
            finally:
                self.logger.info("Client disconnected from %s", websocket.remote_address)
            return

        # Handle dashboard WebSocket connections
        if path == self.config.dashboard_ws_path:
            self.logger.info("Dashboard client connected from %s", websocket.remote_address)
            self._dashboard_clients.add(websocket)
            try:
                # Send initial state to dashboard
                await websocket.send(json.dumps({
                    "type": "state_update",
                    "state": self.state_machine.state.value,
                }))
                # Keep connection alive for dashboard commands
                async for message in websocket:
                    response = await self._handle_dashboard_message(message)
                    if response is not None:
                        await websocket.send(json.dumps(response))
            except Exception as exc:
                self.logger.warning("Dashboard connection error: %s", exc)
            finally:
                self._dashboard_clients.discard(websocket)
                self.logger.info("Dashboard client disconnected from %s", websocket.remote_address)
            return

        await websocket.close(code=1008, reason="Unsupported path")

    async def _handle_message(self, message: Any) -> Optional[Dict[str, Any]]:
        if isinstance(message, (bytes, bytearray)):
            try:
                message = message.decode("utf-8", errors="replace")
            except Exception:
                return {"type": "error", "error": "invalid_bytes"}

        if not isinstance(message, str):
            return {"type": "error", "error": "unsupported_message"}

        try:
            payload = json.loads(message)
        except json.JSONDecodeError:
            return {"type": "error", "error": "invalid_json"}

        if not isinstance(payload, dict):
            return {"type": "error", "error": "invalid_payload"}

        msg_type = payload.get("type")
        if not msg_type:
            return {"type": "error", "error": "missing_type"}

        if msg_type == "ping":
            return {"type": "pong", "state": self.state_machine.state.value}

        if msg_type == "transcript":
            if payload.get("is_final"):
                return self._advance_state("final_transcript", payload)
            return {"type": "ack", "state": self.state_machine.state.value}

        if msg_type == "plan_request":
            if not self.planner:
                return {"type": "error", "error": "planner_disabled"}
            intent = payload.get("intent", "").strip()
            if not intent:
                return {"type": "error", "error": "missing_intent"}
            request = PlanRequest(
                intent=intent,
                context=_build_context(payload.get("context")),
                request_id=payload.get("request_id", "request"),
            )
            result = await asyncio.to_thread(self.planner.plan, request)
            return {"type": "plan_result", **_plan_result_payload(result)}

        if msg_type == "execute_plan":
            if not self.executor:
                return {"type": "error", "error": "executor_disabled"}
            plan = payload.get("plan")
            if plan is None:
                return {"type": "error", "error": "missing_plan"}
            report = await self.executor.execute_plan(plan, context=payload.get("context"))
            return {"type": "execution_report", **_report_payload(report)}

        if msg_type == "user_confirmed":
            # Store plan for executor and transition state
            plan = payload.get("plan")
            if plan:
                self._pending_plan = plan
            return self._advance_state("user_confirmed", {"plan": plan})

        if msg_type == "user_rejected":
            return self._advance_state("user_rejected", payload)

        if msg_type in {
            "wake_word_detected",
            "llm_response",
            "action_complete",
            "tts_complete",
            "barge_in",
            "timeout",
            "processing_failed",
            "reset",
        }:
            return self._advance_state(msg_type, payload)

        return {"type": "error", "error": "unknown_message", "detail": msg_type}

    async def _handle_dashboard_message(self, message: Any) -> Optional[Dict[str, Any]]:
        """Handle messages from dashboard clients.

        Dashboard clients can send confirmation responses (user_confirmed/user_rejected)
        that are forwarded to the state machine.

        Args:
            message: WebSocket message from dashboard.

        Returns:
            Optional response dictionary.
        """
        if isinstance(message, (bytes, bytearray)):
            try:
                message = message.decode("utf-8", errors="replace")
            except Exception:
                return {"type": "error", "error": "invalid_bytes"}

        if not isinstance(message, str):
            return {"type": "error", "error": "unsupported_message"}

        try:
            payload = json.loads(message)
        except json.JSONDecodeError:
            return {"type": "error", "error": "invalid_json"}

        if not isinstance(payload, dict):
            return {"type": "error", "error": "invalid_payload"}

        msg_type = payload.get("type")
        if not msg_type:
            return {"type": "error", "error": "missing_type"}

        # Dashboard clients can send ping to check connection
        if msg_type == "ping":
            return {"type": "pong", "state": self.state_machine.state.value}

        # Dashboard clients can send confirmation responses
        if msg_type == "user_confirmed":
            plan = payload.get("plan")
            if plan:
                self._pending_plan = plan
            return self._advance_state("user_confirmed", {"plan": plan})

        if msg_type == "user_rejected":
            return self._advance_state("user_rejected", payload)

        return {"type": "error", "error": "unknown_message", "detail": msg_type}

    def _advance_state(self, event: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        try:
            new_state = self.state_machine.handle_event(event, payload)
        except InvalidTransition as exc:
            return {
                "type": "error",
                "error": "invalid_transition",
                "detail": str(exc),
                "state": self.state_machine.state.value,
            }
        return {"type": "state_update", "event": event, "state": new_state.value}

    def _process_request(
        self, path: str, _request_headers: Any
    ) -> Optional[Tuple[HTTPStatus, list, bytes]]:
        if path == self.config.health_path:
            body = b"OK"
            headers = [
                ("Content-Type", "text/plain"),
                ("Content-Length", str(len(body))),
            ]
            return HTTPStatus.OK, headers, body

        if path not in ("/", self.config.ws_path, self.config.dashboard_ws_path):
            body = b"Not Found"
            headers = [
                ("Content-Type", "text/plain"),
                ("Content-Length", str(len(body))),
            ]
            return HTTPStatus.NOT_FOUND, headers, body

        return None


def _build_context(payload: Optional[Dict[str, Any]]) -> PlanContext:
    """Build a PlanContext from a raw request payload, filling defaults.

    A missing/None payload yields a fully-defaulted context; an explicit
    but falsy ``available_actions`` also falls back to the default set.
    """
    data = payload or {}
    default_actions = [
        "CHECK_BATTERY",
        "NAVIGATE",
        "SCAN_AREA",
        "VERIFY_OBJECT",
        "SPEAK",
        "WAIT",
        "IDENTIFY_PERSON",
        "ALERT_OPERATOR",
    ]
    return PlanContext(
        robot_id=data.get("robot_id", "robot"),
        current_location=data.get("current_location", "unknown"),
        battery_level=int(data.get("battery_level", 100)),
        available_actions=data.get("available_actions") or default_actions,
        venue_id=data.get("venue_id"),
        mission_type=data.get("mission_type"),
        additional_constraints=data.get("additional_constraints", {}),
    )


def _report_payload(report: ExecutionReport) -> Dict[str, Any]:
    return {
        "plan_id": report.plan_id,
        "success": report.success,
        "errors": report.errors,
        "results": {
            step_id: {
                "action": outcome.action,
                "status": outcome.status,
                "output": outcome.output,
                "error": outcome.error,
            }
            for step_id, outcome in report.results.items()
        },
    }


def _plan_result_payload(result: Any) -> Dict[str, Any]:
    return {
        "success": result.success,
        "plan": result.plan,
        "is_safe_noop": result.is_safe_noop,
        "attempts": result.attempts,
        "total_latency_ms": result.total_latency_ms,
        "trace_id": result.trace_id,
        "failure_reason": result.failure_reason,
    }


def _env_flag(name: str, default: bool = False) -> bool:
    value = os.getenv(name)
    if value is None:
        return default
    return value.strip().lower() in {"1", "true", "yes", "on"}


def _optional_env_number(name: str, cast):
    """Read env var *name* and convert it with *cast*; None when unset or empty."""
    raw = os.getenv(name)
    # Falsy check (not just None) preserves the old behavior where an
    # empty string also meant "not configured".
    return cast(raw) if raw else None


def main() -> None:
    """CLI entry point: build a ServerConfig and run the orchestrator server.

    Every flag falls back to a corresponding ORCHESTRATOR_* environment
    variable so deployments can configure the server without command-line
    arguments. Blocks until the event loop exits; Ctrl-C shuts down quietly.
    """
    parser = argparse.ArgumentParser(description="Orchestrator WebSocket server")
    parser.add_argument("--host", default=os.getenv("ORCHESTRATOR_HOST", "0.0.0.0"))
    parser.add_argument(
        "--port",
        type=int,
        default=int(os.getenv("ORCHESTRATOR_PORT", "8000")),
    )
    parser.add_argument(
        "--ws-path",
        default=os.getenv("ORCHESTRATOR_WS_PATH", "/ws"),
    )
    parser.add_argument(
        "--dashboard-ws-path",
        default=os.getenv("ORCHESTRATOR_DASHBOARD_WS_PATH", "/dashboard"),
    )
    parser.add_argument(
        "--enable-planner",
        action="store_true",
        default=_env_flag("ORCHESTRATOR_ENABLE_PLANNER", False),
    )
    parser.add_argument(
        "--disable-executor",
        action="store_true",
        default=_env_flag("ORCHESTRATOR_DISABLE_EXECUTOR", False),
    )
    parser.add_argument(
        "--enable-edge-proxy",
        action="store_true",
        default=_env_flag("ORCHESTRATOR_ENABLE_EDGE_PROXY", False),
    )
    parser.add_argument(
        "--edge-proxy-host",
        default=os.getenv("ORCHESTRATOR_EDGE_PROXY_HOST", "localhost"),
    )
    parser.add_argument(
        "--edge-proxy-port",
        type=int,
        default=int(os.getenv("ORCHESTRATOR_EDGE_PROXY_PORT", "8080")),
    )
    parser.add_argument(
        "--log-level",
        default=os.getenv("ORCHESTRATOR_LOG_LEVEL", "INFO"),
    )
    parser.add_argument(
        "--llm-endpoint",
        default=os.getenv("ORCHESTRATOR_LLM_ENDPOINT", "https://modelapi.klass.dev/v1/chat/completions"),
    )
    parser.add_argument(
        "--llm-model",
        default=os.getenv("ORCHESTRATOR_LLM_MODEL", "Qwen3-Next-80B-A3B-FP8"),
    )
    parser.add_argument(
        "--llm-api-key",
        default=os.getenv("ORCHESTRATOR_LLM_API_KEY"),
    )
    parser.add_argument(
        "--llm-timeout-s",
        type=int,
        default=int(os.getenv("ORCHESTRATOR_LLM_TIMEOUT_S", "30")),
    )
    parser.add_argument(
        "--llm-no-response-format",
        action="store_true",
        default=_env_flag("ORCHESTRATOR_LLM_NO_RESPONSE_FORMAT", False),
    )
    parser.add_argument(
        "--llm-accept-language",
        default=os.getenv("ORCHESTRATOR_LLM_ACCEPT_LANGUAGE"),
    )
    # Optional numeric tuning knobs: None (not configured) unless the env var
    # is set to a non-empty value.
    parser.add_argument(
        "--llm-max-tokens",
        type=int,
        default=_optional_env_number("ORCHESTRATOR_LLM_MAX_TOKENS", int),
    )
    parser.add_argument(
        "--llm-top-p",
        type=float,
        default=_optional_env_number("ORCHESTRATOR_LLM_TOP_P", float),
    )
    parser.add_argument(
        "--llm-top-k",
        type=int,
        default=_optional_env_number("ORCHESTRATOR_LLM_TOP_K", int),
    )
    parser.add_argument(
        "--llm-use-openai-sdk",
        action="store_true",
        default=_env_flag("ORCHESTRATOR_LLM_USE_OPENAI_SDK", False),
    )
    parser.add_argument(
        "--llm-stream",
        action="store_true",
        default=_env_flag("ORCHESTRATOR_LLM_STREAM", False),
    )

    args = parser.parse_args()
    # Uppercase so lowercase values (e.g. "debug" from the environment) are
    # accepted; logging.basicConfig rejects lowercase level names.
    logging.basicConfig(level=args.log_level.upper())

    config = ServerConfig(
        host=args.host,
        port=args.port,
        ws_path=args.ws_path,
        dashboard_ws_path=args.dashboard_ws_path,
        enable_planner=args.enable_planner,
        # CLI exposes the negative flag; config stores the positive capability.
        enable_executor=not args.disable_executor,
        enable_edge_proxy=args.enable_edge_proxy,
        edge_proxy_host=args.edge_proxy_host,
        edge_proxy_port=args.edge_proxy_port,
        llm_endpoint=args.llm_endpoint,
        llm_model=args.llm_model,
        llm_api_key=args.llm_api_key,
        llm_timeout_s=args.llm_timeout_s,
        llm_include_response_format=not args.llm_no_response_format,
        llm_accept_language=args.llm_accept_language,
        llm_max_tokens=args.llm_max_tokens,
        llm_top_p=args.llm_top_p,
        llm_top_k=args.llm_top_k,
        llm_use_openai_sdk=args.llm_use_openai_sdk,
        llm_stream=args.llm_stream,
    )

    server = OrchestratorServer(config)

    try:
        asyncio.run(server.serve())
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server; exit without a traceback.
        return


# Allow running this module directly as the server entry point.
if __name__ == "__main__":
    main()
