import asyncio
import json
import random
from typing import Any, Dict, Optional, Union

from loguru import logger
from openai import AsyncOpenAI, OpenAIError

from constants import openai_api_token

# ───────────────────────── Config ─────────────────────────
_API_KEY = openai_api_token  # OpenAI API key sourced from project constants (.env-backed)
_MODEL = "gpt-4o-mini"  # model name used for every chat-completions call


# ─────────────────────── Singleton client ───────────────────────
# Process-wide client, created lazily by _ensure_client() so that importing
# this module never fails even when the API key is absent.
_client: Optional[AsyncOpenAI] = None

def _ensure_client() -> AsyncOpenAI:
    """Return the process-wide AsyncOpenAI client, building it on first use.

    Raises:
        RuntimeError: when no API key is configured.
    """
    global _client
    if _client is not None:
        return _client
    if not _API_KEY:
        raise RuntimeError("OPENAI_API_KEY не задан: добавь ключ в .env и/или src/constants.py")
    _client = AsyncOpenAI(api_key=_API_KEY)
    return _client


# ───────────────────────── Utilities ─────────────────────────
def _json_coerce(text: str) -> Dict[str, Any]:
    """Parse a JSON object out of *text*, tolerating surrounding prose.

    When both an opening '{' and a later closing '}' are present, only the
    substring between the first '{' and the last '}' is parsed; otherwise
    the stripped text is parsed as-is.
    """
    candidate = (text or "").strip()
    first, last = candidate.find("{"), candidate.rfind("}")
    if -1 < first < last:
        candidate = candidate[first : last + 1]
    return json.loads(candidate)

async def _backoff_retry(coro_factory, *, attempts: int = 4, base: float = 0.6) -> Any:
    """Execute an async factory with exponential backoff + jitter.

    Args:
        coro_factory: zero-argument callable returning a fresh awaitable
            for each attempt.
        attempts: total number of tries (>= 1).
        base: base delay in seconds; attempt *i* waits roughly
            ``base * 2**i`` seconds, scaled by ±20% jitter.

    Returns:
        The first successful result of ``await coro_factory()``.

    Raises:
        The last caught exception when every attempt fails.
        RuntimeError: only reachable when ``attempts`` < 1.
    """
    last_err: Optional[Exception] = None
    for i in range(attempts):
        try:
            return await coro_factory()
        except (OpenAIError, TimeoutError, asyncio.TimeoutError) as e:
            last_err = e
            # Fix: do not sleep after the final failed attempt — there is no
            # retry left to wait for, so surface the error immediately.
            if i == attempts - 1:
                break
            delay = base * (2 ** i) * (0.8 + 0.4 * random.random())
            await asyncio.sleep(delay)
    if last_err is not None:
        raise last_err
    raise RuntimeError("Unknown error in backoff retry")


# ───────────────────────── Public API ─────────────────────────
async def ask_llm(
    system_prompt: str,
    user_prompt: str,
    *,
    expect_json: bool = False,
    temperature: float = 0.2,
    max_tokens: int = 800,
    timeout_s: int = 25,
) -> Union[str, Dict[str, Any]]:
    """Send a system+user prompt pair to the chat-completions API.

    Args:
        system_prompt: content of the "system" message.
        user_prompt: content of the "user" message.
        expect_json: when True, request a JSON-object response from the
            model and return the parsed dict instead of raw text.
        temperature: sampling temperature, passed through to the API.
        max_tokens: completion token limit, passed through to the API.
        timeout_s: per-attempt timeout in seconds (each retry gets its own).

    Returns:
        The model's reply as a stripped string, or a parsed dict when
        ``expect_json`` is True.

    Raises:
        RuntimeError: if no API key is configured.
        json.JSONDecodeError: if ``expect_json`` is True but the reply
            contains no valid JSON object.
    """
    client = _ensure_client()

    # Fix: loguru formats extra args with str.format-style "{}" placeholders;
    # printf-style "%s" would be emitted literally in the log line.
    logger.debug("LLM call → model={}, json={}", _MODEL, expect_json)

    def _build_kwargs() -> Dict[str, Any]:
        """Assemble the keyword arguments for chat.completions.create."""
        kwargs: Dict[str, Any] = {
            "model": _MODEL,
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            "temperature": temperature,
            "max_tokens": max_tokens,
        }
        if expect_json:
            # Enforce structured output
            kwargs["response_format"] = {"type": "json_object"}
        return kwargs

    async def _call():
        # Hard per-attempt deadline; _backoff_retry owns the retry loop.
        return await asyncio.wait_for(
            client.chat.completions.create(**_build_kwargs()),
            timeout=timeout_s,
        )

    resp = await _backoff_retry(_call)
    content = (resp.choices[0].message.content or "").strip()

    if expect_json:
        return _json_coerce(content)
    return content


# Optional sugar: explicit text/JSON helpers
async def ask_text(system_prompt: str, user_prompt: str, **kwargs) -> str:
    """Shorthand for ``ask_llm(..., expect_json=False)``: always returns plain text."""
    reply = await ask_llm(system_prompt, user_prompt, expect_json=False, **kwargs)
    return reply

async def ask_json(system_prompt: str, user_prompt: str, **kwargs) -> Dict[str, Any]:
    """Shorthand for ``ask_llm(..., expect_json=True)``: always returns a parsed dict."""
    reply = await ask_llm(system_prompt, user_prompt, expect_json=True, **kwargs)
    return reply

def validate_strategy_answer(**kwargs):
    """Placeholder for strategy-answer validation.

    Currently a no-op that accepts any keyword arguments and returns None.
    TODO: implement the validation logic, or remove this stub if unused.
    """
    pass
