import os
from typing import TYPE_CHECKING
from unittest import mock

import pytest
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig, RunnableSequence
from pydantic import SecretStr

from langchain.chat_models import __all__, init_chat_model
from langchain.chat_models.base import _SUPPORTED_PROVIDERS, _attempt_infer_model_provider

if TYPE_CHECKING:
    from langchain_core.language_models import BaseChatModel

EXPECTED_ALL = [
    "init_chat_model",
    "BaseChatModel",
]


def test_all_imports() -> None:
    """Test that all expected imports are present in the module's __all__."""
    assert set(__all__) == set(EXPECTED_ALL)


@pytest.mark.requires(
    "langchain_openai",
    "langchain_anthropic",
    "langchain_fireworks",
    "langchain_groq",
)
@pytest.mark.parametrize(
    ("model_name", "model_provider"),
    [
        ("gpt-4o", "openai"),
        ("claude-opus-4-1", "anthropic"),
        ("accounts/fireworks/models/mixtral-8x7b-instruct", "fireworks"),
        ("mixtral-8x7b-32768", "groq"),
    ],
)
def test_init_chat_model(model_name: str, model_provider: str) -> None:
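    """Test that the `model_provider` kwarg and `provider:model` string are equivalent."""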
    llm1: BaseChatModel = init_chat_model(
        model_name,
        model_provider=model_provider,
        api_key="foo",
    )
    llm2: BaseChatModel = init_chat_model(
        f"{model_provider}:{model_name}",
        api_key="foo",
    )
    assert llm1.dict() == llm2.dict()


def test_init_missing_dep() -> None:
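    """Test that ImportError is raised when the provider package is not installed."""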
    with pytest.raises(ImportError):
        init_chat_model("mixtral-8x7b-32768", model_provider="groq")


def test_init_unknown_provider() -> None:
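    """Test that an unsupported model provider raises a ValueError."""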
    with pytest.raises(ValueError, match="Unsupported provider='bar'"):
        init_chat_model("foo", model_provider="bar")


def test_supported_providers_is_sorted() -> None:
    """Test that supported providers are sorted alphabetically."""
    assert list(_SUPPORTED_PROVIDERS) == sorted(_SUPPORTED_PROVIDERS.keys())


@pytest.mark.parametrize(
    ("model_name", "expected_provider"),
    [
        ("gpt-4o", "openai"),
        ("o1-mini", "openai"),
        ("o3-mini", "openai"),
        ("chatgpt-4o-latest", "openai"),
        ("text-davinci-003", "openai"),
        ("claude-3-haiku-20240307", "anthropic"),
        ("command-r-plus", "cohere"),
        ("accounts/fireworks/models/mixtral-8x7b-instruct", "fireworks"),
        ("Accounts/Fireworks/models/mixtral-8x7b-instruct", "fireworks"),
        ("gemini-1.5-pro", "google_vertexai"),
        ("gemini-2.5-pro", "google_vertexai"),
        ("gemini-3-pro-preview", "google_vertexai"),
        ("amazon.titan-text-express-v1", "bedrock"),
        ("Amazon.Titan-Text-Express-v1", "bedrock"),
        ("anthropic.claude-v2", "bedrock"),
        ("Anthropic.Claude-V2", "bedrock"),
        ("mistral-small", "mistralai"),
        ("mixtral-8x7b", "mistralai"),
        ("deepseek-v3", "deepseek"),
        ("grok-beta", "xai"),
        ("sonar-small", "perplexity"),
        ("solar-pro", "upstage"),
    ],
)
def test_attempt_infer_model_provider(model_name: str, expected_provider: str) -> None:
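    """Test inferring the model provider from well-known model name patterns."""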
    assert _attempt_infer_model_provider(model_name) == expected_provider
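

# A minimal sketch of the case-insensitive prefix matching exercised above.
# Illustrative only (assumption): the real inference rules live in
# `langchain.chat_models.base._attempt_infer_model_provider`; this covers just
# a handful of the patterns from the parametrized cases.
def _infer_provider_sketch(model_name: str) -> str | None:
    lowered = model_name.lower()  # mixed-case names above still resolve
    prefixes = {
        "gpt-": "openai",
        "claude-": "anthropic",
        "gemini-": "google_vertexai",
        "amazon.": "bedrock",
        "anthropic.": "bedrock",
    }
    for prefix, provider in prefixes.items():
        if lowered.startswith(prefix):
            return provider
    return None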


@pytest.mark.requires("langchain_openai")
@mock.patch.dict(
    os.environ,
    {"OPENAI_API_KEY": "foo", "ANTHROPIC_API_KEY": "bar"},
    clear=True,
)
def test_configurable() -> None:
    """Test configurable chat model behavior without default parameters.

    Verifies that a configurable chat model initialized without default parameters:
    - Has access to all standard runnable methods (`invoke`, `stream`, etc.)
    - Blocks access to non-configurable methods until configuration is provided
    - Supports declarative operations (`bind_tools`) without mutating original model
    - Can chain declarative operations and configuration to access full functionality
    - Properly resolves to the configured model type when parameters are provided

    Example:
    ```python
    # This creates a configurable model without specifying which model
    model = init_chat_model()

    # This will FAIL - no model specified yet
    model.get_num_tokens("hello")  # AttributeError!

    # This works - provides model at runtime
    response = model.invoke("Hello", config={"configurable": {"model": "gpt-4o"}})
    ```
    """
    model = init_chat_model()

    for method in (
        "invoke",
        "ainvoke",
        "batch",
        "abatch",
        "stream",
        "astream",
        "batch_as_completed",
        "abatch_as_completed",
    ):
        assert hasattr(model, method)

    # Doesn't have access to non-configurable, non-declarative methods until a
    # config is provided.
    for method in ("get_num_tokens", "get_num_tokens_from_messages"):
        with pytest.raises(AttributeError):
            getattr(model, method)

    # Can call declarative methods even without a default model.
    model_with_tools = model.bind_tools(
        [{"name": "foo", "description": "foo", "parameters": {}}],
    )

    # Check that original model wasn't mutated by declarative operation.
    assert model._queued_declarative_operations == []

    # Can iteratively call declarative methods.
    model_with_config = model_with_tools.with_config(
        RunnableConfig(tags=["foo"]),
        configurable={"model": "gpt-4o"},
    )
    assert model_with_config.model_name == "gpt-4o"  # type: ignore[attr-defined]

    for method in ("get_num_tokens", "get_num_tokens_from_messages"):
        assert hasattr(model_with_config, method)

    assert model_with_config.model_dump() == {  # type: ignore[attr-defined]
        "name": None,
        "bound": {
            "name": None,
            "disable_streaming": False,
            "disabled_params": None,
            "model_name": "gpt-4o",
            "temperature": None,
            "model_kwargs": {},
            "openai_api_key": SecretStr("foo"),
            "openai_api_base": None,
            "openai_organization": None,
            "openai_proxy": None,
            "output_version": None,
            "request_timeout": None,
            "max_retries": None,
            "presence_penalty": None,
            "reasoning": None,
            "reasoning_effort": None,
            "verbosity": None,
            "frequency_penalty": None,
            "include": None,
            "seed": None,
            "service_tier": None,
            "logprobs": None,
            "top_logprobs": None,
            "logit_bias": None,
            "streaming": False,
            "n": None,
            "top_p": None,
            "truncation": None,
            "max_tokens": None,
            "tiktoken_model_name": None,
            "default_headers": None,
            "default_query": None,
            "stop": None,
            "store": None,
            "extra_body": None,
            "include_response_headers": False,
            "stream_usage": True,
            "use_previous_response_id": False,
            "use_responses_api": None,
        },
        "kwargs": {
            "tools": [
                {
                    "type": "function",
                    "function": {"name": "foo", "description": "foo", "parameters": {}},
                },
            ],
        },
        "config": {
            "callbacks": None,
            "configurable": {},
            "metadata": {"model": "gpt-4o"},
            "recursion_limit": 25,
            "tags": ["foo"],
        },
        "config_factories": [],
        "custom_input_type": None,
        "custom_output_type": None,
    }


@pytest.mark.requires("langchain_openai", "langchain_anthropic")
@mock.patch.dict(
    os.environ,
    {"OPENAI_API_KEY": "foo", "ANTHROPIC_API_KEY": "bar"},
    clear=True,
)
def test_configurable_with_default() -> None:
    """Test configurable chat model behavior with default parameters.

    Verifies that a configurable chat model initialized with default parameters:
    - Has access to all standard runnable methods (`invoke`, `stream`, etc.)
    - Provides immediate access to non-configurable methods (e.g. `get_num_tokens`)
    - Supports model switching through runtime configuration using `config_prefix`
    - Maintains proper model identity and attributes when reconfigured
    - Can be used in chains with different model providers via configuration

    Example:
    ```python
    # This creates a configurable model with a default model ("gpt-4o")
    model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")

    # This works immediately - uses default gpt-4o
    tokens = model.get_num_tokens("hello")

    # This also works - switches to Claude at runtime via the "bar" config prefix
    response = model.invoke(
        "Hello", config={"configurable": {"bar_model": "claude-sonnet-4-5-20250929"}}
    )
    ```
    """
    model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
    for method in (
        "invoke",
        "ainvoke",
        "batch",
        "abatch",
        "stream",
        "astream",
        "batch_as_completed",
        "abatch_as_completed",
    ):
        assert hasattr(model, method)

    # Does have access to non-configurable, non-declarative methods since default
    # params are provided.
    for method in ("get_num_tokens", "get_num_tokens_from_messages", "dict"):
        assert hasattr(model, method)

    assert model.model_name == "gpt-4o"

    model_with_tools = model.bind_tools(
        [{"name": "foo", "description": "foo", "parameters": {}}],
    )

    model_with_config = model_with_tools.with_config(
        RunnableConfig(tags=["foo"]),
        configurable={"bar_model": "claude-sonnet-4-5-20250929"},
    )

    assert model_with_config.model == "claude-sonnet-4-5-20250929"  # type: ignore[attr-defined]

    assert model_with_config.model_dump() == {  # type: ignore[attr-defined]
        "name": None,
        "bound": {
            "name": None,
            "disable_streaming": False,
            "effort": None,
            "model": "claude-sonnet-4-5-20250929",
            "mcp_servers": None,
            "max_tokens": 64000,
            "temperature": None,
            "thinking": None,
            "top_k": None,
            "top_p": None,
            "default_request_timeout": None,
            "max_retries": 2,
            "stop_sequences": None,
            "anthropic_api_url": "https://api.anthropic.com",
            "anthropic_proxy": None,
            "context_management": None,
            "anthropic_api_key": SecretStr("bar"),
            "betas": None,
            "default_headers": None,
            "model_kwargs": {},
            "reuse_last_container": None,
            "streaming": False,
            "stream_usage": True,
            "output_version": None,
        },
        "kwargs": {
            "tools": [{"name": "foo", "description": "foo", "input_schema": {}}],
        },
        "config": {
            "callbacks": None,
            "configurable": {},
            "metadata": {"bar_model": "claude-sonnet-4-5-20250929"},
            "recursion_limit": 25,
            "tags": ["foo"],
        },
        "config_factories": [],
        "custom_input_type": None,
        "custom_output_type": None,
    }
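    # A configured model still composes declaratively with other runnables.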
    prompt = ChatPromptTemplate.from_messages([("system", "foo")])
    chain = prompt | model_with_config
    assert isinstance(chain, RunnableSequence)
