Coverage for src/chat_limiter/types.py: 100%
80 statements
« prev ^ index » next coverage.py v7.9.2, created at 2025-09-18 21:15 +0100
1"""
2Type definitions for chat completion requests and responses.
3"""
5from dataclasses import dataclass
6from enum import Enum
7from typing import Any
9from .models import detect_provider_from_model_sync
11from pydantic import BaseModel
class MessageRole(str, Enum):
    """Message roles supported across providers.

    Subclasses ``str`` so members compare equal to their plain string
    values and serialize naturally (e.g. into JSON request payloads).
    """

    USER = "user"            # end-user message
    ASSISTANT = "assistant"  # model-generated message
    SYSTEM = "system"        # system / instruction message
@dataclass
class Message:
    """A chat message that works across all providers."""

    # Author of the message (user / assistant / system).
    role: MessageRole
    # Plain-text content of the message.
    content: str
class ChatCompletionRequest(BaseModel):
    """High-level chat completion request.

    Provider-agnostic request model. Fields marked provider-specific
    below are expected to be filtered out before dispatch to providers
    that do not support them.
    """

    model: str                           # target model identifier
    messages: list[Message]              # conversation history, oldest first
    max_tokens: int | None = None        # cap on generated tokens
    temperature: float | None = None     # sampling temperature
    top_p: float | None = None           # nucleus-sampling cutoff
    stop: str | list[str] | None = None  # stop sequence(s)
    stream: bool = False                 # request a streamed response
    seed: int | None = None              # sampling seed (semantics vary by provider)

    # Provider-specific parameters (will be filtered per provider)
    frequency_penalty: float | None = None  # OpenAI
    presence_penalty: float | None = None  # OpenAI
    top_k: int | None = None  # Anthropic
    reasoning_effort: str | None = None  # OpenAI/OpenRouter reasoning models
    providers: list[str] | None = None  # OpenRouter provider routing
@dataclass
class Usage:
    """Token usage information."""

    # Tokens consumed by the input prompt.
    prompt_tokens: int
    # Tokens generated in the completion.
    completion_tokens: int
    # Total as reported by the provider (typically prompt + completion).
    total_tokens: int
@dataclass
class Choice:
    """A completion choice."""

    # Position of this choice within the response's choice list.
    index: int
    # The generated message for this choice.
    message: Message
    # Why generation stopped (provider-specific string), if reported.
    finish_reason: str | None = None
@dataclass
class ChatCompletionResponse:
    """High-level chat completion response."""

    # Provider-assigned response identifier.
    id: str
    # Model that produced the response.
    model: str
    # Generated completions (usually exactly one).
    choices: list[Choice]
    # Token accounting, if the provider reported it.
    usage: Usage | None = None
    # Creation timestamp (presumably epoch seconds — confirm per provider).
    created: int | None = None

    # Error information
    success: bool = True              # False when the request failed
    error_message: str | None = None  # human-readable failure description

    # Provider-specific metadata
    provider: str | None = None                 # provider that served the request
    raw_response: dict[str, Any] | None = None  # unmodified provider payload
# Model mappings for each provider
# Hardcoded fast-lookup tables used by detect_provider_from_model();
# dynamic discovery can supplement these at runtime when enabled.
OPENAI_MODELS = {
    "gpt-4o",
    "gpt-4o-mini",
    "gpt-4-turbo",
    "gpt-4",
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
}

ANTHROPIC_MODELS = {
    "claude-3-5-sonnet-20241022",
    "claude-3-5-haiku-20241022",
    "claude-3-opus-20240229",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-20240307",
}

# OpenRouter identifiers are "<provider>/<model>" strings.
OPENROUTER_MODELS = {
    # OpenAI models via OpenRouter
    "openai/gpt-4o",
    "openai/gpt-4o-mini",
    "openai/gpt-4-turbo",
    "openai/gpt-3.5-turbo",

    # Anthropic models via OpenRouter
    "anthropic/claude-3-5-sonnet",
    "anthropic/claude-3-opus",
    "anthropic/claude-3-sonnet",
    "anthropic/claude-3-haiku",

    # Other providers via OpenRouter
    "meta-llama/llama-3.1-405b-instruct",
    "meta-llama/llama-3.1-70b-instruct",
    "google/gemini-pro",
    "cohere/command-r-plus",
}

# Union of all known model names across providers.
ALL_MODELS = OPENAI_MODELS | ANTHROPIC_MODELS | OPENROUTER_MODELS
def detect_provider_from_model(model: str, use_dynamic_discovery: bool = False, api_keys: dict[str, str] | None = None) -> str | None:
    """
    Detect provider from model name.

    Args:
        model: The model name to check
        use_dynamic_discovery: Whether to use live API queries for model discovery
        api_keys: Dictionary of API keys for dynamic discovery

    Returns:
        Provider name or None if not found
    """
    # A "provider/model" prefix (e.g. "openai/o3", "anthropic/claude-3-sonnet")
    # expresses a preference for that provider.
    preferred: str | None = None
    bare_name = model
    if "/" in model:
        prefix, _, bare_name = model.partition("/")
        if prefix in ("openai", "anthropic"):
            preferred = prefix

    if preferred is not None:
        # Honor the prefix when the bare model exists in that provider's table.
        known = OPENAI_MODELS if preferred == "openai" else ANTHROPIC_MODELS
        if bare_name in known:
            return preferred
        # Otherwise the full prefixed name may be an OpenRouter identifier.
        if model in OPENROUTER_MODELS:
            return "openrouter"

    # Fast path for un-prefixed names: scan the hardcoded tables in order.
    for table, provider in (
        (OPENAI_MODELS, "openai"),
        (ANTHROPIC_MODELS, "anthropic"),
        (OPENROUTER_MODELS, "openrouter"),
    ):
        if model in table:
            return provider

    # Last resort: query live APIs, when enabled and credentials are available.
    if use_dynamic_discovery and api_keys:
        return detect_provider_from_model_sync(model, api_keys).found_provider

    return None