kiln_ai.adapters.model_adapters.langchain_adapters
import os
from typing import Any, Dict

from langchain_aws import ChatBedrockConverse
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.messages.base import BaseMessage
from langchain_core.runnables import Runnable
from langchain_fireworks import ChatFireworks
from langchain_groq import ChatGroq
from langchain_ollama import ChatOllama
from pydantic import BaseModel

import kiln_ai.datamodel as datamodel
from kiln_ai.adapters.ml_model_list import (
    KilnModelProvider,
    ModelProviderName,
    StructuredOutputMode,
)
from kiln_ai.adapters.model_adapters.base_adapter import (
    COT_FINAL_ANSWER_PROMPT,
    AdapterInfo,
    BaseAdapter,
    BasePromptBuilder,
    RunOutput,
)
from kiln_ai.adapters.ollama_tools import (
    get_ollama_connection,
    ollama_base_url,
    ollama_model_installed,
)
from kiln_ai.utils.config import Config
from kiln_ai.utils.exhaustive_error import raise_exhaustive_enum_error

LangChainModelType = BaseChatModel | Runnable[LanguageModelInput, Dict | BaseModel]


class LangchainAdapter(BaseAdapter):
    _model: LangChainModelType | None = None

    def __init__(
        self,
        kiln_task: datamodel.Task,
        custom_model: BaseChatModel | None = None,
        model_name: str | None = None,
        provider: str | None = None,
        prompt_builder: BasePromptBuilder | None = None,
        tags: list[str] | None = None,
    ):
        if custom_model is not None:
            self._model = custom_model

            # Attempt to infer model provider and name from custom model
            if provider is None:
                provider = "custom.langchain:" + custom_model.__class__.__name__

            if model_name is None:
                model_name = "custom.langchain:unknown_model"
                if hasattr(custom_model, "model_name") and isinstance(
                    getattr(custom_model, "model_name"), str
                ):
                    model_name = "custom.langchain:" + getattr(
                        custom_model, "model_name"
                    )
                if hasattr(custom_model, "model") and isinstance(
                    getattr(custom_model, "model"), str
                ):
                    model_name = "custom.langchain:" + getattr(custom_model, "model")
        elif model_name is not None:
            # default provider name if not provided
            provider = provider or "custom.langchain.default_provider"
        else:
            raise ValueError(
                "model_name and provider must be provided if custom_model is not provided"
            )

        if model_name is None:
            raise ValueError("model_name must be provided")

        super().__init__(
            kiln_task,
            model_name=model_name,
            model_provider_name=provider,
            prompt_builder=prompt_builder,
            tags=tags,
        )

    async def model(self) -> LangChainModelType:
        # cached model
        if self._model:
            return self._model

        self._model = await self.langchain_model_from()

        # Decide if we want to use Langchain's structured output:
        # 1. Only for structured tasks
        # 2. Only if the provider's mode isn't json_instructions (only mode that doesn't use an API option for structured output capabilities)
        provider = self.model_provider()
        use_lc_structured_output = (
            self.has_structured_output()
            and provider.structured_output_mode
            != StructuredOutputMode.json_instructions
        )

        if use_lc_structured_output:
            if not hasattr(self._model, "with_structured_output") or not callable(
                getattr(self._model, "with_structured_output")
            ):
                raise ValueError(
                    f"model {self._model} does not support structured output, cannot use output_json_schema"
                )
            # Langchain expects title/description to be at top level, on top of json schema
            output_schema = self.kiln_task.output_schema()
            if output_schema is None:
                raise ValueError(
                    f"output_json_schema is not valid json: {self.kiln_task.output_json_schema}"
                )
            output_schema["title"] = "task_response"
            output_schema["description"] = "A response from the task"
            with_structured_output_options = self.get_structured_output_options(
                self.model_name, self.model_provider_name
            )
            self._model = self._model.with_structured_output(
                output_schema,
                include_raw=True,
                **with_structured_output_options,
            )
        return self._model

    async def _run(self, input: Dict | str) -> RunOutput:
        provider = self.model_provider()
        model = await self.model()
        chain = model
        intermediate_outputs = {}

        prompt = self.build_prompt()
        user_msg = self.prompt_builder.build_user_message(input)
        messages = [
            SystemMessage(content=prompt),
            HumanMessage(content=user_msg),
        ]

        run_strategy, cot_prompt = self.run_strategy()

        if run_strategy == "cot_as_message":
            if not cot_prompt:
                raise ValueError("cot_prompt is required for cot_as_message strategy")
            messages.append(SystemMessage(content=cot_prompt))
        elif run_strategy == "cot_two_call":
            if not cot_prompt:
                raise ValueError("cot_prompt is required for cot_two_call strategy")
            messages.append(
                SystemMessage(content=cot_prompt),
            )

            # Base model (without structured output) used for COT message
            base_model = await self.langchain_model_from()

            cot_messages = [*messages]
            cot_response = await base_model.ainvoke(cot_messages)
            intermediate_outputs["chain_of_thought"] = cot_response.content
            messages.append(AIMessage(content=cot_response.content))
            messages.append(HumanMessage(content=COT_FINAL_ANSWER_PROMPT))

        response = await chain.ainvoke(messages)

        # Langchain may have already parsed the response into structured output, so use that if available.
        # However, a plain string may still be fixed at the parsing layer, so not being structured isn't a critical failure (yet)
        if (
            self.has_structured_output()
            and isinstance(response, dict)
            and "parsed" in response
            and isinstance(response["parsed"], dict)
        ):
            structured_response = response["parsed"]
            return RunOutput(
                output=self._munge_response(structured_response),
                intermediate_outputs=intermediate_outputs,
            )

        if not isinstance(response, BaseMessage):
            raise RuntimeError(f"response is not a BaseMessage: {response}")

        text_content = response.content
        if not isinstance(text_content, str):
            raise RuntimeError(f"response is not a string: {text_content}")

        return RunOutput(
            output=text_content,
            intermediate_outputs=intermediate_outputs,
        )

    def adapter_info(self) -> AdapterInfo:
        return AdapterInfo(
            model_name=self.model_name,
            model_provider=self.model_provider_name,
            adapter_name="kiln_langchain_adapter",
            prompt_builder_name=self.prompt_builder.__class__.prompt_builder_name(),
            prompt_id=self.prompt_builder.prompt_id(),
        )

    def _munge_response(self, response: Dict) -> Dict:
        # Mistral Large tool calling format is a bit different. Convert to standard format.
        if (
            "name" in response
            and response["name"] == "task_response"
            and "arguments" in response
        ):
            return response["arguments"]
        return response

    def get_structured_output_options(
        self, model_name: str, model_provider_name: str
    ) -> Dict[str, Any]:
        provider = self.model_provider()
        if not provider:
            return {}

        options = {}
        # We may need to add some provider specific logic here if providers use different names for the same mode, but everyone is copying openai for now
        match provider.structured_output_mode:
            case StructuredOutputMode.function_calling:
                options["method"] = "function_calling"
            case StructuredOutputMode.json_mode:
                options["method"] = "json_mode"
            case StructuredOutputMode.json_instruction_and_object:
                # We also pass instructions
                options["method"] = "json_mode"
            case StructuredOutputMode.json_schema:
                options["method"] = "json_schema"
            case StructuredOutputMode.json_instructions:
                # JSON done via instructions in prompt, not via API
                pass
            case StructuredOutputMode.default:
                if provider.name == ModelProviderName.ollama:
                    # Ollama has great json_schema support, so use that: https://ollama.com/blog/structured-outputs
                    options["method"] = "json_schema"
                else:
                    # Let langchain decide the default
                    pass
            case _:
                raise_exhaustive_enum_error(provider.structured_output_mode)

        return options

    async def langchain_model_from(self) -> BaseChatModel:
        provider = self.model_provider()
        return await langchain_model_from_provider(provider, self.model_name)


async def langchain_model_from_provider(
    provider: KilnModelProvider, model_name: str
) -> BaseChatModel:
    if provider.name == ModelProviderName.openai:
        # We use the OpenAICompatibleAdapter for OpenAI
        raise ValueError("OpenAI is not supported in Langchain adapter")
    elif provider.name == ModelProviderName.openai_compatible:
        # We use the OpenAICompatibleAdapter for OpenAI compatible
        raise ValueError("OpenAI compatible is not supported in Langchain adapter")
    elif provider.name == ModelProviderName.groq:
        api_key = Config.shared().groq_api_key
        if api_key is None:
            raise ValueError(
                "Attempted to use Groq without an API key set. "
                "Get your API key from https://console.groq.com/keys"
            )
        return ChatGroq(**provider.provider_options, groq_api_key=api_key)  # type: ignore[arg-type]
    elif provider.name == ModelProviderName.amazon_bedrock:
        api_key = Config.shared().bedrock_access_key
        secret_key = Config.shared().bedrock_secret_key
        # langchain doesn't allow passing these, so ugly hack to set env vars
        os.environ["AWS_ACCESS_KEY_ID"] = api_key
        os.environ["AWS_SECRET_ACCESS_KEY"] = secret_key
        return ChatBedrockConverse(
            **provider.provider_options,
        )
    elif provider.name == ModelProviderName.fireworks_ai:
        api_key = Config.shared().fireworks_api_key
        return ChatFireworks(**provider.provider_options, api_key=api_key)
    elif provider.name == ModelProviderName.ollama:
        # Ollama model naming is pretty flexible. We try a few versions of the model name
        potential_model_names = []
        if "model" in provider.provider_options:
            potential_model_names.append(provider.provider_options["model"])
        if "model_aliases" in provider.provider_options:
            potential_model_names.extend(provider.provider_options["model_aliases"])

        # Get the list of models Ollama supports
        ollama_connection = await get_ollama_connection()
        if ollama_connection is None:
            raise ValueError("Failed to connect to Ollama. Ensure Ollama is running.")

        for model_name in potential_model_names:
            if ollama_model_installed(ollama_connection, model_name):
                return ChatOllama(model=model_name, base_url=ollama_base_url())

        raise ValueError(f"Model {model_name} not installed on Ollama")
    elif provider.name == ModelProviderName.openrouter:
        raise ValueError("OpenRouter is not supported in Langchain adapter")
    else:
        raise ValueError(f"Invalid model or provider: {model_name} - {provider.name}")
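A minimal usage sketch of this module, assuming a local Ollama server is running and the requested model is available. The Task fields and the model/provider names below are illustrative; in a real project the Task comes from a saved Kiln project file and the names must match entries in the built-in model list.

import asyncio

import kiln_ai.datamodel as datamodel
from kiln_ai.adapters.model_adapters.langchain_adapters import LangchainAdapter

# Hypothetical in-memory task; real tasks are normally loaded from a Kiln project.
task = datamodel.Task(
    name="summarize",
    instruction="Summarize the input text in one sentence.",
)

adapter = LangchainAdapter(
    kiln_task=task,
    model_name="llama_3_1_8b",  # illustrative Kiln model name
    provider="ollama",          # illustrative provider name
)

# Resolve (and cache) the underlying LangChain chat model; for structured tasks this
# also wraps the model with with_structured_output().
lc_model = asyncio.run(adapter.model())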
LangChainModelType = BaseChatModel | Runnable[LanguageModelInput, Dict | BaseModel]
class LangchainAdapter(BaseAdapter):
Base class for AI model adapters that handle task execution.
This abstract class provides the foundation for implementing model-specific adapters that can process tasks with structured or unstructured inputs/outputs. It handles input/output validation, prompt building, and run tracking.
Attributes:
    prompt_builder (BasePromptBuilder): Builder for constructing prompts for the model
    kiln_task (Task): The task configuration and metadata
    output_schema (dict | None): JSON schema for validating structured outputs
    input_schema (dict | None): JSON schema for validating structured inputs
LangchainAdapter(
    kiln_task: kiln_ai.datamodel.Task,
    custom_model: langchain_core.language_models.chat_models.BaseChatModel | None = None,
    model_name: str | None = None,
    provider: str | None = None,
    prompt_builder: kiln_ai.adapters.prompt_builders.BasePromptBuilder | None = None,
    tags: list[str] | None = None)
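Passing a custom LangChain chat model is also supported; the provider and model name are then inferred from the wrapped object. A sketch, reusing the hypothetical task from the example above and assuming a local Ollama install:

from langchain_ollama import ChatOllama

# Wrap an existing LangChain chat model directly.
adapter = LangchainAdapter(
    kiln_task=task,
    custom_model=ChatOllama(model="llama3.1", base_url="http://localhost:11434"),
)

# Inferred from the custom model:
#   provider   -> "custom.langchain:ChatOllama"
#   model_name -> "custom.langchain:llama3.1"
print(adapter.adapter_info())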
async def model(self) -> LangChainModelType:
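For structured tasks, model() ends up making roughly the following LangChain call. This is a sketch only: the base model and schema contents are illustrative placeholders, and the "method" option comes from get_structured_output_options() and varies by provider mode.

from langchain_ollama import ChatOllama

# Illustrative base model and schema; in the adapter these come from the provider and the Kiln task.
base_chat_model = ChatOllama(model="llama3.1")
output_schema = {
    "type": "object",
    "properties": {"summary": {"type": "string"}},
    "required": ["summary"],
    # Langchain expects these at the top level of the JSON schema:
    "title": "task_response",
    "description": "A response from the task",
}
structured_model = base_chat_model.with_structured_output(
    output_schema,
    include_raw=True,
    method="json_schema",  # provider-dependent; see get_structured_output_options()
)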
def adapter_info(self) -> AdapterInfo:
def get_structured_output_options(self, model_name: str, model_provider_name: str) -> Dict[str, Any]:
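The returned options are passed straight through to with_structured_output(). As a rough guide, reusing the adapter from the earlier sketch (the authoritative mapping is the match statement in the module source above):

# Expected results by StructuredOutputMode (illustrative):
#   function_calling             -> {"method": "function_calling"}
#   json_mode                    -> {"method": "json_mode"}
#   json_instruction_and_object  -> {"method": "json_mode"}
#   json_schema                  -> {"method": "json_schema"}
#   json_instructions            -> {}   (JSON requested via the prompt, not the API)
#   default                      -> {"method": "json_schema"} on Ollama, {} otherwise
options = adapter.get_structured_output_options(adapter.model_name, adapter.model_provider_name)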
async def langchain_model_from_provider(provider: kiln_ai.adapters.ml_model_list.KilnModelProvider, model_name: str) -> langchain_core.language_models.chat_models.BaseChatModel:
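A sketch of calling this helper directly for Ollama. Constructing a KilnModelProvider by hand like this is only for illustration (real provider records come from the built-in model list in kiln_ai.adapters.ml_model_list), and it assumes a running Ollama server with one of the candidate model names pulled.

import asyncio

from kiln_ai.adapters.ml_model_list import KilnModelProvider, ModelProviderName
from kiln_ai.adapters.model_adapters.langchain_adapters import langchain_model_from_provider

# Hypothetical provider record; field values are illustrative.
provider = KilnModelProvider(
    name=ModelProviderName.ollama,
    provider_options={"model": "llama3.1", "model_aliases": ["llama3.1:8b"]},
)

# Returns a ChatOllama bound to whichever candidate model name is installed,
# or raises if Ollama is unreachable or none of the names are installed.
chat_model = asyncio.run(langchain_model_from_provider(provider, "llama_3_1_8b"))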