kiln_ai.adapters.ml_model_list

"""
Provides model configuration and management for various LLM providers and models.
This module handles the integration with different AI model providers and their
respective models, including configuration, validation, and instantiation of
language models.
"""

from enum import Enum
from typing import Dict, List

from pydantic import BaseModel

from kiln_ai.datamodel import StructuredOutputMode


class ModelProviderName(str, Enum):
    """
    Enumeration of supported AI model providers.
    """

    openai = "openai"
    groq = "groq"
    amazon_bedrock = "amazon_bedrock"
    ollama = "ollama"
    openrouter = "openrouter"
    fireworks_ai = "fireworks_ai"
    kiln_fine_tune = "kiln_fine_tune"
    kiln_custom_registry = "kiln_custom_registry"
    openai_compatible = "openai_compatible"


class ModelFamily(str, Enum):
    """
    Enumeration of supported model families/architectures.
    """

    gpt = "gpt"
    llama = "llama"
    phi = "phi"
    mistral = "mistral"
    gemma = "gemma"
    gemini = "gemini"
    claude = "claude"
    mixtral = "mixtral"
    qwen = "qwen"
    deepseek = "deepseek"


class ModelName(str, Enum):
    """
    Enumeration of specific model versions supported by the system.
    Where models have instruct and raw versions, instruct is the default and raw is specified.
    """

    llama_3_1_8b = "llama_3_1_8b"
    llama_3_1_70b = "llama_3_1_70b"
    llama_3_1_405b = "llama_3_1_405b"
    llama_3_2_1b = "llama_3_2_1b"
    llama_3_2_3b = "llama_3_2_3b"
    llama_3_2_11b = "llama_3_2_11b"
    llama_3_2_90b = "llama_3_2_90b"
    llama_3_3_70b = "llama_3_3_70b"
    gpt_4o_mini = "gpt_4o_mini"
    gpt_4o = "gpt_4o"
    phi_3_5 = "phi_3_5"
    phi_4 = "phi_4"
    mistral_large = "mistral_large"
    mistral_nemo = "mistral_nemo"
    gemma_2_2b = "gemma_2_2b"
    gemma_2_9b = "gemma_2_9b"
    gemma_2_27b = "gemma_2_27b"
    claude_3_5_haiku = "claude_3_5_haiku"
    claude_3_5_sonnet = "claude_3_5_sonnet"
    gemini_1_5_flash = "gemini_1_5_flash"
    gemini_1_5_flash_8b = "gemini_1_5_flash_8b"
    gemini_1_5_pro = "gemini_1_5_pro"
    gemini_2_0_flash = "gemini_2_0_flash"
    nemotron_70b = "nemotron_70b"
    mixtral_8x7b = "mixtral_8x7b"
    qwen_2p5_7b = "qwen_2p5_7b"
    qwen_2p5_72b = "qwen_2p5_72b"
    deepseek_3 = "deepseek_3"
    deepseek_r1 = "deepseek_r1"
    mistral_small_3 = "mistral_small_3"
    deepseek_r1_distill_qwen_32b = "deepseek_r1_distill_qwen_32b"
    deepseek_r1_distill_llama_70b = "deepseek_r1_distill_llama_70b"
    deepseek_r1_distill_qwen_14b = "deepseek_r1_distill_qwen_14b"
    deepseek_r1_distill_qwen_1p5b = "deepseek_r1_distill_qwen_1p5b"
    deepseek_r1_distill_qwen_7b = "deepseek_r1_distill_qwen_7b"
    deepseek_r1_distill_llama_8b = "deepseek_r1_distill_llama_8b"


class ModelParserID(str, Enum):
    """
    Enumeration of supported model parsers.
    """

    r1_thinking = "r1_thinking"


class KilnModelProvider(BaseModel):
    """
    Configuration for a specific model provider.

    Attributes:
        name: The provider's identifier
        supports_structured_output: Whether the provider supports structured output formats
        supports_data_gen: Whether the provider supports data generation
        untested_model: Whether the model is untested (typically user added). The supports_* fields are not applicable.
        provider_finetune_id: The finetune ID for the provider, if applicable
        provider_options: Additional provider-specific configuration options
        structured_output_mode: The mode to use when calling the model for structured output, if it was trained with structured output
        parser: A parser to use for the model, if applicable
        reasoning_capable: Whether the model is designed to output thinking in a structured format (e.g. <think></think>). If so, we don't run chain-of-thought across two calls; we ask for the thinking and the final response in the same call.
    """

    name: ModelProviderName
    supports_structured_output: bool = True
    supports_data_gen: bool = True
    untested_model: bool = False
    provider_finetune_id: str | None = None
    provider_options: Dict = {}
    structured_output_mode: StructuredOutputMode = StructuredOutputMode.default
    parser: ModelParserID | None = None
    reasoning_capable: bool = False


class KilnModel(BaseModel):
    """
    Configuration for a specific AI model.

    Attributes:
        family: The model's architecture family
        name: The model's identifier
        friendly_name: Human-readable name for the model
        providers: List of providers that offer this model
    """

    family: str
    name: str
    friendly_name: str
    providers: List[KilnModelProvider]


built_in_models: List[KilnModel] = [
    # GPT 4o Mini
    KilnModel(
        family=ModelFamily.gpt,
        name=ModelName.gpt_4o_mini,
        friendly_name="GPT 4o Mini",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openai,
                provider_options={"model": "gpt-4o-mini"},
                provider_finetune_id="gpt-4o-mini-2024-07-18",
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "openai/gpt-4o-mini"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
    # GPT 4o
    KilnModel(
        family=ModelFamily.gpt,
        name=ModelName.gpt_4o,
        friendly_name="GPT 4o",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openai,
                provider_options={"model": "gpt-4o"},
                provider_finetune_id="gpt-4o-2024-08-06",
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "openai/gpt-4o"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
    # Claude 3.5 Haiku
    KilnModel(
        family=ModelFamily.claude,
        name=ModelName.claude_3_5_haiku,
        friendly_name="Claude 3.5 Haiku",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "anthropic/claude-3-5-haiku"},
            ),
        ],
    ),
    # Claude 3.5 Sonnet
    KilnModel(
        family=ModelFamily.claude,
        name=ModelName.claude_3_5_sonnet,
        friendly_name="Claude 3.5 Sonnet",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "anthropic/claude-3.5-sonnet"},
            ),
        ],
    ),
    # DeepSeek 3
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_3,
        friendly_name="DeepSeek v3",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "deepseek/deepseek-chat"},
                structured_output_mode=StructuredOutputMode.function_calling,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_options={"model": "accounts/fireworks/models/deepseek-v3"},
                structured_output_mode=StructuredOutputMode.json_mode,
                supports_structured_output=True,
                supports_data_gen=False,
            ),
        ],
    ),
    # DeepSeek R1
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1,
        friendly_name="DeepSeek R1",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "deepseek/deepseek-r1"},
                # No custom parser -- openrouter implemented it themselves
                structured_output_mode=StructuredOutputMode.json_instructions,
                reasoning_capable=True,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_options={"model": "accounts/fireworks/models/deepseek-r1"},
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
                reasoning_capable=True,
            ),
            KilnModelProvider(
                # I want your RAM
                name=ModelProviderName.ollama,
                provider_options={"model": "deepseek-r1:671b"},
                parser=ModelParserID.r1_thinking,
                structured_output_mode=StructuredOutputMode.json_instructions,
                reasoning_capable=True,
            ),
        ],
    ),
    # Gemini 1.5 Pro
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_1_5_pro,
        friendly_name="Gemini 1.5 Pro",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "google/gemini-pro-1.5"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
    # Gemini 1.5 Flash
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_1_5_flash,
        friendly_name="Gemini 1.5 Flash",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "google/gemini-flash-1.5"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
        ],
    ),
    # Gemini 1.5 Flash 8B
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_1_5_flash_8b,
        friendly_name="Gemini 1.5 Flash 8B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "google/gemini-flash-1.5-8b"},
                structured_output_mode=StructuredOutputMode.json_mode,
            ),
        ],
    ),
    # Gemini 2.0 Flash
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_2_0_flash,
        friendly_name="Gemini 2.0 Flash",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "google/gemini-2.0-flash-001"},
            ),
        ],
    ),
    # Nemotron 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.nemotron_70b,
        friendly_name="Nemotron 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "nvidia/llama-3.1-nemotron-70b-instruct"},
            ),
        ],
    ),
    # Llama 3.1 8B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_1_8b,
        friendly_name="Llama 3.1 8B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                provider_options={"model": "llama-3.1-8b-instant"},
            ),
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                structured_output_mode=StructuredOutputMode.json_schema,
                supports_data_gen=False,
                provider_options={
                    "model": "meta.llama3-1-8b-instruct-v1:0",
                    "region_name": "us-west-2",  # Llama 3.1 only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={
                    "model": "llama3.1:8b",
                    "model_aliases": ["llama3.1"],  # 8b is default
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "meta-llama/llama-3.1-8b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # JSON mode not ideal (no schema), but tool calling doesn't work on 8b
                structured_output_mode=StructuredOutputMode.json_mode,
                provider_finetune_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p1-8b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.1 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_1_70b,
        friendly_name="Llama 3.1 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                structured_output_mode=StructuredOutputMode.json_schema,
                supports_data_gen=False,
                provider_options={
                    "model": "meta.llama3-1-70b-instruct-v1:0",
                    "region_name": "us-west-2",  # Llama 3.1 only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "meta-llama/llama-3.1-70b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.1:70b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_finetune_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p1-70b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.1 405B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_1_405b,
        friendly_name="Llama 3.1 405B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                structured_output_mode=StructuredOutputMode.json_schema,
                supports_data_gen=False,
                provider_options={
                    "model": "meta.llama3-1-405b-instruct-v1:0",
                    "region_name": "us-west-2",  # Llama 3.1 only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.1:405b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={"model": "meta-llama/llama-3.1-405b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p1-405b-instruct"
                },
            ),
        ],
    ),
    # Mistral Nemo
    KilnModel(
        family=ModelFamily.mistral,
        name=ModelName.mistral_nemo,
        friendly_name="Mistral Nemo",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "mistralai/mistral-nemo"},
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
        ],
    ),
    # Mistral Large
    KilnModel(
        family=ModelFamily.mistral,
        name=ModelName.mistral_large,
        friendly_name="Mistral Large",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={
                    "model": "mistral.mistral-large-2407-v1:0",
                    "region_name": "us-west-2",  # only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "mistralai/mistral-large"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "mistral-large"},
            ),
        ],
    ),
    # Llama 3.2 1B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_1b,
        friendly_name="Llama 3.2 1B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                provider_options={"model": "llama-3.2-1b-preview"},
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                provider_options={"model": "meta-llama/llama-3.2-1b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "llama3.2:1b"},
            ),
        ],
    ),
    # Llama 3.2 3B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_3b,
        friendly_name="Llama 3.2 3B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                provider_options={"model": "llama-3.2-3b-preview"},
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "meta-llama/llama-3.2-3b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                provider_options={"model": "llama3.2"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_finetune_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
                structured_output_mode=StructuredOutputMode.json_mode,
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-3b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.2 11B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_11b,
        friendly_name="Llama 3.2 11B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                provider_options={"model": "llama-3.2-11b-vision-preview"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "meta-llama/llama-3.2-11b-vision-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.2-vision"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-11b-vision-instruct"
                },
                structured_output_mode=StructuredOutputMode.json_mode,
            ),
        ],
    ),
    # Llama 3.2 90B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_90b,
        friendly_name="Llama 3.2 90B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                provider_options={"model": "llama-3.2-90b-vision-preview"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "meta-llama/llama-3.2-90b-vision-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.2-vision:90b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-90b-vision-instruct"
                },
                structured_output_mode=StructuredOutputMode.json_mode,
            ),
        ],
    ),
    # Llama 3.3 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_3_70b,
        friendly_name="Llama 3.3 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "meta-llama/llama-3.3-70b-instruct"},
                structured_output_mode=StructuredOutputMode.json_schema,
                # OpenRouter is unreliable with both json_schema and tools.
                # json_schema works often enough that we force it, but not
                # consistently, so structured output is still not recommended here.
                supports_structured_output=False,
                supports_data_gen=False,
            ),
            KilnModelProvider(
                name=ModelProviderName.groq,
                supports_structured_output=True,
                supports_data_gen=True,
                provider_options={"model": "llama-3.3-70b-versatile"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "llama3.3"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # Finetuning not live yet
                # provider_finetune_id="accounts/fireworks/models/llama-v3p3-70b-instruct",
                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
                structured_output_mode=StructuredOutputMode.function_calling,
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p3-70b-instruct"
                },
            ),
        ],
    ),
    # Phi 3.5
    KilnModel(
        family=ModelFamily.phi,
        name=ModelName.phi_3_5,
        friendly_name="Phi 3.5",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "phi3.5"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "microsoft/phi-3.5-mini-128k-instruct"},
                structured_output_mode=StructuredOutputMode.json_schema,
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                structured_output_mode=StructuredOutputMode.json_mode,
                supports_data_gen=False,
                provider_options={
                    "model": "accounts/fireworks/models/phi-3-vision-128k-instruct"
                },
            ),
        ],
    ),
    # Phi 4
    KilnModel(
        family=ModelFamily.phi,
        name=ModelName.phi_4,
        friendly_name="Phi 4",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                structured_output_mode=StructuredOutputMode.json_schema,
                provider_options={"model": "phi4"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                # JSON mode not consistent enough to enable in UI
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                provider_options={"model": "microsoft/phi-4"},
            ),
        ],
    ),
    # Gemma 2 2.6b
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_2b,
        friendly_name="Gemma 2 2B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                provider_options={
                    "model": "gemma2:2b",
                },
            ),
        ],
    ),
    # Gemma 2 9B
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_9b,
        friendly_name="Gemma 2 9B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                provider_options={
                    "model": "gemma2:9b",
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                provider_options={"model": "google/gemma-2-9b-it"},
            ),
            # Fireworks AI errors (it rejects the system role). Excluded until resolved.
        ],
    ),
    # Gemma 2 27B
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_27b,
        friendly_name="Gemma 2 27B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                provider_options={
                    "model": "gemma2:27b",
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                supports_data_gen=False,
                provider_options={"model": "google/gemma-2-27b-it"},
            ),
        ],
    ),
    # Mixtral 8x7B
    KilnModel(
        family=ModelFamily.mixtral,
        name=ModelName.mixtral_8x7b,
        friendly_name="Mixtral 8x7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "mistralai/mixtral-8x7b-instruct"},
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "mixtral"},
            ),
        ],
    ),
    # Qwen 2.5 7B
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwen_2p5_7b,
        friendly_name="Qwen 2.5 7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "qwen/qwen-2.5-7b-instruct"},
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "qwen2.5"},
            ),
        ],
    ),
    # Qwen 2.5 72B
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwen_2p5_72b,
        friendly_name="Qwen 2.5 72B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "qwen/qwen-2.5-72b-instruct"},
                # Not consistent with structured data. Works sometimes, but not often.
                supports_structured_output=False,
                supports_data_gen=False,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "qwen2.5:72b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_options={
                    "model": "accounts/fireworks/models/qwen2p5-72b-instruct"
                },
                # Fireworks will start tuning, but it never finishes.
                # provider_finetune_id="accounts/fireworks/models/qwen2p5-72b-instruct",
                # Tool calling forces schema -- fireworks doesn't support json_schema, just json_mode
                structured_output_mode=StructuredOutputMode.function_calling,
            ),
        ],
    ),
    # Mistral Small 3
    KilnModel(
        family=ModelFamily.mistral,
        name=ModelName.mistral_small_3,
        friendly_name="Mistral Small 3",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                structured_output_mode=StructuredOutputMode.json_instruction_and_object,
                provider_options={"model": "mistralai/mistral-small-24b-instruct-2501"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "mistral-small:24b"},
            ),
        ],
    ),
    # DeepSeek R1 Distill Qwen 32B
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_qwen_32b,
        friendly_name="DeepSeek R1 Distill Qwen 32B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek/deepseek-r1-distill-qwen-32b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek-r1:32b"},
            ),
        ],
    ),
    # DeepSeek R1 Distill Llama 70B
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_llama_70b,
        friendly_name="DeepSeek R1 Distill Llama 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek/deepseek-r1-distill-llama-70b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek-r1:70b"},
            ),
        ],
    ),
    # DeepSeek R1 Distill Qwen 14B
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_qwen_14b,
        friendly_name="DeepSeek R1 Distill Qwen 14B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=False,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek/deepseek-r1-distill-qwen-14b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek-r1:14b"},
            ),
        ],
    ),
    # DeepSeek R1 Distill Llama 8B
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_llama_8b,
        friendly_name="DeepSeek R1 Distill Llama 8B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=False,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek/deepseek-r1-distill-llama-8b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek-r1:8b"},
            ),
        ],
    ),
    # DeepSeek R1 Distill Qwen 7B
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_qwen_7b,
        friendly_name="DeepSeek R1 Distill Qwen 7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek-r1:7b"},
            ),
        ],
    ),
    # DeepSeek R1 Distill Qwen 1.5B
    KilnModel(
        family=ModelFamily.deepseek,
        name=ModelName.deepseek_r1_distill_qwen_1p5b,
        friendly_name="DeepSeek R1 Distill Qwen 1.5B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek/deepseek-r1-distill-qwen-1.5b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                parser=ModelParserID.r1_thinking,
                reasoning_capable=True,
                structured_output_mode=StructuredOutputMode.json_instructions,
                provider_options={"model": "deepseek-r1:1.5b"},
            ),
        ],
    ),
]
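
Example: looking up a built-in model and one of its provider configurations. This is
an illustrative sketch; the find_model helper below is hypothetical and not part of
this module, which only exports the data structures.

from kiln_ai.adapters.ml_model_list import (
    KilnModel,
    ModelName,
    ModelProviderName,
    built_in_models,
)


def find_model(name: ModelName) -> KilnModel | None:
    """Return the built-in model with the given name, if any."""
    return next((m for m in built_in_models if m.name == name), None)


model = find_model(ModelName.gpt_4o_mini)
if model is not None:
    # Each provider entry carries the provider-specific model identifier.
    provider = next(p for p in model.providers if p.name == ModelProviderName.openai)
    print(provider.provider_options["model"])  # "gpt-4o-mini"
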
class ModelProviderName(builtins.str, enum.Enum):

Enumeration of supported AI model providers.

openai = <ModelProviderName.openai: 'openai'>
groq = <ModelProviderName.groq: 'groq'>
amazon_bedrock = <ModelProviderName.amazon_bedrock: 'amazon_bedrock'>
ollama = <ModelProviderName.ollama: 'ollama'>
openrouter = <ModelProviderName.openrouter: 'openrouter'>
fireworks_ai = <ModelProviderName.fireworks_ai: 'fireworks_ai'>
kiln_fine_tune = <ModelProviderName.kiln_fine_tune: 'kiln_fine_tune'>
kiln_custom_registry = <ModelProviderName.kiln_custom_registry: 'kiln_custom_registry'>
openai_compatible = <ModelProviderName.openai_compatible: 'openai_compatible'>
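
Because ModelProviderName subclasses str, members compare equal to their string
values and can be looked up by value, which keeps serialized configs readable. A
minimal sketch:

from kiln_ai.adapters.ml_model_list import ModelProviderName

assert ModelProviderName.openai == "openai"  # str comparison works directly
assert ModelProviderName("groq") is ModelProviderName.groq  # lookup by value
print(ModelProviderName.ollama.value)  # "ollama"
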
class ModelFamily(builtins.str, enum.Enum):

Enumeration of supported model families/architectures.

gpt = <ModelFamily.gpt: 'gpt'>
llama = <ModelFamily.llama: 'llama'>
phi = <ModelFamily.phi: 'phi'>
mistral = <ModelFamily.mistral: 'mistral'>
gemma = <ModelFamily.gemma: 'gemma'>
gemini = <ModelFamily.gemini: 'gemini'>
claude = <ModelFamily.claude: 'claude'>
mixtral = <ModelFamily.mixtral: 'mixtral'>
qwen = <ModelFamily.qwen: 'qwen'>
deepseek = <ModelFamily.deepseek: 'deepseek'>
class ModelName(builtins.str, enum.Enum):

Enumeration of specific model versions supported by the system. Where models have instruct and raw versions, instruct is default and raw is specified.

llama_3_1_8b = <ModelName.llama_3_1_8b: 'llama_3_1_8b'>
llama_3_1_70b = <ModelName.llama_3_1_70b: 'llama_3_1_70b'>
llama_3_1_405b = <ModelName.llama_3_1_405b: 'llama_3_1_405b'>
llama_3_2_1b = <ModelName.llama_3_2_1b: 'llama_3_2_1b'>
llama_3_2_3b = <ModelName.llama_3_2_3b: 'llama_3_2_3b'>
llama_3_2_11b = <ModelName.llama_3_2_11b: 'llama_3_2_11b'>
llama_3_2_90b = <ModelName.llama_3_2_90b: 'llama_3_2_90b'>
llama_3_3_70b = <ModelName.llama_3_3_70b: 'llama_3_3_70b'>
gpt_4o_mini = <ModelName.gpt_4o_mini: 'gpt_4o_mini'>
gpt_4o = <ModelName.gpt_4o: 'gpt_4o'>
phi_3_5 = <ModelName.phi_3_5: 'phi_3_5'>
phi_4 = <ModelName.phi_4: 'phi_4'>
mistral_large = <ModelName.mistral_large: 'mistral_large'>
mistral_nemo = <ModelName.mistral_nemo: 'mistral_nemo'>
gemma_2_2b = <ModelName.gemma_2_2b: 'gemma_2_2b'>
gemma_2_9b = <ModelName.gemma_2_9b: 'gemma_2_9b'>
gemma_2_27b = <ModelName.gemma_2_27b: 'gemma_2_27b'>
claude_3_5_haiku = <ModelName.claude_3_5_haiku: 'claude_3_5_haiku'>
claude_3_5_sonnet = <ModelName.claude_3_5_sonnet: 'claude_3_5_sonnet'>
gemini_1_5_flash = <ModelName.gemini_1_5_flash: 'gemini_1_5_flash'>
gemini_1_5_flash_8b = <ModelName.gemini_1_5_flash_8b: 'gemini_1_5_flash_8b'>
gemini_1_5_pro = <ModelName.gemini_1_5_pro: 'gemini_1_5_pro'>
gemini_2_0_flash = <ModelName.gemini_2_0_flash: 'gemini_2_0_flash'>
nemotron_70b = <ModelName.nemotron_70b: 'nemotron_70b'>
mixtral_8x7b = <ModelName.mixtral_8x7b: 'mixtral_8x7b'>
qwen_2p5_7b = <ModelName.qwen_2p5_7b: 'qwen_2p5_7b'>
qwen_2p5_72b = <ModelName.qwen_2p5_72b: 'qwen_2p5_72b'>
deepseek_3 = <ModelName.deepseek_3: 'deepseek_3'>
deepseek_r1 = <ModelName.deepseek_r1: 'deepseek_r1'>
mistral_small_3 = <ModelName.mistral_small_3: 'mistral_small_3'>
deepseek_r1_distill_qwen_32b = <ModelName.deepseek_r1_distill_qwen_32b: 'deepseek_r1_distill_qwen_32b'>
deepseek_r1_distill_llama_70b = <ModelName.deepseek_r1_distill_llama_70b: 'deepseek_r1_distill_llama_70b'>
deepseek_r1_distill_qwen_14b = <ModelName.deepseek_r1_distill_qwen_14b: 'deepseek_r1_distill_qwen_14b'>
deepseek_r1_distill_qwen_1p5b = <ModelName.deepseek_r1_distill_qwen_1p5b: 'deepseek_r1_distill_qwen_1p5b'>
deepseek_r1_distill_qwen_7b = <ModelName.deepseek_r1_distill_qwen_7b: 'deepseek_r1_distill_qwen_7b'>
deepseek_r1_distill_llama_8b = <ModelName.deepseek_r1_distill_llama_8b: 'deepseek_r1_distill_llama_8b'>
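
As with the other str-backed enums, constructing a ModelName from an unknown string
raises ValueError, which makes it a simple validator for user-supplied model names.
A short sketch (parse_model_name is a hypothetical helper, not part of this module):

from kiln_ai.adapters.ml_model_list import ModelName


def parse_model_name(raw: str) -> ModelName | None:
    """Return the ModelName for raw, or None if it isn't a supported model."""
    try:
        return ModelName(raw)
    except ValueError:
        return None


assert parse_model_name("llama_3_1_8b") is ModelName.llama_3_1_8b
assert parse_model_name("not_a_model") is None
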
class ModelParserID(builtins.str, enum.Enum):

Enumeration of supported model parsers.

r1_thinking = <ModelParserID.r1_thinking: 'r1_thinking'>
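
The module only defines the parser identifier; the parser itself lives elsewhere. As
an illustration of what an R1-style parser has to do, the sketch below splits a
<think>...</think> block from the final answer. This is an assumption about the
parser's behavior, not Kiln's actual implementation.

import re

# Hypothetical sketch of R1-style output parsing; not Kiln's actual parser.
THINK_RE = re.compile(r"<think>(.*?)</think>", re.DOTALL)


def split_r1_output(raw: str) -> tuple[str | None, str]:
    """Return (thinking, final_answer) from a <think>-tagged completion."""
    match = THINK_RE.search(raw)
    if match is None:
        return None, raw.strip()  # no thinking block found
    thinking = match.group(1).strip()
    final = raw[match.end():].strip()
    return thinking, final
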
class KilnModelProvider(pydantic.main.BaseModel):

Configuration for a specific model provider.

Attributes:
    name: The provider's identifier
    supports_structured_output: Whether the provider supports structured output formats
    supports_data_gen: Whether the provider supports data generation
    untested_model: Whether the model is untested (typically user added). The supports_* fields are not applicable.
    provider_finetune_id: The finetune ID for the provider, if applicable
    provider_options: Additional provider-specific configuration options
    structured_output_mode: The mode to use when calling the model for structured output, if it was trained with structured output
    parser: A parser to use for the model, if applicable
    reasoning_capable: Whether the model is designed to output thinking in a structured format (e.g. <think></think>). If so, we don't run chain-of-thought across two calls; we ask for the thinking and the final response in the same call.

name: ModelProviderName
supports_structured_output: bool
supports_data_gen: bool
untested_model: bool
provider_finetune_id: str | None
provider_options: Dict
structured_output_mode: kiln_ai.datamodel.StructuredOutputMode
parser: ModelParserID | None
reasoning_capable: bool
model_config: ClassVar[pydantic.config.ConfigDict] = {}

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].
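
Since KilnModelProvider is a pydantic model, fields are validated on construction and
the defaults above apply automatically. A minimal construction sketch:

from kiln_ai.adapters.ml_model_list import KilnModelProvider, ModelProviderName
from kiln_ai.datamodel import StructuredOutputMode

provider = KilnModelProvider(
    name=ModelProviderName.ollama,
    provider_options={"model": "llama3.1:8b"},
    structured_output_mode=StructuredOutputMode.json_schema,
)
assert provider.supports_structured_output  # defaults to True
assert provider.parser is None  # no parser unless one is specified
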

class KilnModel(pydantic.main.BaseModel):

Configuration for a specific AI model.

Attributes:
    family: The model's architecture family
    name: The model's identifier
    friendly_name: Human-readable name for the model
    providers: List of providers that offer this model

family: str
name: str
friendly_name: str
providers: List[KilnModelProvider]
model_config: ClassVar[pydantic.config.ConfigDict] = {}

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].
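
A KilnModel ties a family and name to one or more provider configurations, mirroring
the entries in built_in_models. A construction sketch:

from kiln_ai.adapters.ml_model_list import (
    KilnModel,
    KilnModelProvider,
    ModelFamily,
    ModelName,
    ModelProviderName,
)

model = KilnModel(
    family=ModelFamily.llama,
    name=ModelName.llama_3_1_8b,
    friendly_name="Llama 3.1 8B",
    providers=[
        KilnModelProvider(
            name=ModelProviderName.groq,
            provider_options={"model": "llama-3.1-8b-instant"},
        ),
    ],
)
assert model.providers[0].name == ModelProviderName.groq
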

built_in_models: List[KilnModel]

The full list of built-in models and their provider configurations, as defined in the module source above.
'json_instruction_and_object'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'llama3.2:1b'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False)]), KilnModel(family='llama', name='llama_3_2_3b', friendly_name='Llama 3.2 3B', providers=[KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'llama-3.2-3b-preview'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'meta-llama/llama-3.2-3b-instruct'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'llama3.2'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id='accounts/fireworks/models/llama-v3p2-3b-instruct', provider_options={'model': 'accounts/fireworks/models/llama-v3p2-3b-instruct'}, structured_output_mode=<StructuredOutputMode.json_mode: 'json_mode'>, parser=None, reasoning_capable=False)]), KilnModel(family='llama', name='llama_3_2_11b', friendly_name='Llama 3.2 11B', providers=[KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'llama-3.2-11b-vision-preview'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'meta-llama/llama-3.2-11b-vision-instruct'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'llama3.2-vision'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'accounts/fireworks/models/llama-v3p2-11b-vision-instruct'}, structured_output_mode=<StructuredOutputMode.json_mode: 'json_mode'>, parser=None, reasoning_capable=False)]), KilnModel(family='llama', name='llama_3_2_90b', friendly_name='Llama 3.2 90B', providers=[KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, supports_structured_output=True, 
supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'llama-3.2-90b-vision-preview'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'meta-llama/llama-3.2-90b-vision-instruct'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'llama3.2-vision:90b'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'accounts/fireworks/models/llama-v3p2-90b-vision-instruct'}, structured_output_mode=<StructuredOutputMode.json_mode: 'json_mode'>, parser=None, reasoning_capable=False)]), KilnModel(family='llama', name='llama_3_3_70b', friendly_name='Llama 3.3 70B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'meta-llama/llama-3.3-70b-instruct'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.groq: 'groq'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'llama-3.3-70b-versatile'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'llama3.3'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'accounts/fireworks/models/llama-v3p3-70b-instruct'}, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False)]), KilnModel(family='phi', name='phi_3_5', friendly_name='Phi 3.5', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'phi3.5'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'microsoft/phi-3.5-mini-128k-instruct'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), 
KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'accounts/fireworks/models/phi-3-vision-128k-instruct'}, structured_output_mode=<StructuredOutputMode.json_mode: 'json_mode'>, parser=None, reasoning_capable=False)]), KilnModel(family='phi', name='phi_4', friendly_name='Phi 4', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'phi4'}, structured_output_mode=<StructuredOutputMode.json_schema: 'json_schema'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'microsoft/phi-4'}, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False)]), KilnModel(family='gemma', name='gemma_2_2b', friendly_name='Gemma 2 2B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'gemma2:2b'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False)]), KilnModel(family='gemma', name='gemma_2_9b', friendly_name='Gemma 2 9B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'gemma2:9b'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'google/gemma-2-9b-it'}, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False)]), KilnModel(family='gemma', name='gemma_2_27b', friendly_name='Gemma 2 27B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'gemma2:27b'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'google/gemma-2-27b-it'}, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False)]), KilnModel(family='mixtral', name='mixtral_8x7b', friendly_name='Mixtral 8x7B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'mistralai/mixtral-8x7b-instruct'}, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, 
reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'mixtral'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False)]), KilnModel(family='qwen', name='qwen_2p5_7b', friendly_name='Qwen 2.5 7B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'qwen/qwen-2.5-7b-instruct'}, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'qwen2.5'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False)]), KilnModel(family='qwen', name='qwen_2p5_72b', friendly_name='Qwen 2.5 72B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'qwen/qwen-2.5-72b-instruct'}, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'qwen2.5:72b'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.fireworks_ai: 'fireworks_ai'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'accounts/fireworks/models/qwen2p5-72b-instruct'}, structured_output_mode=<StructuredOutputMode.function_calling: 'function_calling'>, parser=None, reasoning_capable=False)]), KilnModel(family='mistral', name='mistral_small_3', friendly_name='Mistral Small 3', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'mistralai/mistral-small-24b-instruct-2501'}, structured_output_mode=<StructuredOutputMode.json_instruction_and_object: 'json_instruction_and_object'>, parser=None, reasoning_capable=False), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'mistral-small:24b'}, structured_output_mode=<StructuredOutputMode.default: 'default'>, parser=None, reasoning_capable=False)]), KilnModel(family='deepseek', name='deepseek_r1_distill_qwen_32b', friendly_name='DeepSeek R1 Distill Qwen 32B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek/deepseek-r1-distill-qwen-32b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, 
parser=None, reasoning_capable=True), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek-r1:32b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True)]), KilnModel(family='deepseek', name='deepseek_r1_distill_llama_70b', friendly_name='DeepSeek R1 Distill Llama 70B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=True, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek/deepseek-r1-distill-llama-70b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=None, reasoning_capable=True), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek-r1:70b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True)]), KilnModel(family='deepseek', name='deepseek_r1_distill_qwen_14b', friendly_name='DeepSeek R1 Distill Qwen 14B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek/deepseek-r1-distill-qwen-14b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=None, reasoning_capable=True), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek-r1:14b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True)]), KilnModel(family='deepseek', name='deepseek_r1_distill_llama_8b', friendly_name='DeepSeek R1 Distill Llama 8B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek/deepseek-r1-distill-llama-8b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=None, reasoning_capable=True), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek-r1:8b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True)]), KilnModel(family='deepseek', name='deepseek_r1_distill_qwen_7b', friendly_name='DeepSeek R1 Distill Qwen 7B', providers=[KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek-r1:7b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True)]), 
KilnModel(family='deepseek', name='deepseek_r1_distill_qwen_1p5b', friendly_name='DeepSeek R1 Distill Qwen 1.5B', providers=[KilnModelProvider(name=<ModelProviderName.openrouter: 'openrouter'>, supports_structured_output=False, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek/deepseek-r1-distill-qwen-1.5b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=None, reasoning_capable=True), KilnModelProvider(name=<ModelProviderName.ollama: 'ollama'>, supports_structured_output=True, supports_data_gen=False, untested_model=False, provider_finetune_id=None, provider_options={'model': 'deepseek-r1:1.5b'}, structured_output_mode=<StructuredOutputMode.json_instructions: 'json_instructions'>, parser=<ModelParserID.r1_thinking: 'r1_thinking'>, reasoning_capable=True)])]
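A minimal sketch of how the registry above can be queried, assuming only the definitions shown in this module. The helper functions get_model and providers_supporting_structured_output are illustrative and not part of the module's API; the assertions reflect the entries listed above.

from typing import List

from kiln_ai.adapters.ml_model_list import (
    KilnModel,
    KilnModelProvider,
    ModelName,
    ModelParserID,
    ModelProviderName,
    built_in_models,
)


def get_model(name: ModelName) -> KilnModel:
    """Look up a built-in model by its ModelName enum value."""
    for model in built_in_models:
        if model.name == name:
            return model
    raise ValueError(f"unknown built-in model: {name}")


def providers_supporting_structured_output(name: ModelName) -> List[KilnModelProvider]:
    """Return the providers of a model that advertise structured output support."""
    return [p for p in get_model(name).providers if p.supports_structured_output]


# Per the registry, Llama 3.2 1B supports structured output only via Groq.
groq_only = providers_supporting_structured_output(ModelName.llama_3_2_1b)
assert [p.name for p in groq_only] == [ModelProviderName.groq]

# Reasoning models carry a parser so their thinking output can be stripped:
# DeepSeek R1 served by Ollama uses the r1_thinking parser.
r1_ollama = next(
    p for p in get_model(ModelName.deepseek_r1).providers
    if p.name == ModelProviderName.ollama
)
assert r1_ollama.reasoning_capable and r1_ollama.parser == ModelParserID.r1_thinking

Because capabilities such as supports_data_gen and structured_output_mode vary per provider of the same model, callers should select a provider entry, not just a model, before dispatching a request.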