{"object":"list","data":[{"id":"claude-opus4-7","object":"model","created":1776407400,"owned_by":"Anthropic","description":"Claude Opus 4.7 is Anthropic’s most capable general model, optimized for coding, enterprise workflows, multimodal reasoning, and long-running agentic tasks.","pricing":{"input_token":4.769,"output_token":23.843,"cache_read_cost":0.477,"cache_write_cost":5.961,"currency":"EUR"},"context_size":1000000,"tags":["Instruct","Tools","Image","Code","Reasoning"]},{"id":"minimax-m2.7","object":"model","created":1776091082,"owned_by":"MiniMax AI","description":"MiniMax-M2.7 is a frontier autonomous agent model designed for iterative self-improvement, multi-agent coordination, and enterprise-scale execution.","pricing":{"input_token":0.444,"output_token":1.331,"currency":"EUR"},"context_size":196608,"tags":["Instruct","Code","Tools","Reasoning"]},{"id":"glm-5.1","object":"model","created":1775735882,"owned_by":"Z.ai","description":"GLM-5.1 is a next-generation flagship model for agentic engineering, excelling in long-horizon reasoning and advanced coding tasks.","pricing":{"input_token":1.242,"output_token":3.903,"cache_read_cost":null,"currency":"EUR"},"context_size":202752,"tags":["Instruct","Code","Reasoning","Tools"]},{"id":"qwen3.5-122b-a10b","object":"model","created":1775727482,"owned_by":"Alibaba Cloud","description":"Qwen3.5-122B-A10B is a multimodal Mixture-of-Experts model designed for agentic applications across text, image, and video tasks.","pricing":{"input_token":0.444,"output_token":3.106,"currency":"EUR"},"context_size":262144,"tags":["Instruct","Reasoning","Tools","Code"]},{"id":"qwen3.5-9b","object":"model","created":1775727362,"owned_by":"Alibaba Cloud","description":"Qwen3.5-9B is a dense multimodal model delivering strong reasoning, coding, and visual understanding with efficient performance and long 
context.","pricing":{"input_token":0.133,"output_token":0.177,"currency":"EUR"},"context_size":262144,"tags":["Instruct","Reasoning","Tools","Code"]},{"id":"nemotron-3-super-120b-a12b","object":"model","created":1775723882,"owned_by":"Nvidia","description":"Nemotron-3-Super-120B-A12B is a large hybrid MoE model from NVIDIA optimized for agentic reasoning, conversational tasks, and high-throughput workloads.","pricing":{"input_token":0.266,"output_token":0.799,"currency":"EUR"},"context_size":262144,"tags":["Instruct","Tools","Reasoning"]},{"id":"qwen3-coder-next","object":"model","created":1775140682,"owned_by":"Alibaba Cloud","description":"Qwen3 Coder Next 80B is a high-performance coding model optimized for complex software development, agentic workflows, and multi-language programming.","pricing":{"input_token":0.15,"output_token":0.8,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Reasoning","Tools","Code"]},{"id":"glm-5","object":"model","created":1774522682,"owned_by":"Z.ai","description":"GLM-5 is Zhipu AI’s flagship multimodal model with strong bilingual reasoning, long-context understanding, and advanced agentic and tool-use capabilities.","pricing":{"input_token":0.887,"output_token":2.84,"currency":"EUR"},"context_size":202752,"tags":["Instruct","Code","Reasoning","Tools"]},{"id":"glm-4.6","object":"model","created":1774515482,"owned_by":"Z.ai","description":"GLM-4.6 is an upgraded version of GLM-4.5 with longer context, enhanced coding and reasoning, improved agent capabilities, and refined writing quality.","pricing":{"input_token":0.355,"output_token":1.553,"currency":"EUR"},"context_size":203000,"tags":["Instruct","Code","Reasoning","Tools"]},{"id":"deepseek-chat-v3.1","object":"model","created":1774510682,"owned_by":"Deepseek","description":"DeepSeek-V3.1 is a large hybrid reasoning model supporting both thinking and non-thinking modes, optimized for fast reasoning, tool use, and agentic 
workflows.","pricing":{"input_token":0.177,"output_token":0.71,"currency":"EUR"},"context_size":164000,"tags":["Instruct","Tools","Reasoning"]},{"id":"qwen-2.5-72b-instruct","object":"model","created":1774438682,"owned_by":"Alibaba Cloud","description":"Qwen2.5-72B-Instruct is a large instruction-tuned language model with strong reasoning, coding, long-context, and multilingual capabilities.","pricing":{"input_token":0.062,"output_token":0.231,"currency":"EUR"},"context_size":33000,"tags":["Instruct","Tools","Code"]},{"id":"qwen3.5-397b-a17b","object":"model","created":1773837482,"owned_by":"Alibaba Cloud","description":"Qwen3.5-397B-A17B is a large-scale multimodal model with a hybrid Mixture-of-Experts architecture, delivering state-of-the-art performance across chat, RAG, vision-language, video understanding, and agentic workflows.","pricing":{"input_token":0.6,"output_token":3.6,"currency":"EUR"},"context_size":250000,"tags":["Instruct","Reasoning","Tools","Code"]},{"id":"deepseek-v3.2","object":"model","created":1773833882,"owned_by":"Deepseek","description":"DeepSeek V3.2 is a high-efficiency model designed for strong reasoning and agentic tool-use performance, optimized for long-context and scalable post-training.","pricing":{"input_token":0.266,"output_token":0.444,"currency":"EUR"},"context_size":163840,"tags":["Instruct","Tools","Reasoning"]},{"id":"mistral-small-2603","object":"model","created":1773741193,"owned_by":"Mistral AI","description":"Mistral Small 4 is a versatile Mixture-of-Experts model that unifies reasoning, multimodal understanding, and agentic coding in a single architecture, optimized for efficiency and long-context interactions.","pricing":{"input_token":0.128,"output_token":0.51,"cache_read_cost":0.013,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image","Reasoning"]},{"id":"minimax-m2.5","object":"model","created":1773675482,"owned_by":"MiniMax AI","description":"MiniMax-M2.5 is a state-of-the-art language 
model optimized for real-world productivity, autonomous agents, coding, and professional office workflows.","pricing":{"input_token":0.266,"output_token":0.976,"cache_read_cost":0.027,"currency":"EUR"},"context_size":196608,"tags":["Instruct","Code","Tools","Reasoning"]},{"id":"claude-4-6-sonnet","object":"model","created":1771490282,"owned_by":"Anthropic","description":"Claude Sonnet 4.6 delivers frontier-level intelligence at scale, optimized for coding, agents, and enterprise-grade workflows.","pricing":{"input_token":2.869,"output_token":14.309,"cache_read_cost":0.287,"cache_write_cost":3.59,"currency":"EUR"},"context_size":1000000,"tags":["Instruct","Tools","Image","Code","Reasoning"]},{"id":"glm-4.7-flash","object":"model","created":1771487882,"owned_by":"Z.ai","description":"GLM-4.7-Flash is a fast, resource-efficient variant of GLM-4.7 optimized for low-latency text and code generation.","pricing":{"input_token":0.072,"output_token":0.429,"currency":"EUR"},"context_size":203000,"tags":["Instruct","Code","Tools"]},{"id":"kimi-k2.5","object":"model","created":1771231082,"owned_by":"Moonshot AI","description":"Kimi K2.5 is an open-source native multimodal agentic model delivering state-of-the-art performance in agents, coding, visual understanding, and general reasoning tasks.","pricing":{"input_token":0.444,"output_token":2.13,"cache_read_cost":0.106,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Reasoning"]},{"id":"claude-opus4-6","object":"model","created":1770377400,"owned_by":"Anthropic","description":"Claude Opus 4.6 is the next generation of Anthropic’s most intelligent model, setting the global standard for coding, enterprise agents, and professional-grade 
workflows.","pricing":{"input_token":4.769,"output_token":23.843,"cache_read_cost":0.477,"cache_write_cost":5.965,"currency":"EUR"},"context_size":1000000,"tags":["Instruct","Tools","Image","Code","Reasoning"]},{"id":"minimax-m2","object":"model","created":1770374282,"owned_by":"MiniMax AI","description":"MiniMax-M2 is a large instruction-tuned MoE model optimized for multilingual dialogue, reasoning, and tool-calling, designed as a general-purpose assistant for consumer and enterprise use.","pricing":{"input_token":0.222,"output_token":0.887,"currency":"EUR"},"context_size":196608,"tags":["Instruct","Code","Tools","Reasoning"]},{"id":"glm-4.7","object":"model","created":1770206282,"owned_by":"Z.ai","description":"GLM-4.7 is a flagship GLM model featuring strong multilingual reasoning, long-context understanding, and robust tool use, with major gains in agentic coding, UI generation, and complex reasoning tasks.","pricing":{"input_token":0.532,"output_token":1.952,"currency":"EUR"},"context_size":202752,"tags":["Instruct","Code","Reasoning","Tools"]},{"id":"minimax-m2.1","object":"model","created":1770205082,"owned_by":"MiniMax AI","description":"MiniMax-M2.1 is an open-source agentic coding model designed for polyglot development, precision refactoring, and reliable execution of long, multi-step coding and office workflows.","pricing":{"input_token":0.322,"output_token":1.288,"currency":"EUR"},"context_size":196000,"tags":["Instruct","Code","Tools","Reasoning"]},{"id":"qwen3guard-gen-8b","object":"model","created":1770204122,"owned_by":"Alibaba Cloud","description":"Qwen3Guard-Gen-8B is a large-scale multilingual safety moderation model designed for high-accuracy prompt and response classification.","pricing":{"input_token":0,"output_token":0,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Safety-guard"]},{"id":"qwen3guard-gen-0.6b","object":"model","created":1770204002,"owned_by":"Alibaba Cloud","description":"Qwen3Guard-Gen-0.6B is a lightweight 
multilingual safety moderation model that classifies prompts and responses into safe, controversial, or unsafe categories.","pricing":{"input_token":0,"output_token":0,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Safety-guard"]},{"id":"voxtral-small-2507","object":"model","created":1770025993,"owned_by":"Mistral AI","description":"Voxtral Small is a multimodal model with audio input, combining advanced speech capabilities with strong text performance for transcription, translation, and audio understanding.","pricing":{"input_token":0.1,"output_token":0.3,"audio_cost":0.000067,"cache_read_cost":0.01,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Audio"]},{"id":"qwen3-vl-235b-a22b","object":"model","created":1768304282,"owned_by":"Alibaba Cloud","description":"Qwen3 VL 235B A22B is a 235B-parameter MoE vision-language flagship model (≈22B active) designed for frontier-level multimodal understanding across text, images, documents, and long videos.","pricing":{"input_token":0.186,"output_token":1.686,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Reasoning","Tools","Image"]},{"id":"mistral-small-creative","object":"model","created":1768297993,"owned_by":"Mistral AI","description":"Mistral Small Creative is a fine-tuned small model optimized for creative writing, roleplay, and chat, trained on curated data.","pricing":{"input_token":0.1,"output_token":0.3,"cache_read_cost":0.01,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Tools"]},{"id":"nvidia-nemotron-3-nano-30b-a3b","object":"model","created":1768217882,"owned_by":"Nvidia","description":"Nemotron-Nano-3-30B-A3B is a compact Mixture-of-Experts model optimized for efficient reasoning, chat, and coding, with strong multilingual support and long-context RAG and agent 
workflows.","pricing":{"input_token":0.054,"output_token":0.215,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning"]},{"id":"claude-opus4-5","object":"model","created":1767625200,"owned_by":"Anthropic","description":"Claude Opus 4.5 is Anthropic’s next-generation, most intelligent model, delivering industry-leading performance across coding, agents, computer use, and complex enterprise workflows.","pricing":{"input_token":4.769,"output_token":23.849,"cache_read_cost":0.477,"cache_write_cost":5.965,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Tools","Image","Code","Reasoning"]},{"id":"qwen3-next-80b-a3b-thinking","object":"model","created":1765370282,"owned_by":"Alibaba Cloud","description":"Qwen3-Next-80B-A3B-Thinking is an 80B-parameter thinking-oriented model combining Hybrid Attention, high-sparsity MoE, and multi-token prediction to deliver exceptional performance on complex reasoning tasks, surpassing both open-source and proprietary alternatives.","pricing":{"input_token":0.134,"output_token":1.073,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Code"]},{"id":"holo2-30b-a3b","object":"model","created":1765367099,"owned_by":"H company","description":"Holo2 30B A3B is a text-and-vision model optimized for analyzing graphical user interfaces, including web, desktop, and mobile, and enabling agents to interpret interfaces, reason over content, and take actions.","pricing":{"input_token":0.3,"output_token":0.7,"currency":"EUR"},"context_size":22000,"tags":["Instruct","Tools","Reasoning","Image"]},{"id":"devstral-2512","object":"model","created":1765356793,"owned_by":"Mistral AI","description":"Devstral 2 2512 is an enterprise-grade text model designed for advanced coding agents and software engineering 
workflows.","pricing":{"input_token":0.4,"output_token":2,"cache_read_cost":0.04,"currency":"EUR"},"context_size":262000,"tags":["Instruct","Code","Tools"]},{"id":"nova-2-lite","object":"model","created":1764848282,"owned_by":"Amazon","description":"Nova 2 Lite is an advanced multimodal reasoning model that combines efficiency and performance, delivering reliable AI for agentic workflows and enterprise applications.","pricing":{"input_token":0.335,"output_token":2.822,"currency":"EUR"},"context_size":1000000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"gpt-oss-safeguard-120b","object":"model","created":1764848282,"owned_by":"OpenAI","description":"GPT OSS Safeguard 120B is a 120B-parameter open-weight safety reasoning model designed for high-accuracy enterprise moderation and policy enforcement.","pricing":{"input_token":0.161,"output_token":0.626,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Safety-guard"]},{"id":"mistral-large-2512","object":"model","created":1764762602,"owned_by":"Mistral AI","description":"Mistral Large 3 is a state-of-the-art open-weight multimodal Mixture-of-Experts model with 41B active parameters, delivering frontier-level performance across text and vision tasks.","pricing":{"input_token":0.5,"output_token":1.5,"cache_read_cost":0.05,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image"]},{"id":"ministral-8b-2512","object":"model","created":1764762542,"owned_by":"Mistral AI","description":"Ministral 3 8B is a balanced, efficient multimodal model offering strong text and vision capabilities, optimized for edge and local deployment.","pricing":{"input_token":0.15,"output_token":0.15,"cache_read_cost":0.015,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image"]},{"id":"ministral-3b-2512","object":"model","created":1764762482,"owned_by":"Mistral AI","description":"Ministral 3 3B is a compact, efficient multimodal model with strong language, 
vision capabilities, and ideal for custom fine-tuning.","pricing":{"input_token":0.1,"output_token":0.1,"cache_read_cost":0.01,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image"]},{"id":"ministral-14b-2512","object":"model","created":1764761882,"owned_by":"Mistral AI","description":"Ministral 3 14B is a frontier-level 14B multimodal model optimized for local deployment, delivering state-of-the-art text and vision reasoning with a 256K context window and strong agentic capabilities.","pricing":{"input_token":0.2,"output_token":0.2,"cache_read_cost":0.02,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image"]},{"id":"intellect-3","object":"model","created":1764243482,"owned_by":"PrimeIntellect","description":"INTELLECT-3 is a 106B Mixture-of-Experts reasoning model built on GLM-4.5-Air, achieving state-of-the-art performance in math, coding, science, and multi-step reasoning through supervised fine-tuning and large-scale reinforcement learning.","pricing":{"input_token":0.179,"output_token":0.984,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","Code"]},{"id":"gpt-5.1","object":"model","created":1763168400,"owned_by":"OpenAI","description":"GPT-5.1 is an enhanced version of GPT-5, offering clearer responses, adaptive reasoning, improved consistency, expanded context capacity, and better customization for complex and interactive tasks.","pricing":{"input_token":1.234,"output_token":9.838,"cache_read_cost":0.14,"currency":"EUR"},"context_size":400000,"tags":["Instruct","Reasoning","Tools","Image"]},{"id":"nemotron-nano-v2-12b","object":"model","created":1761910682,"owned_by":"Nvidia","description":"NVIDIA Nemotron Nano v2 12B is a 12-billion-parameter multimodal reasoning model designed for advanced video understanding, document intelligence, and visual reasoning, built with a hybrid Transformer-Mamba architecture for high efficiency and low 
latency.","pricing":{"input_token":0.215,"output_token":0.635,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","Image"]},{"id":"claude-haiku-4-5","object":"model","created":1760547600,"owned_by":"Anthropic","description":"Claude Haiku 4.5 delivers near-frontier performance for coding and agent tasks, optimized for speed and cost to support high-volume and free-tier applications.","pricing":{"input_token":0.894,"output_token":4.472,"cache_read_cost":0.089,"cache_write_cost":1.065,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Tools","Image","Code","Reasoning"]},{"id":"claude-4-5-sonnet","object":"model","created":1759145882,"owned_by":"Anthropic","description":"Claude Sonnet 4.5 is Anthropic’s most capable model for real-world agents, excelling in coding, computer use, and long-horizon tasks.","pricing":{"input_token":2.683,"output_token":13.416,"cache_read_cost":0.293,"cache_write_cost":3.661,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Tools","Image","Code","Reasoning"]},{"id":"magistral-medium-2509","object":"model","created":1758185593,"owned_by":"Mistral AI","description":"Magistral Medium 2509 is a frontier-class reasoning model with vision support, delivering transparent, multilingual reasoning and a 15% performance boost over its predecessor.","pricing":{"input_token":2,"output_token":5,"cache_read_cost":0.2,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Image"]},{"id":"magistral-small-2509","object":"model","created":1758185593,"owned_by":"Mistral AI","description":"Magistral Small 2509 is a 24B-parameter open-weight reasoning model with vision support, offering a 15% performance boost and improved multimodal reasoning over its 
predecessor.","pricing":{"input_token":0.5,"output_token":1.5,"cache_read_cost":0.05,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Image"]},{"id":"hermes-4-70b","object":"model","created":1755085082,"owned_by":"NousResearch","description":"Hermes 4 70B is a hybrid-mode reasoning model built on Llama 3.1, designed for advanced logic, coding, math, and structured outputs with improved steerability.","pricing":{"input_token":0.116,"output_token":0.358,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools"]},{"id":"gpt-5","object":"model","created":1754528400,"owned_by":"OpenAI","description":"GPT-5 is OpenAI’s most advanced model, built for deep reasoning, high-quality code generation, and complex multi-step tasks.","pricing":{"input_token":1.234,"output_token":9.838,"cache_read_cost":0.14,"currency":"EUR"},"context_size":400000,"tags":["Instruct","Reasoning","Tools","Image"]},{"id":"gpt-oss-120b","object":"model","created":1754393882,"owned_by":"OpenAI","description":"GPT Oss 120b is a 120B parameter Mixture-of-Experts model by OpenAI, built for advanced reasoning, agentic tasks, and flexible developer use under Apache 2.0.","pricing":{"input_token":0.035,"output_token":0.177,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Reasoning","Tools"]},{"id":"qwen3-30b-a3b-instruct-2507","object":"model","created":1753702682,"owned_by":"Alibaba Cloud","description":"Qwen3-30B-A3B-Instruct-2507 is an advanced Mixture-of-Experts model optimized for reasoning, coding, and multilingual instruction following.","pricing":{"input_token":0.089,"output_token":0.268,"currency":"EUR"},"context_size":262000,"tags":["Instruct","Tools","Reasoning"]},{"id":"gpt-oss-20b","object":"model","created":1752838682,"owned_by":"OpenAI","description":"GPT Oss 20B is an open-weight Mixture-of-Experts model from OpenAI, optimized for low-latency inference and local 
deployment.","pricing":{"input_token":0.027,"output_token":0.124,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Reasoning","Tools"]},{"id":"mistral-7b-instruct-v0.3","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Mistral-7B-Instruct-v0.3 model is a fine-tuned version of the Mistral 7B base model, optimized for instruction-following tasks. Released in 2023, it is intended for demonstration purposes and does not include built-in guardrails or moderation features.","pricing":{"input_token":0.1,"output_token":0.1,"currency":"EUR"},"context_size":127000,"tags":["Instruct","Tools"]},{"id":"mistral-7b-instruct-v0.2","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Mistral 7B Instruct is a compact, 7B parameter model optimized for fast and efficient text and code generation with a 32K token context window.","pricing":{"input_token":0.143,"output_token":0.197,"currency":"EUR"},"context_size":32000,"tags":["Instruct"]},{"id":"mistral-large-2402","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Mistral Large (24.02) is Mistral AI’s most advanced language model, built for complex multilingual reasoning, code generation, and deep text understanding.","pricing":{"input_token":3.846,"output_token":11.627,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Tools","Reasoning"]},{"id":"pixtral-large-2502","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Pixtral Large (25.02) is a 124B open-weight multimodal model built on Mistral Large 2, offering advanced image understanding and strong performance across text and code tasks.","pricing":{"input_token":1.789,"output_token":5.366,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Image","Tools","Reasoning"]},{"id":"mistral-small-3.2-24b-instruct-2506","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Mistral-Small-3.2-24B-Instruct-2506 is a 24B parameter 
instruction-tuned model with enhanced long-context support (128k) and state-of-the-art vision understanding.","pricing":{"input_token":0.09,"output_token":0.28,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Image"]},{"id":"qwen3-32b","object":"model","created":1745840282,"owned_by":"Alibaba Cloud","description":"Qwen3 delivers cutting-edge advancements in reasoning, agent capabilities, and multilingual support, with seamless mode switching for optimized performance across tasks.","pricing":{"input_token":0.089,"output_token":0.268,"currency":"EUR"},"context_size":40000,"tags":["Instruct","Reasoning","Tools"]},{"id":"qwen3-235b-a22b-instruct-2507","object":"model","created":1745840282,"owned_by":"Alibaba Cloud","description":"Qwen3-235B-A22B-Instruct-2507 is a large non-thinking mode model with major improvements in reasoning, instruction following, knowledge coverage, and long-context understanding up to 256K tokens.","pricing":{"input_token":0.062,"output_token":0.408,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Tools","Reasoning"]},{"id":"qwen3-coder-30b-a3b-instruct","object":"model","created":1745840282,"owned_by":"Alibaba Cloud","description":"Qwen3 Coder 30b a3b Instruct is a 30.5B parameter open-source Mixture-of-Experts model specialized for coding, tool use, and complex multi-step programming workflows.","pricing":{"input_token":0.053,"output_token":0.222,"currency":"EUR"},"context_size":262000,"tags":["Instruct","Code","Tools","Reasoning"]},{"id":"gpt-4.1","object":"model","created":1744630682,"owned_by":"OpenAI","description":"GPT-4.1 is the latest evolution of the GPT-4o model family, offering superior coding, instruction following, and support for up to 1 million input tokens with enhanced multimodal 
capabilities.","pricing":{"input_token":1.968,"output_token":7.872,"cache_read_cost":0.49,"currency":"EUR"},"context_size":1047576,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"gpt-4.1-mini","object":"model","created":1744630682,"owned_by":"OpenAI","description":"gpt-4.1-mini is a cost-effective, low-latency model in the GPT-4.1 series, optimized for coding, instruction following, and multimodal tasks with large-context support.","pricing":{"input_token":0.39,"output_token":1.53,"cache_read_cost":0.12,"currency":"EUR"},"context_size":1047576,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"gpt-4.1-nano","object":"model","created":1744630682,"owned_by":"OpenAI","description":"gpt-4.1-nano is an ultra-efficient model in the GPT-4.1 series, offering powerful coding and instruction capabilities with support for text and vision at the lowest cost and latency.","pricing":{"input_token":0.1,"output_token":0.39,"cache_read_cost":0.05,"currency":"EUR"},"context_size":1047576,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"nova-micro-v1","object":"model","created":1744630682,"owned_by":"Amazon","description":"Nova Micro is a multilingual text-to-text foundation model with strong reasoning capabilities and broad language coverage across 200+ languages.","pricing":{"input_token":0.036,"output_token":0.143,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"nova-lite-v1","object":"model","created":1744630682,"owned_by":"Amazon","description":"Nova Lite is a fast, low-cost multimodal foundation model capable of reasoning over text, images, and video in 200+ languages.","pricing":{"input_token":0.062,"output_token":0.247,"currency":"EUR"},"context_size":300000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"nova-pro-v1","object":"model","created":1744630682,"owned_by":"Amazon","description":"Nova Pro is a powerful, multilingual multimodal foundation model that excels at reasoning over text, images, and 
video across 200+ languages.","pricing":{"input_token":0.824,"output_token":3.295,"currency":"EUR"},"context_size":300000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"claude-sonnet-4","object":"model","created":1744630682,"owned_by":"Anthropic","description":"Claude Sonnet 4 is a versatile, mid-sized model optimized for coding, real-time AI assistants, and large-scale content tasks with strong cost-performance balance.","pricing":{"input_token":2.601,"output_token":13.01,"cache_read_cost":0.26,"cache_write_cost":3.253,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Tools","Image","Code","Reasoning"]},{"id":"llama-3.1-nemotron-ultra-253b-v1","object":"model","created":1744025882,"owned_by":"Nvidia","description":"A reasoning-optimized LLM based on Llama 3.1, Nemotron Ultra 253B delivers strong performance in tasks like RAG and tool use, with high efficiency and reduced latency.","pricing":{"input_token":0.537,"output_token":1.61,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning"]},{"id":"llama-4-maverick","object":"model","created":1742989082,"owned_by":"Meta","description":"Llama 4 Maverick is a high-performance, natively multimodal AI model with 17B active parameters (400B total) designed for efficient image and text understanding, reasoning, and captioning.","pricing":{"input_token":0.124,"output_token":0.603,"currency":"EUR"},"context_size":1050000,"tags":["Instruct","Tools"]},{"id":"deepseek-v3-0324","object":"model","created":1742816282,"owned_by":"Deepseek","description":"DeepSeek-V3-0324 delivers major upgrades in reasoning, coding, and Chinese language tasks, with enhanced function calling, translation, and interactive capabilities.","pricing":{"input_token":0.266,"output_token":0.887,"currency":"EUR"},"context_size":163840,"tags":["Instruct","Tools","Reasoning"]},{"id":"mistral-small-2503","object":"model","created":1742482730,"owned_by":"Mistral AI","description":"Combines advanced text and vision 
capabilities with 24 billion parameters, supporting multilingual tasks and long contexts up to 131k tokens, making it versatile for various applications without sacrificing performance.","pricing":{"input_token":0.1,"output_token":0.3,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Image","Tools"]},{"id":"mistral-small-2506","object":"model","created":1742482730,"owned_by":"Mistral AI","description":"Mistral Small 2506 is a 24B parameter efficient reasoning LLM, optimized for coding, math, multilingual, and multimodal tasks.","pricing":{"input_token":0.1,"output_token":0.3,"cache_read_cost":0.01,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Tools","Image"]},{"id":"gemini-2.0-flash-001","object":"model","created":1742125082,"owned_by":"Google","description":"Gemini 2.0 Flash is a next-generation lightweight model offering fast multimodal generation, built-in tool use, and a 1M token context window for real-time, high-quality AI experiences.","pricing":{"input_token":0.134,"output_token":0.537,"audio_cost":0.00002235,"currency":"EUR"},"context_size":1048576,"tags":["Instruct","Tools","Image","Audio"]},{"id":"gemini-2.0-flash-lite-001","object":"model","created":1742125082,"owned_by":"Google","description":"Gemini 2.0 Flash-Lite is a cost-efficient, high-throughput model with a 1M token context window and multimodal input support, optimized for mixed workload performance.","pricing":{"input_token":0.067,"output_token":0.268,"audio_cost":0.00000168,"currency":"EUR"},"context_size":1048576,"tags":["Instruct","Tools","Image","Audio"]},{"id":"gemini-2.5-flash","object":"model","created":1742125082,"owned_by":"Google","description":"Gemini 2.5 models are advanced thinking models that reason before responding, delivering improved accuracy and 
performance.","pricing":{"input_token":0.268,"output_token":2.236,"audio_cost":0.00002236,"cache_read_cost":0.026,"cache_write_cost":0.087,"currency":"EUR"},"context_size":1048576,"tags":["Instruct","Tools","Image","Reasoning","Audio"]},{"id":"gemini-2.5-pro","object":"model","created":1742125082,"owned_by":"Google","description":"Gemini 2.5 Pro is the most advanced reasoning model in the Gemini series, excelling at complex problem-solving across multiple data types.","pricing":{"input_token":1.342,"output_token":8.944,"audio_cost":0.000034,"cache_read_cost":0.217,"cache_write_cost":0.39,"currency":"EUR"},"context_size":1048576,"tags":["Instruct","Tools","Image","Reasoning","Audio"]},{"id":"gemma-3-27b-it","object":"model","created":1741779482,"owned_by":"Google","description":"Gemma 3 is a family of lightweight, multimodal models from Google, supporting text and image inputs, multilingual capabilities, and a 131K context window.","pricing":{"input_token":0.089,"output_token":0.268,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Image","Tools","Reasoning"]},{"id":"qwen2.5-vl-72b-instruct","object":"model","created":1737977882,"owned_by":"Alibaba Cloud","description":"Qwen2.5-VL is a powerful vision-language model with advanced capabilities in visual understanding, long video reasoning, and structured output generation.","pricing":{"input_token":0.224,"output_token":0.671,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Image","Reasoning"]},{"id":"deepseek-r1-0528","object":"model","created":1737373082,"owned_by":"Deepseek","description":"DeepSeek R1 0528 is an upgraded reasoning model with enhanced depth, reduced hallucinations, and stronger performance in math, coding, and logic.","pricing":{"input_token":0.585,"output_token":2.307,"currency":"EUR"},"context_size":164000,"tags":["Instruct","Tools","Reasoning"]},{"id":"codestral-2508","object":"model","created":1736768282,"owned_by":"Mistral AI","description":"Codestral-2508 is a cutting-edge 
coding model optimized for low-latency, high-frequency tasks like FIM, code correction, and test generation.","pricing":{"input_token":0.3,"output_token":0.9,"cache_read_cost":0.03,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools"]},{"id":"llama-3.3-70b-instruct","object":"model","created":1733830682,"owned_by":"Meta","description":"With 70 billion parameters, this cutting-edge multilingual model excels in generating high-quality, safe, and helpful text across eight languages, making it stand out from other chat models.","pricing":{"input_token":0.089,"output_token":0.275,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Tools","Reasoning"]},{"id":"gpt-4o","object":"model","created":1732102682,"owned_by":"OpenAI","description":"GPT-4o is a transformative multimodal AI model that seamlessly fuses text, images, and audio to deliver exceptional speed, efficiency, and performance—at twice the speed and half the cost of GPT-4 Turbo.","pricing":{"input_token":2.387,"output_token":9.547,"cache_read_cost":1.194,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"gpt-5-mini","object":"model","created":1732102682,"owned_by":"OpenAI","description":"GPT-5 mini is a compact, low-cost, and fast version of GPT-5, ideal for real-time agents and lightweight reasoning tasks.","pricing":{"input_token":0.25,"output_token":1.968,"cache_read_cost":0.05,"currency":"EUR"},"context_size":400000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"gpt-5-nano","object":"model","created":1732102682,"owned_by":"OpenAI","description":"GPT-5 nano is the fastest, cheapest GPT-5 model, optimized for summarization, classification, and ultra-low latency tasks.","pricing":{"input_token":0.054,"output_token":0.394,"cache_read_cost":0.017,"currency":"EUR"},"context_size":400000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"mistral-large-2411","object":"model","created":1731941042,"owned_by":"Mistral 
AI","description":"Mistral-Large-Instruct-2411 is a 123B parameter language model designed for instruction following, reasoning, and coding, with robust support for long-context tasks, native function calling, and multilingual understanding. It's built to power intelligent agents and RAG workflows at scale.","pricing":{"input_token":1.8,"output_token":5.4,"cache_read_cost":0.18,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Tools","Reasoning"]},{"id":"pixtral-large-2411","object":"model","created":1731940922,"owned_by":"Mistral AI","description":"Pixtral Large Instruct 2411 is a frontier-level 124B multimodal model combining powerful image understanding with state-of-the-art language capabilities. Built on Mistral Large 2, it delivers top-tier performance on tasks involving documents, charts, natural images, and text.","pricing":{"input_token":1.8,"output_token":5.4,"cache_read_cost":0.18,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Image","Reasoning"]},{"id":"pixtral-12b-2409","object":"model","created":1731176378,"owned_by":"Mistral AI","description":"Pixtral 2409 12B is a state-of-the-art multimodal model with 12B parameters and a 400M vision encoder, natively trained on interleaved text and image data. 
It excels in tasks spanning vision-language reasoning, instruction following, and pure text understanding, making it highly effective for real-world multimodal applications.","pricing":{"input_token":0.2,"output_token":0.2,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Image","Reasoning"]},{"id":"hermes-4-405b","object":"model","created":1723549082,"owned_by":"NousResearch","description":"Hermes 4 405B is a frontier hybrid-mode reasoning model built on Llama 3.1, optimized for advanced logic, math, coding, and structured output generation.","pricing":{"input_token":0.894,"output_token":2.683,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools"]},{"id":"mistral-nemo-instruct-2407","object":"model","created":1723020793,"owned_by":"Mistral AI","description":"A 12B parameter, instruct-tuned language model by Mistral AI and NVIDIA, designed for advanced instruction following, multi-turn conversations, and generating text and code across multiple languages.","pricing":{"input_token":0.13,"output_token":0.13,"cache_read_cost":0.013,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Tools"]},{"id":"devstral-medium-2507","object":"model","created":1723020793,"owned_by":"Mistral AI","description":"Devstral Medium 2507 is a high-performance code generation and agentic reasoning model, optimized for advanced coding tasks and tool use.","pricing":{"input_token":0.4,"output_token":2,"cache_read_cost":0.04,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Code","Tools"]},{"id":"devstral-small-2507","object":"model","created":1723020793,"owned_by":"Mistral AI","description":"Devstral Small 2507 is an improved code generation and reasoning model, optimized for agentic coding tasks and versatile prompt 
handling.","pricing":{"input_token":0.1,"output_token":0.3,"cache_read_cost":0.01,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Code","Tools"]},{"id":"mistral-medium-2508","object":"model","created":1723020793,"owned_by":"Mistral AI","description":"Mistral Medium 2508 is a frontier-class multimodal LLM with a 128,000 token context window, optimized for reasoning, coding, and multimodal tasks.","pricing":{"input_token":0.4,"output_token":2,"cache_read_cost":0.04,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Image"]},{"id":"llama-3.1-405b-instruct","object":"model","created":1721734682,"owned_by":"Meta","description":"Meta Llama 3.1 is a multilingual LLM series optimized for dialogue, combining SFT and RLHF for improved helpfulness, safety, and performance.","pricing":{"input_token":1.75,"output_token":1.75,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning"]},{"id":"gpt-4o-mini","object":"model","created":1721302682,"owned_by":"OpenAI","description":"GPT-4o mini is a lightweight, cost-efficient multimodal model optimized for real-time tasks and large-context applications, with support for text and vision.","pricing":{"input_token":0.143,"output_token":0.573,"cache_read_cost":0.073,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"gemma-2-2b-it","object":"model","created":1721129882,"owned_by":"Google","description":"Gemma is a family of lightweight, open-source language models from Google, optimized for text generation tasks.","pricing":{"input_token":0.018,"output_token":0.054,"currency":"EUR"},"context_size":8192,"tags":["Instruct","Reasoning"]},{"id":"llama-3.1-8b-instruct","object":"model","created":1712652299,"owned_by":"Meta","description":"Optimized for dialogue, this LLM by Meta outperforms other open-source chat models in benchmarks while prioritizing helpfulness and 
safety.","pricing":{"input_token":0.018,"output_token":0.054,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning"]},{"id":"codellama-13b-instruct-hf","object":"model","created":1710362282,"owned_by":"Meta","description":"Code Llama is a family of pretrained and fine-tuned code generation models, ranging from 7B to 34B parameters. This version is the 13B instruction-tuned model for code synthesis and understanding.","pricing":{"input_token":0.45,"output_token":0.45,"currency":"EUR"},"context_size":16000,"tags":["Instruct","Code"]},{"id":"mixtral-8x7B-instruct-v0.1","object":"model","created":1702294682,"owned_by":"Mistral AI","description":"Mixtral-8x7B is a sparse mixture-of-experts model with open weights, delivering fast, high-performance inference that outperforms Llama 2 70B and rivals GPT-3.5.","pricing":{"input_token":0.438,"output_token":0.68,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Reasoning"]}]}