{"object":"list","data":[{"id":"glm-5.1","object":"model","created":1775735882,"owned_by":"Z.ai","description":"GLM-5.1 is a next-generation flagship model for agentic engineering, excelling in long-horizon reasoning and advanced coding tasks.","pricing":{"input_token":1.304,"output_token":4.099,"currency":"EUR"},"context_size":202752,"tags":["Instruct","Code","Reasoning","Tools","OpenClaw"]},{"id":"qwen3.5-122b-a10b","object":"model","created":1775727482,"owned_by":"Alibaba Cloud","description":"Qwen3.5-122B-A10B is a multimodal Mixture-of-Experts model designed for agentic applications across text, image, and video tasks.","pricing":{"input_token":0.466,"output_token":3.261,"currency":"EUR"},"context_size":262144,"tags":["Instruct","Reasoning","Tools","Code","OpenClaw"]},{"id":"qwen3.5-9b","object":"model","created":1775727362,"owned_by":"Alibaba Cloud","description":"Qwen3.5-9B is a dense multimodal model delivering strong reasoning, coding, and visual understanding with efficient performance and long context.","pricing":{"input_token":0.14,"output_token":0.186,"currency":"EUR"},"context_size":262144,"tags":["Instruct","Reasoning","Tools","Code","OpenClaw"]},{"id":"nemotron-3-super-120b-a12b","object":"model","created":1775723882,"owned_by":"Nvidia","description":"Nemotron-3-Super-120B-A12B is a large hybrid MoE model from NVIDIA optimized for agentic reasoning, conversational tasks, and high-throughput workloads.","pricing":{"input_token":0.28,"output_token":0.839,"currency":"EUR"},"context_size":262144,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"qwen3-coder-next","object":"model","created":1775140682,"owned_by":"Alibaba Cloud","description":"Qwen3 Coder Next 80B is a high-performance coding model optimized for complex software development, agentic workflows, and multi-language 
programming.","pricing":{"input_token":0.158,"output_token":0.84,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Reasoning","Tools","Code","OpenClaw"]},{"id":"glm-5","object":"model","created":1774522682,"owned_by":"Z.ai","description":"GLM-5 is Zhipu AI’s flagship multimodal model with strong bilingual reasoning, long-context understanding, and advanced agentic and tool-use capabilities.","pricing":{"input_token":0.932,"output_token":2.982,"currency":"EUR"},"context_size":202752,"tags":["Instruct","Code","Reasoning","Tools","OpenClaw"]},{"id":"glm-4.6","object":"model","created":1774515482,"owned_by":"Z.ai","description":"GLM-4.6 is an upgraded version of GLM-4.5 with longer context, enhanced coding and reasoning, improved agent capabilities, and refined writing quality.","pricing":{"input_token":0.373,"output_token":1.631,"currency":"EUR"},"context_size":203000,"tags":["Instruct","Code","Reasoning","Tools","OpenClaw"]},{"id":"deepseek-chat-v3.1","object":"model","created":1774510682,"owned_by":"Deepseek","description":"DeepSeek-V3.1 is a large hybrid reasoning model supporting both thinking and non-thinking modes, optimized for fast reasoning, tool use, and agentic workflows.","pricing":{"input_token":0.186,"output_token":0.745,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"qwen-2.5-72b-instruct","object":"model","created":1774438682,"owned_by":"Alibaba Cloud","description":"Qwen2.5-72B-Instruct is a large instruction-tuned language model with strong reasoning, coding, long-context, and multilingual capabilities.","pricing":{"input_token":0.065,"output_token":0.242,"currency":"EUR"},"context_size":33000,"tags":["Instruct","Tools","Code","OpenClaw"]},{"id":"qwen3.5-397b-a17b","object":"model","created":1773837482,"owned_by":"Alibaba Cloud","description":"Qwen3.5-397B-A17B is a large-scale multimodal model with a hybrid Mixture-of-Experts architecture, delivering state-of-the-art performance across 
chat, RAG, vision-language, video understanding, and agentic workflows.","pricing":{"input_token":0.63,"output_token":3.78,"currency":"EUR"},"context_size":250000,"tags":["Instruct","Reasoning","Tools","Code","OpenClaw"]},{"id":"deepseek-v3.2","object":"model","created":1773833882,"owned_by":"Deepseek","description":"DeepSeek V3.2 is a high-efficiency model designed for strong reasoning and agentic tool-use performance, optimized for long-context and scalable post-training.","pricing":{"input_token":0.28,"output_token":0.466,"currency":"EUR"},"context_size":163840,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"mistral-small-2603","object":"model","created":1773741193,"owned_by":"Mistral AI","description":"Mistral Small 4 is a versatile Mixture-of-Experts model that unifies reasoning, multimodal understanding, and agentic coding in a single architecture, optimized for efficiency and long-context interactions.","pricing":{"input_token":0.134,"output_token":0.536,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image","Reasoning"]},{"id":"minimax-m2.5","object":"model","created":1773675482,"owned_by":"MiniMax AI","description":"MiniMax-M2.5 is a state-of-the-art language model optimized for real-world productivity, autonomous agents, coding, and professional office workflows.","pricing":{"input_token":0.28,"output_token":1.025,"currency":"EUR"},"context_size":65536,"tags":["Instruct","Code","Tools","Reasoning","OpenClaw"]},{"id":"claude-4-6-sonnet","object":"model","created":1771490282,"owned_by":"Anthropic","description":"Claude Sonnet 4.6 delivers frontier-level intelligence at scale, optimized for coding, agents, and enterprise-grade workflows.","pricing":{"input_token":3.099,"output_token":15.495,"currency":"EUR"},"context_size":1000000,"tags":["Instruct","Tools","Image","Code","Reasoning","OpenClaw"]},{"id":"glm-4.7-flash","object":"model","created":1771487882,"owned_by":"Z.ai","description":"GLM-4.7-Flash is a fast, 
resource-efficient variant of GLM-4.7 optimized for low-latency text and code generation.","pricing":{"input_token":0.075,"output_token":0.451,"currency":"EUR"},"context_size":203000,"tags":["Instruct","Code","Tools","OpenClaw"]},{"id":"kimi-k2.5","object":"model","created":1771231082,"owned_by":"Moonshot AI","description":"Kimi K2.5 is an open-source native multimodal agentic model delivering state-of-the-art performance in agents, coding, visual understanding, and general reasoning tasks.","pricing":{"input_token":0.466,"output_token":2.236,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Reasoning","OpenClaw"]},{"id":"claude-opus4-6","object":"model","created":1770377400,"owned_by":"Anthropic","description":"Claude Opus 4.6 is the next generation of Anthropic’s most intelligent model, setting the global standard for coding, enterprise agents, and professional-grade workflows.","pricing":{"input_token":5.165,"output_token":25.826,"currency":"EUR"},"context_size":1000000,"tags":["Instruct","Tools","Image","Code","Reasoning","OpenClaw"]},{"id":"minimax-m2","object":"model","created":1770374282,"owned_by":"MiniMax AI","description":"MiniMax-M2 is a large instruction-tuned MoE model optimized for multilingual dialogue, reasoning, and tool-calling, designed as a general-purpose assistant for consumer and enterprise use.","pricing":{"input_token":0.233,"output_token":0.932,"currency":"EUR"},"context_size":196608,"tags":["Instruct","Code","Tools","Reasoning","OpenClaw"]},{"id":"glm-4.7","object":"model","created":1770206282,"owned_by":"Z.ai","description":"GLM-4.7 is a flagship GLM model featuring strong multilingual reasoning, long-context understanding, and robust tool use, with major gains in agentic coding, UI generation, and complex reasoning 
tasks.","pricing":{"input_token":0.376,"output_token":1.878,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Code","Reasoning","Tools","OpenClaw"]},{"id":"minimax-m2.1","object":"model","created":1770205082,"owned_by":"MiniMax AI","description":"MiniMax-M2.1 is an open-source agentic coding model designed for polyglot development, precision refactoring, and reliable execution of long, multi-step coding and office workflows.","pricing":{"input_token":0.28,"output_token":1.127,"currency":"EUR"},"context_size":196000,"tags":["Instruct","Code","Tools","Reasoning","OpenClaw"]},{"id":"llama-guard-3-8b","object":"model","created":1770204602,"owned_by":"Meta","description":"Llama Guard 3 is a fine-tuned Llama-3.1-8B model for robust content safety classification and policy enforcement across multiple languages.","pricing":{"input_token":0.019,"output_token":0.056,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Safety-guard"]},{"id":"qwen3guard-gen-8b","object":"model","created":1770204122,"owned_by":"Alibaba Cloud","description":"Qwen3Guard-Gen-8B is a large-scale multilingual safety moderation model designed for high-accuracy prompt and response classification.","pricing":{"input_token":0,"output_token":0,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Safety-guard"]},{"id":"qwen3guard-gen-0.6b","object":"model","created":1770204002,"owned_by":"Alibaba Cloud","description":"Qwen3Guard-Gen-0.6B is a lightweight multilingual safety moderation model that classifies prompts and responses into safe, controversial, or unsafe categories.","pricing":{"input_token":0,"output_token":0,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Safety-guard"]},{"id":"voxtral-small-2507","object":"model","created":1770025993,"owned_by":"Mistral AI","description":"Voxtral Small is a multimodal model with audio input, combining advanced speech capabilities with strong text performance for transcription, translation, and audio 
understanding.","pricing":{"input_token":0.105,"output_token":0.315,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Audio","OpenClaw"]},{"id":"qwen3-vl-235b-a22b","object":"model","created":1768304282,"owned_by":"Alibaba Cloud","description":"Qwen3 VL 235B A22B is a 235B-parameter MoE vision-language flagship model (≈22B active) designed for frontier-level multimodal understanding across text, images, documents, and long videos.","pricing":{"input_token":0.196,"output_token":1.77,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Reasoning","Tools","Image","OpenClaw"]},{"id":"mistral-small-creative","object":"model","created":1768297993,"owned_by":"Mistral AI","description":"Mistral Small Creative is a fine-tuned small model optimized for creative writing, roleplay, and chat, trained on curated data.","pricing":{"input_token":0.105,"output_token":0.315,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Tools"]},{"id":"nvidia-nemotron-3-nano-30b-a3b","object":"model","created":1768217882,"owned_by":"Nvidia","description":"Nemotron-Nano-3-30B-A3B is a compact Mixture-of-Experts model optimized for efficient reasoning, chat, and coding, with strong multilingual support and long-context RAG and agent workflows.","pricing":{"input_token":0.056,"output_token":0.226,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"claude-opus4-5","object":"model","created":1767625200,"owned_by":"Anthropic","description":"Claude Opus 4.5 is Anthropic’s next-generation, most intelligent model, delivering industry-leading performance across coding, agents, computer use, and complex enterprise workflows.","pricing":{"input_token":5.165,"output_token":25.826,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Tools","Image","Code","Reasoning","OpenClaw"]},{"id":"qwen3-next-80b-a3b-thinking","object":"model","created":1765370282,"owned_by":"Alibaba Cloud","description":"Qwen3-Next-80B-A3B-Thinking is an 
80B-parameter thinking-oriented model combining Hybrid Attention, high-sparsity MoE, and multi-token prediction to deliver exceptional performance on complex reasoning tasks, surpassing both open-source and proprietary alternatives.","pricing":{"input_token":0.141,"output_token":1.127,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Code","OpenClaw"]},{"id":"holo2-30b-a3b","object":"model","created":1765367099,"owned_by":"H company","description":"Holo2 30B A3B is a text-and-vision model optimized for analyzing graphical user interfaces, including web, desktop, and mobile, and enabling agents to interpret interfaces, reason over content, and take actions.","pricing":{"input_token":0.315,"output_token":0.735,"currency":"EUR"},"context_size":22000,"tags":["Instruct","Tools","Reasoning","Image","OpenClaw"]},{"id":"devstral-2512","object":"model","created":1765356793,"owned_by":"Mistral AI","description":"Devstral 2 2512 is an enterprise-grade text model designed for advanced coding agents and software engineering workflows.","pricing":{"input_token":0.42,"output_token":2.1,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Code","Tools","OpenClaw"]},{"id":"nova-2-lite","object":"model","created":1764848282,"owned_by":"Amazon","description":"Nova 2 Lite is an advanced multimodal reasoning model that combines efficiency and performance, delivering reliable AI for agentic workflows and enterprise applications.","pricing":{"input_token":0.352,"output_token":2.963,"currency":"EUR"},"context_size":1000000,"tags":["Instruct","Tools","Image","Reasoning","OpenClaw"]},{"id":"gpt-oss-safeguard-120b","object":"model","created":1764848282,"owned_by":"OpenAI","description":"GPT OSS Safeguard 120B is a 120B-parameter open-weight safety reasoning model designed for high-accuracy enterprise moderation and policy 
enforcement.","pricing":{"input_token":0.169,"output_token":0.657,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Safety-guard","OpenClaw"]},{"id":"mistral-large-2512","object":"model","created":1764762602,"owned_by":"Mistral AI","description":"Mistral Large 3 is a state-of-the-art open-weight multimodal Mixture-of-Experts model with 41B active parameters, delivering frontier-level performance across text and vision tasks.","pricing":{"input_token":0.525,"output_token":1.575,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image"]},{"id":"ministral-8b-2512","object":"model","created":1764762542,"owned_by":"Mistral AI","description":"Ministral 3 8B is a balanced, efficient multimodal model offering strong text and vision capabilities, optimized for edge and local deployment.","pricing":{"input_token":0.158,"output_token":0.158,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image"]},{"id":"ministral-3b-2512","object":"model","created":1764762482,"owned_by":"Mistral AI","description":"Ministral 3 3B is a compact, efficient multimodal model with strong language, vision capabilities, and ideal for custom fine-tuning.","pricing":{"input_token":0.105,"output_token":0.105,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image"]},{"id":"ministral-14b-2512","object":"model","created":1764761882,"owned_by":"Mistral AI","description":"Ministral 3 14B is a frontier-level 14B multimodal model optimized for local deployment, delivering state-of-the-art text and vision reasoning with a 256K context window and strong agentic capabilities.","pricing":{"input_token":0.21,"output_token":0.21,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools","Image"]},{"id":"kimi-k2-thinking","object":"model","created":1764329882,"owned_by":"Moonshot AI","description":"Kimi K2 Thinking is an advanced open-source reasoning model designed as a long-horizon thinking 
agent, combining deep step-by-step reasoning with stable multi-stage tool use, INT4 efficiency, and a 262k context window.","pricing":{"input_token":0.564,"output_token":2.348,"currency":"EUR"},"context_size":262000,"tags":["Instruct","Code","Tools","Reasoning","OpenClaw"]},{"id":"intellect-3","object":"model","created":1764243482,"owned_by":"PrimeIntellect","description":"INTELLECT-3 is a 106B Mixture-of-Experts reasoning model built on GLM-4.5-Air, achieving state-of-the-art performance in math, coding, science, and multi-step reasoning through supervised fine-tuning and large-scale reinforcement learning.","pricing":{"input_token":0.188,"output_token":1.033,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","Code","OpenClaw"]},{"id":"gpt-5.1","object":"model","created":1763168400,"owned_by":"OpenAI","description":"GPT-5.1 is an enhanced version of GPT-5, offering clearer responses, adaptive reasoning, improved consistency, expanded context capacity, and better customization for complex and interactive tasks.","pricing":{"input_token":1.296,"output_token":10.33,"currency":"EUR"},"context_size":400000,"tags":["Instruct","Reasoning","Tools","Image","OpenClaw"]},{"id":"nemotron-nano-v2-12b","object":"model","created":1761910682,"owned_by":"Nvidia","description":"NVIDIA Nemotron Nano v2 12B is a 12-billion-parameter multimodal reasoning model designed for advanced video understanding, document intelligence, and visual reasoning, built with a hybrid Transformer-Mamba architecture for high efficiency and low latency.","pricing":{"input_token":0.066,"output_token":0.188,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","Image","OpenClaw"]},{"id":"claude-haiku-4-5","object":"model","created":1760547600,"owned_by":"Anthropic","description":"Claude Haiku 4.5 delivers near-frontier performance for coding and agent tasks, optimized for speed and cost to support high-volume and free-tier 
applications.","pricing":{"input_token":0.939,"output_token":4.696,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Tools","Image","Code","Reasoning","OpenClaw"]},{"id":"claude-4-5-sonnet","object":"model","created":1759145882,"owned_by":"Anthropic","description":"Claude Sonnet 4.5 is Anthropic’s most capable model for real-world agents, excelling in coding, computer use, and long-horizon tasks.","pricing":{"input_token":2.817,"output_token":14.087,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Tools","Image","Code","Reasoning","OpenClaw"]},{"id":"magistral-medium-2509","object":"model","created":1758185593,"owned_by":"Mistral AI","description":"Magistral Medium 2509 is a frontier-class reasoning model with vision support, delivering transparent, multilingual reasoning and a 15% performance boost over its predecessor.","pricing":{"input_token":2.1,"output_token":5.25,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Image"]},{"id":"magistral-small-2509","object":"model","created":1758185593,"owned_by":"Mistral AI","description":"Magistral Small 2509 is a 24B-parameter open-weight reasoning model with vision support, offering a 15% performance boost and improved multimodal reasoning over its predecessor.","pricing":{"input_token":0.525,"output_token":1.575,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Image"]},{"id":"hermes-4-70b","object":"model","created":1755085082,"owned_by":"NousResearch","description":"Hermes 4 70B is a hybrid-mode reasoning model built on Llama 3.1, designed for advanced logic, coding, math, and structured outputs with improved steerability.","pricing":{"input_token":0.122,"output_token":0.376,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools"]},{"id":"gpt-5","object":"model","created":1754528400,"owned_by":"OpenAI","description":"GPT-5 is OpenAI’s most advanced model, built for deep reasoning, high-quality code generation, and complex 
multi-step tasks.","pricing":{"input_token":1.296,"output_token":10.33,"currency":"EUR"},"context_size":400000,"tags":["Instruct","Reasoning","Tools","Image","OpenClaw"]},{"id":"qwen3-235b-a22b-thinking-2507","object":"model","created":1754480282,"owned_by":"Alibaba Cloud","description":"Qwen3-235B-A22B-Thinking-2507 is a high-performance Mixture-of-Experts model optimized for advanced reasoning, agentic workflows, and multilingual tasks.","pricing":{"input_token":0.188,"output_token":0.752,"currency":"EUR"},"context_size":262000,"tags":["Instruct","Reasoning","Tools","OpenClaw"]},{"id":"gpt-oss-120b","object":"model","created":1754393882,"owned_by":"OpenAI","description":"GPT Oss 120b is a 120B parameter Mixture-of-Experts model by OpenAI, built for advanced reasoning, agentic tasks, and flexible developer use under Apache 2.0.","pricing":{"input_token":0.037,"output_token":0.186,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","OpenClaw"]},{"id":"qwen3-30b-a3b-instruct-2507","object":"model","created":1753702682,"owned_by":"Alibaba Cloud","description":"Qwen3-30B-A3B-Instruct-2507 is an advanced Mixture-of-Experts model optimized for reasoning, coding, and multilingual instruction following.","pricing":{"input_token":0.093,"output_token":0.281,"currency":"EUR"},"context_size":262000,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"qwen3-30b-a3b-thinking-2507","object":"model","created":1753702682,"owned_by":"Alibaba Cloud","description":"Qwen3-30B-A3B-Thinking-2507 is a specialized Mixture-of-Experts model designed for advanced reasoning in mathematics, science, coding, and academic tasks.","pricing":{"input_token":0.093,"output_token":0.281,"currency":"EUR"},"context_size":262000,"tags":["Instruct","Reasoning","Tools","OpenClaw"]},{"id":"qwen3-coder-480b-a35b-instruct","object":"model","created":1753702682,"owned_by":"Alibaba Cloud","description":"Qwen3-Coder-480B-A35B-Instruct is Alibaba’s flagship Mixture-of-Experts 
coding model, optimized for autonomous software engineering and complex reasoning in large-scale development tasks.","pricing":{"input_token":0.376,"output_token":1.691,"currency":"EUR"},"context_size":262000,"tags":["Instruct","Code","Tools","Reasoning","OpenClaw"]},{"id":"gpt-oss-20b","object":"model","created":1752838682,"owned_by":"OpenAI","description":"GPT Oss 20B is an open-weight Mixture-of-Experts model from OpenAI, optimized for low-latency inference and local deployment.","pricing":{"input_token":0.028,"output_token":0.13,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","OpenClaw"]},{"id":"kimi-k2-instruct","object":"model","created":1752838682,"owned_by":"Moonshot AI","description":"Kimi-K2-Instruct is a state-of-the-art MoE large language model optimized for agentic intelligence, tool use, and multi-step task automation.","pricing":{"input_token":0.469,"output_token":2.254,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Code","Tools","OpenClaw"]},{"id":"glm-4.5","object":"model","created":1752838682,"owned_by":"Z.ai","description":"GLM-4.5 is a top-tier MoE large language model designed for reasoning, coding, and agentic AI applications with dual processing modes.","pricing":{"input_token":0.564,"output_token":2.066,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Code","Reasoning","Tools","OpenClaw"]},{"id":"glm-4.5-air","object":"model","created":1752838682,"owned_by":"Z.ai","description":"GLM-4.5-Air is a compact and efficient MoE large language model designed for reasoning, coding, and multi-step agentic tasks.","pricing":{"input_token":0.188,"output_token":1.127,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Code","Reasoning","Tools","OpenClaw"]},{"id":"mistral-7b-instruct-v0.3","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Mistral-7B-Instruct-v0.3 model is a fine-tuned version of the Mistral 7B base model, optimized for instruction-following tasks. 
Released in 2023, it is intended for demonstration purposes and does not include built-in guardrails or moderation features.","pricing":{"input_token":0.105,"output_token":0.105,"currency":"EUR"},"context_size":127000,"tags":["Instruct","Tools"]},{"id":"mistral-large-2402","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Mistral Large (24.02) is Mistral AI’s most advanced language model, built for complex multilingual reasoning, code generation, and deep text understanding.","pricing":{"input_token":4.038,"output_token":12.208,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Tools","Reasoning"]},{"id":"mistral-7b-instruct-v0.2","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Mistral 7B Instruct is a compact, 7B parameter model optimized for fast and efficient text and code generation with a 32K token context window.","pricing":{"input_token":0.15,"output_token":0.207,"currency":"EUR"},"context_size":32000,"tags":["Instruct"]},{"id":"pixtral-large-2502","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Pixtral Large (25.02) is a 124B open-weight multimodal model built on Mistral Large 2, offering advanced image understanding and strong performance across text and code tasks.","pricing":{"input_token":1.878,"output_token":5.634,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Image","Tools","Reasoning","OpenClaw"]},{"id":"mistral-small-3.2-24b-instruct-2506","object":"model","created":1748252162,"owned_by":"Mistral AI","description":"Mistral-Small-3.2-24B-Instruct-2506 is a 24B parameter instruction-tuned model with enhanced long-context support (128k) and state-of-the-art vision understanding.","pricing":{"input_token":0.095,"output_token":0.294,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Image","OpenClaw"]},{"id":"qwen3-32b","object":"model","created":1745840282,"owned_by":"Alibaba Cloud","description":"Qwen3 delivers cutting-edge 
advancements in reasoning, agent capabilities, and multilingual support, with seamless mode switching for optimized performance across tasks.","pricing":{"input_token":0.084,"output_token":0.281,"currency":"EUR"},"context_size":16384,"tags":["Instruct","Reasoning","Tools","OpenClaw"]},{"id":"qwen3-235b-a22b-instruct-2507","object":"model","created":1745840282,"owned_by":"Alibaba Cloud","description":"Qwen3-235B-A22B-Instruct-2507 is a large non-thinking mode model with major improvements in reasoning, instruction following, knowledge coverage, and long-context understanding up to 256K tokens.","pricing":{"input_token":0.065,"output_token":0.429,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"qwen3-coder-30b-a3b-instruct","object":"model","created":1745840282,"owned_by":"Alibaba Cloud","description":"Qwen3 Coder 30b a3b Instruct is a 30.5B parameter open-source Mixture-of-Experts model specialized for coding, tool use, and complex multi-step programming workflows.","pricing":{"input_token":0.056,"output_token":0.231,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Code","Tools","Reasoning","OpenClaw"]},{"id":"gpt-4.1-mini","object":"model","created":1744630682,"owned_by":"OpenAI","description":"gpt-4.1-mini is a cost-effective, low-latency model in the GPT-4.1 series, optimized for coding, instruction following, and multimodal tasks with large-context support.","pricing":{"input_token":0.41,"output_token":1.607,"currency":"EUR"},"context_size":1047576,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"gpt-4.1-nano","object":"model","created":1744630682,"owned_by":"OpenAI","description":"gpt-4.1-nano is an ultra-efficient model in the GPT-4.1 series, offering powerful coding and instruction capabilities with support for text and vision at the lowest cost and 
latency.","pricing":{"input_token":0.105,"output_token":0.41,"currency":"EUR"},"context_size":1047576,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"nova-lite-v1","object":"model","created":1744630682,"owned_by":"Amazon","description":"Nova Lite is a fast, low-cost multimodal foundation model capable of reasoning over text, images, and video in 200+ languages.","pricing":{"input_token":0.065,"output_token":0.259,"currency":"EUR"},"context_size":300000,"tags":["Instruct","Tools","Image","Reasoning","OpenClaw"]},{"id":"nova-micro-v1","object":"model","created":1744630682,"owned_by":"Amazon","description":"Nova Micro is a multilingual text-to-text foundation model with strong reasoning capabilities and broad language coverage across 200+ languages.","pricing":{"input_token":0.038,"output_token":0.15,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Image","Reasoning","OpenClaw"]},{"id":"gpt-4.1","object":"model","created":1744630682,"owned_by":"OpenAI","description":"GPT-4.1 is the latest evolution of the GPT-4o model family, offering superior coding, instruction following, and support for up to 1 million input tokens with enhanced multimodal capabilities.","pricing":{"input_token":2.066,"output_token":8.266,"currency":"EUR"},"context_size":1047576,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"claude-3-7-sonnet","object":"model","created":1744630682,"owned_by":"Anthropic","description":"Claude 3.7 Sonnet is Anthropic’s most advanced model, featuring hybrid reasoning for both fast responses and deep, step-by-step problem-solving within a single system.","pricing":{"input_token":2.817,"output_token":14.087,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Tools","Image","Code","Reasoning","OpenClaw"]},{"id":"nova-pro-v1","object":"model","created":1744630682,"owned_by":"Amazon","description":"Nova Pro is a powerful, multilingual multimodal foundation model that excels at reasoning over text, images, and video across 200+ 
languages.","pricing":{"input_token":0.865,"output_token":3.46,"currency":"EUR"},"context_size":300000,"tags":["Instruct","Tools","Image","Reasoning","OpenClaw"]},{"id":"claude-sonnet-4","object":"model","created":1744630682,"owned_by":"Anthropic","description":"Claude Sonnet 4 is a versatile, mid-sized model optimized for coding, real-time AI assistants, and large-scale content tasks with strong cost-performance balance.","pricing":{"input_token":2.817,"output_token":14.087,"currency":"EUR"},"context_size":200000,"tags":["Instruct","Tools","Image","Code","Reasoning","OpenClaw"]},{"id":"llama-3.1-nemotron-ultra-253b-v1","object":"model","created":1744025882,"owned_by":"Nvidia","description":"A reasoning-optimized LLM based on Llama 3.1, Nemotron Ultra 253B delivers strong performance in tasks like RAG and tool use, with high efficiency and reduced latency.","pricing":{"input_token":0.564,"output_token":1.691,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"llama-4-maverick","object":"model","created":1742989082,"owned_by":"Meta","description":"Llama 4 Maverick is a high-performance, natively multimodal AI model with 17B active parameters (400B total) designed for efficient image and text understanding, reasoning, and captioning.","pricing":{"input_token":0.13,"output_token":0.633,"currency":"EUR"},"context_size":1050000,"tags":["Instruct","Tools","OpenClaw"]},{"id":"deepseek-v3-0324","object":"model","created":1742816282,"owned_by":"Deepseek","description":"DeepSeek-V3-0324 delivers major upgrades in reasoning, coding, and Chinese language tasks, with enhanced function calling, translation, and interactive capabilities.","pricing":{"input_token":0.28,"output_token":0.932,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"mistral-small-2503","object":"model","created":1742482730,"owned_by":"Mistral AI","description":"Combines advanced text and vision capabilities with 
24 billion parameters, supporting multilingual tasks and long contexts up to 131k tokens, making it versatile for various applications without sacrificing performance.","pricing":{"input_token":0.105,"output_token":0.315,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Image","Tools"]},{"id":"mistral-small-2506","object":"model","created":1742482730,"owned_by":"Mistral AI","description":"Mistral Small 2506 is a 24B parameter efficient reasoning LLM, optimized for coding, math, multilingual, and multimodal tasks.","pricing":{"input_token":0.105,"output_token":0.315,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Tools","Image"]},{"id":"gemini-2.0-flash-001","object":"model","created":1742125082,"owned_by":"Google","description":"Gemini 2.0 Flash is a next-generation lightweight model offering fast multimodal generation, built-in tool use, and a 1M token context window for real-time, high-quality AI experiences.","pricing":{"input_token":0.141,"output_token":0.563,"currency":"EUR"},"context_size":1048576,"tags":["Instruct","Tools","Image","Audio"]},{"id":"gemini-2.0-flash-lite-001","object":"model","created":1742125082,"owned_by":"Google","description":"Gemini 2.0 Flash-Lite is a cost-efficient, high-throughput model with a 1M token context window and multimodal input support, optimized for mixed workload performance.","pricing":{"input_token":0.07,"output_token":0.282,"currency":"EUR"},"context_size":1048576,"tags":["Instruct","Tools","Image","Audio"]},{"id":"gemini-2.5-flash","object":"model","created":1742125082,"owned_by":"Google","description":"Gemini 2.5 models are advanced thinking models that reason before responding, delivering improved accuracy and performance.","pricing":{"input_token":0.282,"output_token":2.348,"currency":"EUR"},"context_size":1048576,"tags":["Instruct","Tools","Image","Reasoning","Audio"]},{"id":"gemini-2.5-pro","object":"model","created":1742125082,"owned_by":"Google","description":"Gemini 2.5 Pro is the most 
advanced reasoning model in the Gemini series, excelling at complex problem-solving across multiple data types.","pricing":{"input_token":1.409,"output_token":9.391,"currency":"EUR"},"context_size":1048576,"tags":["Instruct","Tools","Image","Reasoning","Audio"]},{"id":"gemma-3-27b-it","object":"model","created":1741779482,"owned_by":"Google","description":"Gemma 3 is a family of lightweight, multimodal models from Google, supporting text and image inputs, multilingual capabilities, and a 131K context window.","pricing":{"input_token":0.093,"output_token":0.281,"currency":"EUR"},"context_size":40000,"tags":["Instruct","Image","Tools","Reasoning","OpenClaw"]},{"id":"qwen2.5-vl-72b-instruct","object":"model","created":1737977882,"owned_by":"Alibaba Cloud","description":"Qwen2.5-VL is a powerful vision-language model with advanced capabilities in visual understanding, long video reasoning, and structured output generation.","pricing":{"input_token":0.235,"output_token":0.705,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Image","Reasoning"]},{"id":"deepseek-r1-distill-llama-70b","object":"model","created":1737373082,"owned_by":"Deepseek","description":"Distilled from DeepSeek-R1 into the powerful LLaMA 70B base, DeepSeek-R1-Distill is an efficient yet capable large language model with strong performance across code, math, and reasoning tasks—ideal for real-world AI applications.","pricing":{"input_token":0.704,"output_token":0.704,"currency":"EUR"},"context_size":16000,"tags":["Instruct","Reasoning","Tools","OpenClaw"]},{"id":"deepseek-r1-0528","object":"model","created":1737373082,"owned_by":"Deepseek","description":"DeepSeek R1 0528 is an upgraded reasoning model with enhanced depth, reduced hallucinations, and stronger performance in math, coding, and 
logic.","pricing":{"input_token":0.614,"output_token":2.254,"currency":"EUR"},"context_size":164000,"tags":["Instruct","Tools","OpenClaw","Reasoning"]},{"id":"codestral-2508","object":"model","created":1736768282,"owned_by":"Mistral AI","description":"Codestral-2508 is a cutting-edge coding model optimized for low-latency, high-frequency tasks like FIM, code correction, and test generation.","pricing":{"input_token":0.315,"output_token":0.945,"currency":"EUR"},"context_size":256000,"tags":["Instruct","Code","Tools"]},{"id":"llama-3.3-70b-instruct","object":"model","created":1733830682,"owned_by":"Meta","description":"With 70 billion parameters, this cutting-edge multilingual model excels in generating high-quality, safe, and helpful text across eight languages, making it stand out from other chat models.","pricing":{"input_token":0.093,"output_token":0.289,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"gpt-4o","object":"model","created":1732102682,"owned_by":"OpenAI","description":"GPT-4o is a transformative multimodal AI model that seamlessly fuses text, images, and audio to deliver exceptional speed, efficiency, and performance—at twice the speed and half the cost of GPT-4 Turbo.","pricing":{"input_token":2.506,"output_token":10.024,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"gpt-5-nano","object":"model","created":1732102682,"owned_by":"OpenAI","description":"GPT-5 nano is the fastest, cheapest GPT-5 model, optimized for summarization, classification, and ultra-low latency tasks.","pricing":{"input_token":0.057,"output_token":0.414,"currency":"EUR"},"context_size":400000,"tags":["Instruct","Tools","Image","Reasoning","OpenClaw"]},{"id":"gpt-5-mini","object":"model","created":1732102682,"owned_by":"OpenAI","description":"GPT-5 mini is a compact, low-cost, and fast version of GPT-5, ideal for real-time agents and lightweight reasoning 
tasks.","pricing":{"input_token":0.263,"output_token":2.066,"currency":"EUR"},"context_size":400000,"tags":["Instruct","Tools","Image","Reasoning","OpenClaw"]},{"id":"mistral-large-2411","object":"model","created":1731941042,"owned_by":"Mistral AI","description":"Mistral-Large-Instruct-2411 is a 123B parameter language model designed for instruction following, reasoning, and coding, with robust support for long-context tasks, native function calling, and multilingual understanding. It's built to power intelligent agents and RAG workflows at scale.","pricing":{"input_token":1.89,"output_token":5.67,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Tools","Reasoning"]},{"id":"pixtral-large-2411","object":"model","created":1731940922,"owned_by":"Mistral AI","description":"Pixtral Large Instruct 2411 is a frontier-level 124B multimodal model combining powerful image understanding with state-of-the-art language capabilities. Built on Mistral Large 2, it delivers top-tier performance on tasks involving documents, charts, natural images, and text.","pricing":{"input_token":1.89,"output_token":5.67,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Image","Reasoning"]},{"id":"pixtral-12b-2409","object":"model","created":1731176378,"owned_by":"Mistral AI","description":"Pixtral 2409 12B is a state-of-the-art multimodal model with 12B parameters and a 400M vision encoder, natively trained on interleaved text and image data. It excels in tasks spanning vision-language reasoning, instruction following, and pure text understanding, making it highly effective for real-world multimodal applications.","pricing":{"input_token":0.21,"output_token":0.21,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Image","Reasoning"]},{"id":"teuken-7b-instruct-commercial","object":"model","created":1729864802,"owned_by":"OpenGPT-X","description":"Teuken-7B-Instruct-v04 is a 7-billion-parameter multilingual large language model developed under the OpenGPT-X project. 
Pretrained on 4 trillion tokens spanning all 24 official European languages, it is fine-tuned for instruction following to deliver precise and context-aware responses. The model is open source under the Apache 2.0 license, enabling both commercial and research applications. With a strong focus on cultural alignment, it ensures responses reflect European values and norms across diverse linguistic settings.","pricing":{"input_token":0.158,"output_token":0.158,"currency":"EUR"},"context_size":4096,"tags":["Instruct","OpenClaw"]},{"id":"llama-3.2-1b-instruct","object":"model","created":1727264282,"owned_by":"Meta","description":"Optimized for multilingual dialogue, retrieval, and summarization, this LLM by Meta outperforms many open and closed-source models in industry benchmarks.","pricing":{"input_token":0.103,"output_token":0.103,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Reasoning"]},{"id":"llama-3.2-3b-instruct","object":"model","created":1726659482,"owned_by":"Meta","description":"Llama 3.2 is a multilingual open-source language model optimized for dialogue and summarization.","pricing":{"input_token":0.16,"output_token":0.16,"currency":"EUR"},"context_size":131000,"tags":["Instruct","Reasoning"]},{"id":"qwen2.5-coder-7b","object":"model","created":1726486682,"owned_by":"Alibaba Cloud","description":"Qwen2.5-Coder is a series of code-optimized models with improved code generation, reasoning, and fixing capabilities, supporting long-context tasks up to 128K tokens.","pricing":{"input_token":0.028,"output_token":0.084,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Code"]},{"id":"hermes-4-405b","object":"model","created":1723549082,"owned_by":"NousResearch","description":"Hermes 4 405B is a frontier hybrid-mode reasoning model built on Llama 3.1, optimized for advanced logic, math, coding, and structured output 
generation.","pricing":{"input_token":0.939,"output_token":2.817,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","OpenClaw"]},{"id":"mistral-nemo-instruct-2407","object":"model","created":1723020793,"owned_by":"Mistral AI","description":"A 12B parameter, instruct-tuned language model by Mistral AI and NVIDIA, designed for advanced instruction following, multi-turn conversations, and generating text and code across multiple languages.","pricing":{"input_token":0.137,"output_token":0.137,"currency":"EUR"},"context_size":118000,"tags":["Instruct","Tools"]},{"id":"mistral-medium-2508","object":"model","created":1723020793,"owned_by":"Mistral AI","description":"Mistral Medium 2508 is a frontier-class multimodal LLM with a 128,000 token context window, optimized for reasoning, coding, and multimodal tasks.","pricing":{"input_token":0.42,"output_token":2.1,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Reasoning","Tools","Image"]},{"id":"devstral-medium-2507","object":"model","created":1723020793,"owned_by":"Mistral AI","description":"Devstral Medium 2507 is a high-performance code generation and agentic reasoning model, optimized for advanced coding tasks and tool use.","pricing":{"input_token":0.42,"output_token":2.1,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Code","Tools"]},{"id":"devstral-small-2507","object":"model","created":1723020793,"owned_by":"Mistral AI","description":"Devstral Small 2507 is an improved code generation and reasoning model, optimized for agentic coding tasks and versatile prompt handling.","pricing":{"input_token":0.105,"output_token":0.315,"currency":"EUR"},"context_size":131072,"tags":["Instruct","Code","Tools"]},{"id":"llama-3.1-405b-instruct","object":"model","created":1721734682,"owned_by":"Meta","description":"Meta Llama 3.1 is a multilingual LLM series optimized for dialogue, combining SFT and RLHF for improved helpfulness, safety, and 
performance.","pricing":{"input_token":1.838,"output_token":1.838,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning"]},{"id":"gpt-4o-mini","object":"model","created":1721302682,"owned_by":"OpenAI","description":"GPT-4o mini is a lightweight, cost-efficient multimodal model optimized for real-time tasks and large-context applications, with support for text and vision.","pricing":{"input_token":0.15,"output_token":0.601,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Image","Reasoning"]},{"id":"gemma-2-2b-it","object":"model","created":1721129882,"owned_by":"Google","description":"Gemma is a family of lightweight, open-source language models from Google, optimized for text generation tasks.","pricing":{"input_token":0.019,"output_token":0.057,"currency":"EUR"},"context_size":8192,"tags":["Instruct","Reasoning","OpenClaw"]},{"id":"gemma-2-9b-it","object":"model","created":1719315482,"owned_by":"Google","description":"Gemma is a family of lightweight, open-source language models from Google, designed for efficient and versatile text generation tasks.","pricing":{"input_token":0.028,"output_token":0.084,"currency":"EUR"},"context_size":8000,"tags":["Instruct","Reasoning","OpenClaw"]},{"id":"llama-3.1-8b-instruct","object":"model","created":1712652299,"owned_by":"Meta","description":"Optimized for dialogue, this LLM by Meta outperforms other open-source chat models in benchmarks while prioritizing helpfulness and safety.","pricing":{"input_token":0.019,"output_token":0.057,"currency":"EUR"},"context_size":128000,"tags":["Instruct","Tools","Reasoning","OpenClaw"]},{"id":"codellama-13b-instruct-hf","object":"model","created":1710362282,"owned_by":"Meta","description":"Code Llama is a family of pretrained and fine-tuned code generation models, ranging from 7B to 34B parameters. 
This version is the 13B instruction-tuned model for code synthesis and understanding.","pricing":{"input_token":0.473,"output_token":0.473,"currency":"EUR"},"context_size":16000,"tags":["Instruct","Code"]},{"id":"mixtral-8x7B-instruct-v0.1","object":"model","created":1702294682,"owned_by":"Mistral AI","description":"Mixtral-8x7B is a sparse mixture-of-experts model with open weights, delivering fast, high-performance inference that outperforms Llama 2 70B and rivals GPT-3.5.","pricing":{"input_token":0.46,"output_token":0.662,"currency":"EUR"},"context_size":32000,"tags":["Instruct","Reasoning"]}]}