* Update defaultModels.js: add gemma-3-27b-it to v1BetaModels * Update defaultModels.js: 2025-03-30 model update * Update defaultModels.js: remove text embedding * Update name and inputTokenLimit in modelMap.js * Update Gemini to load models from both endpoints: dedupe models, decide endpoint based on experimental status from fetch, add util script for maintainers, reduce cache time on Gemini models to 1 day * remove comment --------- Co-authored-by: DreamerC <dreamerwolf.tw@gmail.com>
110 lines
3.3 KiB
JavaScript
110 lines
3.3 KiB
JavaScript
/**
|
|
* The model name and context window for all know model windows
|
|
* that are available through providers which has discrete model options.
|
|
* This file is automatically generated by syncStaticLists.mjs
|
|
* and should not be edited manually.
|
|
*
|
|
* Last updated: 2025-04-07T20:29:49.277Z
|
|
*/
|
|
const MODEL_MAP = {
|
|
anthropic: {
|
|
"claude-instant-1.2": 100000,
|
|
"claude-2.0": 100000,
|
|
"claude-2.1": 200000,
|
|
"claude-3-haiku-20240307": 200000,
|
|
"claude-3-sonnet-20240229": 200000,
|
|
"claude-3-opus-20240229": 200000,
|
|
"claude-3-opus-latest": 200000,
|
|
"claude-3-5-haiku-latest": 200000,
|
|
"claude-3-5-haiku-20241022": 200000,
|
|
"claude-3-5-sonnet-latest": 200000,
|
|
"claude-3-5-sonnet-20241022": 200000,
|
|
"claude-3-5-sonnet-20240620": 200000,
|
|
"claude-3-7-sonnet-20250219": 200000,
|
|
"claude-3-7-sonnet-latest": 200000,
|
|
},
|
|
cohere: {
|
|
"command-r": 128000,
|
|
"command-r-plus": 128000,
|
|
command: 4096,
|
|
"command-light": 4096,
|
|
"command-nightly": 8192,
|
|
"command-light-nightly": 8192,
|
|
},
|
|
gemini: {
|
|
"gemini-1.5-pro-001": 2000000,
|
|
"gemini-1.5-pro-002": 2000000,
|
|
"gemini-1.5-pro": 2000000,
|
|
"gemini-1.5-flash-001": 1000000,
|
|
"gemini-1.5-flash": 1000000,
|
|
"gemini-1.5-flash-002": 1000000,
|
|
"gemini-1.5-flash-8b": 1000000,
|
|
"gemini-1.5-flash-8b-001": 1000000,
|
|
"gemini-2.0-flash": 1048576,
|
|
"gemini-2.0-flash-001": 1048576,
|
|
"gemini-2.0-flash-lite-001": 1048576,
|
|
"gemini-2.0-flash-lite": 1048576,
|
|
"gemini-1.5-pro-latest": 2000000,
|
|
"gemini-1.5-flash-latest": 1000000,
|
|
"gemini-1.5-flash-8b-latest": 1000000,
|
|
"gemini-1.5-flash-8b-exp-0827": 1000000,
|
|
"gemini-1.5-flash-8b-exp-0924": 1000000,
|
|
"gemini-2.5-pro-exp-03-25": 1048576,
|
|
"gemini-2.5-pro-preview-03-25": 1048576,
|
|
"gemini-2.0-flash-exp": 1048576,
|
|
"gemini-2.0-flash-exp-image-generation": 1048576,
|
|
"gemini-2.0-flash-lite-preview-02-05": 1048576,
|
|
"gemini-2.0-flash-lite-preview": 1048576,
|
|
"gemini-2.0-pro-exp": 1048576,
|
|
"gemini-2.0-pro-exp-02-05": 1048576,
|
|
"gemini-exp-1206": 1048576,
|
|
"gemini-2.0-flash-thinking-exp-01-21": 1048576,
|
|
"gemini-2.0-flash-thinking-exp": 1048576,
|
|
"gemini-2.0-flash-thinking-exp-1219": 1048576,
|
|
"learnlm-1.5-pro-experimental": 32767,
|
|
"gemma-3-1b-it": 32768,
|
|
"gemma-3-4b-it": 32768,
|
|
"gemma-3-12b-it": 32768,
|
|
"gemma-3-27b-it": 131072,
|
|
},
|
|
groq: {
|
|
"gemma2-9b-it": 8192,
|
|
"gemma-7b-it": 8192,
|
|
"llama3-70b-8192": 8192,
|
|
"llama3-8b-8192": 8192,
|
|
"llama-3.1-70b-versatile": 8000,
|
|
"llama-3.1-8b-instant": 8000,
|
|
"mixtral-8x7b-32768": 32768,
|
|
},
|
|
openai: {
|
|
"gpt-3.5-turbo": 16385,
|
|
"gpt-3.5-turbo-1106": 16385,
|
|
"gpt-4o": 128000,
|
|
"gpt-4o-2024-08-06": 128000,
|
|
"gpt-4o-2024-05-13": 128000,
|
|
"gpt-4o-mini": 128000,
|
|
"gpt-4o-mini-2024-07-18": 128000,
|
|
"gpt-4-turbo": 128000,
|
|
"gpt-4-1106-preview": 128000,
|
|
"gpt-4-turbo-preview": 128000,
|
|
"gpt-4": 8192,
|
|
"gpt-4-32k": 32000,
|
|
"o1-preview": 128000,
|
|
"o1-preview-2024-09-12": 128000,
|
|
"o1-mini": 128000,
|
|
"o1-mini-2024-09-12": 128000,
|
|
"o3-mini": 200000,
|
|
"o3-mini-2025-01-31": 200000,
|
|
},
|
|
deepseek: {
|
|
"deepseek-chat": 128000,
|
|
"deepseek-coder": 128000,
|
|
"deepseek-reasoner": 128000,
|
|
},
|
|
xai: {
|
|
"grok-beta": 131072,
|
|
},
|
|
};
|
// CommonJS export so consumers can `require`/destructure the map.
module.exports = { MODEL_MAP };