From d5fd93f64a046371c4d2befac0f2cd5452e91801 Mon Sep 17 00:00:00 2001
From: penurin <71948-penurin@users.noreply.gitgud.io>
Date: Fri, 27 Dec 2024 17:05:48 +0000
Subject: [PATCH] Adjust release o1 context to 200K; double Google AI context to 2M

---
 .../request/preprocessors/validate-context-size.ts        | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/proxy/middleware/request/preprocessors/validate-context-size.ts b/src/proxy/middleware/request/preprocessors/validate-context-size.ts
index 9d7cab59..887b039a 100644
--- a/src/proxy/middleware/request/preprocessors/validate-context-size.ts
+++ b/src/proxy/middleware/request/preprocessors/validate-context-size.ts
@@ -7,7 +7,7 @@ import { RequestPreprocessor } from "../index";
 const CLAUDE_MAX_CONTEXT = config.maxContextTokensAnthropic;
 const OPENAI_MAX_CONTEXT = config.maxContextTokensOpenAI;
 // todo: make configurable
-const GOOGLE_AI_MAX_CONTEXT = 1024000;
+const GOOGLE_AI_MAX_CONTEXT = 2048000;
 const MISTRAL_AI_MAX_CONTENT = 131072;
 
 /**
@@ -68,6 +68,12 @@ export const validateContextSize: RequestPreprocessor = async (req) => {
     modelMax = 131072;
   } else if (model.match(/^gpt-4(-\d{4})?-vision(-preview)?$/)) {
     modelMax = 131072;
+  } else if (model.match(/^o1(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 200000;
+  } else if (model.match(/^o1-mini(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 128000;
+  } else if (model.match(/^o1-preview(-\d{4}-\d{2}-\d{2})?$/)) {
+    modelMax = 128000;
   } else if (model.match(/gpt-3.5-turbo/)) {
     modelMax = 16384;
   } else if (model.match(/gpt-4-32k/)) {
-- 
GitLab