diff --git a/locales/ar/modelProvider.json b/locales/ar/modelProvider.json
index 57c9517afd2f7..63cc3f6c86068 100644
--- a/locales/ar/modelProvider.json
+++ b/locales/ar/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "يجب أن يتضمن العنوان، بخلاف الافتراضي، http(s)://",
- "placeholder": "https://api.anthropic.com",
- "title": "عنوان وكيل API"
- },
"title": "Anthropic",
"token": {
"desc": "أدخل مفتاح API الخاص بـ Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "بالإضافة إلى العنوان الافتراضي، يجب أن يتضمن http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "عنوان وكيل API"
- },
"title": "Google",
"token": {
"desc": "أدخل مفتاح API الخاص بـ Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "اختبر ما إذا تم إدخال عنوان الوكيل بشكل صحيح"
+ "desc": "اختبر ما إذا تم إدخال عنوان الوكيل بشكل صحيح",
+ "title": "فحص الاتصال"
},
"customModelName": {
"desc": "أضف نماذج مخصصة، استخدم الفاصلة (،) لفصل عدة نماذج",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "أدخل عنوان واجهة برمجة التطبيقات الخاص بـ Ollama، إذا لم يتم تحديده محليًا، يمكن تركه فارغًا",
- "placeholder": "http://127.0.0.1:11434",
"title": "عنوان وكيل الواجهة"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "يجب أن يتضمن العنوان، بخلاف الافتراضي، http(s)://",
- "placeholder": "https://api.openai.com/v1",
- "title": "عنوان وكيل الواجهة"
- },
"title": "OpenAI",
"token": {
"desc": "استخدم مفتاح OpenAI الخاص بك",
diff --git a/locales/ar/setting.json b/locales/ar/setting.json
index 4ad4c0cd7664d..ca67838e88a25 100644
--- a/locales/ar/setting.json
+++ b/locales/ar/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "فحص",
"desc": "اختبار ما إذا كان مفتاح واجهة البرمجة وعنوان الوكيل مملوء بشكل صحيح",
- "ollamaDesc": "اختبار عنوان الوكيل للتأكد من صحة الملء",
"pass": "تمت المراقبة",
"title": "فحص الاتصال"
},
@@ -96,6 +95,10 @@
"title": "قائمة النماذج",
"total": "متاح {{count}} نموذج"
},
+ "proxyUrl": {
+ "desc": "يجب أن يتضمن عنوان الوكيل API بالإضافة إلى العنوان الافتراضي http(s)://",
+ "title": "عنوان وكيل API"
+ },
"waitingForMore": "يتم <1>التخطيط لتوفير1> المزيد من النماذج، ترقبوا المزيد ✨"
},
"ollama": {
diff --git a/locales/bg-BG/modelProvider.json b/locales/bg-BG/modelProvider.json
index c91ecd56def12..62352423b8240 100644
--- a/locales/bg-BG/modelProvider.json
+++ b/locales/bg-BG/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Освен адреса по подразбиране, задължително трябва да включва http(s)://",
- "placeholder": "https://api.anthropic.com",
- "title": "Адрес на API прокси"
- },
"title": "Anthropic",
"token": {
"desc": "Въведете API Key, получен от Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "Изисква се адрес, включително http(s)://, освен ако не е по подразбиране",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "Адрес на API прокси"
- },
"title": "Google",
"token": {
"desc": "Въведете API Key, получен от Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Тестване дали адресът на прокси е попълнен правилно"
+ "desc": "Тестване дали адресът на прокси е попълнен правилно",
+ "title": "Проверка на свързаност"
},
"customModelName": {
"desc": "Добавяне на персонализирани модели, използвайте запетая (,) за разделяне на множество модели",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Въведете адрес на Ollama интерфейсния прокси, оставете празно, ако локално не е указано специално",
- "placeholder": "http://127.0.0.1:11434",
"title": "Адрес на прокси интерфейс"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Освен адреса по подразбиране, задължително трябва да включва http(s)://",
- "placeholder": "https://api.openai.com/v1",
- "title": "Адрес на прокси интерфейс"
- },
"title": "OpenAI",
"token": {
"desc": "Използвайте собствения си OpenAI Key",
diff --git a/locales/bg-BG/setting.json b/locales/bg-BG/setting.json
index 01ab04b0e64b6..dd6037d9cc685 100644
--- a/locales/bg-BG/setting.json
+++ b/locales/bg-BG/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Провери",
"desc": "Проверете дали API ключът и адресът на прокси сървъра са попълнени правилно",
- "ollamaDesc": "Проверете дали адресът на прокси се попълва правилно",
"pass": "Проверката е успешна",
"title": "Проверка на свързаността"
},
@@ -96,6 +95,10 @@
"title": "Списък с модели",
"total": "Общо {{count}} налични модела"
},
+ "proxyUrl": {
+ "desc": "Включващ адреса по подразбиране, трябва да включва http(s)://",
+ "title": "Адрес на API прокси"
+ },
"waitingForMore": "Още модели са <1>планирани да бъдат добавени1>, очаквайте ✨"
},
"ollama": {
diff --git a/locales/de-DE/modelProvider.json b/locales/de-DE/modelProvider.json
index 5fb4f32af92ee..2d54eb5033a23 100644
--- a/locales/de-DE/modelProvider.json
+++ b/locales/de-DE/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Muss neben der Standardadresse auch http(s):// enthalten",
- "placeholder": "https://api.anthropic.com",
- "title": "API-Proxy-Adresse"
- },
"title": "Anthropic",
"token": {
"desc": "Geben Sie Ihren API-Key von Anthropic ein",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "除默认地址外,必须包含 http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "API 代理地址"
- },
"title": "Google",
"token": {
"desc": "Geben Sie Ihren API-Key von Google ein",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Testen Sie, ob die Proxy-Adresse korrekt eingetragen wurde"
+ "desc": "Testen Sie, ob die Proxy-Adresse korrekt eingetragen wurde",
+ "title": "Konnektivitätsprüfung"
},
"customModelName": {
"desc": "Fügen Sie benutzerdefinierte Modelle hinzu, trennen Sie mehrere Modelle mit Kommas (,)",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Geben Sie die Proxy-Adresse der Ollama-Schnittstelle ein, leer lassen, wenn lokal nicht spezifiziert",
- "placeholder": "http://127.0.0.1:11434",
"title": "Schnittstellen-Proxy-Adresse"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Muss neben der Standardadresse auch http(s):// enthalten",
- "placeholder": "https://api.openai.com/v1",
- "title": "Schnittstellen-Proxy-Adresse"
- },
"title": "OpenAI",
"token": {
"desc": "Verwenden Sie Ihren eigenen OpenAI-Key",
diff --git a/locales/de-DE/setting.json b/locales/de-DE/setting.json
index 9b0e9016d6576..60b86ef525c2c 100644
--- a/locales/de-DE/setting.json
+++ b/locales/de-DE/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Überprüfen",
"desc": "Überprüfen Sie, ob der API-Schlüssel und die Proxy-Adresse korrekt eingegeben wurden",
- "ollamaDesc": "Testen Sie, ob die Proxy-Adresse korrekt eingegeben wurde",
"pass": "Überprüfung bestanden",
"title": "Konnektivitätsprüfung"
},
@@ -96,6 +95,10 @@
"title": "Modellliste",
"total": "Insgesamt {{count}} Modelle verfügbar"
},
+ "proxyUrl": {
+ "desc": "Außer der Standardadresse muss http(s):// enthalten sein",
+ "title": "API-Proxy-Adresse"
+ },
"waitingForMore": "Weitere Modelle werden <1>geplant1>, bitte freuen Sie sich auf weitere Updates ✨"
},
"ollama": {
diff --git a/locales/en-US/modelProvider.json b/locales/en-US/modelProvider.json
index 5eb0055fe7c79..7884fffc30d49 100644
--- a/locales/en-US/modelProvider.json
+++ b/locales/en-US/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Must include http(s):// in addition to the default address",
- "placeholder": "https://api.anthropic.com",
- "title": "API Proxy Address"
- },
"title": "Anthropic",
"token": {
"desc": "Enter the API Key from Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "Must include http(s):// besides the default address",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "API Proxy Address"
- },
"title": "Google",
"token": {
"desc": "Enter the API Key from Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Test if the proxy address is correctly filled in"
+ "desc": "Test if the proxy address is correctly filled in",
+ "title": "Connectivity Check"
},
"customModelName": {
"desc": "Add custom models, separate multiple models with commas",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Enter the Ollama interface proxy address, leave blank if not specified locally",
- "placeholder": "http://127.0.0.1:11434",
"title": "Interface proxy address"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Must include http(s):// besides the default address",
- "placeholder": "https://api.openai.com/v1",
- "title": "Interface proxy address"
- },
"title": "OpenAI",
"token": {
"desc": "Use your own OpenAI Key",
diff --git a/locales/en-US/setting.json b/locales/en-US/setting.json
index 962680e31477d..7f48aeb694216 100644
--- a/locales/en-US/setting.json
+++ b/locales/en-US/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Check",
"desc": "Test if the Api Key and proxy address are filled in correctly",
- "ollamaDesc": "Check if the proxy address is filled in correctly",
"pass": "Check Passed",
"title": "Connectivity Check"
},
@@ -96,6 +95,10 @@
"title": "Model List",
"total": "{{count}} models available in total"
},
+ "proxyUrl": {
+ "desc": "Must include http(s):// in addition to the default address",
+ "title": "API Proxy Address"
+ },
"waitingForMore": "More models are <1>planned to be added1>, stay tuned ✨"
},
"ollama": {
diff --git a/locales/es-ES/modelProvider.json b/locales/es-ES/modelProvider.json
index ae3b47478ba52..5dc1106e27231 100644
--- a/locales/es-ES/modelProvider.json
+++ b/locales/es-ES/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Además de la dirección predeterminada, debe incluir http(s)://",
- "placeholder": "https://api.anthropic.com",
- "title": "Dirección del proxy de API"
- },
"title": "Anthropic",
"token": {
"desc": "Introduce la clave API proporcionada por Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "Aparte de la dirección predeterminada, debe incluir http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "Dirección del proxy de la API"
- },
"title": "Google",
"token": {
"desc": "Introduce la clave API proporcionada por Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Prueba si la dirección del proxy de la interfaz se ha introducido correctamente"
+ "desc": "Prueba si la dirección del proxy de la interfaz se ha introducido correctamente",
+ "title": "Comprobación de conectividad"
},
"customModelName": {
"desc": "Añade modelos personalizados, separa múltiples modelos con comas (,)",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Introduce la dirección del proxy de la interfaz de Ollama, déjalo en blanco si no se ha especificado localmente",
- "placeholder": "http://127.0.0.1:11434",
"title": "Dirección del proxy de la interfaz"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Además de la dirección predeterminada, debe incluir http(s)://",
- "placeholder": "https://api.openai.com/v1",
- "title": "Dirección del proxy de la interfaz"
- },
"title": "OpenAI",
"token": {
"desc": "Usa tu propia clave de OpenAI",
diff --git a/locales/es-ES/setting.json b/locales/es-ES/setting.json
index 482aa6c1bc6fd..7a98bce395102 100644
--- a/locales/es-ES/setting.json
+++ b/locales/es-ES/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Comprobar",
"desc": "Comprueba si la clave API y la dirección del proxy están escritas correctamente",
- "ollamaDesc": "Verifica si la dirección del proxy está correctamente completada",
"pass": "Comprobación exitosa",
"title": "Comprobación de conectividad"
},
@@ -96,6 +95,10 @@
"title": "Lista de modelos",
"total": "Total de {{count}} modelos disponibles"
},
+ "proxyUrl": {
+ "desc": "Además de la dirección predeterminada, debe incluir http(s)://",
+ "title": "Dirección del proxy de la API"
+ },
"waitingForMore": "Más modelos están en <1>planificación para su incorporación1>, ¡estén atentos! ✨"
},
"ollama": {
diff --git a/locales/fr-FR/modelProvider.json b/locales/fr-FR/modelProvider.json
index 62c51f6e9f5a4..aaf3915152b99 100644
--- a/locales/fr-FR/modelProvider.json
+++ b/locales/fr-FR/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "En dehors de l'adresse par défaut, doit inclure http(s)://",
- "placeholder": "https://api.anthropic.com",
- "title": "Adresse de l'API Proxy"
- },
"title": "Anthropic",
"token": {
"desc": "Saisissez la clé API d'Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "Incluez http(s):// en plus de l'adresse par défaut",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "Adresse du proxy API"
- },
"title": "Google",
"token": {
"desc": "Saisissez la clé API de Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Vérifiez si l'adresse du proxy est correctement saisie"
+ "desc": "Vérifiez si l'adresse du proxy est correctement saisie",
+ "title": "Vérification de la connectivité"
},
"customModelName": {
"desc": "Ajoutez un modèle personnalisé, séparez les modèles multiples par des virgules (,)",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Saisissez l'adresse du proxy Ollama, laissez vide si non spécifié localement",
- "placeholder": "http://127.0.0.1:11434",
"title": "Adresse du proxy"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "En dehors de l'adresse par défaut, doit inclure http(s)://",
- "placeholder": "https://api.openai.com/v1",
- "title": "Adresse du proxy"
- },
"title": "OpenAI",
"token": {
"desc": "Utilisez votre propre clé OpenAI",
diff --git a/locales/fr-FR/setting.json b/locales/fr-FR/setting.json
index 310ab92745ab9..e043b6824994b 100644
--- a/locales/fr-FR/setting.json
+++ b/locales/fr-FR/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Vérifier",
"desc": "Vérifie si la clé API et l'adresse du proxy sont correctement renseignées",
- "ollamaDesc": "Vérifiez si l'adresse du proxy est correctement renseignée",
"pass": "Vérification réussie",
"title": "Vérification de la connectivité"
},
@@ -96,6 +95,10 @@
"title": "Liste des modèles",
"total": "{{count}} modèles disponibles au total"
},
+ "proxyUrl": {
+ "desc": "Doit inclure http(s):// en plus de l'adresse par défaut",
+ "title": "Adresse du proxy de l'API"
+ },
"waitingForMore": "Plus de modèles sont en cours de <1>planification pour être ajoutés1>, restez à l'écoute ✨"
},
"ollama": {
diff --git a/locales/it-IT/modelProvider.json b/locales/it-IT/modelProvider.json
index e488b950c639b..d70fb02bc25ea 100644
--- a/locales/it-IT/modelProvider.json
+++ b/locales/it-IT/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Deve includere http(s):// oltre all'indirizzo predefinito",
- "placeholder": "https://api.anthropic.com",
- "title": "Indirizzo API Proxy"
- },
"title": "Anthropic",
"token": {
"desc": "Inserisci la chiave API da Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "Deve includere http(s):// oltre all'indirizzo predefinito",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "Indirizzo dell'API Proxy"
- },
"title": "Google",
"token": {
"desc": "Inserisci la chiave API da Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Verifica se l'indirizzo del proxy è stato compilato correttamente"
+ "desc": "Verifica se l'indirizzo del proxy è stato compilato correttamente",
+ "title": "Controllo della connettività"
},
"customModelName": {
"desc": "Aggiungi modelli personalizzati, separati da virgola (,)",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Inserisci l'indirizzo del proxy dell'interfaccia Ollama. Lascia vuoto se non specificato localmente",
- "placeholder": "http://127.0.0.1:11434",
"title": "Indirizzo del proxy dell'interfaccia"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Deve includere http(s):// oltre all'indirizzo predefinito",
- "placeholder": "https://api.openai.com/v1",
- "title": "Indirizzo del proxy dell'interfaccia"
- },
"title": "OpenAI",
"token": {
"desc": "Utilizza la tua chiave OpenAI",
diff --git a/locales/it-IT/setting.json b/locales/it-IT/setting.json
index e4ae685bbef5e..db6142e1c1765 100644
--- a/locales/it-IT/setting.json
+++ b/locales/it-IT/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Verifica",
"desc": "Verifica se la chiave API e l'indirizzo del proxy sono stati inseriti correttamente",
- "ollamaDesc": "Verifica se l'indirizzo del proxy è stato compilato correttamente",
"pass": "Verifica superata",
"title": "Verifica di connettività"
},
@@ -96,6 +95,10 @@
"title": "Elenco dei modelli",
"total": "Totale modelli disponibili: {{count}}"
},
+ "proxyUrl": {
+ "desc": "Deve includere http(s):// oltre all'indirizzo predefinito",
+ "title": "Indirizzo del proxy API"
+ },
"waitingForMore": "Altri modelli sono in fase di <1> pianificazione per l'integrazione 1>, resta sintonizzato ✨"
},
"ollama": {
diff --git a/locales/ja-JP/modelProvider.json b/locales/ja-JP/modelProvider.json
index 0fb9479dcf8b8..a56953f6d2cad 100644
--- a/locales/ja-JP/modelProvider.json
+++ b/locales/ja-JP/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "デフォルトアドレス以外は、http(s)://を含む必要があります",
- "placeholder": "https://api.anthropic.com",
- "title": "APIプロキシアドレス"
- },
"title": "Anthropic",
"token": {
"desc": "AnthropicからのAPIキーを入力してください",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "デフォルトのアドレスに加えて、http(s)://を含める必要があります",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "APIプロキシアドレス"
- },
"title": "Google",
"token": {
"desc": "Google の API Key を入力してください",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "プロキシアドレスが正しく入力されているかをテストします"
+ "desc": "プロキシアドレスが正しく入力されているかをテストします",
+ "title": "連結性チェック"
},
"customModelName": {
"desc": "カスタムモデルを追加します。複数のモデルはカンマ(,)で区切ります",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Ollamaプロキシインターフェースアドレスを入力してください。ローカルで追加の指定がない場合は空白のままにしてください",
- "placeholder": "http://127.0.0.1:11434",
"title": "プロキシインターフェースアドレス"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "デフォルト以外のアドレスは、http(s)://を含める必要があります",
- "placeholder": "https://api.openai.com/v1",
- "title": "プロキシインターフェースアドレス"
- },
"title": "OpenAI",
"token": {
"desc": "独自のOpenAIキーを使用します",
diff --git a/locales/ja-JP/setting.json b/locales/ja-JP/setting.json
index 77b54358fe1b1..d05c39e6f16a6 100644
--- a/locales/ja-JP/setting.json
+++ b/locales/ja-JP/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "チェック",
"desc": "APIキーとプロキシアドレスが正しく記入されているかをテスト",
- "ollamaDesc": "代理アドレスが正しく入力されているかをテストします",
"pass": "チェック合格",
"title": "接続性チェック"
},
@@ -96,6 +95,10 @@
"title": "モデルリスト",
"total": "合計 {{count}} 個のモデルが利用可能です"
},
+ "proxyUrl": {
+ "desc": "デフォルトのアドレスに加えて、http(s)://を含める必要があります",
+ "title": "APIプロキシアドレス"
+ },
"waitingForMore": "さらに多くのモデルが <1>計画されています1>。お楽しみに ✨"
},
"ollama": {
diff --git a/locales/ko-KR/modelProvider.json b/locales/ko-KR/modelProvider.json
index 648344240538f..210993cc16732 100644
--- a/locales/ko-KR/modelProvider.json
+++ b/locales/ko-KR/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "기본 주소 이외에 http(s)://를 포함해야 합니다.",
- "placeholder": "https://api.anthropic.com",
- "title": "API 프록시 주소"
- },
"title": "Anthropic",
"token": {
"desc": "Anthropic에서 제공하는 API 키를 입력하세요.",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "기본 주소 이외에 http(s)://를 포함해야 합니다.",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "API 프록시 주소"
- },
"title": "Google",
"token": {
"desc": "Google에서 제공하는 API 키를 입력하세요.",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "프록시 주소가 올바르게 입력되었는지 테스트합니다"
+ "desc": "프록시 주소가 올바르게 입력되었는지 테스트합니다",
+ "title": "연결성 검사"
},
"customModelName": {
"desc": "사용자 정의 모델을 추가하려면 쉼표(,)로 구분하여 여러 모델을 입력하세요",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Ollama 인터페이스 프록시 주소를 입력하세요. 로컬에서 별도로 지정하지 않은 경우 비워둘 수 있습니다",
- "placeholder": "http://127.0.0.1:11434",
"title": "인터페이스 프록시 주소"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "기본 주소 이외에 http(s)://를 포함해야 합니다",
- "placeholder": "https://api.openai.com/v1",
- "title": "인터페이스 프록시 주소"
- },
"title": "OpenAI",
"token": {
"desc": "자체 OpenAI 키를 사용하세요",
diff --git a/locales/ko-KR/setting.json b/locales/ko-KR/setting.json
index 3f1ea2060e258..d632e718e48ca 100644
--- a/locales/ko-KR/setting.json
+++ b/locales/ko-KR/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "확인",
"desc": "API Key 및 프록시 주소가 올바르게 입력되었는지 테스트합니다",
- "ollamaDesc": "프록시 주소가 올바르게 입력되었는지 확인합니다.",
"pass": "확인 통과",
"title": "연결성 확인"
},
@@ -96,6 +95,10 @@
"title": "모델 목록",
"total": "총 {{count}} 개 모델 사용 가능"
},
+ "proxyUrl": {
+ "desc": "기본 주소 이외에 http(s)://를 포함해야 합니다.",
+ "title": "API 프록시 주소"
+ },
"waitingForMore": "<1>계획에 따라 더 많은 모델이 추가될 예정1>이니 기대해 주세요 ✨"
},
"ollama": {
diff --git a/locales/nl-NL/modelProvider.json b/locales/nl-NL/modelProvider.json
index b45609b70e436..b1b8f86f5cb3f 100644
--- a/locales/nl-NL/modelProvider.json
+++ b/locales/nl-NL/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Naast het standaardadres moet http(s):// worden opgenomen",
- "placeholder": "https://api.anthropic.com",
- "title": "API Proxy Adres"
- },
"title": "Anthropic",
"token": {
"desc": "Voer de API Key van Anthropic in",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "除默认地址外,必须包含 http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "API 代理地址"
- },
"title": "Google",
"token": {
"desc": "Voer de API Key van Google in",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Test of het proxyadres correct is ingevuld"
+ "desc": "Test of het proxyadres correct is ingevuld",
+ "title": "Connectiviteitscontrole"
},
"customModelName": {
"desc": "Voeg aangepaste modellen toe, gebruik een komma (,) om meerdere modellen te scheiden",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Voer het Ollama interface proxyadres in, laat leeg indien niet specifiek aangegeven",
- "placeholder": "http://127.0.0.1:11434",
"title": "Interface Proxyadres"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Naast het standaardadres moet http(s):// worden opgenomen",
- "placeholder": "https://api.openai.com/v1",
- "title": "Interface Proxyadres"
- },
"title": "OpenAI",
"token": {
"desc": "Gebruik je eigen OpenAI Key",
diff --git a/locales/nl-NL/setting.json b/locales/nl-NL/setting.json
index a7212965229bc..997f2099770d6 100644
--- a/locales/nl-NL/setting.json
+++ b/locales/nl-NL/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Controleren",
"desc": "Test of de API-sleutel en proxyadres correct zijn ingevuld",
- "ollamaDesc": "Controleer of het proxy-adres correct is ingevuld",
"pass": "Succesvol gecontroleerd",
"title": "Connectiviteitscontrole"
},
@@ -96,6 +95,10 @@
"title": "Modellijst",
"total": "In totaal {{count}} modellen beschikbaar"
},
+ "proxyUrl": {
+ "desc": "Moet http(s):// bevatten, naast het standaardadres",
+ "title": "API Proxy Adres"
+ },
"waitingForMore": "Meer modellen worden <1>gepland om te worden toegevoegd1>, dus blijf op de hoogte ✨"
},
"ollama": {
diff --git a/locales/pl-PL/modelProvider.json b/locales/pl-PL/modelProvider.json
index fe787a286392a..4b83435118991 100644
--- a/locales/pl-PL/modelProvider.json
+++ b/locales/pl-PL/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Oprócz domyślnego adresu, musi zawierać http(s)://",
- "placeholder": "https://api.anthropic.com",
- "title": "Adres proxy API"
- },
"title": "Anthropic",
"token": {
"desc": "Wprowadź klucz API uzyskany od Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "除默认地址外,必须包含 http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "API 代理地址"
- },
"title": "Google",
"token": {
"desc": "Wprowadź klucz API uzyskany od Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Test czy adres proxy jest poprawnie wypełniony"
+ "desc": "Test czy adres proxy jest poprawnie wypełniony",
+ "title": "Sprawdzanie łączności"
},
"customModelName": {
"desc": "Dodaj własny model, oddzielaj modele przecinkiem (,)",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Wprowadź adres rest API Ollama, jeśli lokalnie nie określono, pozostaw puste",
- "placeholder": "http://127.0.0.1:11434",
"title": "Adres proxy API"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Oprócz domyślnego adresu, musi zawierać http(s)://",
- "placeholder": "https://api.openai.com/v1",
- "title": "Adres proxy API"
- },
"title": "OpenAI",
"token": {
"desc": "Użyj własnego klucza OpenAI",
diff --git a/locales/pl-PL/setting.json b/locales/pl-PL/setting.json
index 4253b0df6b9bf..677a471fd32da 100644
--- a/locales/pl-PL/setting.json
+++ b/locales/pl-PL/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Sprawdź",
"desc": "Sprawdź poprawność wypełnienia klucza API i adresu proxy",
- "ollamaDesc": "Sprawdź, czy adres proxy został poprawnie wprowadzony",
"pass": "Połączenie udane",
"title": "Test połączenia"
},
@@ -96,6 +95,10 @@
"title": "Lista modeli",
"total": "Razem dostępne są {{count}} modele"
},
+ "proxyUrl": {
+ "desc": "Oprócz domyślnego adresu, musi zawierać http(s)://",
+ "title": "Adres proxy API"
+ },
"waitingForMore": "Więcej modeli jest obecnie w <1>planach dołączenia1>, prosimy o cierpliwość ✨"
},
"ollama": {
diff --git a/locales/pt-BR/modelProvider.json b/locales/pt-BR/modelProvider.json
index 373f1d4860d55..8e98cc9a99b2c 100644
--- a/locales/pt-BR/modelProvider.json
+++ b/locales/pt-BR/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Além do endereço padrão, deve incluir http(s)://",
- "placeholder": "https://api.anthropic.com",
- "title": "Endereço do Proxy da API"
- },
"title": "Anthropic",
"token": {
"desc": "Insira sua API Key fornecida pela Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "Além do endereço padrão, deve incluir http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "Endereço do Proxy da API"
- },
"title": "Google",
"token": {
"desc": "Insira sua API Key fornecida pelo Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Teste se o endereço do proxy está corretamente preenchido"
+ "desc": "Teste se o endereço do proxy está corretamente preenchido",
+ "title": "Verificação de Conectividade"
},
"customModelName": {
"desc": "Adicione modelos personalizados, separe múltiplos modelos com vírgulas (,)",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Insira o endereço do proxy de interface da Ollama, se não foi especificado localmente, pode deixar em branco",
- "placeholder": "http://127.0.0.1:11434",
"title": "Endereço do Proxy de Interface"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Além do endereço padrão, deve incluir http(s)://",
- "placeholder": "https://api.openai.com/v1",
- "title": "Endereço do Proxy de Interface"
- },
"title": "OpenAI",
"token": {
"desc": "Use sua própria OpenAI Key",
diff --git a/locales/pt-BR/setting.json b/locales/pt-BR/setting.json
index d8e61d59647b4..4e0e0ac014354 100644
--- a/locales/pt-BR/setting.json
+++ b/locales/pt-BR/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Verificar",
"desc": "Verifica se a Api Key e o endereço do proxy estão preenchidos corretamente",
- "ollamaDesc": "Verifique se o endereço do proxy está preenchido corretamente",
"pass": "Verificação aprovada",
"title": "Verificação de Conectividade"
},
@@ -96,6 +95,10 @@
"title": "Lista de Modelos",
"total": "Total de {{count}} modelos disponíveis"
},
+ "proxyUrl": {
+ "desc": "Além do endereço padrão, deve incluir http(s)://",
+ "title": "Endereço do Proxy da API"
+ },
"waitingForMore": "Mais modelos estão sendo <1>planejados para serem adicionados1>, aguarde ansiosamente ✨"
},
"ollama": {
diff --git a/locales/ru-RU/modelProvider.json b/locales/ru-RU/modelProvider.json
index 387b8ca9f8dec..74f931d0589af 100644
--- a/locales/ru-RU/modelProvider.json
+++ b/locales/ru-RU/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Должен включать http(s):// помимо стандартного адреса",
- "placeholder": "https://api.anthropic.com",
- "title": "Адрес API-прокси"
- },
"title": "Anthropic",
"token": {
"desc": "Введите свой API Key от Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "Помимо адреса по умолчанию, должен включать http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "Адрес прокси-API"
- },
"title": "Google",
"token": {
"desc": "Введите свой API Key от Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Проверить правильность адреса прокси"
+ "desc": "Проверить правильность адреса прокси",
+ "title": "Проверка связности"
},
"customModelName": {
"desc": "Добавить кастомные модели, разделяя их через запятую (,)",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Введите адрес прокси-интерфейса Ollama, если локально не указано иное, можете оставить пустым",
- "placeholder": "http://127.0.0.1:11434",
"title": "Адрес прокси-интерфейса"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Должен включать http(s):// помимо стандартного адреса",
- "placeholder": "https://api.openai.com/v1",
- "title": "Адрес прокси-интерфейса"
- },
"title": "OpenAI",
"token": {
"desc": "Используйте свой собственный OpenAI Key",
diff --git a/locales/ru-RU/setting.json b/locales/ru-RU/setting.json
index 8708dafb83d83..eef014074139e 100644
--- a/locales/ru-RU/setting.json
+++ b/locales/ru-RU/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Проверить",
"desc": "Проверьте правильность заполнения ключа API и адреса прокси",
- "ollamaDesc": "Проверьте правильность заполнения адреса прокси",
"pass": "Проверка пройдена",
"title": "Проверка доступности"
},
@@ -96,6 +95,10 @@
"title": "Список моделей",
"total": "Всего доступно {{count}} моделей"
},
+ "proxyUrl": {
+ "desc": "За исключением адреса по умолчанию, должен включать http(s)://",
+ "title": "Адрес прокси API"
+ },
"waitingForMore": "Больше моделей доступно в <1>плане подключения1>, ожидайте ✨"
},
"ollama": {
diff --git a/locales/tr-TR/modelProvider.json b/locales/tr-TR/modelProvider.json
index b6020115ca54c..eedd099be1db0 100644
--- a/locales/tr-TR/modelProvider.json
+++ b/locales/tr-TR/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Varsayılan adres dışında, http(s):// içermelidir",
- "placeholder": "https://api.anthropic.com",
- "title": "API Proxy Adresi"
- },
"title": "Anthropic",
"token": {
"desc": "Anthropic'ten gelen API Key'i girin",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "除默认地址外,必须包含 http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "API 代理地址"
- },
"title": "Google",
"token": {
"desc": "Google'dan gelen API Key'i girin",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Proxy adresinin doğru girilip girilmediğini test edin"
+ "desc": "Proxy adresinin doğru girilip girilmediğini test edin",
+ "title": "Bağlantı Kontrolü"
},
"customModelName": {
"desc": "Özel modeller ekleyin, birden fazla model için virgül (,) kullanın",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Ollama arayüz proxy adresini girin, yerel olarak belirtilmemişse boş bırakılabilir",
- "placeholder": "http://127.0.0.1:11434",
"title": "Arayüz Proxy Adresi"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Varsayılan adres dışında, http(s):// içermelidir",
- "placeholder": "https://api.openai.com/v1",
- "title": "Arayüz Proxy Adresi"
- },
"title": "OpenAI",
"token": {
"desc": "Kendi OpenAI Key'inizi kullanın",
diff --git a/locales/tr-TR/setting.json b/locales/tr-TR/setting.json
index 9d170c515495b..0f0d4baf54713 100644
--- a/locales/tr-TR/setting.json
+++ b/locales/tr-TR/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Kontrol Et",
"desc": "Api Anahtarı ve vekil adresinin doğru şekilde doldurulup doldurulmadığını test eder",
- "ollamaDesc": "Proxy adresinin doğru şekilde doldurulup doldurulmadığını test edin",
"pass": "Kontrol Başarılı",
"title": "Bağlantı Kontrolü"
},
@@ -96,6 +95,10 @@
"title": "Model Listesi",
"total": "Toplam {{count}} model kullanılabilir"
},
+ "proxyUrl": {
+ "desc": "Varsayılan adres dışında, http(s):// içermelidir",
+ "title": "API Proxy Adresi"
+ },
"waitingForMore": "Daha fazla model eklenmesi planlanıyor ✨"
},
"ollama": {
diff --git a/locales/vi-VN/modelProvider.json b/locales/vi-VN/modelProvider.json
index f6097bf77a949..6fd3ea5ca8634 100644
--- a/locales/vi-VN/modelProvider.json
+++ b/locales/vi-VN/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "Ngoài địa chỉ mặc định, phải bao gồm http(s)://",
- "placeholder": "https://api.anthropic.com",
- "title": "Địa chỉ API proxy"
- },
"title": "Anthropic",
"token": {
"desc": "Nhập API Key từ Anthropic",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "Ngoài địa chỉ mặc định, phải bao gồm http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "Địa chỉ Proxy API"
- },
"title": "Google",
"token": {
"desc": "Nhập API Key từ Google",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "Kiểm tra địa chỉ proxy có được nhập chính xác không"
+ "desc": "Kiểm tra địa chỉ proxy có được nhập chính xác không",
+ "title": "Kiểm tra tính liên thông"
},
"customModelName": {
"desc": "Thêm mô hình tùy chỉnh, sử dụng dấu phẩy (,) để tách biệt nhiều mô hình",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "Nhập địa chỉ proxy API của Ollama, có thể để trống nếu không chỉ định cụ thể",
- "placeholder": "http://127.0.0.1:11434",
"title": "Địa chỉ proxy API"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "Ngoài địa chỉ mặc định, phải bao gồm http(s)://",
- "placeholder": "https://api.openai.com/v1",
- "title": "Địa chỉ proxy API"
- },
"title": "OpenAI",
"token": {
"desc": "Sử dụng OpenAI Key của riêng bạn",
diff --git a/locales/vi-VN/setting.json b/locales/vi-VN/setting.json
index de4e6ddbccf5a..5ff03bd7b30b6 100644
--- a/locales/vi-VN/setting.json
+++ b/locales/vi-VN/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "Kiểm tra",
"desc": "Kiểm tra xem Api Key và địa chỉ proxy đã được điền đúng chưa",
- "ollamaDesc": "Kiểm tra xem địa chỉ proxy đã được điền đúng chưa",
"pass": "Kiểm tra thành công",
"title": "Kiểm tra kết nối"
},
@@ -96,6 +95,10 @@
"title": "Danh sách mô hình",
"total": "Tổng cộng có {{count}} mô hình có sẵn"
},
+ "proxyUrl": {
+ "desc": "Ngoài địa chỉ mặc định, phải bao gồm http(s)://",
+ "title": "Địa chỉ Proxy API"
+ },
"waitingForMore": "Có thêm mô hình đang <1>được lên kế hoạch tích hợp</1>, hãy chờ đợi ✨"
},
"ollama": {
diff --git a/locales/zh-CN/error.json b/locales/zh-CN/error.json
index 2a3dca19a8cec..e9335d653a836 100644
--- a/locales/zh-CN/error.json
+++ b/locales/zh-CN/error.json
@@ -65,7 +65,7 @@
"PluginOpenApiInitError": "很抱歉,OpenAPI 客户端初始化失败,请检查 OpenAPI 的配置信息是否正确",
"InvalidAccessCode": "密码不正确或为空,请输入正确的访问密码,或者添加自定义 API Key",
"InvalidClerkUser": "很抱歉,你当前尚未登录,请先登录或注册账号后继续操作",
- "LocationNotSupportError": "很抱歉,你的所在位置不支持此模型服务,可能是由于地区限制或服务未开通。请确认当前位置是否支持使用此服务,或尝试使用其他位置信息。",
+ "LocationNotSupportError": "很抱歉,你的所在地区不支持此模型服务,可能是由于区域限制或服务未开通。请确认当前地区是否支持使用此服务,或尝试切换到其他地区后重试。",
"OpenAIBizError": "请求 OpenAI 服务出错,请根据以下信息排查或重试",
"NoOpenAIAPIKey": "OpenAI API Key 为空,请添加自定义 OpenAI API Key",
"ZhipuBizError": "请求智谱服务出错,请根据以下信息排查或重试",
diff --git a/locales/zh-CN/modelProvider.json b/locales/zh-CN/modelProvider.json
index 682dc96cbab5f..624993ad2907c 100644
--- a/locales/zh-CN/modelProvider.json
+++ b/locales/zh-CN/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "除默认地址外,必须包含 http(s)://",
- "placeholder": "https://api.anthropic.com",
- "title": "API 代理地址"
- },
"title": "Anthropic",
"token": {
"desc": "填入来自 Anthropic 的 API Key",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "除默认地址外,必须包含 http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "API 代理地址"
- },
"title": "Google",
"token": {
"desc": "填入来自 Google 的 API Key",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "测试代理地址是否正确填写"
+ "desc": "测试代理地址是否正确填写",
+ "title": "连通性检查"
},
"customModelName": {
"desc": "增加自定义模型,多个模型使用逗号(,)隔开",
@@ -142,8 +133,7 @@
},
"endpoint": {
"desc": "填入 Ollama 接口代理地址,本地未额外指定可留空",
- "placeholder": "http://127.0.0.1:11434",
- "title": "接口代理地址"
+ "title": "Ollama 服务地址"
},
"setup": {
"cors": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "除默认地址外,必须包含 http(s)://",
- "placeholder": "https://api.openai.com/v1",
- "title": "接口代理地址"
- },
"title": "OpenAI",
"token": {
"desc": "使用自己的 OpenAI Key",
diff --git a/locales/zh-CN/setting.json b/locales/zh-CN/setting.json
index fbe3fc79ef9cd..25826fbb20a5f 100644
--- a/locales/zh-CN/setting.json
+++ b/locales/zh-CN/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "检查",
"desc": "测试 Api Key 与代理地址是否正确填写",
- "ollamaDesc": "测试代理地址是否正确填写",
"pass": "检查通过",
"title": "连通性检查"
},
@@ -96,6 +95,10 @@
"title": "模型列表",
"total": "共 {{count}} 个模型可用"
},
+ "proxyUrl": {
+ "desc": "除默认地址外,必须包含 http(s)://",
+ "title": "API 代理地址"
+ },
"waitingForMore": "更多模型正在 <1>计划接入</1> 中,敬请期待 ✨"
},
"ollama": {
diff --git a/locales/zh-TW/modelProvider.json b/locales/zh-TW/modelProvider.json
index 24704b648b6d4..f4992e9ca4491 100644
--- a/locales/zh-TW/modelProvider.json
+++ b/locales/zh-TW/modelProvider.json
@@ -1,10 +1,5 @@
{
"anthropic": {
- "endpoint": {
- "desc": "除預設地址外,必須包含 http(s)://",
- "placeholder": "https://api.anthropic.com",
- "title": "API 代理地址"
- },
"title": "Anthropic",
"token": {
"desc": "填入來自 Anthropic 的 API 金鑰",
@@ -67,11 +62,6 @@
}
},
"google": {
- "endpoint": {
- "desc": "除了預設地址外,必須包含 http(s)://",
- "placeholder": "https://generativelanguage.googleapis.com",
- "title": "API 代理地址"
- },
"title": "Google",
"token": {
"desc": "填入來自 Google 的 API 金鑰",
@@ -133,7 +123,8 @@
},
"ollama": {
"checker": {
- "desc": "測試代理地址是否正確填寫"
+ "desc": "測試代理地址是否正確填寫",
+ "title": "連通性檢查"
},
"customModelName": {
"desc": "增加自定義模型,多個模型使用逗號(,)隔開",
@@ -142,7 +133,6 @@
},
"endpoint": {
"desc": "填入 Ollama 接口代理地址,本地未額外指定可留空",
- "placeholder": "http://127.0.0.1:11434",
"title": "接口代理地址"
},
"setup": {
@@ -172,11 +162,6 @@
"title": "Ollama"
},
"openai": {
- "endpoint": {
- "desc": "除默認地址外,必須包含 http(s)://",
- "placeholder": "https://api.openai.com/v1",
- "title": "接口代理地址"
- },
"title": "OpenAI",
"token": {
"desc": "使用自己的 OpenAI 金鑰",
diff --git a/locales/zh-TW/setting.json b/locales/zh-TW/setting.json
index 2e366c4e22c53..120f7d7f5e915 100644
--- a/locales/zh-TW/setting.json
+++ b/locales/zh-TW/setting.json
@@ -38,7 +38,6 @@
"checker": {
"button": "檢查",
"desc": "測試 API 金鑰與代理地址是否正確填寫",
- "ollamaDesc": "測試代理地址是否正確填寫",
"pass": "檢查通過",
"title": "連通性檢查"
},
@@ -96,6 +95,10 @@
"title": "模型列表",
"total": "共 {{count}} 個模型可用"
},
+ "proxyUrl": {
+ "desc": "除預設地址外,必須包含 http(s)://",
+ "title": "API 代理位址"
+ },
"waitingForMore": "更多模型正在 <1>計劃接入</1> 中,敬請期待 ✨"
},
"ollama": {
diff --git a/src/app/(main)/settings/llm/Anthropic/index.tsx b/src/app/(main)/settings/llm/Anthropic/index.tsx
index e13fc210cce39..74ed0333bccf6 100644
--- a/src/app/(main)/settings/llm/Anthropic/index.tsx
+++ b/src/app/(main)/settings/llm/Anthropic/index.tsx
@@ -14,7 +14,9 @@ const AnthropicProvider = memo(() => {
}
/>
);
diff --git a/src/app/(main)/settings/llm/Google/index.tsx b/src/app/(main)/settings/llm/Google/index.tsx
index 96312c0249aa0..67890cb0cf050 100644
--- a/src/app/(main)/settings/llm/Google/index.tsx
+++ b/src/app/(main)/settings/llm/Google/index.tsx
@@ -14,7 +14,9 @@ const GoogleProvider = memo(() => {
diff --git a/src/app/(main)/settings/llm/Groq/index.tsx b/src/app/(main)/settings/llm/Groq/index.tsx
index b339f04ffd934..33e4b28d05753 100644
--- a/src/app/(main)/settings/llm/Groq/index.tsx
+++ b/src/app/(main)/settings/llm/Groq/index.tsx
@@ -15,6 +15,9 @@ const GroqProvider = memo(() => {
}
/>
);
diff --git a/src/app/(main)/settings/llm/Ollama/index.tsx b/src/app/(main)/settings/llm/Ollama/index.tsx
index 788abf5b4abf4..174d773f16803 100644
--- a/src/app/(main)/settings/llm/Ollama/index.tsx
+++ b/src/app/(main)/settings/llm/Ollama/index.tsx
@@ -10,21 +10,25 @@ import ProviderConfig from '../components/ProviderConfig';
import Checker from './Checker';
const OllamaProvider = memo(() => {
- const { t } = useTranslation('setting');
+ const { t } = useTranslation('modelProvider');
return (
,
- desc: t('llm.checker.ollamaDesc'),
- label: t('llm.checker.title'),
+ desc: t('ollama.checker.desc'),
+ label: t('ollama.checker.title'),
minWidth: undefined,
}}
modelList={{ showModelFetcher: true }}
provider={ModelProvider.Ollama}
+ proxyUrl={{
+ desc: t('ollama.endpoint.desc'),
+ placeholder: 'http://127.0.0.1:11434',
+ title: t('ollama.endpoint.title'),
+ }}
showApiKey={false}
showBrowserRequest
- showEndpoint
title={}
/>
);
diff --git a/src/app/(main)/settings/llm/OpenAI/index.tsx b/src/app/(main)/settings/llm/OpenAI/index.tsx
index 1928efc12e214..e3b34f4d5a14f 100644
--- a/src/app/(main)/settings/llm/OpenAI/index.tsx
+++ b/src/app/(main)/settings/llm/OpenAI/index.tsx
@@ -14,9 +14,13 @@ const OpenAIProvider = memo(() => {
}
/>
);
diff --git a/src/app/(main)/settings/llm/Perplexity/index.tsx b/src/app/(main)/settings/llm/Perplexity/index.tsx
index 419d901b04837..b391e06b4e7c3 100644
--- a/src/app/(main)/settings/llm/Perplexity/index.tsx
+++ b/src/app/(main)/settings/llm/Perplexity/index.tsx
@@ -12,6 +12,9 @@ const PerplexityProvider = memo(() => {
}
/>
);
diff --git a/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx b/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx
index 88107c0a7564c..dbc6f6f45a8e4 100644
--- a/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx
+++ b/src/app/(main)/settings/llm/components/ProviderConfig/index.tsx
@@ -57,9 +57,15 @@ interface ProviderConfigProps {
showModelFetcher?: boolean;
};
provider: GlobalLLMProviderKey;
+ proxyUrl?:
+ | {
+ desc?: string;
+ placeholder: string;
+ title?: string;
+ }
+ | false;
showApiKey?: boolean;
showBrowserRequest?: boolean;
- showEndpoint?: boolean;
title: ReactNode;
}
@@ -67,7 +73,7 @@ const ProviderConfig = memo(
({
apiKeyItems,
provider,
- showEndpoint,
+ proxyUrl,
showApiKey = true,
checkModel,
canDeactivate = true,
@@ -112,14 +118,13 @@ const ProviderConfig = memo(
},
];
+ const showEndpoint = !!proxyUrl;
const formItems = [
...apiKeyItem,
showEndpoint && {
- children: (
-
- ),
- desc: modelT(`${provider}.endpoint.desc` as any),
- label: modelT(`${provider}.endpoint.title` as any),
+ children: ,
+ desc: proxyUrl?.desc || t('llm.proxyUrl.desc'),
+ label: proxyUrl?.title || t('llm.proxyUrl.title'),
name: [LLMProviderConfigKey, provider, LLMProviderBaseUrlKey],
},
(showBrowserRequest || (showEndpoint && isProviderEndpointNotEmpty)) && {
diff --git a/src/app/api/chat/agentRuntime.ts b/src/app/api/chat/agentRuntime.ts
index 31fd70a32e95a..57649b764c1a0 100644
--- a/src/app/api/chat/agentRuntime.ts
+++ b/src/app/api/chat/agentRuntime.ts
@@ -85,76 +85,71 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
accessKeySecret = payload?.awsSecretAccessKey;
region = payload?.awsRegion;
}
- return {
- accessKeyId,
- accessKeySecret,
- region,
- };
+ return { accessKeyId, accessKeySecret, region };
}
case ModelProvider.Ollama: {
const { OLLAMA_PROXY_URL } = getServerConfig();
const baseURL = payload?.endpoint || OLLAMA_PROXY_URL;
- return {
- baseURL,
- };
+ return { baseURL };
}
case ModelProvider.Perplexity: {
- const { PERPLEXITY_API_KEY } = getServerConfig();
+ const { PERPLEXITY_API_KEY, PERPLEXITY_PROXY_URL } = getServerConfig();
+
const apiKey = apiKeyManager.pick(payload?.apiKey || PERPLEXITY_API_KEY);
- return {
- apiKey,
- };
+ const baseURL = payload?.endpoint || PERPLEXITY_PROXY_URL;
+
+ return { apiKey, baseURL };
}
case ModelProvider.Anthropic: {
const { ANTHROPIC_API_KEY, ANTHROPIC_PROXY_URL } = getServerConfig();
+
const apiKey = apiKeyManager.pick(payload?.apiKey || ANTHROPIC_API_KEY);
const baseURL = payload?.endpoint || ANTHROPIC_PROXY_URL;
- return {
- apiKey,
- baseURL,
- };
+
+ return { apiKey, baseURL };
}
case ModelProvider.Minimax: {
const { MINIMAX_API_KEY } = getServerConfig();
+
const apiKey = apiKeyManager.pick(payload?.apiKey || MINIMAX_API_KEY);
- return {
- apiKey,
- };
+
+ return { apiKey };
}
case ModelProvider.Mistral: {
const { MISTRAL_API_KEY } = getServerConfig();
+
const apiKey = apiKeyManager.pick(payload?.apiKey || MISTRAL_API_KEY);
- return {
- apiKey,
- };
+
+ return { apiKey };
}
case ModelProvider.Groq: {
- const { GROQ_API_KEY } = getServerConfig();
+ const { GROQ_API_KEY, GROQ_PROXY_URL } = getServerConfig();
+
const apiKey = apiKeyManager.pick(payload?.apiKey || GROQ_API_KEY);
- return {
- apiKey,
- };
+ const baseURL = payload?.endpoint || GROQ_PROXY_URL;
+
+ return { apiKey, baseURL };
}
case ModelProvider.OpenRouter: {
const { OPENROUTER_API_KEY } = getServerConfig();
+
const apiKey = apiKeyManager.pick(payload?.apiKey || OPENROUTER_API_KEY);
- return {
- apiKey,
- };
+
+ return { apiKey };
}
case ModelProvider.TogetherAI: {
const { TOGETHERAI_API_KEY } = getServerConfig();
+
const apiKey = apiKeyManager.pick(payload?.apiKey || TOGETHERAI_API_KEY);
- return {
- apiKey,
- };
+
+ return { apiKey };
}
case ModelProvider.ZeroOne: {
const { ZEROONE_API_KEY } = getServerConfig();
+
const apiKey = apiKeyManager.pick(payload?.apiKey || ZEROONE_API_KEY);
- return {
- apiKey,
- };
+
+ return { apiKey };
}
}
};
diff --git a/src/config/modelProviders/groq.ts b/src/config/modelProviders/groq.ts
index 51857c15b807b..2a3a71b5dc3a2 100644
--- a/src/config/modelProviders/groq.ts
+++ b/src/config/modelProviders/groq.ts
@@ -3,6 +3,12 @@ import { ModelProviderCard } from '@/types/llm';
// ref https://console.groq.com/docs/models
const Groq: ModelProviderCard = {
chatModels: [
+ {
+ displayName: 'LLaMA3-3-70B',
+ enabled: true,
+ id: 'llama3-70b-8192',
+ tokens: 8192,
+ },
{
displayName: 'Mixtral-8x7b-Instruct-v0.1',
enabled: true,
@@ -21,12 +27,6 @@ const Groq: ModelProviderCard = {
id: 'llama3-8b-8192',
tokens: 8192,
},
- {
- displayName: 'LLaMA3-3-70B',
- enabled: true,
- id: 'llama3-70b-8192',
- tokens: 8192,
- },
{
displayName: 'LLaMA2-70b-chat',
id: 'llama2-70b-4096',
diff --git a/src/config/modelProviders/togetherai.ts b/src/config/modelProviders/togetherai.ts
index 9d1c2d0cc51cb..e26b91e6a6532 100644
--- a/src/config/modelProviders/togetherai.ts
+++ b/src/config/modelProviders/togetherai.ts
@@ -6,90 +6,68 @@ const TogetherAI: ModelProviderCard = {
{
displayName: 'Deepseek Coder Instruct (33B)',
enabled: true,
- functionCall: false,
id: 'deepseek-ai/deepseek-coder-33b-instruct',
tokens: 16_384,
- vision: false,
},
{
displayName: 'Phind Code LLaMA v2 (34B)',
enabled: true,
- functionCall: false,
id: 'Phind/Phind-CodeLlama-34B-v2',
tokens: 16_384,
- vision: false,
},
{
displayName: 'Gemma Instruct (2B)',
enabled: true,
- functionCall: false,
id: 'google/gemma-2b-it',
tokens: 8192,
- vision: false,
},
{
displayName: 'LLaMA-2 Chat (13B)',
enabled: true,
- functionCall: false,
id: 'meta-llama/Llama-2-13b-chat-hf',
tokens: 4096,
- vision: false,
},
{
displayName: '01-ai Yi Chat (34B)',
enabled: true,
- functionCall: false,
id: 'zero-one-ai/Yi-34B-Chat',
tokens: 4096,
- vision: false,
},
{
displayName: 'Mixtral-8x7B Instruct (46.7B)',
enabled: true,
- functionCall: false,
id: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
tokens: 32_768,
- vision: false,
},
{
displayName: 'Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B)',
enabled: true,
- functionCall: false,
id: 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
tokens: 32_768,
- vision: false,
},
{
displayName: 'Nous Hermes-2 Yi (34B)',
enabled: true,
- functionCall: false,
id: 'NousResearch/Nous-Hermes-2-Yi-34B',
tokens: 4096,
- vision: false,
},
{
displayName: 'Qwen 1.5 Chat (7B)',
enabled: true,
- functionCall: false,
id: 'Qwen/Qwen1.5-7B-Chat',
tokens: 32_768,
- vision: false,
},
{
displayName: 'Qwen 1.5 Chat (14B)',
enabled: true,
- functionCall: false,
id: 'Qwen/Qwen1.5-14B-Chat',
tokens: 32_768,
- vision: false,
},
{
displayName: 'Qwen 1.5 Chat (72B)',
enabled: true,
- functionCall: false,
id: 'Qwen/Qwen1.5-72B-Chat',
tokens: 32_768,
- vision: false,
},
],
id: 'togetherai',
diff --git a/src/config/server/provider.ts b/src/config/server/provider.ts
index 249ba8ffa14df..d1d497dab4a44 100644
--- a/src/config/server/provider.ts
+++ b/src/config/server/provider.ts
@@ -37,6 +37,7 @@ declare global {
// Perplexity Provider
ENABLED_PERPLEXITY?: string;
PERPLEXITY_API_KEY?: string;
+ PERPLEXITY_PROXY_URL?: string;
// Anthropic Provider
ENABLED_ANTHROPIC?: string;
@@ -54,6 +55,7 @@ declare global {
// Groq Provider
ENABLED_GROQ?: string;
GROQ_API_KEY?: string;
+ GROQ_PROXY_URL?: string;
// OpenRouter Provider
ENABLED_OPENROUTER?: string;
@@ -172,6 +174,7 @@ export const getProviderConfig = () => {
ENABLED_PERPLEXITY: !!PERPLEXITY_API_KEY,
PERPLEXITY_API_KEY,
+ PERPLEXITY_PROXY_URL: process.env.PERPLEXITY_PROXY_URL,
ENABLED_ANTHROPIC: !!ANTHROPIC_API_KEY,
ANTHROPIC_API_KEY,
@@ -197,6 +200,7 @@ export const getProviderConfig = () => {
MOONSHOT_PROXY_URL: process.env.MOONSHOT_PROXY_URL,
ENABLED_GROQ: !!GROQ_API_KEY,
+ GROQ_PROXY_URL: process.env.GROQ_PROXY_URL,
GROQ_API_KEY,
ENABLED_ZEROONE: !!ZEROONE_API_KEY,
diff --git a/src/libs/agent-runtime/anthropic/index.ts b/src/libs/agent-runtime/anthropic/index.ts
index 5fa09970f30d2..6467ae5ded8aa 100644
--- a/src/libs/agent-runtime/anthropic/index.ts
+++ b/src/libs/agent-runtime/anthropic/index.ts
@@ -27,21 +27,11 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
}
async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
- const { messages, model, max_tokens, temperature, top_p } = payload;
- const system_message = messages.find((m) => m.role === 'system');
- const user_messages = messages.filter((m) => m.role !== 'system');
-
try {
+ const anthropicPayload = this.buildAnthropicPayload(payload);
+
const response = await this.client.messages.create(
- {
- max_tokens: max_tokens || 4096,
- messages: buildAnthropicMessages(user_messages),
- model: model,
- stream: true,
- system: system_message?.content as string,
- temperature: temperature,
- top_p: top_p,
- },
+ { ...anthropicPayload, stream: true },
{ signal: options?.signal },
);
@@ -71,6 +61,15 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
provider: ModelProvider.Anthropic,
});
}
+
+ case 403: {
+ throw AgentRuntimeError.chat({
+ endpoint: desensitizedEndpoint,
+ error: error as any,
+ errorType: AgentRuntimeErrorType.LocationNotSupportError,
+ provider: ModelProvider.Anthropic,
+ });
+ }
default: {
break;
}
@@ -84,6 +83,22 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
});
}
}
+
+ private buildAnthropicPayload(payload: ChatStreamPayload) {
+ const { messages, model, max_tokens, temperature, top_p } = payload;
+ const system_message = messages.find((m) => m.role === 'system');
+ const user_messages = messages.filter((m) => m.role !== 'system');
+
+ return {
+ max_tokens: max_tokens || 4096,
+ messages: buildAnthropicMessages(user_messages),
+ model: model,
+ stream: true,
+ system: system_message?.content as string,
+ temperature: temperature,
+ top_p: top_p,
+ };
+ }
}
export default LobeAnthropicAI;
diff --git a/src/libs/agent-runtime/groq/index.ts b/src/libs/agent-runtime/groq/index.ts
index dc685ae246374..f30c1a9c159de 100644
--- a/src/libs/agent-runtime/groq/index.ts
+++ b/src/libs/agent-runtime/groq/index.ts
@@ -4,6 +4,13 @@ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
export const LobeGroq = LobeOpenAICompatibleFactory({
baseURL: 'https://api.groq.com/openai/v1',
+ chatCompletion: {
+ handleError: (error) => {
+ // 403 means the location is not supported
+ if (error.status === 403)
+ return { error, errorType: AgentRuntimeErrorType.LocationNotSupportError };
+ },
+ },
debug: {
chatCompletion: () => process.env.DEBUG_GROQ_CHAT_COMPLETION === '1',
},
diff --git a/src/libs/agent-runtime/perplexity/index.test.ts b/src/libs/agent-runtime/perplexity/index.test.ts
index 21ed19654bd38..07e29fa5eaa3c 100644
--- a/src/libs/agent-runtime/perplexity/index.test.ts
+++ b/src/libs/agent-runtime/perplexity/index.test.ts
@@ -2,7 +2,7 @@
import OpenAI from 'openai';
import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-import { ChatStreamCallbacks } from '@/libs/agent-runtime';
+import { ChatStreamCallbacks, LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
import * as debugStreamModule from '../utils/debugStream';
import { LobePerplexityAI } from './index';
@@ -15,7 +15,7 @@ const invalidErrorType = 'InvalidPerplexityAPIKey';
// Mock the console.error to avoid polluting test output
vi.spyOn(console, 'error').mockImplementation(() => {});
-let instance: LobePerplexityAI;
+let instance: LobeOpenAICompatibleRuntime;
beforeEach(() => {
instance = new LobePerplexityAI({ apiKey: 'test' });
diff --git a/src/libs/agent-runtime/perplexity/index.ts b/src/libs/agent-runtime/perplexity/index.ts
index 391f41d865965..302618f8096e6 100644
--- a/src/libs/agent-runtime/perplexity/index.ts
+++ b/src/libs/agent-runtime/perplexity/index.ts
@@ -1,85 +1,37 @@
-import { OpenAIStream, StreamingTextResponse } from 'ai';
-import OpenAI, { ClientOptions } from 'openai';
+import OpenAI from 'openai';
-import { LobeRuntimeAI } from '../BaseAI';
import { AgentRuntimeErrorType } from '../error';
-import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
-import { AgentRuntimeError } from '../utils/createError';
-import { debugStream } from '../utils/debugStream';
-import { desensitizeUrl } from '../utils/desensitizeUrl';
-import { handleOpenAIError } from '../utils/handleOpenAIError';
+import { ChatStreamPayload, ModelProvider } from '../types';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
-const DEFAULT_BASE_URL = 'https://api.perplexity.ai';
-
-export class LobePerplexityAI implements LobeRuntimeAI {
- private client: OpenAI;
-
- baseURL: string;
-
- constructor({ apiKey, baseURL = DEFAULT_BASE_URL, ...res }: ClientOptions) {
- if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidPerplexityAPIKey);
-
- this.client = new OpenAI({ apiKey, baseURL, ...res });
- this.baseURL = this.client.baseURL;
- }
-
- async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
- try {
+export const LobePerplexityAI = LobeOpenAICompatibleFactory({
+ baseURL: 'https://api.perplexity.ai',
+ chatCompletion: {
+ handlePayload: (payload: ChatStreamPayload) => {
// Set a default frequency penalty value greater than 0
- const defaultFrequencyPenalty = 0.1;
- const chatPayload = {
- ...payload,
- frequency_penalty: payload.frequency_penalty || defaultFrequencyPenalty,
- };
- const response = await this.client.chat.completions.create(
- chatPayload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
- { signal: options?.signal },
- );
- const [prod, debug] = response.tee();
-
- if (process.env.DEBUG_PERPLEXITY_CHAT_COMPLETION === '1') {
- debugStream(debug.toReadableStream()).catch(console.error);
- }
-
- return new StreamingTextResponse(OpenAIStream(prod, options?.callback), {
- headers: options?.headers,
- });
- } catch (error) {
- let desensitizedEndpoint = this.baseURL;
+ const { presence_penalty, frequency_penalty, ...res } = payload;
- if (this.baseURL !== DEFAULT_BASE_URL) {
- desensitizedEndpoint = desensitizeUrl(this.baseURL);
- }
+ let param;
- if ('status' in (error as any)) {
- switch ((error as Response).status) {
- case 401: {
- throw AgentRuntimeError.chat({
- endpoint: desensitizedEndpoint,
- error: error as any,
- errorType: AgentRuntimeErrorType.InvalidPerplexityAPIKey,
- provider: ModelProvider.Perplexity,
- });
- }
+ // Ensure we only send one of presence_penalty or frequency_penalty
+ if (presence_penalty !== 0) {
+ param = { presence_penalty };
+ } else {
+ const defaultFrequencyPenalty = 1;
- default: {
- break;
- }
- }
+ param = { frequency_penalty: frequency_penalty || defaultFrequencyPenalty };
}
- const { errorResult, RuntimeError } = handleOpenAIError(error);
-
- const errorType = RuntimeError || AgentRuntimeErrorType.PerplexityBizError;
-
- throw AgentRuntimeError.chat({
- endpoint: desensitizedEndpoint,
- error: errorResult,
- errorType,
- provider: ModelProvider.Perplexity,
- });
- }
- }
-}
-
-export default LobePerplexityAI;
+ console.log(param);
+ return { ...res, ...param } as OpenAI.ChatCompletionCreateParamsStreaming;
+ },
+ },
+ debug: {
+ chatCompletion: () => process.env.DEBUG_PERPLEXITY_CHAT_COMPLETION === '1',
+ },
+ errorType: {
+ bizError: AgentRuntimeErrorType.PerplexityBizError,
+ invalidAPIKey: AgentRuntimeErrorType.InvalidPerplexityAPIKey,
+ },
+ provider: ModelProvider.Perplexity,
+});
diff --git a/src/libs/agent-runtime/utils/anthropicHelpers.test.ts b/src/libs/agent-runtime/utils/anthropicHelpers.test.ts
index 37866e6b42ec2..8cf8d7f5e6158 100644
--- a/src/libs/agent-runtime/utils/anthropicHelpers.test.ts
+++ b/src/libs/agent-runtime/utils/anthropicHelpers.test.ts
@@ -1,7 +1,11 @@
import { describe, expect, it } from 'vitest';
import { OpenAIChatMessage, UserMessageContentPart } from '../types/chat';
-import { buildAnthropicBlock, buildAnthropicMessage } from './anthropicHelpers';
+import {
+ buildAnthropicBlock,
+ buildAnthropicMessage,
+ buildAnthropicMessages,
+} from './anthropicHelpers';
import { parseDataUri } from './uriParser';
describe('anthropicHelpers', () => {
@@ -48,4 +52,63 @@ describe('anthropicHelpers', () => {
expect(result).toEqual({ content: [{ type: 'text', text: 'Hello!' }], role: 'assistant' });
});
});
+
+ describe('buildAnthropicMessages', () => {
+ it('should correctly convert OpenAI Messages to Anthropic Messages', () => {
+ const messages: OpenAIChatMessage[] = [
+ { content: 'Hello', role: 'user' },
+ { content: 'Hi', role: 'assistant' },
+ ];
+
+ const result = buildAnthropicMessages(messages);
+ expect(result).toHaveLength(2);
+ expect(result).toEqual([
+ { content: 'Hello', role: 'user' },
+ { content: 'Hi', role: 'assistant' },
+ ]);
+ });
+
+ it('messages should end with user', () => {
+ const messages: OpenAIChatMessage[] = [
+ { content: 'Hello', role: 'user' },
+ { content: 'Hello', role: 'user' },
+ { content: 'Hi', role: 'assistant' },
+ ];
+
+ const contents = buildAnthropicMessages(messages);
+
+ expect(contents).toHaveLength(4);
+ expect(contents).toEqual([
+ { content: 'Hello', role: 'user' },
+ { content: '_', role: 'assistant' },
+ { content: 'Hello', role: 'user' },
+ { content: 'Hi', role: 'assistant' },
+ ]);
+ });
+
+ it('messages should pair', () => {
+ const messages: OpenAIChatMessage[] = [
+ { content: 'a', role: 'assistant' },
+ { content: 'b', role: 'assistant' },
+ { content: 'c', role: 'assistant' },
+ { content: 'd', role: 'assistant' },
+ { content: '你好', role: 'user' },
+ ];
+
+ const contents = buildAnthropicMessages(messages);
+
+ expect(contents).toHaveLength(9);
+ expect(contents).toEqual([
+ { content: '_', role: 'user' },
+ { content: 'a', role: 'assistant' },
+ { content: '_', role: 'user' },
+ { content: 'b', role: 'assistant' },
+ { content: '_', role: 'user' },
+ { content: 'c', role: 'assistant' },
+ { content: '_', role: 'user' },
+ { content: 'd', role: 'assistant' },
+ { content: '你好', role: 'user' },
+ ]);
+ });
+ });
});
diff --git a/src/libs/agent-runtime/utils/anthropicHelpers.ts b/src/libs/agent-runtime/utils/anthropicHelpers.ts
index 89e69ece38ff0..9a1bab6287372 100644
--- a/src/libs/agent-runtime/utils/anthropicHelpers.ts
+++ b/src/libs/agent-runtime/utils/anthropicHelpers.ts
@@ -37,5 +37,21 @@ export const buildAnthropicMessage = (
};
export const buildAnthropicMessages = (
- messages: OpenAIChatMessage[],
-): Anthropic.Messages.MessageParam[] => messages.map((message) => buildAnthropicMessage(message));
+ oaiMessages: OpenAIChatMessage[],
+): Anthropic.Messages.MessageParam[] => {
+ const messages: Anthropic.Messages.MessageParam[] = [];
+ let lastRole = 'assistant';
+
+ oaiMessages.forEach((message) => {
+ const anthropicMessage = buildAnthropicMessage(message);
+
+ if (lastRole === anthropicMessage.role) {
+ messages.push({ content: '_', role: lastRole === 'user' ? 'assistant' : 'user' });
+ }
+
+ lastRole = anthropicMessage.role;
+ messages.push(anthropicMessage);
+ });
+
+ return messages;
+};
diff --git a/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts b/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts
index 29332dbe8cbe9..89a4c8ec2fe6f 100644
--- a/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts
+++ b/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts
@@ -6,7 +6,7 @@ import { ChatModelCard } from '@/types/llm';
import { LobeRuntimeAI } from '../../BaseAI';
import { ILobeAgentRuntimeErrorType } from '../../error';
-import { ChatCompetitionOptions, ChatStreamPayload } from '../../types';
+import { ChatCompetitionOptions, ChatCompletionErrorPayload, ChatStreamPayload } from '../../types';
import { AgentRuntimeError } from '../createError';
import { debugStream } from '../debugStream';
import { desensitizeUrl } from '../desensitizeUrl';
@@ -28,6 +28,7 @@ const CHAT_MODELS_BLOCK_LIST = [
interface OpenAICompatibleFactoryOptions {
baseURL?: string;
chatCompletion?: {
+ handleError?: (error: any) => Omit | undefined;
handlePayload?: (payload: ChatStreamPayload) => OpenAI.ChatCompletionCreateParamsStreaming;
};
constructorOptions?: ClientOptions;
@@ -113,6 +114,16 @@ export const LobeOpenAICompatibleFactory = ({
}
}
+ if (chatCompletion?.handleError) {
+ const errorResult = chatCompletion.handleError(error);
+
+ if (errorResult)
+ throw AgentRuntimeError.chat({
+ ...errorResult,
+ provider,
+ } as ChatCompletionErrorPayload);
+ }
+
const { errorResult, RuntimeError } = handleOpenAIError(error);
throw AgentRuntimeError.chat({
diff --git a/src/locales/default/error.ts b/src/locales/default/error.ts
index 0c3d6114cc24e..ed92e47431ffa 100644
--- a/src/locales/default/error.ts
+++ b/src/locales/default/error.ts
@@ -72,7 +72,7 @@ export default {
InvalidAccessCode: '密码不正确或为空,请输入正确的访问密码,或者添加自定义 API Key',
InvalidClerkUser: '很抱歉,你当前尚未登录,请先登录或注册账号后继续操作',
LocationNotSupportError:
- '很抱歉,你的所在位置不支持此模型服务,可能是由于地区限制或服务未开通。请确认当前位置是否支持使用此服务,或尝试使用其他位置信息。',
+ '很抱歉,你的所在地区不支持此模型服务,可能是由于区域限制或服务未开通。请确认当前地区是否支持使用此服务,或尝试切换到其他地区后重试。',
OpenAIBizError: '请求 OpenAI 服务出错,请根据以下信息排查或重试',
NoOpenAIAPIKey: 'OpenAI API Key 为空,请添加自定义 OpenAI API Key',
diff --git a/src/locales/default/modelProvider.ts b/src/locales/default/modelProvider.ts
index 5f60d594a483d..0b5d2fd6d2f4d 100644
--- a/src/locales/default/modelProvider.ts
+++ b/src/locales/default/modelProvider.ts
@@ -1,17 +1,11 @@
export default {
anthropic: {
- endpoint: {
- desc: '除默认地址外,必须包含 http(s)://',
- placeholder: 'https://api.anthropic.com',
- title: 'API 代理地址',
- },
title: 'Anthropic',
token: {
desc: '填入来自 Anthropic 的 API Key',
placeholder: 'Anthropic API Key',
title: 'API Key',
},
-
unlock: {
description: '输入你的 Anthropic API Key 即可开始会话。应用不会记录你的 API Key',
title: '使用自定义 Anthropic API Key',
@@ -69,11 +63,6 @@ export default {
},
},
google: {
- endpoint: {
- desc: '除默认地址外,必须包含 http(s)://',
- placeholder: 'https://generativelanguage.googleapis.com',
- title: 'API 代理地址',
- },
title: 'Google',
token: {
desc: '填入来自 Google 的 API Key',
@@ -136,6 +125,7 @@ export default {
ollama: {
checker: {
desc: '测试代理地址是否正确填写',
+ title: '连通性检查',
},
customModelName: {
desc: '增加自定义模型,多个模型使用逗号(,)隔开',
@@ -144,8 +134,7 @@ export default {
},
endpoint: {
desc: '填入 Ollama 接口代理地址,本地未额外指定可留空',
- placeholder: 'http://127.0.0.1:11434',
- title: '接口代理地址',
+ title: 'Ollama 服务地址',
},
setup: {
cors: {
@@ -176,11 +165,6 @@ export default {
title: 'Ollama',
},
openai: {
- endpoint: {
- desc: '除默认地址外,必须包含 http(s)://',
- placeholder: 'https://api.openai.com/v1',
- title: '接口代理地址',
- },
title: 'OpenAI',
token: {
desc: '使用自己的 OpenAI Key',
diff --git a/src/locales/default/setting.ts b/src/locales/default/setting.ts
index 57c4b04f8628b..ddacd332a5a78 100644
--- a/src/locales/default/setting.ts
+++ b/src/locales/default/setting.ts
@@ -38,7 +38,6 @@ export default {
checker: {
button: '检查',
desc: '测试 Api Key 与代理地址是否正确填写',
- ollamaDesc: '测试代理地址是否正确填写',
pass: '检查通过',
title: '连通性检查',
},
@@ -99,6 +98,10 @@ export default {
title: '模型列表',
total: '共 {{count}} 个模型可用',
},
+ proxyUrl: {
+ desc: '除默认地址外,必须包含 http(s)://',
+ title: 'API 代理地址',
+ },
waitingForMore: '更多模型正在 <1>计划接入</1> 中,敬请期待 ✨',
},
ollama: {