@@ -8,20 +8,21 @@ const Google: ModelProviderCard = {
       description: 'A legacy text-only model optimized for chat conversations',
       displayName: 'PaLM 2 Chat (Legacy)',
       id: 'chat-bison-001',
+      legacy: true,
       maxOutput: 1024,
       // tokens: 4096 + 1024, // none tokens test
     },
     {
       description: 'A legacy model that understands text and generates text as an output',
       displayName: 'PaLM 2 (Legacy)',
       id: 'text-bison-001',
+      legacy: true,
       maxOutput: 1024,
       tokens: 8196 + 1024,
     },
     {
       description: 'The best model for scaling across a wide range of tasks',
       displayName: 'Gemini 1.0 Pro',
-      enabled: true,
       functionCall: true,
       id: 'gemini-pro',
       maxOutput: 2048,
@@ -38,7 +39,6 @@ const Google: ModelProviderCard = {
     {
       description: 'The best image understanding model to handle a broad range of applications',
       displayName: 'Gemini 1.0 Pro Vision',
-      enabled: true,
       id: 'gemini-pro-vision',
       maxOutput: 4096,
       tokens: 12_288 + 4096,
@@ -69,6 +69,16 @@ const Google: ModelProviderCard = {
       maxOutput: 2048,
       tokens: 30_720 + 2048,
     },
+    {
+      description: 'Fast and versatile multimodal model for scaling across diverse tasks',
+      displayName: 'Gemini 1.5 Flash',
+      enabled: true,
+      functionCall: true,
+      id: 'gemini-1.5-flash-latest',
+      maxOutput: 8192,
+      tokens: 1_048_576 + 8192,
+      vision: true,
+    },
     {
       description: 'Mid-size multimodal model that supports up to 1 million tokens',
       displayName: 'Gemini 1.5 Pro',