Skip to content

Commit 2138bbc

Browse files
committed
feat: add gpt-4o & gpt-4o-mini
1 parent bddf7cc commit 2138bbc

File tree

5 files changed

+66
-0
lines changed

5 files changed

+66
-0
lines changed

src/cli/commands/index/selectModel.ts

Lines changed: 24 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -26,6 +26,18 @@ export const selectModel = (
2626
getMaxPromptLength(prompts, LLMModels.GPT432k)
2727
) {
2828
return models[LLMModels.GPT432k];
29+
} else if (
30+
llms.includes(LLMModels.GPT4o) &&
31+
models[LLMModels.GPT4o].maxLength >
32+
getMaxPromptLength(prompts, LLMModels.GPT4o)
33+
) {
34+
return models[LLMModels.GPT4o];
35+
} else if (
36+
llms.includes(LLMModels.GPT4omini) &&
37+
models[LLMModels.GPT4omini].maxLength >
38+
getMaxPromptLength(prompts, LLMModels.GPT4omini)
39+
) {
40+
return models[LLMModels.GPT4omini];
2941
} else {
3042
return null;
3143
}
@@ -42,6 +54,18 @@ export const selectModel = (
4254
getMaxPromptLength(prompts, LLMModels.GPT432k)
4355
) {
4456
return models[LLMModels.GPT432k];
57+
} else if (
58+
llms.includes(LLMModels.GPT4o) &&
59+
models[LLMModels.GPT4o].maxLength >
60+
getMaxPromptLength(prompts, LLMModels.GPT4o)
61+
) {
62+
return models[LLMModels.GPT4o];
63+
} else if (
64+
llms.includes(LLMModels.GPT4omini) &&
65+
models[LLMModels.GPT4omini].maxLength >
66+
getMaxPromptLength(prompts, LLMModels.GPT4omini)
67+
) {
68+
return models[LLMModels.GPT4omini];
4569
} else {
4670
return null;
4771
}

src/cli/commands/init/index.ts

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -111,6 +111,10 @@ export const init = async (
111111
name: 'GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)',
112112
value: [LLMModels.GPT3, LLMModels.GPT4, LLMModels.GPT432k],
113113
},
114+
{
115+
name: 'GPT-4o, GPT-4o-mini',
116+
value: [LLMModels.GPT4o, LLMModels.GPT4omini],
117+
},
114118
],
115119
},
116120
{

src/cli/commands/user/index.ts

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -63,6 +63,10 @@ export const user = async (
6363
name: 'GPT-3.5 Turbo, GPT-4 8K (Early Access), GPT-4 32K (Early Access)',
6464
value: [LLMModels.GPT3, LLMModels.GPT4, LLMModels.GPT432k],
6565
},
66+
{
67+
name: 'GPT-4o, GPT-4o-mini',
68+
value: [LLMModels.GPT4o, LLMModels.GPT4omini],
69+
},
6670
],
6771
},
6872
];

src/cli/utils/LLMUtil.ts

Lines changed: 32 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -50,6 +50,38 @@ export const models: Record<LLMModels, LLMModelDetails> = {
5050
failed: 0,
5151
total: 0,
5252
},
53+
[LLMModels.GPT4o]: {
54+
name: LLMModels.GPT4o,
55+
inputCostPer1KTokens: 0.005,
56+
outputCostPer1KTokens: 0.015,
57+
maxLength: 4096,
58+
llm: new OpenAIChat({
59+
temperature: 0.1,
60+
openAIApiKey: process.env.OPENAI_API_KEY,
61+
modelName: LLMModels.GPT4o,
62+
}),
63+
inputTokens: 0,
64+
outputTokens: 0,
65+
succeeded: 0,
66+
failed: 0,
67+
total: 0,
68+
},
69+
[LLMModels.GPT4omini]: {
70+
name: LLMModels.GPT4omini,
71+
inputCostPer1KTokens: 0.00015,
72+
outputCostPer1KTokens: 0.0006,
73+
maxLength: 16384,
74+
llm: new OpenAIChat({
75+
temperature: 0.1,
76+
openAIApiKey: process.env.OPENAI_API_KEY,
77+
modelName: LLMModels.GPT4omini,
78+
}),
79+
inputTokens: 0,
80+
outputTokens: 0,
81+
succeeded: 0,
82+
failed: 0,
83+
total: 0,
84+
},
5385
};
5486

5587
export const printModelDetails = (models: LLMModelDetails[]): void => {

src/types.ts

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -85,6 +85,8 @@ export enum LLMModels {
8585
GPT3 = 'gpt-3.5-turbo',
8686
GPT4 = 'gpt-4',
8787
GPT432k = 'gpt-4-32k',
88+
GPT4o = 'gpt-4o',
89+
GPT4omini = 'gpt-4o-mini',
8890
}
8991

9092
export type LLMModelDetails = {

0 commit comments

Comments (0)