Mirror of https://github.com/lobehub/lobe-chat.git (synced 2025-12-20 01:12:52 +08:00)
✨ feat: add gpt-4o-mini in OpenAI Provider and set it as the default model (#3256)
* ✨ feat: gpt-4o-mini to openai provider
* ♻️ refactor: move gpt-4o-mini as default model
* ✅ test: fix test
* 🐛 fix: set gpt-4o-mini as default model
* chore: improve code
* chore: improve code

---------

Co-authored-by: arvinxx <arvinx@foxmail.com>
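In short: the hard-coded `gpt-3.5-turbo` default is replaced by a single `DEFAULT_MODEL` constant set to `gpt-4o-mini`, the GPT-4o family is promoted to the top of the OpenAI provider card, and `checkModel` switches to `gpt-4o-mini`. A condensed sketch of the new wiring, pieced together from the hunks below rather than copied from any one file:

```ts
// Condensed sketch only — the real definitions live under '@/const/settings'
// (see the import paths in the hunks below), not in a single file like this.
export const DEFAULT_MODEL = 'gpt-4o-mini';

// New agents reference the constant instead of hard-coding a model id,
// so the next default-model switch is a one-line change.
export const DEFAULT_AGENT_CONFIG = {
  model: DEFAULT_MODEL,
  params: { frequency_penalty: 0, presence_penalty: 0 },
};
```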
@@ -23,7 +23,7 @@ module.exports = defineConfig({
     'vi-VN',
   ],
   temperature: 0,
-  modelName: 'gpt-3.5-turbo-0125',
+  modelName: 'gpt-4o-mini',
   splitToken: 2048,
   experimental: {
     jsonMode: true,
@@ -2,7 +2,7 @@ const { defineConfig } = require('@lobehub/seo-cli');
 
 module.exports = defineConfig({
   entry: ['./docs/**/*.mdx'],
-  modelName: 'gpt-3.5-turbo-0125',
+  modelName: 'gpt-4o-mini',
   experimental: {
     jsonMode: true,
   },
@@ -4,48 +4,40 @@ import { ModelProviderCard } from '@/types/llm';
 const OpenAI: ModelProviderCard = {
   chatModels: [
     {
-      description: 'GPT 3.5 Turbo,适用于各种文本生成和理解任务',
-      displayName: 'GPT-3.5 Turbo',
+      description: 'Currently points to gpt-4o-mini-2024-07-18',
+      displayName: 'GPT-4o mini',
       enabled: true,
       functionCall: true,
-      id: 'gpt-3.5-turbo',
-      tokens: 16_385,
+      id: 'gpt-4o-mini',
+      maxOutput: 16_385,
+      tokens: 128_000,
+      vision: true,
     },
     {
-      displayName: 'GPT-3.5 Turbo (0125)',
+      description: 'Currently points to gpt-4o-2024-05-13',
+      displayName: 'GPT-4o',
+      enabled: true,
       functionCall: true,
-      id: 'gpt-3.5-turbo-0125',
-      tokens: 16_385,
+      id: 'gpt-4o',
+      tokens: 128_000,
+      vision: true,
     },
     {
-      displayName: 'GPT-3.5 Turbo (1106)',
+      description: 'GPT-4 Turbo with Vision',
+      displayName: 'GPT-4 Turbo',
+      enabled: true,
       functionCall: true,
-      id: 'gpt-3.5-turbo-1106',
-      tokens: 16_385,
+      id: 'gpt-4-turbo',
+      tokens: 128_000,
+      vision: true,
     },
     {
-      displayName: 'GPT-3.5 Turbo Instruct',
-      id: 'gpt-3.5-turbo-instruct',
-      tokens: 4096,
-    },
-    {
-      description: 'Currently points to gpt-3.5-turbo-16k-0613',
-      displayName: 'GPT-3.5 Turbo 16K',
-      id: 'gpt-3.5-turbo-16k',
-      legacy: true,
-      tokens: 16_385,
-    },
-    {
-      displayName: 'GPT-3.5 Turbo (0613)',
-      id: 'gpt-3.5-turbo-0613',
-      legacy: true,
-      tokens: 4096,
-    },
-    {
-      displayName: 'GPT-3.5 Turbo 16K (0613)',
-      id: 'gpt-3.5-turbo-16k-0613',
-      legacy: true,
-      tokens: 16_385,
+      description: 'GPT-4 Turbo 视觉版 (240409)',
+      displayName: 'GPT-4 Turbo Vision (240409)',
+      functionCall: true,
+      id: 'gpt-4-turbo-2024-04-09',
+      tokens: 128_000,
+      vision: true,
     },
     {
       description: 'Currently points to gpt-4-0125-preview',
@@ -106,33 +98,50 @@ const OpenAI: ModelProviderCard = {
       tokens: 32_768,
     },
     {
-      description: 'GPT-4 Turbo with Vision',
-      displayName: 'GPT-4 Turbo',
-      enabled: true,
+      description: 'GPT 3.5 Turbo,适用于各种文本生成和理解任务',
+      displayName: 'GPT-3.5 Turbo',
       functionCall: true,
-      id: 'gpt-4-turbo',
-      tokens: 128_000,
-      vision: true,
+      id: 'gpt-3.5-turbo',
+      tokens: 16_385,
     },
     {
-      description: 'GPT-4 Turbo 视觉版 (240409)',
-      displayName: 'GPT-4 Turbo Vision (240409)',
+      displayName: 'GPT-3.5 Turbo (0125)',
       functionCall: true,
-      id: 'gpt-4-turbo-2024-04-09',
-      tokens: 128_000,
-      vision: true,
+      id: 'gpt-3.5-turbo-0125',
+      tokens: 16_385,
     },
     {
-      description: 'Currently points to gpt-4o-2024-05-13',
-      displayName: 'GPT-4o',
-      enabled: true,
+      displayName: 'GPT-3.5 Turbo (1106)',
       functionCall: true,
-      id: 'gpt-4o',
-      tokens: 128_000,
-      vision: true,
+      id: 'gpt-3.5-turbo-1106',
+      tokens: 16_385,
     },
+    {
+      displayName: 'GPT-3.5 Turbo Instruct',
+      id: 'gpt-3.5-turbo-instruct',
+      tokens: 4096,
+    },
+    {
+      description: 'Currently points to gpt-3.5-turbo-16k-0613',
+      displayName: 'GPT-3.5 Turbo 16K',
+      id: 'gpt-3.5-turbo-16k',
+      legacy: true,
+      tokens: 16_385,
+    },
+    {
+      displayName: 'GPT-3.5 Turbo (0613)',
+      id: 'gpt-3.5-turbo-0613',
+      legacy: true,
+      tokens: 4096,
+    },
+    {
+      displayName: 'GPT-3.5 Turbo 16K (0613)',
+      id: 'gpt-3.5-turbo-16k-0613',
+      legacy: true,
+      tokens: 16_385,
+    },
   ],
-  checkModel: 'gpt-3.5-turbo',
+  checkModel: 'gpt-4o-mini',
   enabled: true,
   id: 'openai',
   modelList: { showModelFetcher: true },
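After this change only `gpt-4o-mini`, `gpt-4o`, and `gpt-4-turbo` carry `enabled: true` in the OpenAI card, which is what the selector test further down expects as the default enabled list. A self-contained sketch of the new list head, reduced to the fields that appear in the hunks above (the real card types come from `@/types/llm`):

```ts
// Field names taken from the diff above; the interface is a local stand-in,
// not the actual ModelProviderCard/ChatModelCard types from '@/types/llm'.
interface ChatModelCardSketch {
  description?: string;
  displayName: string;
  enabled?: boolean;
  functionCall?: boolean;
  id: string;
  maxOutput?: number;
  tokens: number;
  vision?: boolean;
}

const gpt4oMini: ChatModelCardSketch = {
  description: 'Currently points to gpt-4o-mini-2024-07-18',
  displayName: 'GPT-4o mini',
  enabled: true, // surfaces the model in the default enabled list
  functionCall: true, // tool / function calling supported
  id: 'gpt-4o-mini',
  maxOutput: 16_385, // maximum output tokens, per the diff
  tokens: 128_000, // context window
  vision: true,
};
```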
@@ -1,4 +1,5 @@
 import { DEFAULT_AGENT_META } from '@/const/meta';
+import { DEFAULT_MODEL } from '@/const/settings/llm';
 import { ModelProvider } from '@/libs/agent-runtime';
 import { LobeAgentChatConfig, LobeAgentConfig, LobeAgentTTSConfig } from '@/types/agent';
 import { UserDefaultAgent } from '@/types/user/settings';
@@ -21,7 +22,7 @@ export const DEFAULT_AGENT_CHAT_CONFIG: LobeAgentChatConfig = {
 
 export const DEFAULT_AGENT_CONFIG: LobeAgentConfig = {
   chatConfig: DEFAULT_AGENT_CHAT_CONFIG,
-  model: 'gpt-3.5-turbo',
+  model: DEFAULT_MODEL,
   params: {
     frequency_penalty: 0,
     presence_penalty: 0,
@@ -111,6 +111,6 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
   },
 };
 
-export const DEFAULT_MODEL = 'gpt-3.5-turbo';
+export const DEFAULT_MODEL = 'gpt-4o-mini';
 
 export const DEFAULT_PROVIDER = ModelProvider.OpenAI;
@@ -1,5 +1,6 @@
 import { z } from 'zod';
 
+import { DEFAULT_MODEL } from '@/const/settings';
 import { AgentChatConfigSchema } from '@/types/agent';
 import { LobeMetaDataSchema } from '@/types/meta';
 
@@ -26,7 +27,7 @@ const ttsSchema = z.object({
 export const AgentSchema = z.object({
   chatConfig: AgentChatConfigSchema,
   fewShots: fewShotsSchema.optional(),
-  model: z.string().default('gpt-3.5-turbo'),
+  model: z.string().default(DEFAULT_MODEL),
   params: z.object({
     frequency_penalty: z.number().default(0).optional(),
     max_tokens: z.number().optional(),
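Because the schema now defaults `model` to `DEFAULT_MODEL`, any agent payload that omits `model` parses with `gpt-4o-mini` filled in. A minimal standalone illustration (the constant is inlined here to keep the snippet runnable; the real schema imports it from `@/const/settings`):

```ts
import { z } from 'zod';

// Inlined stand-in for DEFAULT_MODEL from '@/const/settings'.
const DEFAULT_MODEL = 'gpt-4o-mini';

// Shrunk-down stand-in for AgentSchema, keeping only the field this PR touches.
const AgentSchemaSketch = z.object({
  model: z.string().default(DEFAULT_MODEL),
});

// A payload without an explicit model now picks up the new default.
const parsed = AgentSchemaSketch.parse({});
console.log(parsed.model); // 'gpt-4o-mini'
```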
@@ -5,7 +5,6 @@ exports[`LobeOpenAI > models > should get models 1`] = `
   {
     "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务",
     "displayName": "GPT-3.5 Turbo",
-    "enabled": true,
     "functionCall": true,
     "id": "gpt-3.5-turbo",
     "tokens": 16385,
@@ -33,7 +33,7 @@ This project is led by [qnguyen3](https://twitter.com/stablequan) and [teknium](
 
 Updated by OpenAI to point to the [latest version of GPT-3.5](/models?q=openai/gpt-3.5). Training data up to Sep 2021.",
     "displayName": "OpenAI: GPT-3.5 Turbo",
-    "enabled": true,
+    "enabled": false,
     "functionCall": false,
     "id": "openai/gpt-3.5-turbo",
     "maxTokens": 4096,
@@ -213,12 +213,6 @@ export default {
   },
   model: {
     desc: '{{provider}} 模型',
-    list: {
-      'gpt-3.5-turbo': 'GPT 3.5',
-      'gpt-3.5-turbo-16k': 'GPT 3.5 (16K)',
-      'gpt-4': 'GPT 4',
-      'gpt-4-32k': 'GPT 4 (32K)',
-    },
     title: '模型',
   },
   presencePenalty: {
@@ -49,7 +49,7 @@ describe('modelProviderSelectors', () => {
       const s = merge(initialState, {}) as unknown as UserStore;
 
       const result = modelProviderSelectors.getDefaultEnabledModelsById('openai')(s);
-      expect(result).toEqual(['gpt-3.5-turbo', 'gpt-4-turbo', 'gpt-4o']);
+      expect(result).toEqual(['gpt-4o-mini', 'gpt-4o', 'gpt-4-turbo']);
     });
 
     it('should return undefined for a non-existing provider', () => {
@@ -49,7 +49,7 @@ export const getDefaultModeProviderById = (provider: string) => (s: UserStore) =
 /**
  * get the default enabled models for a provider
  * it's a default enabled model list by Lobe Chat
- * e.g. openai is ['gpt-3.5-turbo','gpt-4-turbo']
+ * e.g. openai is ['gpt-4o-mini','gpt-4o','gpt-4-turbo']
  */
 const getDefaultEnabledModelsById = (provider: string) => (s: UserStore) => {
   const modelProvider = getDefaultModeProviderById(provider)(s);
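The updated JSDoc example matches the new card order: the three entries flagged `enabled: true` in the OpenAI card are exactly what the selector returns in the test above. A plausible, self-contained sketch of how such a selector derives the list — the real `getDefaultEnabledModelsById` reads the provider card from the user store and may differ in detail:

```ts
// Hypothetical minimal reimplementation for illustration only.
interface ChatModelSketch {
  enabled?: boolean;
  id: string;
}

// Subset of the new OpenAI card, in the order introduced by this PR.
const openAIChatModels: ChatModelSketch[] = [
  { enabled: true, id: 'gpt-4o-mini' },
  { enabled: true, id: 'gpt-4o' },
  { enabled: true, id: 'gpt-4-turbo' },
  { id: 'gpt-4-turbo-2024-04-09' },
  { id: 'gpt-3.5-turbo' },
];

const getDefaultEnabledModels = (models: ChatModelSketch[]) =>
  models.filter((m) => m.enabled).map((m) => m.id);

console.log(getDefaultEnabledModels(openAIChatModels));
// => ['gpt-4o-mini', 'gpt-4o', 'gpt-4-turbo'] — matching the updated test expectation
```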
@@ -20,7 +20,7 @@ export interface LobeAgentConfig {
   fewShots?: FewShots;
   /**
    * 角色所使用的语言模型
-   * @default gpt-3.5-turbo
+   * @default gpt-4o-mini
    */
   model: string;
   /**