Set default model to gpt-4o-mini
Showing 3 changed files with 12 additions and 8 deletions
| ... | @@ -13,10 +13,12 @@ function App() { | ... | @@ -13,10 +13,12 @@ function App() { |
| 13 | displayTogetherAiResponse() | 13 | displayTogetherAiResponse() |
| 14 | }, []) | 14 | }, []) |
| 15 | 15 | ||
| 16 | const DEFAULT_MODEL = process.env.REACT_APP_DEFAULT_MODEL || 'gpt-4o-mini'; | ||
| 17 | |||
| 16 | const [chatInput, setChatInput] = useState(""); | 18 | const [chatInput, setChatInput] = useState(""); |
| 17 | const [models, setModels] = useState([]); | 19 | const [models, setModels] = useState([]); |
| 18 | const [temperature, setTemperature] = useState(0.7); | 20 | const [temperature, setTemperature] = useState(0.7); |
| 19 | const GPTTurbo = "gpt-3.5-turbo"; | 21 | const GPTTurbo = DEFAULT_MODEL; |
| 20 | const [currentModel, setCurrentModel] = useState(GPTTurbo); | 22 | const [currentModel, setCurrentModel] = useState(GPTTurbo); |
| 21 | const [chatLog, setChatLog] = useState([{ | 23 | const [chatLog, setChatLog] = useState([{ |
| 22 | user: "gpt", | 24 | user: "gpt", |
| ... | @@ -149,7 +151,7 @@ function App() { | ... | @@ -149,7 +151,7 @@ function App() { |
| 149 | const parsedData = data.message ? data.message.trim() : ""; | 151 | const parsedData = data.message ? data.message.trim() : ""; |
| 150 | 152 | ||
| 151 | if (togetherAiResponse) { | 153 | if (togetherAiResponse) { |
| 152 | if (currentModel !== 'gpt-3.5-turbo') { | 154 | if (currentModel !== 'gpt-4o-mini') { |
| 153 | console.log(data) | 155 | console.log(data) |
| 154 | } | 156 | } |
| 155 | } | 157 | } | ... | ... |
| 1 | import ExportButton from "./ExportButton"; | 1 | import ExportButton from "./ExportButton"; |
| 2 | const DEFAULT_MODEL = process.env.REACT_APP_DEFAULT_MODEL || 'gpt-4o-mini'; | ||
| 2 | const SideMenu = ({ | 3 | const SideMenu = ({ |
| 3 | clearChat, | 4 | clearChat, |
| 4 | currentModel, | 5 | currentModel, |
| ... | @@ -33,8 +34,8 @@ const SideMenu = ({ | ... | @@ -33,8 +34,8 @@ const SideMenu = ({ |
| 33 | </option> | 34 | </option> |
| 34 | )) | 35 | )) |
| 35 | ) : ( | 36 | ) : ( |
| 36 | <option key={"gpt-3.5-turbo"} value={"gpt-3.5-turbo"}> | 37 | <option key={DEFAULT_MODEL} value={DEFAULT_MODEL}> |
| 37 | {"gpt-3.5-turbo"} | 38 | {DEFAULT_MODEL} |
| 38 | </option> | 39 | </option> |
| 39 | )} | 40 | )} |
| 40 | </select> | 41 | </select> | ... | ... |
| ... | @@ -10,6 +10,7 @@ const fetch = require('node-fetch'); | ... | @@ -10,6 +10,7 @@ const fetch = require('node-fetch'); |
| 10 | const anchorme = require("anchorme").default; | 10 | const anchorme = require("anchorme").default; |
| 11 | const axios = require('axios'); | 11 | const axios = require('axios'); |
| 12 | const { encodingForModel } = require('js-tiktoken'); | 12 | const { encodingForModel } = require('js-tiktoken'); |
| 13 | const DEFAULT_MODEL = process.env.REACT_APP_DEFAULT_MODEL || 'gpt-4o-mini'; | ||
| 13 | const tiktokenModels = [ | 14 | const tiktokenModels = [ |
| 14 | 'text-davinci-003', | 15 | 'text-davinci-003', |
| 15 | 'text-davinci-002', | 16 | 'text-davinci-002', |
| ... | @@ -45,7 +46,7 @@ const tiktokenModels = [ | ... | @@ -45,7 +46,7 @@ const tiktokenModels = [ |
| 45 | 'gpt-4-0314', | 46 | 'gpt-4-0314', |
| 46 | 'gpt-4-32k', | 47 | 'gpt-4-32k', |
| 47 | 'gpt-4-32k-0314', | 48 | 'gpt-4-32k-0314', |
| 48 | 'gpt-3.5-turbo', | 49 | 'gpt-4o-mini', |
| 49 | 'gpt-3.5-turbo-0301' | 50 | 'gpt-3.5-turbo-0301' |
| 50 | ]; | 51 | ]; |
| 51 | 52 | ||
| ... | @@ -53,7 +54,7 @@ let client; | ... | @@ -53,7 +54,7 @@ let client; |
| 53 | let filteredModels = {}; | 54 | let filteredModels = {}; |
| 54 | const allowedEndpoints = ["openAI", "Opensource", "Llama"]; | 55 | const allowedEndpoints = ["openAI", "Opensource", "Llama"]; |
| 55 | const allowedModels = [ | 56 | const allowedModels = [ |
| 56 | "gpt-3.5-turbo", | 57 | DEFAULT_MODEL, |
| 57 | "google/gemma-2-9b-it", | 58 | "google/gemma-2-9b-it", |
| 58 | "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo" | 59 | "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo" |
| 59 | ]; | 60 | ]; |
| ... | @@ -157,7 +158,7 @@ app.post('/api', async (req, res) => { | ... | @@ -157,7 +158,7 @@ app.post('/api', async (req, res) => { |
| 157 | let usage = {}; | 158 | let usage = {}; |
| 158 | let enc = null; | 159 | let enc = null; |
| 159 | try { | 160 | try { |
| 160 | enc = encodingForModel(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo'); | 161 | enc = encodingForModel(tiktokenModels.includes(currentModel) ? currentModel : DEFAULT_MODEL); |
| 161 | usage.prompt_tokens = (enc.encode(query_prompt)).length; | 162 | usage.prompt_tokens = (enc.encode(query_prompt)).length; |
| 162 | usage.completion_tokens = (enc.encode(input)).length; | 163 | usage.completion_tokens = (enc.encode(input)).length; |
| 163 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; | 164 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; |
| ... | @@ -240,7 +241,7 @@ async function runGPTTurbo(req, res) { | ... | @@ -240,7 +241,7 @@ async function runGPTTurbo(req, res) { |
| 240 | let usage = {}; | 241 | let usage = {}; |
| 241 | let enc = null; | 242 | let enc = null; |
| 242 | try { | 243 | try { |
| 243 | enc = encodingForModel(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo'); | 244 | enc = encodingForModel(tiktokenModels.includes(currentModel) ? currentModel : DEFAULT_MODEL); |
| 244 | usage.prompt_tokens = (enc.encode(query_prompt)).length; | 245 | usage.prompt_tokens = (enc.encode(query_prompt)).length; |
| 245 | usage.completion_tokens = (enc.encode(input)).length; | 246 | usage.completion_tokens = (enc.encode(input)).length; |
| 246 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; | 247 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; | ... | ... |
Please register or sign in to post a comment