f52ee68a by Ryan

Merge branch '27103_gpt_turbo' into 'master'

27103_gpt_turbo

See merge request !43
2 parents b7d19f6f 60739d36
......@@ -14,14 +14,23 @@ function App() {
const [chatInput, setChatInput] = useState("");
const [models, setModels] = useState([]);
const [temperature, setTemperature] = useState(0.7);
const [currentModel, setCurrentModel] = useState("text-davinci-003");
const GPTTurbo = "gpt-3.5-turbo";
const GPTTurbo0301 = "gpt-3.5-turbo-0301";
const [currentModel, setCurrentModel] = useState(GPTTurbo);
const [chatLog, setChatLog] = useState([{
user: "gpt",
message: "Welcome to AI-PRO... How can I help you?"
}]);
// "gpt-3.5-turbo"
const defaultChatLogTurbo = [
{ role: "system", content: "You are a helpful assistant."}
];
const [chatLogTurbo, setChatLogTurbo] = useState(defaultChatLogTurbo);
//
function clearChat(){
  // Reset the conversation to its initial state: restore the turbo log to its
  // system-prompt seed, wipe the legacy chat log, clear the input box, and
  // mark that no interaction has started yet. (The setters are independent,
  // so their order does not matter.)
  setChatLogTurbo(defaultChatLogTurbo);
  setChatLog([]);
  setStartedInteraction(false);
  setChatInput("");
}
......@@ -34,8 +43,13 @@ function App() {
if(a.id < b.id) { return -1; }
if(a.id > b.id) { return 1; }
return 0;
})
setModels(data.models.data)
});
let model_list = [];
for( var i = 1; i < data.models.data.length; i++ ) {
let model = data.models.data[i];
if(model.id != "whisper-1") model_list.push(model);
}
setModels(model_list)
})
}
......@@ -67,7 +81,13 @@ function App() {
const userMessage = { user: "gpt", message: "..." };
setChatLog(prevChatLog => [...prevChatLog, userMessage]);
const messages = chatLogNew.map((message) => message.message).join("\n")
var messages = chatLogNew.map((message) => message.message).join("\n")
if(currentModel == GPTTurbo || currentModel == GPTTurbo0301) {
// "gpt-3.5-turbo"
let chatLogTurboNew = [...chatLogTurbo, { role: "user", content: chatInput }];
setChatLogTurbo(chatLogTurboNew);
messages = JSON.stringify(chatLogTurboNew);
}
let intervalId = startInterval();
try {
......@@ -79,10 +99,15 @@ function App() {
body: JSON.stringify({
message: messages,
currentModel,
temperature
})
});
const data = await response.json();
const parsedData = data.message.trim();
// "gpt-3.5-turbo"
let chatLogTurboNew = [...chatLogTurbo, { role: "assistant", content: parsedData }];
setChatLogTurbo(chatLogTurboNew);
//
clearInterval(intervalId);
const programmingKeywords = ['code', 'application', 'controller', 'rails' , 'PHP', 'java', 'javascript', 'script', 'console', 'python', 'programming', 'table'];
......
......@@ -21,8 +21,8 @@ const SideMenu = ({ clearChat, currentModel, setCurrentModel, models, setTempera
key={model.id}
value={model.id}>{model.id}</option>
)) : <option
key={"text-davinci-003"}
value={"text-davinci-003"}>{"text-davinci-003"}</option>}
key={"gpt-3.5-turbo"}
value={"gpt-3.5-turbo"}>{"gpt-3.5-turbo"}</option>}
</select>
<Button
......
......@@ -34,6 +34,11 @@ app.use(rateLimiter)
app.post('/api', async (req, res) => {
const { message, currentModel, temperature } = req.body;
if(currentModel == "gpt-3.5-turbo" || currentModel == "gpt-3.5-turbo-0301") {
runGPTTurbo(req,res);
return;
}
let greetingPrompt = 'Hello, how can I assist you?'
const greetings = ['hi', 'hello', 'hey']
if (greetings.some((greeting) => message.toLowerCase().includes(greeting))) {
......@@ -75,6 +80,34 @@ app.post('/api', async (req, res) => {
}
});
async function runGPTTurbo(req, res) {
  // Handles chat-completion requests for the "gpt-3.5-turbo" model family.
  // Expects req.body.message to be a JSON-encoded array of chat messages
  // ([{ role, content }, ...]); responds with { prompt, message }.
  const { message, currentModel, temperature } = req.body;
  // Parse once and reuse — the original parsed the same string twice
  // (request payload and response echo).
  const parsedMessages = JSON.parse(message);
  let returnMessage = '';
  try {
    const response = await openai.createChatCompletion({
      model: currentModel,
      messages: parsedMessages,
      max_tokens: 3000,
      temperature
    });
    returnMessage = response.data.choices[0].message.content;
  } catch (e) {
    // e.response only exists for HTTP errors from the OpenAI client; guard the
    // whole chain so network/timeout errors don't throw inside the catch.
    const error_msg = (e.response && e.response.data && e.response.data.error && e.response.data.error.message) || '';
    if (error_msg.indexOf('maximum context length') >= 0) {
      returnMessage = "The output for your prompt is too long for us to process. Please reduce your prompt and try again.";
    } else {
      console.log(e.response || e);
    }
  }
  // Respond after the try/catch: the original `return` inside `finally` is an
  // anti-pattern — it silently discards any exception still propagating.
  res.json({
    prompt: parsedMessages,
    message: returnMessage
  });
}
// Get Models Route
app.get('/models', async (req, res) => {
const response = await openai.listEngines();
......
......@@ -15,6 +15,6 @@
"express": "^4.18.2",
"express-rate-limit": "^6.7.0",
"morgan": "^1.10.0",
"openai": "^3.1.0"
"openai": "^3.2.0"
}
}
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!