f52ee68a by Ryan

Merge branch '27103_gpt_turbo' into 'master'

27103_gpt_turbo

See merge request !43
2 parents b7d19f6f 60739d36
...@@ -14,14 +14,23 @@ function App() { ...@@ -14,14 +14,23 @@ function App() {
14 const [chatInput, setChatInput] = useState(""); 14 const [chatInput, setChatInput] = useState("");
15 const [models, setModels] = useState([]); 15 const [models, setModels] = useState([]);
16 const [temperature, setTemperature] = useState(0.7); 16 const [temperature, setTemperature] = useState(0.7);
17 const [currentModel, setCurrentModel] = useState("text-davinci-003"); 17 const GPTTurbo = "gpt-3.5-turbo";
18 const GPTTurbo0301 = "gpt-3.5-turbo-0301";
19 const [currentModel, setCurrentModel] = useState(GPTTurbo);
18 const [chatLog, setChatLog] = useState([{ 20 const [chatLog, setChatLog] = useState([{
19 user: "gpt", 21 user: "gpt",
20 message: "Welcome to AI-PRO... How can I help you?" 22 message: "Welcome to AI-PRO... How can I help you?"
21 }]); 23 }]);
24 // "gpt-3.5-turbo"
25 const defaultChatLogTurbo = [
26 { role: "system", content: "You are a helpful assistant."}
27 ];
28 const [chatLogTurbo, setChatLogTurbo] = useState(defaultChatLogTurbo);
29 //
22 30
// Reset the conversation to its initial state: clear the input box,
// mark that no interaction has happened yet, wipe the visible chat log,
// and restore the turbo message history to its system-prompt seed.
function clearChat() {
  setChatInput("");
  setStartedInteraction(false);
  setChatLog([]);
  setChatLogTurbo(defaultChatLogTurbo);
}
...@@ -34,8 +43,13 @@ function App() { ...@@ -34,8 +43,13 @@ function App() {
34 if(a.id < b.id) { return -1; } 43 if(a.id < b.id) { return -1; }
35 if(a.id > b.id) { return 1; } 44 if(a.id > b.id) { return 1; }
36 return 0; 45 return 0;
37 }) 46 });
38 setModels(data.models.data) 47 let model_list = [];
48 for( var i = 1; i < data.models.data.length; i++ ) {
49 let model = data.models.data[i];
50 if(model.id != "whisper-1") model_list.push(model);
51 }
52 setModels(model_list)
39 }) 53 })
40 } 54 }
41 55
...@@ -67,7 +81,13 @@ function App() { ...@@ -67,7 +81,13 @@ function App() {
67 const userMessage = { user: "gpt", message: "..." }; 81 const userMessage = { user: "gpt", message: "..." };
68 setChatLog(prevChatLog => [...prevChatLog, userMessage]); 82 setChatLog(prevChatLog => [...prevChatLog, userMessage]);
69 83
70 const messages = chatLogNew.map((message) => message.message).join("\n") 84 var messages = chatLogNew.map((message) => message.message).join("\n")
85 if(currentModel == GPTTurbo || currentModel == GPTTurbo0301) {
86 // "gpt-3.5-turbo"
87 let chatLogTurboNew = [...chatLogTurbo, { role: "user", content: chatInput }];
88 setChatLogTurbo(chatLogTurboNew);
89 messages = JSON.stringify(chatLogTurboNew);
90 }
71 let intervalId = startInterval(); 91 let intervalId = startInterval();
72 92
73 try { 93 try {
...@@ -79,10 +99,15 @@ function App() { ...@@ -79,10 +99,15 @@ function App() {
79 body: JSON.stringify({ 99 body: JSON.stringify({
80 message: messages, 100 message: messages,
81 currentModel, 101 currentModel,
102 temperature
82 }) 103 })
83 }); 104 });
84 const data = await response.json(); 105 const data = await response.json();
85 const parsedData = data.message.trim(); 106 const parsedData = data.message.trim();
107 // "gpt-3.5-turbo"
108 let chatLogTurboNew = [...chatLogTurbo, { role: "assistant", content: parsedData }];
109 setChatLogTurbo(chatLogTurboNew);
110 //
86 clearInterval(intervalId); 111 clearInterval(intervalId);
87 const programmingKeywords = ['code', 'application', 'controller', 'rails' , 'PHP', 'java', 'javascript', 'script', 'console', 'python', 'programming', 'table']; 112 const programmingKeywords = ['code', 'application', 'controller', 'rails' , 'PHP', 'java', 'javascript', 'script', 'console', 'python', 'programming', 'table'];
88 113
......
...@@ -21,8 +21,8 @@ const SideMenu = ({ clearChat, currentModel, setCurrentModel, models, setTempera ...@@ -21,8 +21,8 @@ const SideMenu = ({ clearChat, currentModel, setCurrentModel, models, setTempera
21 key={model.id} 21 key={model.id}
22 value={model.id}>{model.id}</option> 22 value={model.id}>{model.id}</option>
23 )) : <option 23 )) : <option
24 key={"text-davinci-003"} 24 key={"gpt-3.5-turbo"}
25 value={"text-davinci-003"}>{"text-davinci-003"}</option>} 25 value={"gpt-3.5-turbo"}>{"gpt-3.5-turbo"}</option>}
26 </select> 26 </select>
27 27
28 <Button 28 <Button
......
...@@ -34,6 +34,11 @@ app.use(rateLimiter) ...@@ -34,6 +34,11 @@ app.use(rateLimiter)
34 app.post('/api', async (req, res) => { 34 app.post('/api', async (req, res) => {
35 const { message, currentModel, temperature } = req.body; 35 const { message, currentModel, temperature } = req.body;
36 36
37 if(currentModel == "gpt-3.5-turbo" || currentModel == "gpt-3.5-turbo-0301") {
38 runGPTTurbo(req,res);
39 return;
40 }
41
37 let greetingPrompt = 'Hello, how can I assist you?' 42 let greetingPrompt = 'Hello, how can I assist you?'
38 const greetings = ['hi', 'hello', 'hey'] 43 const greetings = ['hi', 'hello', 'hey']
39 if (greetings.some((greeting) => message.toLowerCase().includes(greeting))) { 44 if (greetings.some((greeting) => message.toLowerCase().includes(greeting))) {
...@@ -75,6 +80,34 @@ app.post('/api', async (req, res) => { ...@@ -75,6 +80,34 @@ app.post('/api', async (req, res) => {
75 } 80 }
76 }); 81 });
77 82
// Handle a chat request targeting the gpt-3.5-turbo family of models.
//
// req.body.message is a JSON-encoded array of chat messages
// ({ role, content } objects) produced by the client; the response body
// echoes that parsed prompt back together with the assistant's reply
// (or an empty string / friendly notice when the API call fails).
async function runGPTTurbo(req, res) {
  const { message, currentModel, temperature } = req.body;
  // Parse once up front — the original parsed the same string twice
  // (once for the API call, once for the response echo).
  const prompt = JSON.parse(message);
  let returnMessage = '';
  try {
    const response = await openai.createChatCompletion({
      model: currentModel,
      messages: prompt,
      max_tokens: 3000,
      temperature
    });
    returnMessage = response.data.choices[0].message.content;
  } catch (e) {
    // e.response is absent for network/library errors; guard with
    // optional chaining instead of crashing inside the error handler.
    const errorMsg = e.response?.data?.error?.message ?? '';
    if (errorMsg.indexOf('maximum context length') >= 0) {
      returnMessage = "The output for your prompt is too long for us to process. Please reduce your prompt and try again.";
    } else {
      // Log the full error when there is no API response attached.
      console.log(e.response ?? e);
    }
  }
  // Always answer the request, even after an API failure. (The original
  // used `return` inside `finally`, which silently swallows exceptions.)
  res.json({
    prompt,
    message: returnMessage
  });
}
110
78 // Get Models Route 111 // Get Models Route
79 app.get('/models', async (req, res) => { 112 app.get('/models', async (req, res) => {
80 const response = await openai.listEngines(); 113 const response = await openai.listEngines();
......
...@@ -15,6 +15,6 @@ ...@@ -15,6 +15,6 @@
15 "express": "^4.18.2", 15 "express": "^4.18.2",
16 "express-rate-limit": "^6.7.0", 16 "express-rate-limit": "^6.7.0",
17 "morgan": "^1.10.0", 17 "morgan": "^1.10.0",
18 "openai": "^3.1.0" 18 "openai": "^3.2.0"
19 } 19 }
20 } 20 }
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!