d2da63b4 by Jonille Arreglo

27103_gpt_turbo

1 parent 7a7f6394
...@@ -14,14 +14,23 @@ function App() { ...@@ -14,14 +14,23 @@ function App() {
14 const [chatInput, setChatInput] = useState(""); 14 const [chatInput, setChatInput] = useState("");
15 const [models, setModels] = useState([]); 15 const [models, setModels] = useState([]);
16 const [temperature, setTemperature] = useState(0.7); 16 const [temperature, setTemperature] = useState(0.7);
17 const [currentModel, setCurrentModel] = useState("text-davinci-003"); 17 const GPTTurbo = "gpt-3.5-turbo";
18 const GPTTurbo0301 = "gpt-3.5-turbo-0301";
19 const [currentModel, setCurrentModel] = useState(GPTTurbo);
18 const [chatLog, setChatLog] = useState([{ 20 const [chatLog, setChatLog] = useState([{
19 user: "gpt", 21 user: "gpt",
20 message: "Welcome to AI-PRO... How can I help you?" 22 message: "Welcome to AI-PRO... How can I help you?"
21 }]); 23 }]);
24 // "gpt-3.5-turbo"
25 const defaultChatLogTurbo = [
26 { role: "system", content: "You are a helpful assistant."}
27 ];
28 const [chatLogTurbo, setChatLogTurbo] = useState(defaultChatLogTurbo);
29 //
22 30
// Reset the whole conversation UI to its initial state: clear the input
// box, empty the legacy chat log, re-seed the turbo log with the system
// prompt, and mark that no interaction has happened yet.
function clearChat() {
  setChatInput("");
  setChatLog([]);
  setChatLogTurbo(defaultChatLogTurbo);
  setStartedInteraction(false);
}
...@@ -67,7 +76,13 @@ function App() { ...@@ -67,7 +76,13 @@ function App() {
67 const userMessage = { user: "gpt", message: "..." }; 76 const userMessage = { user: "gpt", message: "..." };
68 setChatLog(prevChatLog => [...prevChatLog, userMessage]); 77 setChatLog(prevChatLog => [...prevChatLog, userMessage]);
69 78
70 const messages = chatLogNew.map((message) => message.message).join("\n") 79 var messages = chatLogNew.map((message) => message.message).join("\n")
80 if(currentModel == GPTTurbo || currentModel == GPTTurbo0301) {
81 // "gpt-3.5-turbo"
82 let chatLogTurboNew = [...chatLogTurbo, { role: "user", content: chatInput }];
83 setChatLogTurbo(chatLogTurboNew);
84 messages = JSON.stringify(chatLogTurboNew);
85 }
71 let intervalId = startInterval(); 86 let intervalId = startInterval();
72 87
73 try { 88 try {
...@@ -79,10 +94,15 @@ function App() { ...@@ -79,10 +94,15 @@ function App() {
79 body: JSON.stringify({ 94 body: JSON.stringify({
80 message: messages, 95 message: messages,
81 currentModel, 96 currentModel,
97 temperature
82 }) 98 })
83 }); 99 });
84 const data = await response.json(); 100 const data = await response.json();
85 const parsedData = data.message.trim(); 101 const parsedData = data.message.trim();
102 // "gpt-3.5-turbo"
103 let chatLogTurboNew = [...chatLogTurbo, { role: "assistant", content: parsedData }];
104 setChatLogTurbo(chatLogTurboNew);
105 //
86 clearInterval(intervalId); 106 clearInterval(intervalId);
87 const programmingKeywords = ['code', 'application', 'controller', 'rails' , 'PHP', 'java', 'javascript', 'script', 'console', 'python', 'programming', 'table']; 107 const programmingKeywords = ['code', 'application', 'controller', 'rails' , 'PHP', 'java', 'javascript', 'script', 'console', 'python', 'programming', 'table'];
88 108
......
...@@ -34,6 +34,11 @@ app.use(rateLimiter) ...@@ -34,6 +34,11 @@ app.use(rateLimiter)
34 app.post('/api', async (req, res) => { 34 app.post('/api', async (req, res) => {
35 const { message, currentModel, temperature } = req.body; 35 const { message, currentModel, temperature } = req.body;
36 36
37 if(currentModel == "gpt-3.5-turbo" || currentModel == "gpt-3.5-turbo-0301") {
38 runGPTTurbo(req,res);
39 return;
40 }
41
37 let greetingPrompt = 'Hello, how can I assist you?' 42 let greetingPrompt = 'Hello, how can I assist you?'
38 const greetings = ['hi', 'hello', 'hey'] 43 const greetings = ['hi', 'hello', 'hey']
39 if (greetings.some((greeting) => message.toLowerCase().includes(greeting))) { 44 if (greetings.some((greeting) => message.toLowerCase().includes(greeting))) {
...@@ -75,6 +80,34 @@ app.post('/api', async (req, res) => { ...@@ -75,6 +80,34 @@ app.post('/api', async (req, res) => {
75 } 80 }
76 }); 81 });
77 82
/**
 * Handles a chat request routed to the "gpt-3.5-turbo" model family.
 *
 * Expects req.body to contain:
 *   - message: JSON-encoded array of chat messages ({ role, content })
 *   - currentModel: OpenAI model id (e.g. "gpt-3.5-turbo")
 *   - temperature: sampling temperature forwarded to the API
 *
 * Always answers the request with JSON of the shape { prompt, message }.
 */
async function runGPTTurbo(req, res) {
  const { message, currentModel, temperature } = req.body;

  // Parse the payload exactly once, up front. The original parsed
  // `message` twice, and the second parse lived inside `finally` — an
  // invalid payload would throw uncaught there and crash the request.
  let chatMessages;
  try {
    chatMessages = JSON.parse(message);
  } catch (parseErr) {
    res.json({
      prompt: [],
      message: "Invalid chat payload. Please try again."
    });
    return;
  }

  let returnMessage = '';
  try {
    const response = await openai.createChatCompletion({
      model: currentModel,
      messages: chatMessages,
      max_tokens: 3000,
      temperature
    });
    returnMessage = response.data.choices[0].message.content;
  } catch (e) {
    // e.response is undefined for network-level failures; optional
    // chaining avoids the TypeError the original threw inside this
    // catch block when there was no HTTP response to inspect.
    const errorMsg = e?.response?.data?.error?.message ?? '';
    if (errorMsg.includes('maximum context length')) {
      returnMessage = "The output for your prompt is too long for us to process. Please reduce your prompt and try again.";
    } else {
      console.log(e?.response ?? e);
    }
  } finally {
    // Respond in every case (success, API error, context overflow) so
    // the client never hangs waiting for a reply.
    res.json({
      prompt: chatMessages,
      message: returnMessage
    });
  }
}
110
78 // Get Models Route 111 // Get Models Route
79 app.get('/models', async (req, res) => { 112 app.get('/models', async (req, res) => {
80 const response = await openai.listEngines(); 113 const response = await openai.listEngines();
......
...@@ -15,6 +15,6 @@ ...@@ -15,6 +15,6 @@
15 "express": "^4.18.2", 15 "express": "^4.18.2",
16 "express-rate-limit": "^6.7.0", 16 "express-rate-limit": "^6.7.0",
17 "morgan": "^1.10.0", 17 "morgan": "^1.10.0",
18 "openai": "^3.1.0" 18 "openai": "^3.2.0"
19 } 19 }
20 } 20 }
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!