79b5ccd4 by Ryan

Merge branch '27103_gpt_turbo' into '1DEVT'

27103_gpt_turbo

See merge request !42
2 parents 7fd3de54 d2da63b4
...@@ -250,6 +250,19 @@ ...@@ -250,6 +250,19 @@
250 font-size: 14px; 250 font-size: 14px;
251 } 251 }
252 252
/* Two-line landing header ("ChatGPT" / "by AI-PRO"); replaces the old single <h1>. */
253 .suggested-header-title {
254 font-size: 1.5em;
255 font-weight: bold;
256 }
257
/* First line ("ChatGPT") rendered larger than the second for visual hierarchy. */
258 .suggested-header-title p:first-child {
259 font-size: 1.57em;
260 }
261
/* Strip default <p> margins so the two lines sit flush together. */
262 .suggested-header-title p {
263 margin: 0;
264 }
265
253 ul.suggested-options { 266 ul.suggested-options {
254 cursor: pointer; 267 cursor: pointer;
255 overflow-x: hidden; 268 overflow-x: hidden;
......
...@@ -14,14 +14,23 @@ function App() { ...@@ -14,14 +14,23 @@ function App() {
14 const [chatInput, setChatInput] = useState(""); 14 const [chatInput, setChatInput] = useState("");
15 const [models, setModels] = useState([]); 15 const [models, setModels] = useState([]);
16 const [temperature, setTemperature] = useState(0.7); 16 const [temperature, setTemperature] = useState(0.7);
17 const [currentModel, setCurrentModel] = useState("text-davinci-003"); 17 const GPTTurbo = "gpt-3.5-turbo";
18 const GPTTurbo0301 = "gpt-3.5-turbo-0301";
19 const [currentModel, setCurrentModel] = useState(GPTTurbo);
18 const [chatLog, setChatLog] = useState([{ 20 const [chatLog, setChatLog] = useState([{
19 user: "gpt", 21 user: "gpt",
20 message: "Welcome to AI-PRO... How can I help you?" 22 message: "Welcome to AI-PRO... How can I help you?"
21 }]); 23 }]);
24 // "gpt-3.5-turbo"
25 const defaultChatLogTurbo = [
26 { role: "system", content: "You are a helpful assistant."}
27 ];
28 const [chatLogTurbo, setChatLogTurbo] = useState(defaultChatLogTurbo);
29 //
22 30
23 function clearChat(){ 31 function clearChat(){
24 setChatLog([]); 32 setChatLog([]);
33 setChatLogTurbo(defaultChatLogTurbo);
25 setChatInput(""); 34 setChatInput("");
26 setStartedInteraction(false); 35 setStartedInteraction(false);
27 } 36 }
...@@ -66,7 +75,13 @@ function App() { ...@@ -66,7 +75,13 @@ function App() {
66 const userMessage = { user: "gpt", message: "..." }; 75 const userMessage = { user: "gpt", message: "..." };
67 setChatLog(prevChatLog => [...prevChatLog, userMessage]); 76 setChatLog(prevChatLog => [...prevChatLog, userMessage]);
68 77
69 const messages = chatLogNew.map((message) => message.message).join("\n") 78 var messages = chatLogNew.map((message) => message.message).join("\n")
79 if(currentModel == GPTTurbo || currentModel == GPTTurbo0301) {
80 // "gpt-3.5-turbo"
81 let chatLogTurboNew = [...chatLogTurbo, { role: "user", content: chatInput }];
82 setChatLogTurbo(chatLogTurboNew);
83 messages = JSON.stringify(chatLogTurboNew);
84 }
70 let intervalId = startInterval(); 85 let intervalId = startInterval();
71 86
72 try { 87 try {
...@@ -78,11 +93,16 @@ function App() { ...@@ -78,11 +93,16 @@ function App() {
78 body: JSON.stringify({ 93 body: JSON.stringify({
79 message: messages, 94 message: messages,
80 currentModel, 95 currentModel,
96 temperature
81 }) 97 })
82 }); 98 });
83 99
84 const data = await response.json(); 100 const data = await response.json();
85 const parsedData = data.message.trim(); 101 const parsedData = data.message.trim();
102 // "gpt-3.5-turbo"
103 let chatLogTurboNew = [...chatLogTurbo, { role: "assistant", content: parsedData }];
104 setChatLogTurbo(chatLogTurboNew);
105 //
86 clearInterval(intervalId); 106 clearInterval(intervalId);
87 const programmingKeywords = ['code', 'application', 'controller', 'rails' , 'PHP', 'java', 'javascript', 'script', 'console', 'python', 'programming', 'table']; 107 const programmingKeywords = ['code', 'application', 'controller', 'rails' , 'PHP', 'java', 'javascript', 'script', 'console', 'python', 'programming', 'table'];
88 108
......
1 const SideMenu = ({ clearChat, currentModel, setCurrentModel, models, setTemperature, temperature }) => 1 const SideMenu = ({ clearChat, currentModel, setCurrentModel, models, setTemperature, temperature }) =>
2 <aside className="sidemenu"> 2 <aside className="sidemenu">
3 <div className="ai-logo-container"> 3 <div className="ai-logo-container">
4 <img className="ai-logo" alt="Ai-pro logo" src="../assets/images/AIPRO-WHITE.png" height="50px"/> 4 <img className="ai-logo" alt="Ai-pro logo" src="../assets/images/chatgpt-aipro.png" height="50px"/>
5 </div> 5 </div>
6 <div className="side-menu-button" onClick={clearChat}> 6 <div className="side-menu-button" onClick={clearChat}>
7 <span>+</span> 7 <span>+</span>
......
...@@ -21,7 +21,10 @@ const SuggestedOptions = ({ setChatInput }) => { ...@@ -21,7 +21,10 @@ const SuggestedOptions = ({ setChatInput }) => {
21 return ( 21 return (
22 <div className="suggested"> 22 <div className="suggested">
23 <div className="suggestedrow title"> 23 <div className="suggestedrow title">
24 <h1>Welcome to AI-PRO</h1> 24 <div className="suggested-header-title">
25 <p>ChatGPT</p>
26 <p>by AI-PRO</p>
27 </div>
25 <p>This chatbot is capable of answering questions and generating text based on the input you provide.</p> 28 <p>This chatbot is capable of answering questions and generating text based on the input you provide.</p>
26 </div> 29 </div>
27 <div className="suggestedcol rack1"> 30 <div className="suggestedcol rack1">
......
...@@ -34,6 +34,11 @@ app.use(rateLimiter) ...@@ -34,6 +34,11 @@ app.use(rateLimiter)
34 app.post('/api', async (req, res) => { 34 app.post('/api', async (req, res) => {
35 const { message, currentModel, temperature } = req.body; 35 const { message, currentModel, temperature } = req.body;
36 36
37 if(currentModel == "gpt-3.5-turbo" || currentModel == "gpt-3.5-turbo-0301") {
38 runGPTTurbo(req,res);
39 return;
40 }
41
37 let greetingPrompt = 'Hello, how can I assist you?' 42 let greetingPrompt = 'Hello, how can I assist you?'
38 const greetings = ['hi', 'hello', 'hey'] 43 const greetings = ['hi', 'hello', 'hey']
39 44
...@@ -78,6 +83,34 @@ app.post('/api', async (req, res) => { ...@@ -78,6 +83,34 @@ app.post('/api', async (req, res) => {
78 83
79 }); 84 });
80 85
/**
 * Handles chat requests for the "gpt-3.5-turbo" family of models.
 * Expects req.body.message to be a JSON-encoded array of chat messages
 * ({ role, content } objects), forwards it to the OpenAI chat-completion
 * API, and responds with { prompt, message }.
 *
 * @param {import('express').Request} req - body: { message, currentModel, temperature }
 * @param {import('express').Response} res - JSON response is always sent, even on failure
 */
async function runGPTTurbo(req, res) {
  const { message, currentModel, temperature } = req.body;
  let returnMessage = '';
  // Parse once and reuse; the original parsed twice (request + response echo),
  // and a malformed payload would have thrown out of the `finally` block.
  let parsedMessages = [];
  try {
    parsedMessages = JSON.parse(message);
    const response = await openai.createChatCompletion({
      model: currentModel,
      messages: parsedMessages,
      max_tokens: 3000,
      temperature
    });
    returnMessage = response.data.choices[0].message.content;
  } catch (e) {
    // Optional chaining: transport-level errors (timeouts, DNS, aborted
    // sockets) carry no `e.response`, and the unguarded access previously
    // threw a TypeError inside this catch block.
    const errorMsg = e?.response?.data?.error?.message ?? '';
    if (errorMsg.includes('maximum context length')) {
      returnMessage = "The output for your prompt is too long for us to process. Please reduce your prompt and try again.";
    } else {
      console.log(e?.response ?? e);
    }
  }
  // Always answer the client so the request never hangs on error.
  res.json({
    prompt: parsedMessages,
    message: returnMessage
  });
}
113
81 // Get Models Route 114 // Get Models Route
82 app.get('/models', async (req, res) => { 115 app.get('/models', async (req, res) => {
83 const response = await openai.listEngines(); 116 const response = await openai.listEngines();
......
...@@ -15,6 +15,6 @@ ...@@ -15,6 +15,6 @@
15 "express": "^4.18.2", 15 "express": "^4.18.2",
16 "express-rate-limit": "^6.7.0", 16 "express-rate-limit": "^6.7.0",
17 "morgan": "^1.10.0", 17 "morgan": "^1.10.0",
18 "openai": "^3.1.0" 18 "openai": "^3.2.0"
19 } 19 }
20 } 20 }
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!