0b0fc94a by RSA

fix conflict

2 parents e5ab7e31 ec12f544
......@@ -266,3 +266,30 @@ ul.suggested-options {
padding: 10px 10px 0 10px;
}
}
/* Error bubble shown inside a chat message: a tinted, rounded flex row
   holding a circular badge (the <span>) and the error text (.msg).
   NOTE(review): the palette here is blue (rgb(18 0 255 / 20%), #0023ff)
   for a class named "error_msg" — confirm that is intentional and not a
   leftover from a theme change. */
.message .error_msg {
background: rgb(18 0 255 / 20%);
padding: 10px;
border-radius: 5px;
display: flex;
}
/* Circular 25x25 badge at the start of the error row (e.g. an "!" glyph);
   border-radius: 50px on a square box renders it as a circle. */
.message .error_msg span {
background: #0023ff;
width: 25px;
height: 25px;
display: inline-block;
text-align: center;
padding-top: 1px;
border-radius: 50px;
font-family: auto;
font-weight: 600;
vertical-align: top;
}
/* The error text next to the badge; capped at 80% width so long
   messages wrap instead of stretching the bubble. */
.message .error_msg .msg {
display: inline-block;
margin: 0 10px;
font-style: italic;
width: 80%;
}
......
......@@ -46,7 +46,7 @@ function App() {
// Build a word-boundary regex out of the known user-input tokens and find
// which of them occur in the current chat input (case-insensitive).
const userInputRegex = new RegExp(`\\b(${userInput.join('|')})\\b`, 'gi');
const inputMatches = chatInput.match(userInputRegex);
// NOTE(review): the next two lines BOTH declare `userPunctuation` — this is
// unresolved diff residue (the stripped +/- markers removed the old/new
// distinction). The first line, with the redundant '\.' escape inside a
// string literal, is the pre-merge side; keeping both is a SyntaxError
// (duplicate const declaration in one scope). Confirm and delete the first.
const userPunctuation = ['\.', '?', '!', ':', ';', ','];
const userPunctuation = ['.', '?', '!', ':', ';', ','];
// Character class matching a single trailing punctuation mark — used to
// check whether the chat input already ends with punctuation.
const userPunctuationRegex = new RegExp(`[${userPunctuation.join('')}]$`);
const punctuationMatches = chatInput.match(userPunctuationRegex);
......
......@@ -3,6 +3,7 @@ const express = require('express')
const bodyParser = require('body-parser')
const cors = require('cors')
require('dotenv').config()
const rateLimit = require('express-rate-limit')
// Open AI Configuration
// console.log(process.env.OPENAI_API_ORG)
......@@ -12,6 +13,12 @@ const configuration = new Configuration({
});
const openai = new OpenAIApi(configuration);
// Per-IP request throttle (express-rate-limit), registered globally via
// app.use(rateLimiter) below. Requests beyond `max` within `windowMs`
// receive the `message` text.
// NOTE(review): 3000 requests/minute per IP is extremely permissive for an
// OpenAI proxy — confirm this ceiling is intentional.
const rateLimiter = rateLimit({
windowMs: 1000 * 60 * 1, // 1 minute (refreshTime)
max: 3000, // limit each IP to x requests per windowMs (refreshTime)
message: 'Sorry, too many requests. Please try again in a bit!',
});
// Express Configuration
// Create the HTTP application; the API is served on `port`.
const app = express()
const port = 3080 // NOTE(review): hard-coded — consider process.env.PORT ?? 3080
......@@ -19,7 +26,7 @@ const port = 3080
// Global middleware, applied to every route in registration order:
// JSON body parsing, permissive CORS, 'dev'-format request logging,
// then the per-IP rate limiter declared above.
app.use(bodyParser.json())
app.use(cors())
app.use(require('morgan')('dev'))
app.use(rateLimiter)
// Routing
......@@ -34,19 +41,44 @@ app.post('/api', async (req, res) => {
greetingPrompt = 'Hello, how can I help you today?'
}
// NOTE(review): both of the next two lines are present — unresolved diff
// residue. `const prompt` (pre-merge) is superseded by `let query_prompt`
// and is now dead; confirm and remove the first line.
const prompt = `${greetingPrompt}\n${message}`;
let query_prompt = `${greetingPrompt}\n${message}`;
// NOTE(review): this createCompletion call is the PRE-MERGE version — it is
// duplicated (and superseded) by the try/catch block below. Keeping both
// re-declares `response` in the same scope (a SyntaxError for const) and
// would hit the OpenAI API twice per request. Confirm and delete this call.
const response = await openai.createCompletion({
model: `${currentModel}`,// "text-davinci-003",
prompt,
max_tokens: 2500,
temperature,
});
// Crude context-window guard: if the message is >= 800 whitespace-separated
// words AND spans at least 4 newline-delimited lines, keep only the last
// two lines of the message as the prompt.
// NOTE(review): `str_length` and `arr_body` are implicit globals — declare
// them with let/const (this throws in strict mode and leaks state between
// requests otherwise).
str_length = req.body.message.split(' ').length;
if (str_length>=800){
arr_body = req.body.message.split("\n");
if (arr_body.length>=4){
// `while (i--)` walks i from length-3 down to 0, splicing each index
// out one at a time, so only the final two lines survive.
// NOTE(review): repeated splice(i, 1) is O(n^2); arr_body.slice(-2)
// would do the same in one step.
var i = arr_body.length-2
while (i--) {
arr_body.splice(i, 1);
}
query_prompt = arr_body.join("\n")
}
}
// NOTE(review): pre-merge response block — superseded by the res.json call
// inside the try below; with both present the handler would answer twice.
// Confirm and remove.
res.json({
message: response.data.choices[0].text,
})
try {
// Post-merge path: send the (possibly truncated) prompt to OpenAI and
// return the first completion choice to the client.
const response = await openai.createCompletion({
model: `${currentModel}`,// "text-davinci-003",
prompt: query_prompt,
max_tokens: 3000,
temperature,
});
res.json({
message: response.data.choices[0].text,
})
} catch (e) {
// let error_msg = e.response.data.error.message ? e.response.data.error.message : '';
// if (error_msg.indexOf('maximum context length')>=0){
// console.log(error_msg);
// }else{
// console.log(e.response);
// }
// NOTE(review): on failure the error is only logged — no response is sent,
// so the client request hangs until timeout. Consider
// res.status(500).json({ error: '...' }) here. Also, `e.response` is only
// set for HTTP-level failures (axios); network errors would log undefined.
console.log(e.response);
} finally {
// console.log('We do cleanup here');
}
});
// Get Models Route
......
......@@ -13,6 +13,7 @@
"cors": "^2.8.5",
"dotenv": "^16.0.3",
"express": "^4.18.2",
"express-rate-limit": "^6.7.0",
"morgan": "^1.10.0",
"openai": "^3.1.0"
}
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!