index.js
const { Configuration, OpenAIApi } = require("openai");
const express = require('express')
const bodyParser = require('body-parser')
const cors = require('cors')
require('dotenv').config()
const rateLimit = require('express-rate-limit')
// Open AI Configuration
// console.log(process.env.OPENAI_API_ORG)
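// The client below reads OPENAI_API_ORG and OPENAI_API_KEY from the .env file loaded above.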
const configuration = new Configuration({
  organization: process.env.OPENAI_API_ORG,
  apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(configuration);
const rateLimiter = rateLimit({
  windowMs: 1000 * 60 * 1, // 1 minute (refresh time)
  max: 3000, // limit each IP to this many requests per windowMs
  message: 'Sorry, too many requests. Please try again in a bit!',
});
// Express Configuration
const app = express()
const port = 3080
app.use(bodyParser.json())
app.use(cors())
app.use(require('morgan')('dev'))
app.use(rateLimiter)
// Routing
// Primary Open AI Route
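// Expects a JSON body of the form { message, currentModel, temperature }.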
app.post('/api', async (req, res) => {
  const { message, currentModel, temperature } = req.body;

  let greetingPrompt = 'Hello, how can I assist you?'
  const greetings = ['hi', 'hello', 'hey']
  if (greetings.some((greeting) => message.toLowerCase().includes(greeting))) {
    greetingPrompt = 'Hello, how can I help you today?'
  }
  let query_prompt = `${greetingPrompt}\n${message}`;

  // For very long messages, keep only the last two newline-separated chunks
  // so the prompt stays within the model's context window.
  const str_length = message.split(' ').length;
  if (str_length >= 800) {
    const arr_body = message.split('\n');
    if (arr_body.length >= 4) {
      query_prompt = arr_body.slice(-2).join('\n');
    }
  }

  try {
    const response = await openai.createCompletion({
      model: `${currentModel}`, // e.g. "text-davinci-003"
      prompt: query_prompt,
      max_tokens: 3000,
      temperature,
    });
    res.json({
      message: response.data.choices[0].text,
    })
  } catch (e) {
    const error_msg = e.response?.data?.error?.message || '';
    if (error_msg.indexOf('maximum context length') >= 0) {
      res.json({
        message: "The output for your prompt is too long for us to process. Please reduce your prompt and try again.",
      })
    } else {
      console.log(e.response || e);
      res.status(500).json({ message: 'Something went wrong. Please try again.' })
    }
  }
});
// Get Models Route
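// Proxies openai.listEngines() so the client can list the engines available to this account.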
app.get('/models', async (req, res) => {
  try {
    const response = await openai.listEngines();
    res.json({
      models: response.data,
    })
  } catch (e) {
    console.log(e.response || e);
    res.status(500).json({ message: 'Unable to fetch models. Please try again.' })
  }
});
// Start the server
app.listen(port, () => {
  console.log(`Example app listening at http://localhost:${port}`)
});