// index.js
// Dependencies
const { Configuration, OpenAIApi } = require('openai');
const express = require('express');
const bodyParser = require('body-parser');
const cors = require('cors');
require('dotenv').config();
const rateLimit = require('express-rate-limit');
const anchorme = require('anchorme').default;
const axios = require('axios');
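// The configuration below reads credentials from a .env file. A minimal
// sketch of that file, assuming only the two variables referenced in this
// code (the placeholder values are illustrative):
//
//   OPENAI_API_ORG=org-xxxxxxxx
//   OPENAI_API_KEY=sk-xxxxxxxx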
// OpenAI configuration
const configuration = new Configuration({
  organization: process.env.OPENAI_API_ORG,
  apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(configuration);
// Basic per-IP rate limiting
const rateLimiter = rateLimit({
  windowMs: 1000 * 60 * 1, // 1 minute window (refresh time)
  max: 3000, // limit each IP to this many requests per window
  message: 'Sorry, too many requests. Please try again in a bit!',
});
// Express configuration
const app = express();
const port = 3080;
app.use(bodyParser.json());
app.use(cors());
app.use(require('morgan')('dev'));
app.use(rateLimiter);
// Routing
// Primary OpenAI route
app.post('/api', async (req, res) => {
  const { message, currentModel, temperature } = req.body;

  // Chat models go through a separate code path.
  if (currentModel == 'gpt-3.5-turbo' || currentModel == 'gpt-3.5-turbo-0301') {
    runGPTTurbo(req, res);
    return;
  }

  // Prepend a greeting line to the prompt; vary it if the user greeted us.
  // Match whole words so that e.g. "this" does not trigger on "hi".
  let greetingPrompt = 'Hello, how can I assist you?';
  const greetings = ['hi', 'hello', 'hey'];
  const words = message.toLowerCase().split(/\s+/);
  if (greetings.some((greeting) => words.includes(greeting))) {
    greetingPrompt = 'Hello, how can I help you today?';
  }
  let query_prompt = `${greetingPrompt}\n${message}`;

  // If the prompt is very long (800+ words) and spans several lines,
  // keep only the last two lines to stay under the model's context limit.
  const str_length = req.body.message.split(' ').length;
  if (str_length >= 800) {
    const arr_body = req.body.message.split('\n');
    if (arr_body.length >= 4) {
      let i = arr_body.length - 2;
      while (i--) {
        arr_body.splice(i, 1); // drop every line except the last two
      }
      query_prompt = arr_body.join('\n');
    }
  }

  // Run the prompt through OpenAI's moderation endpoint and refuse
  // flagged input.
  const moderation = await axios.post(
    'https://api.openai.com/v1/moderations',
    { input: query_prompt },
    {
      headers: {
        'content-type': 'application/json',
        Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
      },
    }
  );
  if (moderation.data.results[0].flagged) {
    res.json({
      success: false,
      message: "I'm sorry, but I can't assist with that. We want everyone to use our tool safely and responsibly.\nIf you have any other questions or need advice on a different topic, feel free to ask.",
    });
    return;
  }

  try {
    const response = await openai.createCompletion({
      model: currentModel, // e.g. "text-davinci-003"
      prompt: query_prompt,
      max_tokens: 3000,
      temperature,
    });
    const input = response.data.choices[0].text;
    // Linkify URLs in the completion before returning it.
    res.json({
      message: anchorme({
        input,
        options: {
          attributes: { target: '_blank' },
        },
      }),
    });
  } catch (e) {
    // e.response may be undefined for network errors, so chain optionally.
    const error_msg = e.response?.data?.error?.message || '';
    if (error_msg.indexOf('maximum context length') >= 0) {
      res.json({
        message: 'The output for your prompt is too long for us to process. Please reduce your prompt and try again.',
      });
    } else {
      console.log(e.response || e);
      // Always answer the request so the client does not hang.
      res.status(500).json({ message: 'Something went wrong. Please try again.' });
    }
  }
});
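// A sample call against the route above, as a sketch: the body shape follows
// the destructuring in the handler, while the curl invocation itself is
// illustrative, not part of this project.
//
//   curl -X POST http://localhost:3080/api \
//     -H 'Content-Type: application/json' \
//     -d '{"message": "hello, what is Node.js?", "currentModel": "text-davinci-003", "temperature": 0.5}'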
// Chat-completion path for "gpt-3.5-turbo" models
async function runGPTTurbo(req, res) {
  const { message, currentModel, temperature } = req.body;
  let input = '';

  // The client sends the whole chat history as a JSON string; parse it once
  // and moderate only the latest user message.
  const message_history = JSON.parse(message);
  const query_prompt = message_history.length
    ? message_history[message_history.length - 1].content
    : '';

  const moderation = await axios.post(
    'https://api.openai.com/v1/moderations',
    { input: query_prompt },
    {
      headers: {
        'content-type': 'application/json',
        Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
      },
    }
  );
  if (moderation.data.results[0].flagged) {
    res.json({
      success: false,
      message: "I'm sorry, but I can't assist with that. We want everyone to use our tool safely and responsibly.\nIf you have any other questions or need advice on a different topic, feel free to ask.",
    });
    return;
  }

  try {
    const response = await openai.createChatCompletion({
      model: currentModel,
      messages: message_history,
      max_tokens: 3000,
      temperature,
    });
    input = response.data.choices[0].message.content;
  } catch (e) {
    // e.response may be undefined for network errors, so chain optionally.
    const error_msg = e.response?.data?.error?.message || '';
    if (error_msg.indexOf('maximum context length') >= 0) {
      input = 'The output for your prompt is too long for us to process. Please reduce your prompt and try again.';
    } else {
      console.log(e.response || e);
    }
  } finally {
    // Echo the prompt back and linkify URLs in the reply.
    res.json({
      prompt: message_history,
      message: anchorme({
        input,
        options: {
          attributes: { target: '_blank' },
        },
      }),
    });
  }
}
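// The chat path expects `message` to be a JSON-encoded array in OpenAI's
// chat format ({role, content} objects). A minimal sketch of a request body,
// with illustrative values:
//
//   {
//     "message": "[{\"role\": \"user\", \"content\": \"hi there\"}]",
//     "currentModel": "gpt-3.5-turbo",
//     "temperature": 0.5
//   }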
// Get Models route
app.get('/models', async (req, res) => {
  const response = await openai.listEngines();
  res.json({
    models: response.data,
  });
});
// Start the server
app.listen(port, () => {
  console.log(`Example app listening at http://localhost:${port}`);
});