94593dd1 by Ryan

Merge branch 'jeff_fix_maxtoken' into 'master'

jeff_fix_maxtoken

See merge request !27
2 parents 5d77d99f 4e4163e2
Showing 1 changed file with 34 additions and 15 deletions
......@@ -36,24 +36,43 @@ app.post('/api', async (req, res) => {
// Default assistant opener prepended to every prompt sent to the model.
let greetingPrompt = 'Hello, how can I assist you?'
const greetings = ['hi', 'hello', 'hey']
// If the user's message contains any greeting word, switch to a friendlier opener.
// NOTE(review): this is a substring match, so e.g. "this" matches 'hi' — confirm intended.
if (greetings.some((greeting) => message.toLowerCase().includes(greeting))) {
greetingPrompt = 'Hello, how can I help you today?'
}
// (pre-merge code, removed by this MR) Builds the prompt and calls the
// completion API directly — no prompt-length guard and no error handling,
// so an oversized prompt surfaced as an unhandled API error.
const prompt = `${greetingPrompt}\n${message}`;
const response = await openai.createCompletion({
model: `${currentModel}`,// "text-davinci-003",
prompt,
max_tokens: 2500,
temperature,
});
res.json({
message: response.data.choices[0].text,
})
let query_prompt = `${greetingPrompt}\n${message}`;
// Guard against prompts that blow past the model's context window: for very
// long messages (>= 800 whitespace-separated words), keep only the last two
// newline-separated segments of the raw message.
const wordCount = req.body.message.split(' ').length;
if (wordCount >= 800) {
  const bodyLines = req.body.message.split('\n');
  if (bodyLines.length >= 4) {
    // Equivalent to the old splice-in-a-loop: retain only the final two lines.
    // NOTE: this intentionally drops greetingPrompt for truncated messages.
    query_prompt = bodyLines.slice(-2).join('\n');
  }
}
try {
  const response = await openai.createCompletion({
    model: `${currentModel}`,// "text-davinci-003",
    prompt: query_prompt,
    max_tokens: 3000,
    temperature,
  });
  res.json({
    message: response.data.choices[0].text,
  })
} catch (e) {
  // e.response is absent for non-HTTP failures (e.g. network errors); the old
  // code dereferenced e.response.data unconditionally and crashed the catch.
  const error_msg = e.response?.data?.error?.message ?? '';
  if (error_msg.includes('maximum context length')) {
    res.json({
      message: "The output for your prompt is too long for us to process. Please reduce your prompt and try again.",
    })
  } else {
    console.log(e.response ?? e);
    // BUG FIX: the old else-branch only logged, so the client request hung
    // forever. Always answer with a 500 so the caller can recover.
    res.status(500).json({
      message: "Something went wrong. Please try again.",
    })
  }
} finally {
  // console.log('We do cleanup here');
}
});
// Get Models Route
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!