43da8db2 by Bryan Batac

- fix the issue of the model not responding when the temperature is changed

- added error logging
1 parent 84486d88
Showing 1 changed file with 12 additions and 12 deletions
......@@ -89,11 +89,11 @@ const user_secret_id = process.env.USER_SECRET_ID || "aiwp_logged_in";
const aiwp_app_id = "chatbot+";
// Primary Open AI Route
app.post('/api', async (req, res) => {
// if(!req.get('origin') || (!req.get('origin').includes(req.get('host')))) {
// res.status(401);
// res.send('Method Not Allowed');
// return;
// }
if(!req.get('origin') || (!req.get('origin').includes(req.get('host')))) {
res.status(401);
res.send('Method Not Allowed');
return;
}
const { message, currentModel, temperature } = req.body;
const getEndpoint = (modelName) => {
const model = Object.values(filteredModels).find(m => m.model === modelName);
......@@ -151,7 +151,7 @@ app.post('/api', async (req, res) => {
model: `${currentModel}`,// "text-davinci-003",
prompt: query_prompt,
max_tokens: max_tokens,
temperature,
temperature:parseFloat(temperature),
});
let input = response.data.choices[0].text;
let usage = {};
......@@ -191,7 +191,7 @@ app.post('/api', async (req, res) => {
message: "The output for your prompt is too long for us to process. Please reduce your prompt and try again.",
})
} else {
// console.log(e.response);
console.log(e.response);
}
} finally {
// console.log('We do cleanup here');
......@@ -225,7 +225,7 @@ async function runGPTTurbo(req, res) {
model: `${currentModel}`,
messages: JSON.parse(message),
max_tokens: max_tokens,
temperature
temperature:parseFloat(temperature),
});
input = response.data.choices[0].message.content
} catch (e) {
......@@ -233,7 +233,7 @@ async function runGPTTurbo(req, res) {
if (error_msg.indexOf('maximum context length') >= 0) {
input = "The output for your prompt is too long for us to process. Please reduce your prompt and try again.";
} else {
// console.log(e.response);
console.log(e.response);
}
} finally {
......@@ -300,7 +300,7 @@ async function runOpensource(req, res) {
model: currentModel,
messages: JSON.parse(message),
max_tokens: 2048,
temperature,
temperature:parseFloat(temperature),
top_p: 0.7,
top_k: 50,
repetition_penalty: 1
......@@ -334,7 +334,7 @@ async function runOpensource(req, res) {
if (error_msg.indexOf('maximum context length') >= 0) {
input = "The output for your prompt is too long for us to process. Please reduce your prompt and try again.";
} else {
// console.log(e.response);
console.log(e.response);
}
} finally {
let usage_params = {
......@@ -440,7 +440,7 @@ async function setChatUsage(params) {
async function validation (aiwp_app_id, req, res) {
const aiwp_logged_in = req.cookies[user_secret_id] ? decodeURIComponent(req.cookies[user_secret_id]) : "";
const limit = req.cookies["WcvYPABR"] ? parseInt(req.cookies["WcvYPABR"].replace(/\D/g, '')) : 9999999999999999999;
const limit = req.cookies["WcvYPABR"] ? parseInt(req.cookies["WcvYPABR"].replace(/\D/g, '')) : 3; // change this to 99999 if working on dev
let IS_FREE_USER = false;
let TRIED_USAGE = 0;
const ip_address = getClientIP(req);
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!