- fix the issue of model not responding when temperature is changed
- added error log
Showing 1 changed file with 12 additions and 12 deletions
| ... | @@ -89,11 +89,11 @@ const user_secret_id = process.env.USER_SECRET_ID || "aiwp_logged_in"; | ... | @@ -89,11 +89,11 @@ const user_secret_id = process.env.USER_SECRET_ID || "aiwp_logged_in"; |
| 89 | const aiwp_app_id = "chatbot+"; | 89 | const aiwp_app_id = "chatbot+"; |
| 90 | // Primary Open AI Route | 90 | // Primary Open AI Route |
| 91 | app.post('/api', async (req, res) => { | 91 | app.post('/api', async (req, res) => { |
| 92 | // if(!req.get('origin') || (!req.get('origin').includes(req.get('host')))) { | 92 | if(!req.get('origin') || (!req.get('origin').includes(req.get('host')))) { |
| 93 | // res.status(401); | 93 | res.status(401); |
| 94 | // res.send('Method Not Allowed'); | 94 | res.send('Method Not Allowed'); |
| 95 | // return; | 95 | return; |
| 96 | // } | 96 | } |
| 97 | const { message, currentModel, temperature } = req.body; | 97 | const { message, currentModel, temperature } = req.body; |
| 98 | const getEndpoint = (modelName) => { | 98 | const getEndpoint = (modelName) => { |
| 99 | const model = Object.values(filteredModels).find(m => m.model === modelName); | 99 | const model = Object.values(filteredModels).find(m => m.model === modelName); |
| ... | @@ -151,7 +151,7 @@ app.post('/api', async (req, res) => { | ... | @@ -151,7 +151,7 @@ app.post('/api', async (req, res) => { |
| 151 | model: `${currentModel}`,// "text-davinci-003", | 151 | model: `${currentModel}`,// "text-davinci-003", |
| 152 | prompt: query_prompt, | 152 | prompt: query_prompt, |
| 153 | max_tokens: max_tokens, | 153 | max_tokens: max_tokens, |
| 154 | temperature, | 154 | temperature:parseFloat(temperature), |
| 155 | }); | 155 | }); |
| 156 | let input = response.data.choices[0].text; | 156 | let input = response.data.choices[0].text; |
| 157 | let usage = {}; | 157 | let usage = {}; |
| ... | @@ -191,7 +191,7 @@ app.post('/api', async (req, res) => { | ... | @@ -191,7 +191,7 @@ app.post('/api', async (req, res) => { |
| 191 | message: "The output for your prompt is too long for us to process. Please reduce your prompt and try again.", | 191 | message: "The output for your prompt is too long for us to process. Please reduce your prompt and try again.", |
| 192 | }) | 192 | }) |
| 193 | } else { | 193 | } else { |
| 194 | // console.log(e.response); | 194 | console.log(e.response); |
| 195 | } | 195 | } |
| 196 | } finally { | 196 | } finally { |
| 197 | // console.log('We do cleanup here'); | 197 | // console.log('We do cleanup here'); |
| ... | @@ -225,7 +225,7 @@ async function runGPTTurbo(req, res) { | ... | @@ -225,7 +225,7 @@ async function runGPTTurbo(req, res) { |
| 225 | model: `${currentModel}`, | 225 | model: `${currentModel}`, |
| 226 | messages: JSON.parse(message), | 226 | messages: JSON.parse(message), |
| 227 | max_tokens: max_tokens, | 227 | max_tokens: max_tokens, |
| 228 | temperature | 228 | temperature:parseFloat(temperature), |
| 229 | }); | 229 | }); |
| 230 | input = response.data.choices[0].message.content | 230 | input = response.data.choices[0].message.content |
| 231 | } catch (e) { | 231 | } catch (e) { |
| ... | @@ -233,7 +233,7 @@ async function runGPTTurbo(req, res) { | ... | @@ -233,7 +233,7 @@ async function runGPTTurbo(req, res) { |
| 233 | if (error_msg.indexOf('maximum context length') >= 0) { | 233 | if (error_msg.indexOf('maximum context length') >= 0) { |
| 234 | input = "The output for your prompt is too long for us to process. Please reduce your prompt and try again."; | 234 | input = "The output for your prompt is too long for us to process. Please reduce your prompt and try again."; |
| 235 | } else { | 235 | } else { |
| 236 | // console.log(e.response); | 236 | console.log(e.response); |
| 237 | } | 237 | } |
| 238 | } finally { | 238 | } finally { |
| 239 | 239 | ||
| ... | @@ -300,7 +300,7 @@ async function runOpensource(req, res) { | ... | @@ -300,7 +300,7 @@ async function runOpensource(req, res) { |
| 300 | model: currentModel, | 300 | model: currentModel, |
| 301 | messages: JSON.parse(message), | 301 | messages: JSON.parse(message), |
| 302 | max_tokens: 2048, | 302 | max_tokens: 2048, |
| 303 | temperature, | 303 | temperature:parseFloat(temperature), |
| 304 | top_p: 0.7, | 304 | top_p: 0.7, |
| 305 | top_k: 50, | 305 | top_k: 50, |
| 306 | repetition_penalty: 1 | 306 | repetition_penalty: 1 |
| ... | @@ -334,7 +334,7 @@ async function runOpensource(req, res) { | ... | @@ -334,7 +334,7 @@ async function runOpensource(req, res) { |
| 334 | if (error_msg.indexOf('maximum context length') >= 0) { | 334 | if (error_msg.indexOf('maximum context length') >= 0) { |
| 335 | input = "The output for your prompt is too long for us to process. Please reduce your prompt and try again."; | 335 | input = "The output for your prompt is too long for us to process. Please reduce your prompt and try again."; |
| 336 | } else { | 336 | } else { |
| 337 | // console.log(e.response); | 337 | console.log(e.response); |
| 338 | } | 338 | } |
| 339 | } finally { | 339 | } finally { |
| 340 | let usage_params = { | 340 | let usage_params = { |
| ... | @@ -440,7 +440,7 @@ async function setChatUsage(params) { | ... | @@ -440,7 +440,7 @@ async function setChatUsage(params) { |
| 440 | 440 | ||
| 441 | async function validation (aiwp_app_id, req, res) { | 441 | async function validation (aiwp_app_id, req, res) { |
| 442 | const aiwp_logged_in = req.cookies[user_secret_id] ? decodeURIComponent(req.cookies[user_secret_id]) : ""; | 442 | const aiwp_logged_in = req.cookies[user_secret_id] ? decodeURIComponent(req.cookies[user_secret_id]) : ""; |
| 443 | const limit = req.cookies["WcvYPABR"] ? parseInt(req.cookies["WcvYPABR"].replace(/\D/g, '')) : 9999999999999999999; | 443 | const limit = req.cookies["WcvYPABR"] ? parseInt(req.cookies["WcvYPABR"].replace(/\D/g, '')) : 3; // change this to 99999 if working on dev |
| 444 | let IS_FREE_USER = false; | 444 | let IS_FREE_USER = false; |
| 445 | let TRIED_USAGE = 0; | 445 | let TRIED_USAGE = 0; |
| 446 | const ip_address = getClientIP(req); | 446 | const ip_address = getClientIP(req); | ... | ... |
-
Please register or sign in to post a comment