Merge branch '31675_chatbotplus_llm' into 'master'
31675_chatbotplus_llm See merge request !113
Showing
1 changed file
with
10 additions
and
1 deletion
| ... | @@ -288,14 +288,23 @@ async function runOpensource(req, res) { | ... | @@ -288,14 +288,23 @@ async function runOpensource(req, res) { |
| 288 | const response = await axios.post(endpoint_api_url + '/chat/completions', { | 288 | const response = await axios.post(endpoint_api_url + '/chat/completions', { |
| 289 | model: currentModel, | 289 | model: currentModel, |
| 290 | messages: JSON.parse(message), | 290 | messages: JSON.parse(message), |
| 291 | temperature | 291 | max_tokens: 2048, |
| 292 | temperature, | ||
| 293 | top_p: 0.7, | ||
| 294 | top_k: 50, | ||
| 295 | repetition_penalty: 1 | ||
| 292 | }, { | 296 | }, { |
| 293 | headers: { | 297 | headers: { |
| 294 | 'Content-Type': 'application/json', | 298 | 'Content-Type': 'application/json', |
| 295 | 'Authorization': 'Bearer ' + api_key | 299 | 'Authorization': 'Bearer ' + api_key |
| 296 | }, | 300 | }, |
| 297 | }).catch(error => { | 301 | }).catch(error => { |
| 302 | if(error.response?.data?.error?.param === 'max_tokens') { | ||
| 303 | input = "The output for your prompt is too long for us to process. Please reduce your prompt and try again."; | ||
| 304 | } else { | ||
| 298 | error_msg = error.response.statusText ? error.response.statusText : ''; | 305 | error_msg = error.response.statusText ? error.response.statusText : ''; |
| 306 | } | ||
| 307 | console.log("err",error.response.data.error); | ||
| 299 | }); | 308 | }); |
| 300 | 309 | ||
| 301 | if (error_msg !== '') { | 310 | if (error_msg !== '') { | ... | ... |
-
Please register or sign in to post a comment