Merge branch 'jeff_fix_maxtoken' into 'master'
jeff_fix_maxtoken See merge request !27
Showing 1 changed file with 34 additions and 15 deletions
/**
 * POST /api — forward the user's message (prefixed with a greeting prompt)
 * to the OpenAI completion endpoint and return the first completion.
 *
 * NOTE(review): reconstructed from a merge-request diff hunk; the statements
 * that bind `message`, `currentModel` and `temperature` precede this hunk and
 * are not visible here — confirm against the full file.
 */
app.post('/api', async (req, res) => {
  let greetingPrompt = 'Hello, how can I assist you?';
  const greetings = ['hi', 'hello', 'hey'];
  if (greetings.some((greeting) => message.toLowerCase().includes(greeting))) {
    greetingPrompt = 'Hello, how can I help you today?';
  }

  let query_prompt = `${greetingPrompt}\n${message}`;

  // Crude context-length guard: for very long messages (>= 800 whitespace-
  // separated words) with at least 4 newline-separated chunks, keep only the
  // last two chunks so the request stays under the model's token limit.
  // FIX: `str_length` and `arr_body` were implicit globals in the original
  // (no declaration) — now properly scoped with const.
  const wordCount = req.body.message.split(' ').length;
  if (wordCount >= 800) {
    const bodyLines = req.body.message.split('\n');
    if (bodyLines.length >= 4) {
      // FIX: replaces the `var i = arr_body.length-2; while (i--) splice`
      // loop — slice(-2) yields the same last-two-lines result without
      // repeated O(n) splices.
      query_prompt = bodyLines.slice(-2).join('\n');
    }
  }

  try {
    const response = await openai.createCompletion({
      model: `${currentModel}`, // "text-davinci-003"
      prompt: query_prompt,
      max_tokens: 3000,
      temperature,
    });
    res.json({
      message: response.data.choices[0].text,
    });
  } catch (e) {
    // FIX: `e.response.data.error.message` threw a secondary TypeError when
    // `e.response` was undefined (network-level failure), masking the real
    // error — optional chaining makes the lookup safe.
    const errorMsg = e.response?.data?.error?.message ?? '';
    if (errorMsg.indexOf('maximum context length') >= 0) {
      res.json({
        message: "The output for your prompt is too long for us to process. Please reduce your prompt and try again.",
      });
    } else {
      console.log(e.response ?? e);
      // FIX: the original sent no response on this path, leaving the client
      // request hanging until timeout.
      res.status(500).json({
        message: 'Something went wrong. Please try again.',
      });
    }
  } finally {
    // console.log('We do cleanup here');
  }
});
-
Please register or sign in to post a comment