Merge branch '27034_spacing' into '1DEVT'. See merge request !49.
Showing 1 changed file with 11 additions and 5 deletions.
| ... | @@ -68,7 +68,6 @@ app.post('/api', async (req, res) => { | ... | @@ -68,7 +68,6 @@ app.post('/api', async (req, res) => { |
| 68 | }); | 68 | }); |
| 69 | let input = response.data.choices[0].text; | 69 | let input = response.data.choices[0].text; |
| 70 | res.json({ | 70 | res.json({ |
| 71 | input: input, | ||
| 72 | message: anchorme({ | 71 | message: anchorme({ |
| 73 | input, | 72 | input, |
| 74 | options: { | 73 | options: { |
| ... | @@ -96,7 +95,7 @@ app.post('/api', async (req, res) => { | ... | @@ -96,7 +95,7 @@ app.post('/api', async (req, res) => { |
| 96 | async function runGPTTurbo(req, res) { | 95 | async function runGPTTurbo(req, res) { |
| 97 | // "gpt-3.5-turbo" | 96 | // "gpt-3.5-turbo" |
| 98 | const { message, currentModel, temperature } = req.body; | 97 | const { message, currentModel, temperature } = req.body; |
| 99 | var returnMessage = ''; | 98 | var input = ''; |
| 100 | try { | 99 | try { |
| 101 | const response = await openai.createChatCompletion({ | 100 | const response = await openai.createChatCompletion({ |
| 102 | model: `${currentModel}`, | 101 | model: `${currentModel}`, |
| ... | @@ -104,18 +103,25 @@ async function runGPTTurbo(req, res) { | ... | @@ -104,18 +103,25 @@ async function runGPTTurbo(req, res) { |
| 104 | max_tokens: 3000, | 103 | max_tokens: 3000, |
| 105 | temperature | 104 | temperature |
| 106 | }); | 105 | }); |
| 107 | returnMessage = response.data.choices[0].message.content | 106 | input = response.data.choices[0].message.content |
| 108 | } catch (e) { | 107 | } catch (e) { |
| 109 | let error_msg = e.response.data.error.message ? e.response.data.error.message : ''; | 108 | let error_msg = e.response.data.error.message ? e.response.data.error.message : ''; |
| 110 | if (error_msg.indexOf('maximum context length')>=0){ | 109 | if (error_msg.indexOf('maximum context length')>=0){ |
| 111 | returnMessage = "The output for your prompt is too long for us to process. Please reduce your prompt and try again."; | 110 | input = "The output for your prompt is too long for us to process. Please reduce your prompt and try again."; |
| 112 | }else{ | 111 | }else{ |
| 113 | console.log(e.response); | 112 | console.log(e.response); |
| 114 | } | 113 | } |
| 115 | } finally { | 114 | } finally { |
| 116 | res.json({ | 115 | res.json({ |
| 117 | prompt: JSON.parse(message), | 116 | prompt: JSON.parse(message), |
| 118 | message: returnMessage | 117 | message: anchorme({ |
| 118 | input, | ||
| 119 | options: { | ||
| 120 | attributes: { | ||
| 121 | target: "_blank" | ||
| 122 | }, | ||
| 123 | } | ||
| 124 | }) | ||
| 119 | }); | 125 | }); |
| 120 | return; | 126 | return; |
| 121 | } | 127 | } | ... | ... |
-
Please register or sign in to post a comment