Merge branch '27034_spacing' into 'master'
27034_spacing See merge request !45
Showing 4 changed files with 26 additions and 6 deletions
| ... | @@ -127,6 +127,9 @@ | ... | @@ -127,6 +127,9 @@ |
| 127 | color:white; | 127 | color:white; |
| 128 | } | 128 | } |
| 129 | .message { | 129 | .message { |
| 130 | width: 100%; | ||
| 131 | overflow: hidden; | ||
| 132 | word-wrap: break-word; | ||
| 130 | padding-left: 40px; | 133 | padding-left: 40px; |
| 131 | padding-right: 40px; | 134 | padding-right: 40px; |
| 132 | } | 135 | } | ... | ... |
| ... | @@ -114,7 +114,7 @@ function App() { | ... | @@ -114,7 +114,7 @@ function App() { |
| 114 | const regex = new RegExp(`\\b(${programmingKeywords.join('|')})\\b`, 'gi'); | 114 | const regex = new RegExp(`\\b(${programmingKeywords.join('|')})\\b`, 'gi'); |
| 115 | const matches = parsedData.match(regex); | 115 | const matches = parsedData.match(regex); |
| 116 | if (!matches) { | 116 | if (!matches) { |
| 117 | var replaceTags = (parsedData.replace(/(?:\r\n|\r|\n)/g, '<br>').replace(/\./g, '. ')) | 117 | var replaceTags = (parsedData.replace(/(?:\r\n|\r|\n)/g, '<br>')) |
| 118 | } else { | 118 | } else { |
| 119 | replaceTags = (parsedData.replace(':',':<code>').replace('<?','<?').replace('?>','?>').replace(/\n/g, '<br>')) | 119 | replaceTags = (parsedData.replace(':',':<code>').replace('<?','<?').replace('?>','?>').replace(/\n/g, '<br>')) |
| 120 | } | 120 | } | ... | ... |
| ... | @@ -4,6 +4,7 @@ const bodyParser = require('body-parser') | ... | @@ -4,6 +4,7 @@ const bodyParser = require('body-parser') |
| 4 | const cors = require('cors') | 4 | const cors = require('cors') |
| 5 | require('dotenv').config() | 5 | require('dotenv').config() |
| 6 | const rateLimit = require('express-rate-limit') | 6 | const rateLimit = require('express-rate-limit') |
| 7 | const anchorme = require("anchorme").default; | ||
| 7 | 8 | ||
| 8 | // Open AI Configuration | 9 | // Open AI Configuration |
| 9 | // console.log(process.env.OPENAI_API_ORG) | 10 | // console.log(process.env.OPENAI_API_ORG) |
| ... | @@ -63,8 +64,16 @@ app.post('/api', async (req, res) => { | ... | @@ -63,8 +64,16 @@ app.post('/api', async (req, res) => { |
| 63 | max_tokens: 3000, | 64 | max_tokens: 3000, |
| 64 | temperature, | 65 | temperature, |
| 65 | }); | 66 | }); |
| 67 | let input = response.data.choices[0].text; | ||
| 66 | res.json({ | 68 | res.json({ |
| 67 | message: response.data.choices[0].text, | 69 | message: anchorme({ |
| 70 | input, | ||
| 71 | options: { | ||
| 72 | attributes: { | ||
| 73 | target: "_blank" | ||
| 74 | }, | ||
| 75 | } | ||
| 76 | }) | ||
| 68 | }) | 77 | }) |
| 69 | } catch (e) { | 78 | } catch (e) { |
| 70 | let error_msg = e.response.data.error.message ? e.response.data.error.message : ''; | 79 | let error_msg = e.response.data.error.message ? e.response.data.error.message : ''; |
| ... | @@ -83,7 +92,7 @@ app.post('/api', async (req, res) => { | ... | @@ -83,7 +92,7 @@ app.post('/api', async (req, res) => { |
| 83 | async function runGPTTurbo(req, res) { | 92 | async function runGPTTurbo(req, res) { |
| 84 | // "gpt-3.5-turbo" | 93 | // "gpt-3.5-turbo" |
| 85 | const { message, currentModel, temperature } = req.body; | 94 | const { message, currentModel, temperature } = req.body; |
| 86 | var returnMessage = ''; | 95 | var input = ''; |
| 87 | try { | 96 | try { |
| 88 | const response = await openai.createChatCompletion({ | 97 | const response = await openai.createChatCompletion({ |
| 89 | model: `${currentModel}`, | 98 | model: `${currentModel}`, |
| ... | @@ -91,18 +100,25 @@ async function runGPTTurbo(req, res) { | ... | @@ -91,18 +100,25 @@ async function runGPTTurbo(req, res) { |
| 91 | max_tokens: 3000, | 100 | max_tokens: 3000, |
| 92 | temperature | 101 | temperature |
| 93 | }); | 102 | }); |
| 94 | returnMessage = response.data.choices[0].message.content | 103 | input = response.data.choices[0].message.content |
| 95 | } catch (e) { | 104 | } catch (e) { |
| 96 | let error_msg = e.response.data.error.message ? e.response.data.error.message : ''; | 105 | let error_msg = e.response.data.error.message ? e.response.data.error.message : ''; |
| 97 | if (error_msg.indexOf('maximum context length')>=0){ | 106 | if (error_msg.indexOf('maximum context length')>=0){ |
| 98 | returnMessage = "The output for your prompt is too long for us to process. Please reduce your prompt and try again."; | 107 | input = "The output for your prompt is too long for us to process. Please reduce your prompt and try again."; |
| 99 | }else{ | 108 | }else{ |
| 100 | console.log(e.response); | 109 | console.log(e.response); |
| 101 | } | 110 | } |
| 102 | } finally { | 111 | } finally { |
| 103 | res.json({ | 112 | res.json({ |
| 104 | prompt: JSON.parse(message), | 113 | prompt: JSON.parse(message), |
| 105 | message: returnMessage | 114 | message: anchorme({ |
| 115 | input, | ||
| 116 | options: { | ||
| 117 | attributes: { | ||
| 118 | target: "_blank" | ||
| 119 | }, | ||
| 120 | } | ||
| 121 | }) | ||
| 106 | }); | 122 | }); |
| 107 | return; | 123 | return; |
| 108 | } | 124 | } | ... | ... |
| ... | @@ -9,6 +9,7 @@ | ... | @@ -9,6 +9,7 @@ |
| 9 | "author": "", | 9 | "author": "", |
| 10 | "license": "ISC", | 10 | "license": "ISC", |
| 11 | "dependencies": { | 11 | "dependencies": { |
| 12 | "anchorme": "^2.1.2", | ||
| 12 | "body-parser": "^1.20.1", | 13 | "body-parser": "^1.20.1", |
| 13 | "cors": "^2.8.5", | 14 | "cors": "^2.8.5", |
| 14 | "dotenv": "^16.0.3", | 15 | "dotenv": "^16.0.3", | ... | ... |
-
Please register or sign in to post a comment