864f97cc by jeff

jeff_fix_maxtoken

1 parent 2bf0467a
Showing 1 changed file with 31 additions and 2 deletions
...@@ -33,16 +33,45 @@ app.use(rateLimiter) ...@@ -33,16 +33,45 @@ app.use(rateLimiter)
// Primary Open AI Route.
// Accepts { message, currentModel, temperature } in the JSON body, forwards
// the prompt to the OpenAI completions endpoint, and returns the first
// completion text as { message }.
app.post('/api', async (req, res) => {
  const { message, currentModel, temperature } = req.body;

  let queryPrompt = `${message}`;

  // Crude context-length guard: if the prompt is very long (>= 1000
  // whitespace-separated words) and has at least 4 newline-separated
  // segments, keep only the last two segments so the request stays under
  // the model's maximum context window.
  const wordCount = message.split(' ').length;
  if (wordCount >= 1000) {
    const segments = message.split('\n');
    if (segments.length >= 4) {
      // Equivalent to the reverse-splice loop: retain the final two segments.
      queryPrompt = segments.slice(-2).join('\n');
    }
  }

  try {
    const response = await openai.createCompletion({
      model: `${currentModel}`, // e.g. "text-davinci-003"
      prompt: queryPrompt,
      max_tokens: 3000,
      temperature,
    });

    res.json({
      message: response.data.choices[0].text,
    });
  } catch (e) {
    // Log the upstream error, but also answer the client — the original
    // code only logged, which left the HTTP request hanging forever.
    console.log(e.response);
    res.status(500).json({
      message: 'OpenAI request failed',
    });
  }
});
47 76
48 // Get Models Route 77 // Get Models Route
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!