Merge branch '28594_usage_tracking' into '1DEVT'
28594_usage_tracking See merge request !70
Showing
3 changed files
with
88 additions
and
1 deletion
| ... | @@ -49,7 +49,8 @@ function App() { | ... | @@ -49,7 +49,8 @@ function App() { |
| 49 | let model = data.models.data[i]; | 49 | let model = data.models.data[i]; |
| 50 | if( !(model.id == "whisper-1" | 50 | if( !(model.id == "whisper-1" |
| 51 | || model.id == "gpt-4" | 51 | || model.id == "gpt-4" |
| 52 | || model.id == "gpt-4-0314") ) model_list.push(model); | 52 | || model.id == "gpt-4-0314" |
| 53 | || model.id == "gpt-4-0613") ) model_list.push(model); | ||
| 53 | } | 54 | } |
| 54 | setModels(model_list) | 55 | setModels(model_list) |
| 55 | }) | 56 | }) |
| ... | @@ -61,6 +62,12 @@ function App() { | ... | @@ -61,6 +62,12 @@ function App() { |
| 61 | } | 62 | } |
| 62 | 63 | ||
| 63 | async function submitPrompt() { | 64 | async function submitPrompt() { |
| 65 | |||
| 66 | const TPLogicRun = window.TPLogicRun; | ||
| 67 | if (typeof TPLogicRun === 'function') { | ||
| 68 | TPLogicRun(); | ||
| 69 | } | ||
| 70 | |||
| 64 | const userInput = ['what', 'why', 'when', 'where' , 'which', 'did', 'do', 'how', 'can', 'are', 'who']; | 71 | const userInput = ['what', 'why', 'when', 'where' , 'which', 'did', 'do', 'how', 'can', 'are', 'who']; |
| 65 | const userInputRegex = new RegExp(`\\b(${userInput.join('|')})\\b`, 'gi'); | 72 | const userInputRegex = new RegExp(`\\b(${userInput.join('|')})\\b`, 'gi'); |
| 66 | const inputMatches = chatInput.match(userInputRegex); | 73 | const inputMatches = chatInput.match(userInputRegex); | ... | ... |
| ... | @@ -5,6 +5,47 @@ const cors = require('cors') | ... | @@ -5,6 +5,47 @@ const cors = require('cors') |
| 5 | require('dotenv').config() | 5 | require('dotenv').config() |
| 6 | const rateLimit = require('express-rate-limit') | 6 | const rateLimit = require('express-rate-limit') |
| 7 | const anchorme = require("anchorme").default; | 7 | const anchorme = require("anchorme").default; |
| 8 | const axios = require('axios'); | ||
| 9 | const tiktoken = require('@dqbd/tiktoken'); | ||
| 10 | const tiktokenModels = [ | ||
| 11 | 'text-davinci-003', | ||
| 12 | 'text-davinci-002', | ||
| 13 | 'text-davinci-001', | ||
| 14 | 'text-curie-001', | ||
| 15 | 'text-babbage-001', | ||
| 16 | 'text-ada-001', | ||
| 17 | 'davinci', | ||
| 18 | 'curie', | ||
| 19 | 'babbage', | ||
| 20 | 'ada', | ||
| 21 | 'code-davinci-002', | ||
| 22 | 'code-davinci-001', | ||
| 23 | 'code-cushman-002', | ||
| 24 | 'code-cushman-001', | ||
| 25 | 'davinci-codex', | ||
| 26 | 'cushman-codex', | ||
| 27 | 'text-davinci-edit-001', | ||
| 28 | 'code-davinci-edit-001', | ||
| 29 | 'text-embedding-ada-002', | ||
| 30 | 'text-similarity-davinci-001', | ||
| 31 | 'text-similarity-curie-001', | ||
| 32 | 'text-similarity-babbage-001', | ||
| 33 | 'text-similarity-ada-001', | ||
| 34 | 'text-search-davinci-doc-001', | ||
| 35 | 'text-search-curie-doc-001', | ||
| 36 | 'text-search-babbage-doc-001', | ||
| 37 | 'text-search-ada-doc-001', | ||
| 38 | 'code-search-babbage-code-001', | ||
| 39 | 'code-search-ada-code-001', | ||
| 40 | 'gpt2', | ||
| 41 | 'gpt-4', | ||
| 42 | 'gpt-4-0314', | ||
| 43 | 'gpt-4-32k', | ||
| 44 | 'gpt-4-32k-0314', | ||
| 45 | 'gpt-3.5-turbo', | ||
| 46 | 'gpt-3.5-turbo-0301' | ||
| 47 | ]; | ||
| 48 | const encoding_for_model = tiktoken.encoding_for_model; | ||
| 8 | 49 | ||
| 9 | // Open AI Configuration | 50 | // Open AI Configuration |
| 10 | // console.log(process.env.OPENAI_API_ORG) | 51 | // console.log(process.env.OPENAI_API_ORG) |
| ... | @@ -67,6 +108,23 @@ app.post('/api', async (req, res) => { | ... | @@ -67,6 +108,23 @@ app.post('/api', async (req, res) => { |
| 67 | temperature, | 108 | temperature, |
| 68 | }); | 109 | }); |
| 69 | let input = response.data.choices[0].text; | 110 | let input = response.data.choices[0].text; |
| 111 | let usage = {}; | ||
| 112 | let enc = null; | ||
| 113 | try { | ||
| 114 | enc = encoding_for_model(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo'); | ||
| 115 | usage.prompt_tokens = (enc.encode(query_prompt)).length; | ||
| 116 | usage.completion_tokens = (enc.encode(input)).length; | ||
| 117 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; | ||
| 118 | } catch (e) { | ||
| 119 | console.log('Error encoding prompt text', e); | ||
| 120 | } | ||
| 121 | |||
| 122 | // TOKEN USAGE | ||
| 123 | axios.post(`${process.env.API_URL}e/set-chat-usage`, | ||
| 124 | { app: 'chatbot', prompt_token: usage.prompt_tokens, total_token: usage.total_tokens }, | ||
| 125 | { headers: { 'content-type': 'application/x-www-form-urlencoded' } | ||
| 126 | }); | ||
| 127 | |||
| 70 | res.json({ | 128 | res.json({ |
| 71 | message: anchorme({ | 129 | message: anchorme({ |
| 72 | input, | 130 | input, |
| ... | @@ -96,6 +154,8 @@ async function runGPTTurbo(req, res) { | ... | @@ -96,6 +154,8 @@ async function runGPTTurbo(req, res) { |
| 96 | // "gpt-3.5-turbo" | 154 | // "gpt-3.5-turbo" |
| 97 | const { message, currentModel, temperature } = req.body; | 155 | const { message, currentModel, temperature } = req.body; |
| 98 | var input = ''; | 156 | var input = ''; |
| 157 | const message_history = JSON.parse(message); | ||
| 158 | const query_prompt = message_history.length ? message_history[message_history.length - 1].content : ""; | ||
| 99 | try { | 159 | try { |
| 100 | const response = await openai.createChatCompletion({ | 160 | const response = await openai.createChatCompletion({ |
| 101 | model: `${currentModel}`, | 161 | model: `${currentModel}`, |
| ... | @@ -112,6 +172,24 @@ async function runGPTTurbo(req, res) { | ... | @@ -112,6 +172,24 @@ async function runGPTTurbo(req, res) { |
| 112 | console.log(e.response); | 172 | console.log(e.response); |
| 113 | } | 173 | } |
| 114 | } finally { | 174 | } finally { |
| 175 | |||
| 176 | let usage = {}; | ||
| 177 | let enc = null; | ||
| 178 | try { | ||
| 179 | enc = encoding_for_model(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo'); | ||
| 180 | usage.prompt_tokens = (enc.encode(query_prompt)).length; | ||
| 181 | usage.completion_tokens = (enc.encode(input)).length; | ||
| 182 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; | ||
| 183 | } catch (e) { | ||
| 184 | console.log('Error encoding prompt text', e); | ||
| 185 | } | ||
| 186 | |||
| 187 | // TOKEN USAGE | ||
| 188 | axios.post(`${process.env.API_URL}e/set-chat-usage`, | ||
| 189 | { app: 'chatbot', prompt_token: usage.prompt_tokens, total_token: usage.total_tokens }, | ||
| 190 | { headers: { 'content-type': 'application/x-www-form-urlencoded' } | ||
| 191 | }); | ||
| 192 | |||
| 115 | res.json({ | 193 | res.json({ |
| 116 | prompt: JSON.parse(message), | 194 | prompt: JSON.parse(message), |
| 117 | message: anchorme({ | 195 | message: anchorme({ | ... | ... |
| ... | @@ -9,7 +9,9 @@ | ... | @@ -9,7 +9,9 @@ |
| 9 | "author": "", | 9 | "author": "", |
| 10 | "license": "ISC", | 10 | "license": "ISC", |
| 11 | "dependencies": { | 11 | "dependencies": { |
| 12 | "@dqbd/tiktoken": "^1.0.7", | ||
| 12 | "anchorme": "^2.1.2", | 13 | "anchorme": "^2.1.2", |
| 14 | "axios": "^1.5.1", | ||
| 13 | "body-parser": "^1.20.1", | 15 | "body-parser": "^1.20.1", |
| 14 | "cors": "^2.8.5", | 16 | "cors": "^2.8.5", |
| 15 | "dotenv": "^16.0.3", | 17 | "dotenv": "^16.0.3", | ... | ... |
-
Please register or sign in to post a comment