Merge branch '28594_usage_tracking' into 'master'
28594_usage_tracking See merge request !72
Showing
2 changed files
with
76 additions
and
0 deletions
| ... | @@ -6,6 +6,47 @@ require('dotenv').config() | ... | @@ -6,6 +6,47 @@ require('dotenv').config() |
| 6 | const rateLimit = require('express-rate-limit') | 6 | const rateLimit = require('express-rate-limit') |
| 7 | const anchorme = require("anchorme").default; | 7 | const anchorme = require("anchorme").default; |
| 8 | const axios = require('axios'); | 8 | const axios = require('axios'); |
// Token counting for usage tracking: @dqbd/tiktoken provides WASM-backed
// BPE encoders matching OpenAI's tokenizers.
const tiktoken = require('@dqbd/tiktoken');

// Models whose tokenizer @dqbd/tiktoken knows about. Call sites check
// membership here and fall back to the 'gpt-3.5-turbo' encoding for any
// model not in this list.
const tiktokenModels = [
  'text-davinci-003',
  'text-davinci-002',
  'text-davinci-001',
  'text-curie-001',
  'text-babbage-001',
  'text-ada-001',
  'davinci',
  'curie',
  'babbage',
  'ada',
  'code-davinci-002',
  'code-davinci-001',
  'code-cushman-002',
  'code-cushman-001',
  'davinci-codex',
  'cushman-codex',
  'text-davinci-edit-001',
  'code-davinci-edit-001',
  'text-embedding-ada-002',
  'text-similarity-davinci-001',
  'text-similarity-curie-001',
  'text-similarity-babbage-001',
  'text-similarity-ada-001',
  'text-search-davinci-doc-001',
  'text-search-curie-doc-001',
  'text-search-babbage-doc-001',
  'text-search-ada-doc-001',
  'code-search-babbage-code-001',
  'code-search-ada-code-001',
  'gpt2',
  'gpt-4',
  'gpt-4-0314',
  'gpt-4-32k',
  'gpt-4-32k-0314',
  'gpt-3.5-turbo',
  'gpt-3.5-turbo-0301',
];

// Encoder factory for a given model name.
// NOTE(review): encoders returned by this are WASM-backed and should be
// released with enc.free() after use — the call sites in this MR never do,
// which leaks memory on every request. TODO: confirm and fix at call sites.
const { encoding_for_model } = tiktoken;

// Cookie name carrying the logged-in user's secret id; overridable via env.
// `||` (not `??`) intentionally treats an empty USER_SECRET_ID as unset,
// matching the original ternary's behavior.
const user_secret_id = process.env.USER_SECRET_ID || 'aiwp_logged_in';
| 9 | 50 | ||
| 10 | // Open AI Configuration | 51 | // Open AI Configuration |
| 11 | // console.log(process.env.OPENAI_API_ORG) | 52 | // console.log(process.env.OPENAI_API_ORG) |
| ... | @@ -79,6 +120,20 @@ app.post('/api', async (req, res) => { | ... | @@ -79,6 +120,20 @@ app.post('/api', async (req, res) => { |
| 79 | temperature, | 120 | temperature, |
| 80 | }); | 121 | }); |
| 81 | let input = response.data.choices[0].text; | 122 | let input = response.data.choices[0].text; |
| 123 | let usage = {}; | ||
| 124 | let enc = null; | ||
| 125 | try { | ||
| 126 | enc = encoding_for_model(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo'); | ||
| 127 | usage.prompt_tokens = (enc.encode(query_prompt)).length; | ||
| 128 | usage.completion_tokens = (enc.encode(input)).length; | ||
| 129 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; | ||
| 130 | |||
| 131 | // TOKEN USAGE | ||
| 132 | btutil_setChatUsage('chatbot+', usage.prompt_tokens, usage.total_tokens); | ||
| 133 | } catch (e) { | ||
| 134 | console.log('Error encoding prompt text', e); | ||
| 135 | } | ||
| 136 | |||
| 82 | res.json({ | 137 | res.json({ |
| 83 | message: anchorme({ | 138 | message: anchorme({ |
| 84 | input, | 139 | input, |
| ... | @@ -137,6 +192,24 @@ async function runGPTTurbo(req, res) { | ... | @@ -137,6 +192,24 @@ async function runGPTTurbo(req, res) { |
| 137 | console.log(e.response); | 192 | console.log(e.response); |
| 138 | } | 193 | } |
| 139 | } finally { | 194 | } finally { |
| 195 | |||
| 196 | let usage = {}; | ||
| 197 | let enc = null; | ||
| 198 | try { | ||
| 199 | enc = encoding_for_model(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo'); | ||
| 200 | usage.prompt_tokens = (enc.encode(query_prompt)).length; | ||
| 201 | usage.completion_tokens = (enc.encode(input)).length; | ||
| 202 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; | ||
| 203 | |||
| 204 | // TOKEN USAGE | ||
| 205 | axios.post(`${process.env.API_URL}/e/set-chat-usage`, | ||
| 206 | { aiwp_logged_in: req.cookies[user_secret_id], app: 'chatbot', prompt_token: usage.prompt_tokens, total_token: usage.total_tokens }, | ||
| 207 | { headers: { 'content-type': 'application/x-www-form-urlencoded' } | ||
| 208 | }); | ||
| 209 | } catch (e) { | ||
| 210 | console.log('Error encoding prompt text', e); | ||
| 211 | } | ||
| 212 | |||
| 140 | res.json({ | 213 | res.json({ |
| 141 | prompt: JSON.parse(message), | 214 | prompt: JSON.parse(message), |
| 142 | message: anchorme({ | 215 | message: anchorme({ | ... | ... |
| ... | @@ -9,9 +9,12 @@ | ... | @@ -9,9 +9,12 @@ |
| 9 | "author": "", | 9 | "author": "", |
| 10 | "license": "ISC", | 10 | "license": "ISC", |
| 11 | "dependencies": { | 11 | "dependencies": { |
| 12 | "@dqbd/tiktoken": "^1.0.7", | ||
| 12 | "anchorme": "^2.1.2", | 13 | "anchorme": "^2.1.2", |
| 13 | "axios": "^1.5.1", | 14 | "axios": "^1.5.1", |
| 14 | "body-parser": "^1.20.1", | 15 | "body-parser": "^1.20.1", |
| 16 | "cookie": "0.5.0", | ||
| 17 | "cookie-parser": "1.4.6", | ||
| 15 | "cors": "^2.8.5", | 18 | "cors": "^2.8.5", |
| 16 | "dotenv": "^16.0.3", | 19 | "dotenv": "^16.0.3", |
| 17 | "express": "^4.18.2", | 20 | "express": "^4.18.2", | ... | ... |
-
Please register or sign in to post a comment