28594_usage_tracking
Showing 1 changed file with 12 additions and 3 deletions
| ... | @@ -46,7 +46,6 @@ const tiktokenModels = [ | ... | @@ -46,7 +46,6 @@ const tiktokenModels = [ |
| 46 | 'gpt-3.5-turbo-0301' | 46 | 'gpt-3.5-turbo-0301' |
| 47 | ]; | 47 | ]; |
| 48 | const encoding_for_model = tiktoken.encoding_for_model; | 48 | const encoding_for_model = tiktoken.encoding_for_model; |
| 49 | const user_secret_id = process.env.USER_SECRET_ID ? process.env.USER_SECRET_ID : "aiwp_logged_in"; | ||
| 50 | 49 | ||
| 51 | // Open AI Configuration | 50 | // Open AI Configuration |
| 52 | // console.log(process.env.OPENAI_API_ORG) | 51 | // console.log(process.env.OPENAI_API_ORG) |
| ... | @@ -71,6 +70,16 @@ app.use(cors()) | ... | @@ -71,6 +70,16 @@ app.use(cors()) |
| 71 | app.use(require('morgan')('dev')) | 70 | app.use(require('morgan')('dev')) |
| 72 | app.use(rateLimiter) | 71 | app.use(rateLimiter) |
| 73 | 72 | ||
// Strip every character with a code unit above 127 (non-ASCII) from the
// input — presumably so the tiktoken encoder sees only ASCII before token
// counting (see the enc.encode(await cleanString(...)) call sites); confirm
// against tiktoken's behavior with non-ASCII input.
//
// Kept async so existing `await cleanString(...)` callers are unchanged.
//
// @param {string} input - text to sanitize
// @returns {Promise<string>} input with all non-ASCII characters removed
const cleanString = async (input) => {
  // One regex pass replaces the original O(n^2) char-by-char string
  // concatenation loop; [^\x00-\x7F] matches exactly charCodeAt(i) > 127.
  return input.replace(/[^\x00-\x7F]/g, '');
};
| 82 | |||
| 74 | // Routing | 83 | // Routing |
| 75 | 84 | ||
| 76 | // Primary Open AI Route | 85 | // Primary Open AI Route |
| ... | @@ -124,8 +133,8 @@ app.post('/api', async (req, res) => { | ... | @@ -124,8 +133,8 @@ app.post('/api', async (req, res) => { |
| 124 | let enc = null; | 133 | let enc = null; |
| 125 | try { | 134 | try { |
| 126 | enc = encoding_for_model(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo'); | 135 | enc = encoding_for_model(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo'); |
| 127 | usage.prompt_tokens = (enc.encode(query_prompt)).length; | 136 | usage.prompt_tokens = (enc.encode(await cleanString(query_prompt))).length; |
| 128 | usage.completion_tokens = (enc.encode(input)).length; | 137 | usage.completion_tokens = (enc.encode(await cleanString(input))).length; |
| 129 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; | 138 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; |
| 130 | } catch (e) { | 139 | } catch (e) { |
| 131 | console.log('Error encoding prompt text', e); | 140 | console.log('Error encoding prompt text', e); | ... | ... |
-
Please register or sign in to post a comment