bf863973 by Jonille Arreglo

28594_usage_tracking

1 parent 36b71009
Showing 1 changed file with 4 additions and 15 deletions
......@@ -6,7 +6,7 @@ require('dotenv').config()
const rateLimit = require('express-rate-limit')
const anchorme = require("anchorme").default;
const axios = require('axios');
const tiktoken = require('@dqbd/tiktoken');
const { encodingForModel } = require('js-tiktoken');
const tiktokenModels = [
'text-davinci-003',
'text-davinci-002',
......@@ -45,7 +45,6 @@ const tiktokenModels = [
'gpt-3.5-turbo',
'gpt-3.5-turbo-0301'
];
const encoding_for_model = tiktoken.encoding_for_model;
// Open AI Configuration
// console.log(process.env.OPENAI_API_ORG)
......@@ -70,16 +69,6 @@ app.use(cors())
app.use(require('morgan')('dev'))
app.use(rateLimiter)
// Strip every non-ASCII character from `input`, returning only bytes <= 0x7F.
// Without the `u` flag the character class matches per UTF-16 code unit, so
// surrogate pairs (emoji, etc.) are fully removed — same as the manual
// charCodeAt(i) <= 127 filter this replaces.
// Declared async purely for call-site compatibility: callers `await` it,
// even though no asynchronous work is performed here.
const cleanString = async (input) => input.replace(/[^\x00-\x7F]/g, "");
// Routing
// Primary Open AI Route
......@@ -132,9 +121,9 @@ app.post('/api', async (req, res) => {
let usage = {};
let enc = null;
try {
enc = encoding_for_model(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo');
usage.prompt_tokens = (enc.encode(await cleanString(query_prompt))).length;
usage.completion_tokens = (enc.encode(await cleanString(input))).length;
enc = encodingForModel(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo');
usage.prompt_tokens = (enc.encode(query_prompt)).length;
usage.completion_tokens = (enc.encode(input)).length;
usage.total_tokens = usage.prompt_tokens + usage.completion_tokens;
} catch (e) {
console.log('Error encoding prompt text', e);
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!