17eb87aa by Jonille Arreglo

28594_usage_tracking

1 parent c954a68a
@@ -5,6 +5,7 @@ const cors = require('cors')
 require('dotenv').config()
 const rateLimit = require('express-rate-limit')
 const anchorme = require("anchorme").default;
+const axios = require('axios');
 const tiktoken = require('@dqbd/tiktoken');
 const tiktokenModels = [
   'text-davinci-003',
@@ -105,6 +106,23 @@ app.post('/api', async (req, res) => {
     temperature,
   });
   let input = response.data.choices[0].text;
+  let usage = {};
+  let enc = null;
+  try {
+    enc = encoding_for_model(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo');
+    usage.prompt_tokens = (enc.encode(query_prompt)).length;
+    usage.completion_tokens = (enc.encode(input)).length;
+    usage.total_tokens = usage.prompt_tokens + usage.completion_tokens;
+  } catch (e) {
+    console.log('Error encoding prompt text', e);
+  }
+
+  // TOKEN USAGE
+  axios.post(`${process.env.API_URL}e/set-chat-usage`,
+    { app: 'chatbot', prompt_token: usage.prompt_tokens, total_token: usage.total_tokens },
+    { headers: { 'content-type': 'application/x-www-form-urlencoded' }
+  });
+
   res.json({
     message: anchorme({
       input,
@@ -133,6 +151,8 @@ async function runGPTTurbo(req, res) {
   // "gpt-3.5-turbo"
   const { message, currentModel, temperature } = req.body;
   var input = '';
+  const message_history = JSON.parse(message);
+  const query_prompt = message_history.length ? message_history[message_history.length - 1].content : "";
   try {
     const response = await openai.createChatCompletion({
       model: `${currentModel}`,
@@ -149,6 +169,24 @@ async function runGPTTurbo(req, res) {
       console.log(e.response);
     }
   } finally {
+
+    let usage = {};
+    let enc = null;
+    try {
+      enc = encoding_for_model(tiktokenModels.includes(currentModel) ? currentModel : 'gpt-3.5-turbo');
+      usage.prompt_tokens = (enc.encode(query_prompt)).length;
+      usage.completion_tokens = (enc.encode(input)).length;
+      usage.total_tokens = usage.prompt_tokens + usage.completion_tokens;
+    } catch (e) {
+      console.log('Error encoding prompt text', e);
+    }
+
+    // TOKEN USAGE
+    axios.post(`${process.env.API_URL}e/set-chat-usage`,
+      { app: 'chatbot', prompt_token: usage.prompt_tokens, total_token: usage.total_tokens },
+      { headers: { 'content-type': 'application/x-www-form-urlencoded' }
+    });
+
     res.json({
       prompt: JSON.parse(message),
       message: anchorme({
......
@@ -11,6 +11,7 @@
   "dependencies": {
     "@dqbd/tiktoken": "^1.0.7",
     "anchorme": "^2.1.2",
+    "axios": "^1.5.1",
     "body-parser": "^1.20.1",
     "cors": "^2.8.5",
     "dotenv": "^16.0.3",
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!