Skip to content
Toggle navigation
Toggle navigation
This project
Loading...
Sign in
Administrator
/
chatgpt.ai-pro.org
Go to a project
Toggle navigation
Toggle navigation pinning
Projects
Groups
Snippets
Help
Project
Activity
Repository
Pipelines
Graphs
Issues
0
Merge Requests
3
Wiki
Network
Create a new issue
Builds
Commits
Issue Boards
Files
Commits
Network
Compare
Branches
Tags
bf863973
authored
2023-11-15 17:54:04 +0800
by
Jonille Arreglo
Browse Files
Options
Browse Files
Tag
Download
Email Patches
Plain Diff
28594_usage_tracking
1 parent
36b71009
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
4 additions
and
15 deletions
index.js
index.js
View file @
bf86397
...
...
@@ -6,7 +6,7 @@ require('dotenv').config()
const
rateLimit
=
require
(
'express-rate-limit'
)
const
anchorme
=
require
(
"anchorme"
).
default
;
const
axios
=
require
(
'axios'
);
const
tiktoken
=
require
(
'@dqbd/
tiktoken'
);
const
{
encodingForModel
}
=
require
(
'js-
tiktoken'
);
const
tiktokenModels
=
[
'text-davinci-003'
,
'text-davinci-002'
,
...
...
@@ -45,7 +45,6 @@ const tiktokenModels = [
'gpt-3.5-turbo'
,
'gpt-3.5-turbo-0301'
];
const
encoding_for_model
=
tiktoken
.
encoding_for_model
;
// Open AI Configuration
// console.log(process.env.OPENAI_API_ORG)
...
...
@@ -70,16 +69,6 @@ app.use(cors())
app
.
use
(
require
(
'morgan'
)(
'dev'
))
app
.
use
(
rateLimiter
)
// Strip every non-ASCII character from `input` and return the result.
//
// Used before token counting with enc.encode() — presumably the tiktoken
// encoder misbehaves on some multi-byte characters (TODO confirm with the
// encoder's maintainers; the visible call sites only show the filtering).
//
// @param {string} input - arbitrary string (may contain multi-byte chars)
// @returns {Promise<string>} the input reduced to its 7-bit ASCII subset
//
// NOTE: kept `async` for backward compatibility — existing call sites do
// `await cleanString(...)` even though the work itself is synchronous.
// Surrogate-pair characters (e.g. emoji) are fully removed, matching the
// original per-code-unit charCodeAt(i) <= 127 filter.
const cleanString = async (input) => {
  // A single regex pass replaces the manual index loop + string
  // concatenation: drop every UTF-16 code unit outside 0x00–0x7F.
  return input.replace(/[^\x00-\x7F]/g, '');
};
// Routing
// Primary Open AI Route
...
...
@@ -132,9 +121,9 @@ app.post('/api', async (req, res) => {
let
usage
=
{};
let
enc
=
null
;
try
{
enc
=
encoding
_for_m
odel
(
tiktokenModels
.
includes
(
currentModel
)
?
currentModel
:
'gpt-3.5-turbo'
);
usage
.
prompt_tokens
=
(
enc
.
encode
(
await
cleanString
(
query_prompt
)
)).
length
;
usage
.
completion_tokens
=
(
enc
.
encode
(
await
cleanString
(
input
)
)).
length
;
enc
=
encoding
ForM
odel
(
tiktokenModels
.
includes
(
currentModel
)
?
currentModel
:
'gpt-3.5-turbo'
);
usage
.
prompt_tokens
=
(
enc
.
encode
(
query_prompt
)).
length
;
usage
.
completion_tokens
=
(
enc
.
encode
(
input
)).
length
;
usage
.
total_tokens
=
usage
.
prompt_tokens
+
usage
.
completion_tokens
;
}
catch
(
e
)
{
console
.
log
(
'Error encoding prompt text'
,
e
);
...
...
Write
Preview
Styling with
Markdown
is supported
Attach a file
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to post a comment