#31531 - Apply tokenization logic on other LLMs
Showing 1 changed file with 3 additions and 11 deletions
| ... | @@ -280,6 +280,7 @@ async function runOpensource(req, res) { | ... | @@ -280,6 +280,7 @@ async function runOpensource(req, res) { |
| 280 | const validate = await validation(aiwp_app_id, req, res); | 280 | const validate = await validation(aiwp_app_id, req, res); |
| 281 | if(!validate) return; | 281 | if(!validate) return; |
| 282 | const { IS_FREE_USER, aiwp_logged_in, TRIED_USAGE} = validate; | 282 | const { IS_FREE_USER, aiwp_logged_in, TRIED_USAGE} = validate; |
| 283 | let usage = {}; | ||
| 283 | 284 | ||
| 284 | try { | 285 | try { |
| 285 | let error_msg = ""; | 286 | let error_msg = ""; |
| ... | @@ -304,6 +305,7 @@ async function runOpensource(req, res) { | ... | @@ -304,6 +305,7 @@ async function runOpensource(req, res) { |
| 304 | input = response.data.choices[0].message.content | 305 | input = response.data.choices[0].message.content |
| 305 | } | 306 | } |
| 306 | 307 | ||
| 308 | usage = response.data.usage | ||
| 307 | } catch (e) { | 309 | } catch (e) { |
| 308 | let error_msg = e.response.data.error.message ? e.response.data.error.message : ''; | 310 | let error_msg = e.response.data.error.message ? e.response.data.error.message : ''; |
| 309 | if (error_msg.indexOf('maximum context length') >= 0) { | 311 | if (error_msg.indexOf('maximum context length') >= 0) { |
| ... | @@ -312,20 +314,10 @@ async function runOpensource(req, res) { | ... | @@ -312,20 +314,10 @@ async function runOpensource(req, res) { |
| 312 | // console.log(e.response); | 314 | // console.log(e.response); |
| 313 | } | 315 | } |
| 314 | } finally { | 316 | } finally { |
| 315 | |||
| 316 | let usage = {}; | ||
| 317 | let enc = null; | ||
| 318 | try { | ||
| 319 | enc = encodingForModel('gpt-3.5-turbo'); | ||
| 320 | usage.prompt_tokens = (enc.encode(query_prompt)).length; | ||
| 321 | usage.completion_tokens = (enc.encode(input)).length; | ||
| 322 | usage.total_tokens = usage.prompt_tokens + usage.completion_tokens; | ||
| 323 | } catch (e) { | ||
| 324 | console.log('Error encoding prompt text', e); | ||
| 325 | } | ||
| 326 | let usage_params = { | 317 | let usage_params = { |
| 327 | "aiwp_logged_in": aiwp_logged_in, "app": "chatbot+", "prompt_token": usage.prompt_tokens, "total_token": usage.total_tokens, "aiwp_app_id":aiwp_app_id , "usage_tries": TRIED_USAGE | 318 | "aiwp_logged_in": aiwp_logged_in, "app": "chatbot+", "prompt_token": usage.prompt_tokens, "total_token": usage.total_tokens, "aiwp_app_id":aiwp_app_id , "usage_tries": TRIED_USAGE |
| 328 | }; | 319 | }; |
| 320 | |||
| 329 | if(IS_FREE_USER) { | 321 | if(IS_FREE_USER) { |
| 330 | await setUsage(usage_params); | 322 | await setUsage(usage_params); |
| 331 | } else { | 323 | } else { | ... | ... |
Please register or sign in to post a comment