4bcf56e7 by Marwin Cañeta

#31531 - Apply tokenization logic to other LLMs

1 parent 0fe0aa08
......@@ -6,6 +6,7 @@
"@testing-library/jest-dom": "^5.16.5",
"@testing-library/react": "^13.4.0",
"@testing-library/user-event": "^13.5.0",
"js-cookie": "^3.0.5",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-scripts": "5.0.1",
......
......@@ -4,11 +4,13 @@ import './color_theme_1.css';
import { useState, useEffect } from 'react';
import SideMenu from './SideMenu'
import ChatBox from './ChatBox'
import Cookies from 'js-cookie'
function App() {
useEffect(() => {
getEngines();
displayTogetherAiResponse()
}, [])
const [chatInput, setChatInput] = useState("");
......@@ -30,6 +32,19 @@ function App() {
const [chatLogOpenSource, setChatLogOpenSource] = useState([]);
// Reads the `P6XcW47o` feature-flag query parameter and, when it carries an
// explicit opt-in/opt-out value ('1' or '0'), persists it as a cookie that
// expires in 1 day so subsequent API calls can toggle the Together AI path.
// Any other value (or a missing parameter) leaves the cookie untouched.
const displayTogetherAiResponse = () => {
  const queryParams = new URLSearchParams(window.location.search);
  const cookieName = 'P6XcW47o';
  const enable = queryParams.get(cookieName);
  // Strict comparison; `get()` returns null when absent, so no separate
  // `has()` check is needed — null can never equal '1' or '0'.
  if (enable === '1' || enable === '0') {
    Cookies.set(cookieName, enable, { expires: 1 });
  }
};
function clearChat(){
setChatLog([]);
setChatLogTurbo(defaultChatLogTurbo);
......@@ -115,6 +130,8 @@ function App() {
let intervalId = startInterval();
try {
const togetherAiResponse = Cookies.get('P6XcW47o') === '1'
const response = await fetch(process.env.REACT_APP_SERVER_URL + "/api", {
method: "POST",
headers: {
......@@ -123,12 +140,19 @@ function App() {
body: JSON.stringify({
message: messages,
currentModel,
temperature
temperature,
P6XcW47o: togetherAiResponse
})
});
const data = await response.json();
const parsedData = data.message ? data.message.trim() : "";
if (togetherAiResponse) {
if (currentModel !== 'gpt-3.5-turbo') {
console.log(data)
}
}
if(data.status === 'invalid'){
if(data.limited) {
window.btutil_maxUsage();
......
......@@ -272,7 +272,7 @@ const get_endpoint_api_key = (currentModel) => {
return key
}
async function runOpensource(req, res) {
const { message, currentModel, temperature } = req.body;
const { message, currentModel, temperature, P6XcW47o: together_ai_response = null } = req.body;
var input = '';
const message_history = JSON.parse(message);
const query_prompt = message_history.length ? message_history[message_history.length - 1].content : "";
......@@ -281,6 +281,8 @@ async function runOpensource(req, res) {
if(!validate) return;
const { IS_FREE_USER, aiwp_logged_in, TRIED_USAGE} = validate;
let usage = {};
let result_response = {}
let together_ai = null
try {
let error_msg = "";
......@@ -315,6 +317,10 @@ async function runOpensource(req, res) {
}
usage = response.data.usage
if (together_ai_response) {
together_ai = response.data
}
} catch (e) {
let error_msg = e.response.data.error.message ? e.response.data.error.message : '';
if (error_msg.indexOf('maximum context length') >= 0) {
......@@ -333,7 +339,7 @@ async function runOpensource(req, res) {
await setChatUsage(usage_params);
}
res.json({
result_response = {
prompt: JSON.parse(message),
usage: usage,
message: anchorme({
......@@ -344,7 +350,13 @@ async function runOpensource(req, res) {
},
}
})
});
}
if (together_ai !== null) {
result_response['together_ai'] = together_ai
}
res.json(result_response);
return;
}
}
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!