235e9839 by Administrator

Merge branch '30852_chatbot_chatgpt' into 'master'

30852 chatbot chatgpt

See merge request !100
2 parents 2530d769 cbee02a0
......@@ -71,14 +71,6 @@ function App() {
TPLogicRun();
}
// eslint-disable-next-line
btutil_getChatUsage();
// eslint-disable-next-line
let maxTokens = btutilCommon_getCookie("mucnxwlyxt");
if (maxTokens==='1'){
return;
}
const userInput = ['what', 'why', 'when', 'where' , 'which', 'did', 'do', 'how', 'can', 'are', 'who'];
const userInputRegex = new RegExp(`\\b(${userInput.join('|')})\\b`, 'gi');
const inputMatches = chatInput.match(userInputRegex);
......@@ -120,6 +112,7 @@ function App() {
}
let intervalId = startInterval();
try {
const response = await fetch(process.env.REACT_APP_SERVER_URL + "/api", {
method: "POST",
......@@ -133,7 +126,20 @@ function App() {
})
});
const data = await response.json();
const parsedData = data.message.trim();
const parsedData = data.message ? data.message.trim() : "";
if(data.status === 'invalid'){
if(data.limited) {
window.btutil_modalRegisterUpgrade();
return;
}
if(data && data.status === 'max-tokens') {
window.btutil_maxUsage();
return;
}
window.btutil_modalRegisterUpgrade(true);
return;
}
// "gpt-3.5-turbo"
let chatLogTurboNew = chatLogTurbo;
let chatLogOpenSourceNew = chatLogOpenSource;
......@@ -144,9 +150,7 @@ function App() {
});
userModifiedInput = "";
}
if(data.usage) {
window.btutil_setChatUsage('chatbot+', data.usage.prompt_tokens, data.usage.total_tokens);
}
chatLogTurboNew.push({ role: "user", content: userModifiedInput });
chatLogTurboNew.push({ role: "assistant", content: parsedData });
......@@ -187,6 +191,7 @@ function App() {
}
} catch (error) {
console.log(error)
const errorMsg = "We apologize for any inconvenience caused due to the delay in the response time. Please try again.";
setChatLog([...chatLogNew, { user: "gpt", message: `<div class="errormsg"><span>i</span><div class="msg">${errorMsg}</div></div>`} ])
}
......
const { Configuration, OpenAIApi } = require("openai");
const express = require('express')
const bodyParser = require('body-parser')
const cookieParser = require("cookie-parser")
const cors = require('cors')
require('dotenv').config()
const rateLimit = require('express-rate-limit')
const fetch = require('node-fetch');
const anchorme = require("anchorme").default;
const axios = require('axios');
const { encodingForModel } = require('js-tiktoken');
......@@ -68,10 +70,13 @@ app.use(bodyParser.json())
app.use(cors())
app.use(require('morgan')('dev'))
app.use(rateLimiter)
app.use(cookieParser());
const max_tokens = process.env.MAX_TOKENS_chatbot_plus ? parseInt(process.env.MAX_TOKENS_chatbot_plus) : 512;
// Routing
const hostapi = process.env.REACT_APP_HOST_API || "https://api.ai-pro.org";
const user_secret_id = process.env.USER_SECRET_ID || "aiwp_logged_in";
const aiwp_app_id = "chatbot+";
// Primary Open AI Route
app.post('/api', async (req, res) => {
if(!req.get('origin') || (!req.get('origin').includes(req.get('host')))) {
......@@ -83,14 +88,20 @@ app.post('/api', async (req, res) => {
if (currentModel == "gpt-3.5-turbo" || currentModel == "gpt-3.5-turbo-0301") {
runGPTTurbo(req, res);
return;
}
if (currentModel == "openchat_3.5" || currentModel == "zephyr-7B-beta") {
runOpensource(req, res);
return;
}
const validate = await validation(aiwp_app_id, req, res);
if(!validate) return;
const { IS_FREE_USER, aiwp_logged_in, TRIED_USAGE} = validate;
let greetingPrompt = 'Hello, how can I assist you?'
const greetings = ['hi', 'hello', 'hey']
if (greetings.some((greeting) => message.toLowerCase().includes(greeting))) {
......@@ -136,10 +147,15 @@ app.post('/api', async (req, res) => {
usage.prompt_tokens = (enc.encode(query_prompt)).length;
usage.completion_tokens = (enc.encode(input)).length;
usage.total_tokens = usage.prompt_tokens + usage.completion_tokens;
} catch (e) {
console.log('Error encoding prompt text', e);
}
if(IS_FREE_USER) {
await setUsage({
aiwp_logged_in, app: 'chatbot+', prompt_token: usage.prompt_tokens, total_token: usage.total_tokens, aiwp_app_id, usage_tries: TRIED_USAGE
});
}
res.json({
usage: usage,
message: anchorme({
......@@ -174,6 +190,10 @@ async function runGPTTurbo(req, res) {
const moderation = await axios.post("https://api.openai.com/v1/moderations", {
input: query_prompt
}, { headers: { 'content-type': 'application/json', 'Authorization': `Bearer ${process.env.OPENAI_API_KEY}` } });
const validate = await validation(aiwp_app_id, req, res);
if(!validate) return;
const { IS_FREE_USER, aiwp_logged_in, TRIED_USAGE} = validate;
if (moderation.data.results[0].flagged) {
res.json({
......@@ -210,7 +230,11 @@ async function runGPTTurbo(req, res) {
} catch (e) {
console.log('Error encoding prompt text', e);
}
if(IS_FREE_USER) {
await setUsage({
aiwp_logged_in, app: 'chatbot+', prompt_token: usage.prompt_tokens, total_token: usage.total_tokens, aiwp_app_id, usage_tries: TRIED_USAGE
});
}
res.json({
prompt: JSON.parse(message),
usage: usage,
......@@ -244,7 +268,11 @@ async function runOpensource(req, res) {
var input = '';
const message_history = JSON.parse(message);
const query_prompt = message_history.length ? message_history[message_history.length - 1].content : "";
const validate = await validation(aiwp_app_id, req, res);
if(!validate) return;
const { IS_FREE_USER, aiwp_logged_in, TRIED_USAGE} = validate;
try {
let error_msg = "";
const endpoint_api_url = get_endpoint_api_url(currentModel);
......@@ -303,6 +331,114 @@ async function runOpensource(req, res) {
}
}
/**
 * Verify a logged-in user's session against the central auth service.
 *
 * @param {object} params - Auth payload (e.g. { aiwp_logged_in, user_event_data, user_event }).
 * @returns {Promise<object>} Parsed JSON response from `/e/authenticate/v2`.
 */
async function authenticate(params) {
  const response = await fetch(`${hostapi}/e/authenticate/v2`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(params),
    referrer: "https://chatgpt.ai-pro.org"
  });
  return response.json();
}
/**
 * Fetch the anonymous (free-tier) usage counter for an app.
 *
 * @param {object} params - Payload identifying the app (e.g. { aiwp_app_id }).
 * @returns {Promise<object>} Parsed JSON response from `/e/get-usage`.
 */
async function getLimitedUsage(params) {
  const response = await fetch(`${hostapi}/e/get-usage`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(params),
    referrer: "https://chatgpt.ai-pro.org"
  });
  return response.json();
}
/**
 * Fetch the per-user chat usage record for a logged-in user.
 *
 * @param {object} params - Payload identifying the user/app (e.g. { aiwp_logged_in, app }).
 * @returns {Promise<object>} Parsed JSON response from `/e/get-chat-usage`.
 */
async function getUsage(params) {
  const response = await fetch(`${hostapi}/e/get-chat-usage`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(params),
    referrer: "https://chatgpt.ai-pro.org"
  });
  return response.json();
}
/**
 * Record one unit of usage against both bookkeeping endpoints
 * (`/e/set-usage` and `/e/set-chat-usage`).
 *
 * Fix: the original fired both fetches without `await`, so callers doing
 * `await setUsage(...)` resumed before the writes were even dispatched, and
 * any network rejection became an unhandled promise rejection. Both requests
 * are now awaited in parallel; failures are logged but not propagated,
 * preserving the original best-effort contract (callers never catch here).
 *
 * @param {object} params - Usage payload forwarded verbatim to both endpoints
 *   (e.g. { aiwp_logged_in, app, prompt_token, total_token, aiwp_app_id, usage_tries }).
 * @returns {Promise<void>} Resolves once both endpoints have settled.
 */
async function setUsage(params) {
  // Both endpoints receive the identical payload; share one request builder.
  const post = (path) =>
    fetch(`${hostapi}${path}`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(params),
      referrer: "https://chatgpt.ai-pro.org"
    });
  const results = await Promise.allSettled([
    post("/e/set-usage"),
    post("/e/set-chat-usage")
  ]);
  for (const r of results) {
    if (r.status === "rejected") {
      // Best-effort: surface the failure in logs without failing the request.
      console.log("setUsage failed", r.reason);
    }
  }
}
/**
 * Gate an incoming chat request: authenticates the user when a login cookie
 * is present, checks subscription and usage status, and enforces the
 * free-tier try limit. On any rejection it writes an "invalid" JSON response
 * itself and returns false — the caller must then return immediately without
 * responding again. On success it returns the bookkeeping triple
 * { IS_FREE_USER, aiwp_logged_in, TRIED_USAGE }.
 *
 * @param {string} aiwp_app_id - App identifier sent to the limited-usage endpoint.
 * @param {object} req - Express request; cookies are read.
 * @param {object} res - Express response; written to only on rejection.
 * @returns {Promise<false|{IS_FREE_USER: boolean, aiwp_logged_in: string, TRIED_USAGE: number}>}
 */
async function validation (aiwp_app_id, req, res) {
  // Login token arrives URL-encoded in the cookie named by user_secret_id.
  const aiwp_logged_in = req.cookies[user_secret_id] ? decodeURIComponent(req.cookies[user_secret_id]) : "";
  // Free-tier try limit from an obfuscated cookie, digits only; defaults to 3.
  const limit = req.cookies["WcvYPABR"] ? parseInt(req.cookies["WcvYPABR"].replace(/\D/g, '')) : 3;
  let IS_FREE_USER = false;
  let TRIED_USAGE = 0;
  if (aiwp_logged_in) {
    let auth = await authenticate({ aiwp_logged_in, user_event_data: {}, user_event: 'endpoint' });
    if (!auth.success) {
      // Failed auth downgrades to free-tier unless the backend restricts outright.
      IS_FREE_USER = true;
      if (auth.is_restrict) {
        res.json({ status: "invalid", restrict: true, redirect: auth.redirect });
        res.end();
        return false;
      } else if (typeof auth.has_pro_access === "undefined" && !auth.has_pro_access) {
        // NOTE(review): `!auth.has_pro_access` is redundant — when
        // has_pro_access is undefined the negation is always true, so this
        // branch fires ONLY when the field is absent. If the intent was
        // "restrict anyone without pro access", this should probably be `||`
        // rather than `&&`; confirm against the auth API contract.
        res.json({ status: "invalid", restrict: true });
        res.end();
        return false;
      }
    }
    // Reject missing subscriptions, and v2 "basic" tier subscriptions.
    if (!auth.subscription_type || (auth.auth_version === 'v2' && auth.subscription_type.toLowerCase() === 'basic')) {
      res.json({ status: "invalid" });
      // NOTE(review): res.status() after res.json() has no effect — the
      // response has already been sent with the default 200.
      res.status(200);
      return false;
    }
    let data = await getUsage({
      aiwp_logged_in, app: 'chatbot+'
    });
    if (!(data.success === 1 && data.status === 'valid')) {
      res.json({ status: "invalid", data });
      // NOTE(review): no-op, see above.
      res.status(200);
      return false;
    }
  } else {
    // Anonymous visitor: free user, throttled via the server-side usage counter.
    IS_FREE_USER = true;
    let data = await getLimitedUsage({
      aiwp_app_id
    });
    if (data.usage !== null) {
      // NOTE(review): parseInt without a radix; presumably usage is decimal.
      TRIED_USAGE = parseInt(data.usage);
    }
  }
  // Free users at or past the limit get the "upgrade" rejection.
  if (IS_FREE_USER && TRIED_USAGE >= limit) {
    res.json({ status: "invalid", limited: true });
    res.end();
    return false;
  }
  // Count this request against the free-tier allowance; the incremented value
  // is persisted later by the caller via setUsage.
  if (IS_FREE_USER) TRIED_USAGE++;
  return { IS_FREE_USER, aiwp_logged_in, TRIED_USAGE };
};
// Get Models Route
......
......@@ -10,17 +10,18 @@
"author": "",
"license": "ISC",
"dependencies": {
"js-tiktoken": "1.0.7",
"anchorme": "^2.1.2",
"axios": "^1.5.1",
"body-parser": "^1.20.1",
"cookie": "0.5.0",
"cookie-parser": "1.4.6",
"cookie-parser": "^1.4.6",
"cors": "^2.8.5",
"dotenv": "^16.0.3",
"express": "^4.18.2",
"express-rate-limit": "^6.7.0",
"js-tiktoken": "1.0.7",
"morgan": "^1.10.0",
"node-fetch": "^2.7.0",
"openai": "^3.2.0"
}
}
......
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!