// App.js
import './normal.css';
import './App.css';
import './color_theme_1.css';
import { useState, useEffect } from 'react';
import SideMenu from './SideMenu'
import ChatBox from './ChatBox'

function App() {

  useEffect(() => {
    getEngines();
  }, [])

  const [chatInput, setChatInput] = useState("");
  const [models, setModels] = useState([]);
  const [temperature, setTemperature] = useState(0.7);
  const [currentModel, setCurrentModel] = useState("text-davinci-003");
  const [chatLog, setChatLog] = useState([{
    user: "gpt",
    message: "Welcome to AI-PRO... How can I help you?"
  }]);

  // clear chats
  function clearChat(){
    setChatLog([]);
  }

  function getEngines(){
    fetch(process.env.REACT_APP_SERVER_URL + "/models")
    .then(res => res.json())
    .then(data => {
      console.log(data.models.data)
      // set models in order alpahbetically
      data.models.data.sort((a, b) => {
        if(a.id < b.id) { return -1; }
        if(a.id > b.id) { return 1; }
        return 0;
      })
      setModels(data.models.data)
    })
  }

  async function handleSubmit(e){
    e.preventDefault();
    let chatLogNew = [...chatLog, { user: "me", message: `${chatInput}`} ]
    setChatInput("");
    setChatLog(chatLogNew)
    // fetch response to the api combining the chat log array of messages and seinding it as a message to localhost:3000 as a post
    const messages = chatLogNew.map((message) => message.message).join("\n")

    const response = await fetch(process.env.REACT_APP_SERVER_URL + "/api", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Accept":"application/json"
      },
      body: JSON.stringify({
        message: messages,
        currentModel,
       })
      });
    const data = await response.json();

    const parsedData = data.message.trim();

    setChatLog([...chatLogNew, { user: "gpt", message: `${data.message}`} ])
    var scrollToTheBottomChatLog = document.getElementsByClassName("chat-log")[0];
    scrollToTheBottomChatLog.scrollTop = scrollToTheBottomChatLog.scrollHeight;


      // var oHttp = new XMLHttpRequest();
      // oHttp.open("POST", "https://api.openai.com/v1/completions");
      // oHttp.setRequestHeader("Accept", "application/json");
      // oHttp.setRequestHeader("Content-Type", "application/json");
      // oHttp.setRequestHeader("Authorization", "Bearer " + "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")

      // oHttp.onreadystatechange = function () {
      //   if (oHttp.readyState === 4) {
      //     var s = ''
      //     var oJson = {}
      //     if (s != "") s += "\n";
      //     try {
      //       oJson = JSON.parse(oHttp.responseText);
      //     } catch (ex) {
      //       s += "Error: " + ex.message
      //     }
      //     if (oJson.error && oJson.error.message) {
      //       s += "Error: " + oJson.error.message;
      //     } else if (oJson.choices && oJson.choices[0].text) {
      //       s = oJson.choices[0].text;
      //       var a = s.split("?\n");
      //       if (a.length == 2) {
      //         s = a[1];
      //       }
      //       // if (selLang.value != "en-US") {
      //       //   var a = s.split("?\n");
      //       //   if (a.length == 2) {
      //       //     s = a[1];
      //       //   }
      //       // }
      //       if (s == "") s = "No response";
      //       console.log('ssssssssssssssssssssss',s);
      //        var replaceBR= (s.replace(/(?:\r\n|\r|\n)/g, "<br>")).replace(/\r?\n|\r/, "");

      //       setChatLog([...chatLogNew, { user: "gpt", message: `${replaceBR}`} ]);
      //     }
      //   }
      // };

      // var sModel = currentModel;// "text-davinci-003";
      // var iMaxTokens = 100;
      // var sUserId = "1";
      // var dTemperature =temperature;

      // var data = {
      //   model: sModel,
      //   prompt: messages,
      //   max_tokens: iMaxTokens,
      //   //user: sUserId,
      //   temperature: dTemperature,
      //  // frequency_penalty: 0.0, //Number between -2.0 and 2.0  Positive value decrease the model's likelihood to repeat the same line verbatim.
      //   //presence_penalty: 0.0,  //Number between -2.0 and 2.0. Positive values increase the model's likelihood to talk about new topics.
      //   //stop: ["#", ";"] //Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
      // }
      var oHttp = new XMLHttpRequest();
      oHttp.open("POST", "/your-endpoint", true);
      oHttp.setRequestHeader("Content-Type", "application/json;charset=UTF-8");
      oHttp.send(JSON.stringify(data));

  }

  function handleTemp(temp) {
    if(temp > 1){
      setTemperature(1)
    } else if (temp < 0){
      setTemperature(0)
    } else {
      setTemperature(temp)
    }

  }

  return (
    <div className="App">
      <SideMenu
        currentModel={currentModel}
        setCurrentModel={setCurrentModel}
        models={models}
        setTemperature={handleTemp}
        temperature={temperature}
        clearChat={clearChat}
      />
      <ChatBox
        chatInput={chatInput}
        chatLog={chatLog}
        setChatInput={setChatInput}
        handleSubmit={handleSubmit} />
    </div>
  );
}


export default App;