// index.js (1.47 KB) — stray file-listing header kept as a comment so the file parses
const { Configuration, OpenAIApi } = require("openai");
const express = require('express')
const bodyParser = require('body-parser')
const cors = require('cors')
require('dotenv').config()
const rateLimit = require('express-rate-limit')

// Open AI Configuration
// Credentials are read from the environment (.env): OPENAI_API_ORG and
// OPENAI_API_KEY. The client is built once at startup and reused by all routes.
const openai = new OpenAIApi(
	new Configuration({
		organization: process.env.OPENAI_API_ORG,
		apiKey: process.env.OPENAI_API_KEY,
	})
);

// Per-IP throttle: each address may make at most MAX_REQUESTS_PER_WINDOW
// requests during a WINDOW_MS window before receiving the rejection message.
const WINDOW_MS = 60 * 1000; // 1 minute (refreshTime)
const MAX_REQUESTS_PER_WINDOW = 3000;
const rateLimiter = rateLimit({
	windowMs: WINDOW_MS,
	max: MAX_REQUESTS_PER_WINDOW,
	message: 'Sorry, too many requests. Please try again in a bit!',
});

// Express Configuration
const app = express();
const port = 3080;

// Middleware pipeline (order matters): parse JSON bodies, allow cross-origin
// requests, log each request in dev format, then apply the per-IP rate limit.
const morgan = require('morgan');
app.use(bodyParser.json(), cors(), morgan('dev'), rateLimiter);

// Routing

// Primary Open AI Route
// POST /api — forwards { message, currentModel, temperature } from the request
// body to the OpenAI completions endpoint and replies with the first
// completion's text as { message }.
app.post('/api', async (req, res) => {
	const { message, currentModel, temperature } = req.body;
	try {
		const response = await openai.createCompletion({
			model: `${currentModel}`, // e.g. "text-davinci-003"
			prompt: `${message}`,
			max_tokens: 2500,
			temperature,
		});
		res.json({
			message: response.data.choices[0].text,
		});
	} catch (err) {
		// Without this catch a failed API call leaves the handler's promise
		// rejected and the client request hanging — Express 4 does not route
		// errors thrown in async handlers to error middleware.
		console.error(err);
		res.status(500).json({
			message: 'Something went wrong while contacting OpenAI.',
		});
	}
});

// Get Models Route
// GET /models — returns the engine list for this API key as { models }.
app.get('/models', async (req, res) => {
	try {
		// NOTE(review): listEngines is deprecated in newer OpenAI SDKs in
		// favor of listModels — confirm before upgrading the dependency.
		const response = await openai.listEngines();
		res.json({
			models: response.data,
		});
	} catch (err) {
		// Same rationale as /api: an uncaught rejection here would hang the
		// client request instead of returning an error status.
		console.error(err);
		res.status(500).json({
			message: 'Unable to fetch the model list.',
		});
	}
});

// Start the server
// Binds the HTTP server to `port` and announces the address once listening.
const onListening = () => {
	console.log(`Example app listening at http://localhost:${port}`);
};
app.listen(port, onListening);