kopia lustrzana https://github.com/transitive-bullshit/chatgpt-api
feat: transition from chatgpt to agentic
rodzic
ef7db54c8d
commit
89c86af7ef
12
.env.example
12
.env.example
|
@ -1,12 +0,0 @@
|
|||
# ------------------------------------------------------------------------------
|
||||
# This is an example .env file.
|
||||
#
|
||||
# All of these environment vars must be defined either in your environment or in
|
||||
# a local .env file in order to run the demo for this project.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# OpenAI
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
OPENAI_API_KEY=
|
|
@ -1,34 +0,0 @@
|
|||
name: Bug Report
|
||||
description: Create a bug report
|
||||
labels: ['template: bug']
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this bug report!
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Verify latest release
|
||||
description: '`chatgpt@latest` is the latest version of `chatgpt`. Some issues may already be fixed in the latest version, so please verify that your issue reproduces before opening a new issue.'
|
||||
options:
|
||||
- label: I verified that the issue exists in the latest `chatgpt` release
|
||||
required: true
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Verify webapp is working
|
||||
description: 'Verify that the [ChatGPT webapp](https://chat.openai.com/) is working for you locally using the same browser and account.'
|
||||
options:
|
||||
- label: I verify that the ChatGPT webapp is working properly for this account.
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Environment details
|
||||
description: Please enter Node.js version, browser version, and OS version.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe the Bug
|
||||
description: A clear and concise description of what the bug is and as much detail as possible on how to reproduce it.
|
||||
validations:
|
||||
required: true
|
|
@ -1,14 +0,0 @@
|
|||
name: Feature Request
|
||||
description: Create a feature rqeuest
|
||||
labels: ['template: enhancement']
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this feature request.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe the feature
|
||||
description: What would you like to see added / supported?
|
||||
validations:
|
||||
required: true
|
|
@ -1,5 +0,0 @@
|
|||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Join the Discord
|
||||
url: https://discord.gg/HW3EFG6p
|
||||
about: Ask questions and discuss with other community members
|
|
@ -1 +0,0 @@
|
|||
github: [transitive-bullshit]
|
|
@ -1,53 +0,0 @@
|
|||
name: CI
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Test Node.js ${{ matrix.node-version }}
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
node-version:
|
||||
- 19
|
||||
- 18
|
||||
- 16
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install Node.js
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ matrix.node-version }}
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v2
|
||||
id: pnpm-install
|
||||
with:
|
||||
version: 7
|
||||
run_install: false
|
||||
|
||||
- name: Get pnpm store directory
|
||||
id: pnpm-cache
|
||||
shell: bash
|
||||
run: |
|
||||
echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT
|
||||
|
||||
- uses: actions/cache@v3
|
||||
name: Setup pnpm cache
|
||||
with:
|
||||
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Run test
|
||||
env:
|
||||
SESSION_TOKEN: 'fake-session-token-for-CI'
|
||||
run: pnpm run test
|
|
@ -1,4 +0,0 @@
|
|||
#!/usr/bin/env sh
|
||||
. "$(dirname -- "$0")/_/husky.sh"
|
||||
|
||||
npm run pre-commit
|
1
.npmrc
1
.npmrc
|
@ -1 +0,0 @@
|
|||
enable-pre-post-scripts=true
|
|
@ -1,7 +0,0 @@
|
|||
.snapshots/
|
||||
build/
|
||||
dist/
|
||||
node_modules/
|
||||
.next/
|
||||
.vercel/
|
||||
third-party/
|
|
@ -1,16 +0,0 @@
|
|||
module.exports = {
|
||||
plugins: [require('@trivago/prettier-plugin-sort-imports')],
|
||||
singleQuote: true,
|
||||
jsxSingleQuote: true,
|
||||
semi: false,
|
||||
useTabs: false,
|
||||
tabWidth: 2,
|
||||
bracketSpacing: true,
|
||||
bracketSameLine: false,
|
||||
arrowParens: 'always',
|
||||
trailingComma: 'none',
|
||||
importOrder: ['^node:.*', '<THIRD_PARTY_MODULES>', '^[./]'],
|
||||
importOrderSeparation: true,
|
||||
importOrderSortSpecifiers: true,
|
||||
importOrderGroupNamespaceSpecifiers: true
|
||||
}
|
149
bin/cli.js
149
bin/cli.js
|
@ -1,149 +0,0 @@
|
|||
#!/usr/bin/env node
|
||||
import crypto from 'node:crypto'
|
||||
|
||||
import * as url from 'url'
|
||||
import { cac } from 'cac'
|
||||
import Conf from 'conf'
|
||||
import { readPackageUp } from 'read-pkg-up'
|
||||
|
||||
import { ChatGPTAPI } from '../build/index.js'
|
||||
|
||||
async function main() {
|
||||
const dirname = url.fileURLToPath(new URL('.', import.meta.url))
|
||||
const pkg = await readPackageUp({ cwd: dirname })
|
||||
const version = (pkg && pkg.packageJson && pkg.packageJson.version) || '4'
|
||||
const config = new Conf({ projectName: 'chatgpt' })
|
||||
|
||||
const cli = cac('chatgpt')
|
||||
cli
|
||||
.command('<prompt>', 'Ask ChatGPT a question')
|
||||
.option('-c, --continue', 'Continue last conversation', {
|
||||
default: false
|
||||
})
|
||||
.option('-d, --debug', 'Enables debug logging', {
|
||||
default: false
|
||||
})
|
||||
.option('-s, --stream', 'Streams the response', {
|
||||
default: true
|
||||
})
|
||||
.option('-s, --store', 'Enables the local message cache', {
|
||||
default: true
|
||||
})
|
||||
.option('-t, --timeout <timeout>', 'Timeout in milliseconds')
|
||||
.option('-k, --apiKey <apiKey>', 'OpenAI API key')
|
||||
.option('-o, --apiOrg <apiOrg>', 'OpenAI API key')
|
||||
.option('-m, --model <model>', 'Model (gpt-3.5-turbo, gpt-4)', {
|
||||
default: 'gpt-3.5-turbo'
|
||||
})
|
||||
.option('--host <host>', 'API host base')
|
||||
.option(
|
||||
'-n, --conversationName <conversationName>',
|
||||
'Unique name for the conversation'
|
||||
)
|
||||
.action(async (prompt, options) => {
|
||||
const apiOrg = options.apiOrg || process.env.OPENAI_API_ORG
|
||||
const apiKey = options.apiKey || process.env.OPENAI_API_KEY
|
||||
const apiBaseUrl = options.host || process.env.OPENAI_API_BASE_URL
|
||||
if (!apiKey) {
|
||||
console.error('error: either set OPENAI_API_KEY or use --apiKey\n')
|
||||
cli.outputHelp()
|
||||
process.exit(1)
|
||||
}
|
||||
|
||||
const apiKeyHash = hash(apiKey)
|
||||
const conversationName = options.conversationName || 'default'
|
||||
const conversationKey = `${conversationName}:${apiKeyHash}`
|
||||
const conversation =
|
||||
options.continue && options.store
|
||||
? config.get(conversationKey, {}) || {}
|
||||
: {}
|
||||
const model = options.model
|
||||
let conversationId = undefined
|
||||
let parentMessageId = undefined
|
||||
|
||||
if (conversation.lastMessageId) {
|
||||
const lastMessage = conversation[conversation.lastMessageId]
|
||||
if (lastMessage) {
|
||||
conversationId = lastMessage.conversationId
|
||||
parentMessageId = lastMessage.id
|
||||
}
|
||||
}
|
||||
|
||||
if (options.debug) {
|
||||
console.log('using config', config.path)
|
||||
}
|
||||
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey,
|
||||
apiOrg,
|
||||
apiBaseUrl,
|
||||
debug: options.debug,
|
||||
completionParams: {
|
||||
model
|
||||
},
|
||||
getMessageById: async (id) => {
|
||||
if (options.store) {
|
||||
return conversation[id]
|
||||
} else {
|
||||
return null
|
||||
}
|
||||
},
|
||||
upsertMessage: async (message) => {
|
||||
if (options.store) {
|
||||
conversation[message.id] = message
|
||||
conversation.lastMessageId = message.id
|
||||
config.set(conversationKey, conversation)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
const res = await api.sendMessage(prompt, {
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
timeoutMs: options.timeout || undefined,
|
||||
onProgress: options.stream
|
||||
? (progress) => {
|
||||
if (progress.delta) {
|
||||
process.stdout.write(progress.delta)
|
||||
}
|
||||
}
|
||||
: undefined
|
||||
})
|
||||
|
||||
if (options.stream) {
|
||||
process.stdout.write('\n')
|
||||
} else {
|
||||
console.log(res.text)
|
||||
}
|
||||
})
|
||||
|
||||
cli.command('rm-cache', 'Clears the local message cache').action(() => {
|
||||
config.clear()
|
||||
console.log('cleared cache', config.path)
|
||||
})
|
||||
|
||||
cli.command('ls-cache', 'Prints the local message cache path').action(() => {
|
||||
console.log(config.path)
|
||||
})
|
||||
|
||||
cli.help()
|
||||
cli.version(version)
|
||||
|
||||
try {
|
||||
cli.parse()
|
||||
} catch (err) {
|
||||
console.error(`error: ${err.message}\n`)
|
||||
cli.outputHelp()
|
||||
process.exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
function hash(d) {
|
||||
const buffer = Buffer.isBuffer(d) ? d : Buffer.from(d.toString())
|
||||
return crypto.createHash('sha256').update(buffer).digest('hex')
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
})
|
|
@ -1,69 +0,0 @@
|
|||
import dotenv from 'dotenv-safe'
|
||||
import { oraPromise } from 'ora'
|
||||
|
||||
import { ChatGPTAPI } from '../src'
|
||||
|
||||
dotenv.config()
|
||||
|
||||
/**
|
||||
* Demo CLI for testing conversation support.
|
||||
*
|
||||
* ```
|
||||
* npx tsx demos/demo-conversation.ts
|
||||
* ```
|
||||
*/
|
||||
async function main() {
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
debug: false
|
||||
})
|
||||
|
||||
const prompt = 'Write a poem about cats.'
|
||||
|
||||
let res = await oraPromise(api.sendMessage(prompt), {
|
||||
text: prompt
|
||||
})
|
||||
|
||||
console.log('\n' + res.text + '\n')
|
||||
|
||||
const prompt2 = 'Can you make it cuter and shorter?'
|
||||
|
||||
res = await oraPromise(
|
||||
api.sendMessage(prompt2, {
|
||||
parentMessageId: res.id
|
||||
}),
|
||||
{
|
||||
text: prompt2
|
||||
}
|
||||
)
|
||||
console.log('\n' + res.text + '\n')
|
||||
|
||||
const prompt3 = 'Now write it in French.'
|
||||
|
||||
res = await oraPromise(
|
||||
api.sendMessage(prompt3, {
|
||||
parentMessageId: res.id
|
||||
}),
|
||||
{
|
||||
text: prompt3
|
||||
}
|
||||
)
|
||||
console.log('\n' + res.text + '\n')
|
||||
|
||||
const prompt4 = 'What were we talking about again?'
|
||||
|
||||
res = await oraPromise(
|
||||
api.sendMessage(prompt4, {
|
||||
parentMessageId: res.id
|
||||
}),
|
||||
{
|
||||
text: prompt4
|
||||
}
|
||||
)
|
||||
console.log('\n' + res.text + '\n')
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
})
|
|
@ -1,35 +0,0 @@
|
|||
import dotenv from 'dotenv-safe'
|
||||
import { oraPromise } from 'ora'
|
||||
|
||||
import { ChatGPTAPI } from '../src'
|
||||
|
||||
dotenv.config()
|
||||
|
||||
/**
|
||||
* Demo CLI for testing the GPT-4 model.
|
||||
*
|
||||
* ```
|
||||
* npx tsx demos/demo-gpt-4.ts
|
||||
* ```
|
||||
*/
|
||||
async function main() {
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
debug: true,
|
||||
completionParams: {
|
||||
model: 'gpt-4'
|
||||
}
|
||||
})
|
||||
|
||||
const prompt = 'When should you use Python vs TypeScript?'
|
||||
|
||||
const res = await oraPromise(api.sendMessage(prompt), {
|
||||
text: prompt
|
||||
})
|
||||
console.log(res.text)
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
})
|
|
@ -1,32 +0,0 @@
|
|||
import dotenv from 'dotenv-safe'
|
||||
|
||||
import { ChatGPTAPI } from '../src'
|
||||
|
||||
dotenv.config()
|
||||
|
||||
/**
|
||||
* Demo CLI for testing the `onProgress` streaming support.
|
||||
*
|
||||
* ```
|
||||
* npx tsx demos/demo-on-progress.ts
|
||||
* ```
|
||||
*/
|
||||
async function main() {
|
||||
const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })
|
||||
|
||||
const prompt =
|
||||
'Write a python version of bubble sort. Do not include example usage.'
|
||||
|
||||
console.log(prompt)
|
||||
const res = await api.sendMessage(prompt, {
|
||||
onProgress: (partialResponse) => {
|
||||
console.log(partialResponse.text)
|
||||
}
|
||||
})
|
||||
console.log(res.text)
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
})
|
|
@ -1,71 +0,0 @@
|
|||
import KeyvRedis from '@keyv/redis'
|
||||
import dotenv from 'dotenv-safe'
|
||||
import Keyv from 'keyv'
|
||||
import { oraPromise } from 'ora'
|
||||
|
||||
import { ChatGPTAPI, type ChatMessage } from '../src'
|
||||
|
||||
dotenv.config()
|
||||
|
||||
/**
|
||||
* Demo CLI for testing message persistence with redis.
|
||||
*
|
||||
* ```
|
||||
* npx tsx demos/demo-persistence.ts
|
||||
* ```
|
||||
*/
|
||||
async function main() {
|
||||
const redisUrl = process.env.REDIS_URL || 'redis://localhost:6379'
|
||||
const store = new KeyvRedis(redisUrl)
|
||||
const messageStore = new Keyv({ store, namespace: 'chatgpt-demo' })
|
||||
|
||||
let res: ChatMessage
|
||||
|
||||
{
|
||||
// create an initial conversation in one client
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
messageStore
|
||||
})
|
||||
|
||||
const prompt = 'What are the top 5 anime of all time?'
|
||||
|
||||
res = await oraPromise(api.sendMessage(prompt), {
|
||||
text: prompt
|
||||
})
|
||||
console.log('\n' + res.text + '\n')
|
||||
}
|
||||
|
||||
{
|
||||
// follow up with a second client using the same underlying redis store
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
messageStore
|
||||
})
|
||||
|
||||
const prompt = 'Can you give 5 more?'
|
||||
|
||||
res = await oraPromise(
|
||||
api.sendMessage(prompt, {
|
||||
parentMessageId: res.id
|
||||
}),
|
||||
{
|
||||
text: prompt
|
||||
}
|
||||
)
|
||||
console.log('\n' + res.text + '\n')
|
||||
}
|
||||
|
||||
// wait for redis to finish and then disconnect
|
||||
await new Promise<void>((resolve) => {
|
||||
setTimeout(() => {
|
||||
messageStore.disconnect()
|
||||
resolve()
|
||||
}, 1000)
|
||||
})
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
})
|
|
@ -1,79 +0,0 @@
|
|||
import dotenv from 'dotenv-safe'
|
||||
import { oraPromise } from 'ora'
|
||||
|
||||
import { ChatGPTUnofficialProxyAPI } from '../src'
|
||||
|
||||
dotenv.config()
|
||||
|
||||
/**
|
||||
* Demo for testing conversation support using a reverse proxy which provides
|
||||
* access to the unofficial ChatGPT API.
|
||||
*
|
||||
* ```
|
||||
* npx tsx demos/demo-reverse-proxy.ts
|
||||
* ```
|
||||
*/
|
||||
async function main() {
|
||||
// WARNING: this method will expose your access token to a third-party. Please be
|
||||
// aware of the risks before using this method.
|
||||
const api = new ChatGPTUnofficialProxyAPI({
|
||||
// optionally override the default reverse proxy URL (or use one of your own...)
|
||||
// apiReverseProxyUrl: 'https://chat.duti.tech/api/conversation',
|
||||
// apiReverseProxyUrl: 'https://gpt.pawan.krd/backend-api/conversation',
|
||||
|
||||
accessToken: process.env.OPENAI_ACCESS_TOKEN,
|
||||
debug: false
|
||||
})
|
||||
|
||||
const prompt = 'Write a poem about cats.'
|
||||
|
||||
let res = await oraPromise(api.sendMessage(prompt), {
|
||||
text: prompt
|
||||
})
|
||||
|
||||
console.log('\n' + res.text + '\n')
|
||||
|
||||
const prompt2 = 'Can you make it cuter and shorter?'
|
||||
|
||||
res = await oraPromise(
|
||||
api.sendMessage(prompt2, {
|
||||
conversationId: res.conversationId,
|
||||
parentMessageId: res.id
|
||||
}),
|
||||
{
|
||||
text: prompt2
|
||||
}
|
||||
)
|
||||
console.log('\n' + res.text + '\n')
|
||||
|
||||
const prompt3 = 'Now write it in French.'
|
||||
|
||||
res = await oraPromise(
|
||||
api.sendMessage(prompt3, {
|
||||
conversationId: res.conversationId,
|
||||
parentMessageId: res.id
|
||||
}),
|
||||
{
|
||||
text: prompt3
|
||||
}
|
||||
)
|
||||
console.log('\n' + res.text + '\n')
|
||||
|
||||
const prompt4 = 'What were we talking about again?'
|
||||
|
||||
res = await oraPromise(
|
||||
api.sendMessage(prompt4, {
|
||||
conversationId: res.conversationId,
|
||||
parentMessageId: res.id
|
||||
}),
|
||||
{
|
||||
text: prompt4
|
||||
}
|
||||
)
|
||||
console.log('\n' + res.text + '\n')
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
})
|
|
@ -1,33 +0,0 @@
|
|||
import dotenv from 'dotenv-safe'
|
||||
import { oraPromise } from 'ora'
|
||||
|
||||
import { ChatGPTAPI } from '../src'
|
||||
|
||||
dotenv.config()
|
||||
|
||||
/**
|
||||
* Demo CLI for testing basic functionality.
|
||||
*
|
||||
* ```
|
||||
* npx tsx demos/demo.ts
|
||||
* ```
|
||||
*/
|
||||
async function main() {
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
debug: false
|
||||
})
|
||||
|
||||
const prompt =
|
||||
'Write a python version of bubble sort. Do not include example usage.'
|
||||
|
||||
const res = await oraPromise(api.sendMessage(prompt), {
|
||||
text: prompt
|
||||
})
|
||||
console.log(res.text)
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err)
|
||||
process.exit(1)
|
||||
})
|
|
@ -1 +0,0 @@
|
|||
TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false.
|
|
@ -1,128 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / ChatGPTAPI
|
||||
|
||||
# Class: ChatGPTAPI
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Constructors
|
||||
|
||||
- [constructor](ChatGPTAPI.md#constructor)
|
||||
|
||||
### Accessors
|
||||
|
||||
- [apiKey](ChatGPTAPI.md#apikey)
|
||||
- [apiOrg](ChatGPTAPI.md#apiorg)
|
||||
|
||||
### Methods
|
||||
|
||||
- [sendMessage](ChatGPTAPI.md#sendmessage)
|
||||
|
||||
## Constructors
|
||||
|
||||
### constructor
|
||||
|
||||
• **new ChatGPTAPI**(`opts`)
|
||||
|
||||
Creates a new client wrapper around OpenAI's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `opts` | [`ChatGPTAPIOptions`](../modules.md#chatgptapioptions) |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-api.ts:51](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-api.ts#L51)
|
||||
|
||||
## Accessors
|
||||
|
||||
### apiKey
|
||||
|
||||
• `get` **apiKey**(): `string`
|
||||
|
||||
#### Returns
|
||||
|
||||
`string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-api.ts:345](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-api.ts#L345)
|
||||
|
||||
• `set` **apiKey**(`apiKey`): `void`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `apiKey` | `string` |
|
||||
|
||||
#### Returns
|
||||
|
||||
`void`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-api.ts:349](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-api.ts#L349)
|
||||
|
||||
___
|
||||
|
||||
### apiOrg
|
||||
|
||||
• `get` **apiOrg**(): `string`
|
||||
|
||||
#### Returns
|
||||
|
||||
`string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-api.ts:353](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-api.ts#L353)
|
||||
|
||||
• `set` **apiOrg**(`apiOrg`): `void`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `apiOrg` | `string` |
|
||||
|
||||
#### Returns
|
||||
|
||||
`void`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-api.ts:357](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-api.ts#L357)
|
||||
|
||||
## Methods
|
||||
|
||||
### sendMessage
|
||||
|
||||
▸ **sendMessage**(`text`, `opts?`): `Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)\>
|
||||
|
||||
Sends a message to the OpenAI chat completions endpoint, waits for the response
|
||||
to resolve, and returns the response.
|
||||
|
||||
If you want your response to have historical context, you must provide a valid `parentMessageId`.
|
||||
|
||||
If you want to receive a stream of partial responses, use `opts.onProgress`.
|
||||
|
||||
Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `text` | `string` |
|
||||
| `opts` | [`SendMessageOptions`](../modules.md#sendmessageoptions) |
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)\>
|
||||
|
||||
The response from ChatGPT
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-api.ts:137](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-api.ts#L137)
|
|
@ -1,99 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / ChatGPTError
|
||||
|
||||
# Class: ChatGPTError
|
||||
|
||||
## Hierarchy
|
||||
|
||||
- `Error`
|
||||
|
||||
↳ **`ChatGPTError`**
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Constructors
|
||||
|
||||
- [constructor](ChatGPTError.md#constructor)
|
||||
|
||||
### Properties
|
||||
|
||||
- [accountId](ChatGPTError.md#accountid)
|
||||
- [isFinal](ChatGPTError.md#isfinal)
|
||||
- [statusCode](ChatGPTError.md#statuscode)
|
||||
- [statusText](ChatGPTError.md#statustext)
|
||||
|
||||
## Constructors
|
||||
|
||||
### constructor
|
||||
|
||||
• **new ChatGPTError**(`message?`)
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `message?` | `string` |
|
||||
|
||||
#### Inherited from
|
||||
|
||||
Error.constructor
|
||||
|
||||
#### Defined in
|
||||
|
||||
node_modules/.pnpm/typescript@5.0.4/node_modules/typescript/lib/lib.es5.d.ts:1060
|
||||
|
||||
• **new ChatGPTError**(`message?`, `options?`)
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `message?` | `string` |
|
||||
| `options?` | `ErrorOptions` |
|
||||
|
||||
#### Inherited from
|
||||
|
||||
Error.constructor
|
||||
|
||||
#### Defined in
|
||||
|
||||
node_modules/.pnpm/typescript@5.0.4/node_modules/typescript/lib/lib.es2022.error.d.ts:28
|
||||
|
||||
## Properties
|
||||
|
||||
### accountId
|
||||
|
||||
• `Optional` **accountId**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:86](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L86)
|
||||
|
||||
___
|
||||
|
||||
### isFinal
|
||||
|
||||
• `Optional` **isFinal**: `boolean`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:85](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L85)
|
||||
|
||||
___
|
||||
|
||||
### statusCode
|
||||
|
||||
• `Optional` **statusCode**: `number`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:83](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L83)
|
||||
|
||||
___
|
||||
|
||||
### statusText
|
||||
|
||||
• `Optional` **statusText**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:84](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L84)
|
|
@ -1,104 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / ChatGPTUnofficialProxyAPI
|
||||
|
||||
# Class: ChatGPTUnofficialProxyAPI
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Constructors
|
||||
|
||||
- [constructor](ChatGPTUnofficialProxyAPI.md#constructor)
|
||||
|
||||
### Accessors
|
||||
|
||||
- [accessToken](ChatGPTUnofficialProxyAPI.md#accesstoken)
|
||||
|
||||
### Methods
|
||||
|
||||
- [sendMessage](ChatGPTUnofficialProxyAPI.md#sendmessage)
|
||||
|
||||
## Constructors
|
||||
|
||||
### constructor
|
||||
|
||||
• **new ChatGPTUnofficialProxyAPI**(`opts`)
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
| :------ | :------ | :------ |
|
||||
| `opts` | `Object` | - |
|
||||
| `opts.accessToken` | `string` | - |
|
||||
| `opts.apiReverseProxyUrl?` | `string` | **`Default Value`** `https://bypass.duti.tech/api/conversation` * |
|
||||
| `opts.debug?` | `boolean` | **`Default Value`** `false` * |
|
||||
| `opts.fetch?` | (`input`: `RequestInfo` \| `URL`, `init?`: `RequestInit`) => `Promise`<`Response`\> | - |
|
||||
| `opts.headers?` | `Record`<`string`, `string`\> | **`Default Value`** `undefined` * |
|
||||
| `opts.model?` | `string` | **`Default Value`** `text-davinci-002-render-sha` * |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-unofficial-proxy-api.ts:20](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-unofficial-proxy-api.ts#L20)
|
||||
|
||||
## Accessors
|
||||
|
||||
### accessToken
|
||||
|
||||
• `get` **accessToken**(): `string`
|
||||
|
||||
#### Returns
|
||||
|
||||
`string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-unofficial-proxy-api.ts:66](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-unofficial-proxy-api.ts#L66)
|
||||
|
||||
• `set` **accessToken**(`value`): `void`
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `value` | `string` |
|
||||
|
||||
#### Returns
|
||||
|
||||
`void`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-unofficial-proxy-api.ts:70](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-unofficial-proxy-api.ts#L70)
|
||||
|
||||
## Methods
|
||||
|
||||
### sendMessage
|
||||
|
||||
▸ **sendMessage**(`text`, `opts?`): `Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)\>
|
||||
|
||||
Sends a message to ChatGPT, waits for the response to resolve, and returns
|
||||
the response.
|
||||
|
||||
If you want your response to have historical context, you must provide a valid `parentMessageId`.
|
||||
|
||||
If you want to receive a stream of partial responses, use `opts.onProgress`.
|
||||
If you want to receive the full response, including message and conversation IDs,
|
||||
you can use `opts.onConversationResponse` or use the `ChatGPTAPI.getConversation`
|
||||
helper.
|
||||
|
||||
Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI completions API. You can override the `promptPrefix` and `promptSuffix` in `opts` to customize the prompt.
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `text` | `string` |
|
||||
| `opts` | [`SendMessageBrowserOptions`](../modules.md#sendmessagebrowseroptions) |
|
||||
|
||||
#### Returns
|
||||
|
||||
`Promise`<[`ChatMessage`](../interfaces/ChatMessage.md)\>
|
||||
|
||||
The response from ChatGPT
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/chatgpt-unofficial-proxy-api.ts:97](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/chatgpt-unofficial-proxy-api.ts#L97)
|
|
@ -1,96 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / ChatMessage
|
||||
|
||||
# Interface: ChatMessage
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [conversationId](ChatMessage.md#conversationid)
|
||||
- [delta](ChatMessage.md#delta)
|
||||
- [detail](ChatMessage.md#detail)
|
||||
- [id](ChatMessage.md#id)
|
||||
- [name](ChatMessage.md#name)
|
||||
- [parentMessageId](ChatMessage.md#parentmessageid)
|
||||
- [role](ChatMessage.md#role)
|
||||
- [text](ChatMessage.md#text)
|
||||
|
||||
## Properties
|
||||
|
||||
### conversationId
|
||||
|
||||
• `Optional` **conversationId**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:79](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L79)
|
||||
|
||||
___
|
||||
|
||||
### delta
|
||||
|
||||
• `Optional` **delta**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:70](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L70)
|
||||
|
||||
___
|
||||
|
||||
### detail
|
||||
|
||||
• `Optional` **detail**: [`CreateChatCompletionResponse`](openai.CreateChatCompletionResponse.md) \| [`CreateChatCompletionStreamResponse`](CreateChatCompletionStreamResponse.md)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:71](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L71)
|
||||
|
||||
___
|
||||
|
||||
### id
|
||||
|
||||
• **id**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:66](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L66)
|
||||
|
||||
___
|
||||
|
||||
### name
|
||||
|
||||
• `Optional` **name**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:69](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L69)
|
||||
|
||||
___
|
||||
|
||||
### parentMessageId
|
||||
|
||||
• `Optional` **parentMessageId**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:76](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L76)
|
||||
|
||||
___
|
||||
|
||||
### role
|
||||
|
||||
• **role**: [`Role`](../modules.md#role)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:68](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L68)
|
||||
|
||||
___
|
||||
|
||||
### text
|
||||
|
||||
• **text**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:67](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L67)
|
|
@ -1,100 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / CreateChatCompletionStreamResponse
|
||||
|
||||
# Interface: CreateChatCompletionStreamResponse
|
||||
|
||||
## Hierarchy
|
||||
|
||||
- [`CreateChatCompletionDeltaResponse`](openai.CreateChatCompletionDeltaResponse.md)
|
||||
|
||||
↳ **`CreateChatCompletionStreamResponse`**
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [choices](CreateChatCompletionStreamResponse.md#choices)
|
||||
- [created](CreateChatCompletionStreamResponse.md#created)
|
||||
- [id](CreateChatCompletionStreamResponse.md#id)
|
||||
- [model](CreateChatCompletionStreamResponse.md#model)
|
||||
- [object](CreateChatCompletionStreamResponse.md#object)
|
||||
- [usage](CreateChatCompletionStreamResponse.md#usage)
|
||||
|
||||
## Properties
|
||||
|
||||
### choices
|
||||
|
||||
• **choices**: [{ `delta`: { `content?`: `string` ; `role`: [`Role`](../modules.md#role) } ; `finish_reason`: `string` ; `index`: `number` }]
|
||||
|
||||
#### Inherited from
|
||||
|
||||
[CreateChatCompletionDeltaResponse](openai.CreateChatCompletionDeltaResponse.md).[choices](openai.CreateChatCompletionDeltaResponse.md#choices)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:198](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L198)
|
||||
|
||||
___
|
||||
|
||||
### created
|
||||
|
||||
• **created**: `number`
|
||||
|
||||
#### Inherited from
|
||||
|
||||
[CreateChatCompletionDeltaResponse](openai.CreateChatCompletionDeltaResponse.md).[created](openai.CreateChatCompletionDeltaResponse.md#created)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:196](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L196)
|
||||
|
||||
___
|
||||
|
||||
### id
|
||||
|
||||
• **id**: `string`
|
||||
|
||||
#### Inherited from
|
||||
|
||||
[CreateChatCompletionDeltaResponse](openai.CreateChatCompletionDeltaResponse.md).[id](openai.CreateChatCompletionDeltaResponse.md#id)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:194](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L194)
|
||||
|
||||
___
|
||||
|
||||
### model
|
||||
|
||||
• **model**: `string`
|
||||
|
||||
#### Inherited from
|
||||
|
||||
[CreateChatCompletionDeltaResponse](openai.CreateChatCompletionDeltaResponse.md).[model](openai.CreateChatCompletionDeltaResponse.md#model)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:197](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L197)
|
||||
|
||||
___
|
||||
|
||||
### object
|
||||
|
||||
• **object**: ``"chat.completion.chunk"``
|
||||
|
||||
#### Inherited from
|
||||
|
||||
[CreateChatCompletionDeltaResponse](openai.CreateChatCompletionDeltaResponse.md).[object](openai.CreateChatCompletionDeltaResponse.md#object)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:195](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L195)
|
||||
|
||||
___
|
||||
|
||||
### usage
|
||||
|
||||
• **usage**: [`CreateCompletionStreamResponseUsage`](CreateCompletionStreamResponseUsage.md)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:97](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L97)
|
|
@ -1,86 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / CreateCompletionStreamResponseUsage
|
||||
|
||||
# Interface: CreateCompletionStreamResponseUsage
|
||||
|
||||
**`Export`**
|
||||
|
||||
CreateCompletionResponseUsage
|
||||
|
||||
## Hierarchy
|
||||
|
||||
- [`CreateCompletionResponseUsage`](openai.CreateCompletionResponseUsage.md)
|
||||
|
||||
↳ **`CreateCompletionStreamResponseUsage`**
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [completion\_tokens](CreateCompletionStreamResponseUsage.md#completion_tokens)
|
||||
- [estimated](CreateCompletionStreamResponseUsage.md#estimated)
|
||||
- [prompt\_tokens](CreateCompletionStreamResponseUsage.md#prompt_tokens)
|
||||
- [total\_tokens](CreateCompletionStreamResponseUsage.md#total_tokens)
|
||||
|
||||
## Properties
|
||||
|
||||
### completion\_tokens
|
||||
|
||||
• **completion\_tokens**: `number`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateCompletionResponseUsage
|
||||
|
||||
#### Inherited from
|
||||
|
||||
[CreateCompletionResponseUsage](openai.CreateCompletionResponseUsage.md).[completion_tokens](openai.CreateCompletionResponseUsage.md#completion_tokens)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:438](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L438)
|
||||
|
||||
___
|
||||
|
||||
### estimated
|
||||
|
||||
• **estimated**: ``true``
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:102](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L102)
|
||||
|
||||
___
|
||||
|
||||
### prompt\_tokens
|
||||
|
||||
• **prompt\_tokens**: `number`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateCompletionResponseUsage
|
||||
|
||||
#### Inherited from
|
||||
|
||||
[CreateCompletionResponseUsage](openai.CreateCompletionResponseUsage.md).[prompt_tokens](openai.CreateCompletionResponseUsage.md#prompt_tokens)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:432](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L432)
|
||||
|
||||
___
|
||||
|
||||
### total\_tokens
|
||||
|
||||
• **total\_tokens**: `number`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateCompletionResponseUsage
|
||||
|
||||
#### Inherited from
|
||||
|
||||
[CreateCompletionResponseUsage](openai.CreateCompletionResponseUsage.md).[total_tokens](openai.CreateCompletionResponseUsage.md#total_tokens)
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:444](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L444)
|
|
@ -1,65 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / [openai](../modules/openai.md) / ChatCompletionRequestMessage
|
||||
|
||||
# Interface: ChatCompletionRequestMessage
|
||||
|
||||
[openai](../modules/openai.md).ChatCompletionRequestMessage
|
||||
|
||||
**`Export`**
|
||||
|
||||
ChatCompletionRequestMessage
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [content](openai.ChatCompletionRequestMessage.md#content)
|
||||
- [name](openai.ChatCompletionRequestMessage.md#name)
|
||||
- [role](openai.ChatCompletionRequestMessage.md#role)
|
||||
|
||||
## Properties
|
||||
|
||||
### content
|
||||
|
||||
• **content**: `string`
|
||||
|
||||
The contents of the message
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
ChatCompletionRequestMessage
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:227](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L227)
|
||||
|
||||
___
|
||||
|
||||
### name
|
||||
|
||||
• `Optional` **name**: `string`
|
||||
|
||||
The name of the user in a multi-user chat
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
ChatCompletionRequestMessage
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:233](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L233)
|
||||
|
||||
___
|
||||
|
||||
### role
|
||||
|
||||
• **role**: [`ChatCompletionRequestMessageRoleEnum`](../modules/openai.md#chatcompletionrequestmessageroleenum-1)
|
||||
|
||||
The role of the author of this message.
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
ChatCompletionRequestMessage
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:221](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L221)
|
|
@ -1,48 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / [openai](../modules/openai.md) / ChatCompletionResponseMessage
|
||||
|
||||
# Interface: ChatCompletionResponseMessage
|
||||
|
||||
[openai](../modules/openai.md).ChatCompletionResponseMessage
|
||||
|
||||
**`Export`**
|
||||
|
||||
ChatCompletionResponseMessage
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [content](openai.ChatCompletionResponseMessage.md#content)
|
||||
- [role](openai.ChatCompletionResponseMessage.md#role)
|
||||
|
||||
## Properties
|
||||
|
||||
### content
|
||||
|
||||
• **content**: `string`
|
||||
|
||||
The contents of the message
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
ChatCompletionResponseMessage
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:259](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L259)
|
||||
|
||||
___
|
||||
|
||||
### role
|
||||
|
||||
• **role**: [`ChatCompletionResponseMessageRoleEnum`](../modules/openai.md#chatcompletionresponsemessageroleenum-1)
|
||||
|
||||
The role of the author of this message.
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
ChatCompletionResponseMessage
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:253](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L253)
|
|
@ -1,71 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / [openai](../modules/openai.md) / CreateChatCompletionDeltaResponse
|
||||
|
||||
# Interface: CreateChatCompletionDeltaResponse
|
||||
|
||||
[openai](../modules/openai.md).CreateChatCompletionDeltaResponse
|
||||
|
||||
## Hierarchy
|
||||
|
||||
- **`CreateChatCompletionDeltaResponse`**
|
||||
|
||||
↳ [`CreateChatCompletionStreamResponse`](CreateChatCompletionStreamResponse.md)
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [choices](openai.CreateChatCompletionDeltaResponse.md#choices)
|
||||
- [created](openai.CreateChatCompletionDeltaResponse.md#created)
|
||||
- [id](openai.CreateChatCompletionDeltaResponse.md#id)
|
||||
- [model](openai.CreateChatCompletionDeltaResponse.md#model)
|
||||
- [object](openai.CreateChatCompletionDeltaResponse.md#object)
|
||||
|
||||
## Properties
|
||||
|
||||
### choices
|
||||
|
||||
• **choices**: [{ `delta`: { `content?`: `string` ; `role`: [`Role`](../modules.md#role) } ; `finish_reason`: `string` ; `index`: `number` }]
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:198](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L198)
|
||||
|
||||
___
|
||||
|
||||
### created
|
||||
|
||||
• **created**: `number`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:196](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L196)
|
||||
|
||||
___
|
||||
|
||||
### id
|
||||
|
||||
• **id**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:194](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L194)
|
||||
|
||||
___
|
||||
|
||||
### model
|
||||
|
||||
• **model**: `string`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:197](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L197)
|
||||
|
||||
___
|
||||
|
||||
### object
|
||||
|
||||
• **object**: ``"chat.completion.chunk"``
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:195](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L195)
|
|
@ -1,216 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / [openai](../modules/openai.md) / CreateChatCompletionRequest
|
||||
|
||||
# Interface: CreateChatCompletionRequest
|
||||
|
||||
[openai](../modules/openai.md).CreateChatCompletionRequest
|
||||
|
||||
**`Export`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [frequency\_penalty](openai.CreateChatCompletionRequest.md#frequency_penalty)
|
||||
- [logit\_bias](openai.CreateChatCompletionRequest.md#logit_bias)
|
||||
- [max\_tokens](openai.CreateChatCompletionRequest.md#max_tokens)
|
||||
- [messages](openai.CreateChatCompletionRequest.md#messages)
|
||||
- [model](openai.CreateChatCompletionRequest.md#model)
|
||||
- [n](openai.CreateChatCompletionRequest.md#n)
|
||||
- [presence\_penalty](openai.CreateChatCompletionRequest.md#presence_penalty)
|
||||
- [stop](openai.CreateChatCompletionRequest.md#stop)
|
||||
- [stream](openai.CreateChatCompletionRequest.md#stream)
|
||||
- [temperature](openai.CreateChatCompletionRequest.md#temperature)
|
||||
- [top\_p](openai.CreateChatCompletionRequest.md#top_p)
|
||||
- [user](openai.CreateChatCompletionRequest.md#user)
|
||||
|
||||
## Properties
|
||||
|
||||
### frequency\_penalty
|
||||
|
||||
• `Optional` **frequency\_penalty**: `number`
|
||||
|
||||
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:333](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L333)
|
||||
|
||||
___
|
||||
|
||||
### logit\_bias
|
||||
|
||||
• `Optional` **logit\_bias**: `object`
|
||||
|
||||
Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:339](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L339)
|
||||
|
||||
___
|
||||
|
||||
### max\_tokens
|
||||
|
||||
• `Optional` **max\_tokens**: `number`
|
||||
|
||||
The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:321](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L321)
|
||||
|
||||
___
|
||||
|
||||
### messages
|
||||
|
||||
• **messages**: [`ChatCompletionRequestMessage`](openai.ChatCompletionRequestMessage.md)[]
|
||||
|
||||
The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:285](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L285)
|
||||
|
||||
___
|
||||
|
||||
### model
|
||||
|
||||
• **model**: `string`
|
||||
|
||||
ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:279](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L279)
|
||||
|
||||
___
|
||||
|
||||
### n
|
||||
|
||||
• `Optional` **n**: `number`
|
||||
|
||||
How many chat completion choices to generate for each input message.
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:303](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L303)
|
||||
|
||||
___
|
||||
|
||||
### presence\_penalty
|
||||
|
||||
• `Optional` **presence\_penalty**: `number`
|
||||
|
||||
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:327](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L327)
|
||||
|
||||
___
|
||||
|
||||
### stop
|
||||
|
||||
• `Optional` **stop**: [`CreateChatCompletionRequestStop`](../modules/openai.md#createchatcompletionrequeststop)
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:315](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L315)
|
||||
|
||||
___
|
||||
|
||||
### stream
|
||||
|
||||
• `Optional` **stream**: `boolean`
|
||||
|
||||
If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:309](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L309)
|
||||
|
||||
___
|
||||
|
||||
### temperature
|
||||
|
||||
• `Optional` **temperature**: `number`
|
||||
|
||||
What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:291](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L291)
|
||||
|
||||
___
|
||||
|
||||
### top\_p
|
||||
|
||||
• `Optional` **top\_p**: `number`
|
||||
|
||||
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:297](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L297)
|
||||
|
||||
___
|
||||
|
||||
### user
|
||||
|
||||
• `Optional` **user**: `string`
|
||||
|
||||
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionRequest
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:345](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L345)
|
|
@ -1,104 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / [openai](../modules/openai.md) / CreateChatCompletionResponse
|
||||
|
||||
# Interface: CreateChatCompletionResponse
|
||||
|
||||
[openai](../modules/openai.md).CreateChatCompletionResponse
|
||||
|
||||
**`Export`**
|
||||
|
||||
CreateChatCompletionResponse
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [choices](openai.CreateChatCompletionResponse.md#choices)
|
||||
- [created](openai.CreateChatCompletionResponse.md#created)
|
||||
- [id](openai.CreateChatCompletionResponse.md#id)
|
||||
- [model](openai.CreateChatCompletionResponse.md#model)
|
||||
- [object](openai.CreateChatCompletionResponse.md#object)
|
||||
- [usage](openai.CreateChatCompletionResponse.md#usage)
|
||||
|
||||
## Properties
|
||||
|
||||
### choices
|
||||
|
||||
• **choices**: [`CreateChatCompletionResponseChoicesInner`](openai.CreateChatCompletionResponseChoicesInner.md)[]
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionResponse
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:388](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L388)
|
||||
|
||||
___
|
||||
|
||||
### created
|
||||
|
||||
• **created**: `number`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionResponse
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:376](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L376)
|
||||
|
||||
___
|
||||
|
||||
### id
|
||||
|
||||
• **id**: `string`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionResponse
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:364](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L364)
|
||||
|
||||
___
|
||||
|
||||
### model
|
||||
|
||||
• **model**: `string`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionResponse
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:382](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L382)
|
||||
|
||||
___
|
||||
|
||||
### object
|
||||
|
||||
• **object**: `string`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionResponse
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:370](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L370)
|
||||
|
||||
___
|
||||
|
||||
### usage
|
||||
|
||||
• `Optional` **usage**: [`CreateCompletionResponseUsage`](openai.CreateCompletionResponseUsage.md)
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionResponse
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:394](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L394)
|
|
@ -1,59 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / [openai](../modules/openai.md) / CreateChatCompletionResponseChoicesInner
|
||||
|
||||
# Interface: CreateChatCompletionResponseChoicesInner
|
||||
|
||||
[openai](../modules/openai.md).CreateChatCompletionResponseChoicesInner
|
||||
|
||||
**`Export`**
|
||||
|
||||
CreateChatCompletionResponseChoicesInner
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [finish\_reason](openai.CreateChatCompletionResponseChoicesInner.md#finish_reason)
|
||||
- [index](openai.CreateChatCompletionResponseChoicesInner.md#index)
|
||||
- [message](openai.CreateChatCompletionResponseChoicesInner.md#message)
|
||||
|
||||
## Properties
|
||||
|
||||
### finish\_reason
|
||||
|
||||
• `Optional` **finish\_reason**: `string`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionResponseChoicesInner
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:419](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L419)
|
||||
|
||||
___
|
||||
|
||||
### index
|
||||
|
||||
• `Optional` **index**: `number`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionResponseChoicesInner
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:407](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L407)
|
||||
|
||||
___
|
||||
|
||||
### message
|
||||
|
||||
• `Optional` **message**: [`ChatCompletionResponseMessage`](openai.ChatCompletionResponseMessage.md)
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateChatCompletionResponseChoicesInner
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:413](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L413)
|
|
@ -1,65 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / [openai](../modules/openai.md) / CreateCompletionResponseUsage
|
||||
|
||||
# Interface: CreateCompletionResponseUsage
|
||||
|
||||
[openai](../modules/openai.md).CreateCompletionResponseUsage
|
||||
|
||||
**`Export`**
|
||||
|
||||
CreateCompletionResponseUsage
|
||||
|
||||
## Hierarchy
|
||||
|
||||
- **`CreateCompletionResponseUsage`**
|
||||
|
||||
↳ [`CreateCompletionStreamResponseUsage`](CreateCompletionStreamResponseUsage.md)
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Properties
|
||||
|
||||
- [completion\_tokens](openai.CreateCompletionResponseUsage.md#completion_tokens)
|
||||
- [prompt\_tokens](openai.CreateCompletionResponseUsage.md#prompt_tokens)
|
||||
- [total\_tokens](openai.CreateCompletionResponseUsage.md#total_tokens)
|
||||
|
||||
## Properties
|
||||
|
||||
### completion\_tokens
|
||||
|
||||
• **completion\_tokens**: `number`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateCompletionResponseUsage
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:438](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L438)
|
||||
|
||||
___
|
||||
|
||||
### prompt\_tokens
|
||||
|
||||
• **prompt\_tokens**: `number`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateCompletionResponseUsage
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:432](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L432)
|
||||
|
||||
___
|
||||
|
||||
### total\_tokens
|
||||
|
||||
• **total\_tokens**: `number`
|
||||
|
||||
**`Memberof`**
|
||||
|
||||
CreateCompletionResponseUsage
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:444](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L444)
|
333
docs/modules.md
333
docs/modules.md
|
@ -1,333 +0,0 @@
|
|||
[chatgpt](readme.md) / Exports
|
||||
|
||||
# chatgpt
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Namespaces
|
||||
|
||||
- [openai](modules/openai.md)
|
||||
|
||||
### Classes
|
||||
|
||||
- [ChatGPTAPI](classes/ChatGPTAPI.md)
|
||||
- [ChatGPTError](classes/ChatGPTError.md)
|
||||
- [ChatGPTUnofficialProxyAPI](classes/ChatGPTUnofficialProxyAPI.md)
|
||||
|
||||
### Interfaces
|
||||
|
||||
- [ChatMessage](interfaces/ChatMessage.md)
|
||||
- [CreateChatCompletionStreamResponse](interfaces/CreateChatCompletionStreamResponse.md)
|
||||
- [CreateCompletionStreamResponseUsage](interfaces/CreateCompletionStreamResponseUsage.md)
|
||||
|
||||
### Type Aliases
|
||||
|
||||
- [ChatGPTAPIOptions](modules.md#chatgptapioptions)
|
||||
- [ContentType](modules.md#contenttype)
|
||||
- [ConversationJSONBody](modules.md#conversationjsonbody)
|
||||
- [ConversationResponseEvent](modules.md#conversationresponseevent)
|
||||
- [FetchFn](modules.md#fetchfn)
|
||||
- [GetMessageByIdFunction](modules.md#getmessagebyidfunction)
|
||||
- [Message](modules.md#message)
|
||||
- [MessageActionType](modules.md#messageactiontype)
|
||||
- [MessageContent](modules.md#messagecontent)
|
||||
- [MessageMetadata](modules.md#messagemetadata)
|
||||
- [Prompt](modules.md#prompt)
|
||||
- [PromptContent](modules.md#promptcontent)
|
||||
- [Role](modules.md#role)
|
||||
- [SendMessageBrowserOptions](modules.md#sendmessagebrowseroptions)
|
||||
- [SendMessageOptions](modules.md#sendmessageoptions)
|
||||
- [UpsertMessageFunction](modules.md#upsertmessagefunction)
|
||||
|
||||
## Type Aliases
|
||||
|
||||
### ChatGPTAPIOptions
|
||||
|
||||
Ƭ **ChatGPTAPIOptions**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type | Description |
|
||||
| :------ | :------ | :------ |
|
||||
| `apiBaseUrl?` | `string` | **`Default Value`** `'https://api.openai.com'` * |
|
||||
| `apiKey` | `string` | - |
|
||||
| `apiOrg?` | `string` | - |
|
||||
| `completionParams?` | `Partial`<`Omit`<[`CreateChatCompletionRequest`](interfaces/openai.CreateChatCompletionRequest.md), ``"messages"`` \| ``"n"`` \| ``"stream"``\>\> | - |
|
||||
| `debug?` | `boolean` | **`Default Value`** `false` * |
|
||||
| `fetch?` | [`FetchFn`](modules.md#fetchfn) | - |
|
||||
| `getMessageById?` | [`GetMessageByIdFunction`](modules.md#getmessagebyidfunction) | - |
|
||||
| `maxModelTokens?` | `number` | **`Default Value`** `4096` * |
|
||||
| `maxResponseTokens?` | `number` | **`Default Value`** `1000` * |
|
||||
| `messageStore?` | `Keyv` | - |
|
||||
| `systemMessage?` | `string` | - |
|
||||
| `upsertMessage?` | [`UpsertMessageFunction`](modules.md#upsertmessagefunction) | - |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:7](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L7)
|
||||
|
||||
___
|
||||
|
||||
### ContentType
|
||||
|
||||
Ƭ **ContentType**: ``"text"``
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:152](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L152)
|
||||
|
||||
___
|
||||
|
||||
### ConversationJSONBody
|
||||
|
||||
Ƭ **ConversationJSONBody**: `Object`
|
||||
|
||||
https://chat.openapi.com/backend-api/conversation
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type | Description |
|
||||
| :------ | :------ | :------ |
|
||||
| `action` | `string` | The action to take |
|
||||
| `conversation_id?` | `string` | The ID of the conversation |
|
||||
| `messages` | [`Prompt`](modules.md#prompt)[] | Prompts to provide |
|
||||
| `model` | `string` | The model to use |
|
||||
| `parent_message_id` | `string` | The parent message ID |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:108](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L108)
|
||||
|
||||
___
|
||||
|
||||
### ConversationResponseEvent
|
||||
|
||||
Ƭ **ConversationResponseEvent**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `conversation_id?` | `string` |
|
||||
| `error?` | `string` \| ``null`` |
|
||||
| `message?` | [`Message`](modules.md#message) |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:166](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L166)
|
||||
|
||||
___
|
||||
|
||||
### FetchFn
|
||||
|
||||
Ƭ **FetchFn**: typeof `fetch`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:5](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L5)
|
||||
|
||||
___
|
||||
|
||||
### GetMessageByIdFunction
|
||||
|
||||
Ƭ **GetMessageByIdFunction**: (`id`: `string`) => `Promise`<[`ChatMessage`](interfaces/ChatMessage.md)\>
|
||||
|
||||
#### Type declaration
|
||||
|
||||
▸ (`id`): `Promise`<[`ChatMessage`](interfaces/ChatMessage.md)\>
|
||||
|
||||
Returns a chat message from a store by it's ID (or null if not found).
|
||||
|
||||
##### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `id` | `string` |
|
||||
|
||||
##### Returns
|
||||
|
||||
`Promise`<[`ChatMessage`](interfaces/ChatMessage.md)\>
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:90](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L90)
|
||||
|
||||
___
|
||||
|
||||
### Message
|
||||
|
||||
Ƭ **Message**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `content` | [`MessageContent`](modules.md#messagecontent) |
|
||||
| `create_time` | `string` \| ``null`` |
|
||||
| `end_turn` | ``null`` |
|
||||
| `id` | `string` |
|
||||
| `metadata` | [`MessageMetadata`](modules.md#messagemetadata) |
|
||||
| `recipient` | `string` |
|
||||
| `role` | [`Role`](modules.md#role) |
|
||||
| `update_time` | `string` \| ``null`` |
|
||||
| `user` | `string` \| ``null`` |
|
||||
| `weight` | `number` |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:172](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L172)
|
||||
|
||||
___
|
||||
|
||||
### MessageActionType
|
||||
|
||||
Ƭ **MessageActionType**: ``"next"`` \| ``"variant"``
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:53](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L53)
|
||||
|
||||
___
|
||||
|
||||
### MessageContent
|
||||
|
||||
Ƭ **MessageContent**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `content_type` | `string` |
|
||||
| `parts` | `string`[] |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:185](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L185)
|
||||
|
||||
___
|
||||
|
||||
### MessageMetadata
|
||||
|
||||
Ƭ **MessageMetadata**: `any`
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:190](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L190)
|
||||
|
||||
___
|
||||
|
||||
### Prompt
|
||||
|
||||
Ƭ **Prompt**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type | Description |
|
||||
| :------ | :------ | :------ |
|
||||
| `content` | [`PromptContent`](modules.md#promptcontent) | The content of the prompt |
|
||||
| `id` | `string` | The ID of the prompt |
|
||||
| `role` | [`Role`](modules.md#role) | The role played in the prompt |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:135](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L135)
|
||||
|
||||
___
|
||||
|
||||
### PromptContent
|
||||
|
||||
Ƭ **PromptContent**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type | Description |
|
||||
| :------ | :------ | :------ |
|
||||
| `content_type` | [`ContentType`](modules.md#contenttype) | The content type of the prompt |
|
||||
| `parts` | `string`[] | The parts to the prompt |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:154](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L154)
|
||||
|
||||
___
|
||||
|
||||
### Role
|
||||
|
||||
Ƭ **Role**: ``"user"`` \| ``"assistant"`` \| ``"system"``
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:3](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L3)
|
||||
|
||||
___
|
||||
|
||||
### SendMessageBrowserOptions
|
||||
|
||||
Ƭ **SendMessageBrowserOptions**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `abortSignal?` | `AbortSignal` |
|
||||
| `action?` | [`MessageActionType`](modules.md#messageactiontype) |
|
||||
| `conversationId?` | `string` |
|
||||
| `messageId?` | `string` |
|
||||
| `onProgress?` | (`partialResponse`: [`ChatMessage`](interfaces/ChatMessage.md)) => `void` |
|
||||
| `parentMessageId?` | `string` |
|
||||
| `timeoutMs?` | `number` |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:55](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L55)
|
||||
|
||||
___
|
||||
|
||||
### SendMessageOptions
|
||||
|
||||
Ƭ **SendMessageOptions**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type | Description |
|
||||
| :------ | :------ | :------ |
|
||||
| `abortSignal?` | `AbortSignal` | - |
|
||||
| `completionParams?` | `Partial`<`Omit`<[`CreateChatCompletionRequest`](interfaces/openai.CreateChatCompletionRequest.md), ``"messages"`` \| ``"n"`` \| ``"stream"``\>\> | - |
|
||||
| `conversationId?` | `string` | - |
|
||||
| `messageId?` | `string` | - |
|
||||
| `name?` | `string` | The name of a user in a multi-user chat. |
|
||||
| `onProgress?` | (`partialResponse`: [`ChatMessage`](interfaces/ChatMessage.md)) => `void` | - |
|
||||
| `parentMessageId?` | `string` | - |
|
||||
| `stream?` | `boolean` | - |
|
||||
| `systemMessage?` | `string` | - |
|
||||
| `timeoutMs?` | `number` | - |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:37](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L37)
|
||||
|
||||
___
|
||||
|
||||
### UpsertMessageFunction
|
||||
|
||||
Ƭ **UpsertMessageFunction**: (`message`: [`ChatMessage`](interfaces/ChatMessage.md)) => `Promise`<`void`\>
|
||||
|
||||
#### Type declaration
|
||||
|
||||
▸ (`message`): `Promise`<`void`\>
|
||||
|
||||
Upserts a chat message to a store.
|
||||
|
||||
##### Parameters
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `message` | [`ChatMessage`](interfaces/ChatMessage.md) |
|
||||
|
||||
##### Returns
|
||||
|
||||
`Promise`<`void`\>
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:93](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L93)
|
|
@ -1,102 +0,0 @@
|
|||
[chatgpt](../readme.md) / [Exports](../modules.md) / openai
|
||||
|
||||
# Namespace: openai
|
||||
|
||||
## Table of contents
|
||||
|
||||
### Interfaces
|
||||
|
||||
- [ChatCompletionRequestMessage](../interfaces/openai.ChatCompletionRequestMessage.md)
|
||||
- [ChatCompletionResponseMessage](../interfaces/openai.ChatCompletionResponseMessage.md)
|
||||
- [CreateChatCompletionDeltaResponse](../interfaces/openai.CreateChatCompletionDeltaResponse.md)
|
||||
- [CreateChatCompletionRequest](../interfaces/openai.CreateChatCompletionRequest.md)
|
||||
- [CreateChatCompletionResponse](../interfaces/openai.CreateChatCompletionResponse.md)
|
||||
- [CreateChatCompletionResponseChoicesInner](../interfaces/openai.CreateChatCompletionResponseChoicesInner.md)
|
||||
- [CreateCompletionResponseUsage](../interfaces/openai.CreateCompletionResponseUsage.md)
|
||||
|
||||
### Type Aliases
|
||||
|
||||
- [ChatCompletionRequestMessageRoleEnum](openai.md#chatcompletionrequestmessageroleenum)
|
||||
- [ChatCompletionResponseMessageRoleEnum](openai.md#chatcompletionresponsemessageroleenum)
|
||||
- [CreateChatCompletionRequestStop](openai.md#createchatcompletionrequeststop)
|
||||
|
||||
### Variables
|
||||
|
||||
- [ChatCompletionRequestMessageRoleEnum](openai.md#chatcompletionrequestmessageroleenum-1)
|
||||
- [ChatCompletionResponseMessageRoleEnum](openai.md#chatcompletionresponsemessageroleenum-1)
|
||||
|
||||
## Type Aliases
|
||||
|
||||
### ChatCompletionRequestMessageRoleEnum
|
||||
|
||||
Ƭ **ChatCompletionRequestMessageRoleEnum**: typeof [`ChatCompletionRequestMessageRoleEnum`](openai.md#chatcompletionrequestmessageroleenum-1)[keyof typeof [`ChatCompletionRequestMessageRoleEnum`](openai.md#chatcompletionrequestmessageroleenum-1)]
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:235](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L235)
|
||||
|
||||
[src/types.ts:240](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L240)
|
||||
|
||||
___
|
||||
|
||||
### ChatCompletionResponseMessageRoleEnum
|
||||
|
||||
Ƭ **ChatCompletionResponseMessageRoleEnum**: typeof [`ChatCompletionResponseMessageRoleEnum`](openai.md#chatcompletionresponsemessageroleenum-1)[keyof typeof [`ChatCompletionResponseMessageRoleEnum`](openai.md#chatcompletionresponsemessageroleenum-1)]
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:261](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L261)
|
||||
|
||||
[src/types.ts:266](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L266)
|
||||
|
||||
___
|
||||
|
||||
### CreateChatCompletionRequestStop
|
||||
|
||||
Ƭ **CreateChatCompletionRequestStop**: `string`[] \| `string`
|
||||
|
||||
**`Export`**
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:352](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L352)
|
||||
|
||||
## Variables
|
||||
|
||||
### ChatCompletionRequestMessageRoleEnum
|
||||
|
||||
• `Const` **ChatCompletionRequestMessageRoleEnum**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `Assistant` | ``"assistant"`` |
|
||||
| `System` | ``"system"`` |
|
||||
| `User` | ``"user"`` |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:235](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L235)
|
||||
|
||||
[src/types.ts:240](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L240)
|
||||
|
||||
___
|
||||
|
||||
### ChatCompletionResponseMessageRoleEnum
|
||||
|
||||
• `Const` **ChatCompletionResponseMessageRoleEnum**: `Object`
|
||||
|
||||
#### Type declaration
|
||||
|
||||
| Name | Type |
|
||||
| :------ | :------ |
|
||||
| `Assistant` | ``"assistant"`` |
|
||||
| `System` | ``"system"`` |
|
||||
| `User` | ``"user"`` |
|
||||
|
||||
#### Defined in
|
||||
|
||||
[src/types.ts:261](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L261)
|
||||
|
||||
[src/types.ts:266](https://github.com/transitive-bullshit/chatgpt-api/blob/fb06beb/src/types.ts#L266)
|
564
docs/readme.md
564
docs/readme.md
|
@ -1,564 +0,0 @@
|
|||
chatgpt / [Exports](modules.md)
|
||||
|
||||
# ChatGPT API <!-- omit in toc -->
|
||||
|
||||
> Node.js client for the official [ChatGPT](https://openai.com/blog/chatgpt/) API.
|
||||
|
||||
[](https://www.npmjs.com/package/chatgpt) [](https://github.com/transitive-bullshit/chatgpt-api/actions/workflows/test.yml) [](https://github.com/transitive-bullshit/chatgpt-api/blob/main/license) [](https://prettier.io)
|
||||
|
||||
- [Intro](#intro)
|
||||
- [Updates](#updates)
|
||||
- [CLI](#cli)
|
||||
- [Install](#install)
|
||||
- [Usage](#usage)
|
||||
- [Usage - ChatGPTAPI](#usage---chatgptapi)
|
||||
- [Usage - ChatGPTUnofficialProxyAPI](#usage---chatgptunofficialproxyapi)
|
||||
- [Reverse Proxy](#reverse-proxy)
|
||||
- [Access Token](#access-token)
|
||||
- [Docs](#docs)
|
||||
- [Demos](#demos)
|
||||
- [Projects](#projects)
|
||||
- [Compatibility](#compatibility)
|
||||
- [Credits](#credits)
|
||||
- [License](#license)
|
||||
|
||||
## Intro
|
||||
|
||||
This package is a Node.js wrapper around [ChatGPT](https://openai.com/blog/chatgpt) by [OpenAI](https://openai.com). TS batteries included. ✨
|
||||
|
||||
<p align="center">
|
||||
<img alt="Example usage" src="/media/demo.gif">
|
||||
</p>
|
||||
|
||||
## Updates
|
||||
|
||||
<details open>
|
||||
<summary><strong>April 10, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
This package now **fully supports GPT-4**! 🔥
|
||||
|
||||
We also just released a [TypeScript chatgpt-plugin package](https://github.com/transitive-bullshit/chatgpt-plugin-ts) which contains helpers and examples to make it as easy as possible to start building your own ChatGPT Plugins in JS/TS. Even if you don't have developer access to ChatGPT Plugins yet, you can still use the [chatgpt-plugin](https://github.com/transitive-bullshit/chatgpt-plugin-ts) repo to get a head start on building your own plugins locally.
|
||||
|
||||
If you have access to the `gpt-4` model, you can run the following to test out the CLI with GPT-4:
|
||||
|
||||
```bash
|
||||
npx chatgpt@latest --model gpt-4 "Hello world"
|
||||
```
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/552829/229368245-d22fbac7-4b56-4a5e-810b-5ac5793b6ac3.png" width="600px" alt="Using the chatgpt CLI with gpt-4">
|
||||
</p>
|
||||
|
||||
We still support both the official ChatGPT API and the unofficial proxy API, but we now recommend using the official API since it's significantly more robust and supports **GPT-4**.
|
||||
|
||||
| Method | Free? | Robust? | Quality? |
|
||||
| --------------------------- | ------ | ------- | ------------------------------- |
|
||||
| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅️ Real ChatGPT models + GPT-4 |
|
||||
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ❌ No️ | ✅ ChatGPT webapp |
|
||||
|
||||
**Note**: We strongly recommend using `ChatGPTAPI` since it uses the officially supported API from OpenAI. We will likely remove support for `ChatGPTUnofficialProxyAPI` in a future release.
|
||||
|
||||
1. `ChatGPTAPI` - Uses the `gpt-3.5-turbo` model with the official OpenAI chat completions API (official, robust approach, but it's not free)
|
||||
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Previous Updates</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
<details>
|
||||
<summary><strong>March 1, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
The [official OpenAI chat completions API](https://platform.openai.com/docs/guides/chat) has been released, and it is now the default for this package! 🔥
|
||||
|
||||
| Method | Free? | Robust? | Quality? |
|
||||
| --------------------------- | ------ | -------- | ----------------------- |
|
||||
| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅️ Real ChatGPT models |
|
||||
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ☑️ Maybe | ✅ Real ChatGPT |
|
||||
|
||||
**Note**: We strongly recommend using `ChatGPTAPI` since it uses the officially supported API from OpenAI. We may remove support for `ChatGPTUnofficialProxyAPI` in a future release.
|
||||
|
||||
1. `ChatGPTAPI` - Uses the `gpt-3.5-turbo` model with the official OpenAI chat completions API (official, robust approach, but it's not free)
|
||||
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Feb 19, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
We now provide three ways of accessing the unofficial ChatGPT API, all of which have tradeoffs:
|
||||
|
||||
| Method | Free? | Robust? | Quality? |
|
||||
| --------------------------- | ------ | -------- | ----------------- |
|
||||
| `ChatGPTAPI` | ❌ No | ✅ Yes | ☑️ Mimics ChatGPT |
|
||||
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ☑️ Maybe | ✅ Real ChatGPT |
|
||||
| `ChatGPTAPIBrowser` (v3) | ✅ Yes | ❌ No | ✅ Real ChatGPT |
|
||||
|
||||
**Note**: I recommend that you use either `ChatGPTAPI` or `ChatGPTUnofficialProxyAPI`.
|
||||
|
||||
1. `ChatGPTAPI` - (Used to use) `text-davinci-003` to mimic ChatGPT via the official OpenAI completions API (most robust approach, but it's not free and doesn't use a model fine-tuned for chat)
|
||||
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
||||
3. `ChatGPTAPIBrowser` - (_deprecated_; v3.5.1 of this package) Uses Puppeteer to access the official ChatGPT webapp (uses the real ChatGPT, but very flaky, heavyweight, and error prone)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Feb 5, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
OpenAI has disabled the leaked chat model we were previously using, so we're now defaulting to `text-davinci-003`, which is not free.
|
||||
|
||||
We've found several other hidden, fine-tuned chat models, but OpenAI keeps disabling them, so we're searching for alternative workarounds.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Feb 1, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
This package no longer requires any browser hacks – **it is now using the official OpenAI completions API** with a leaked model that ChatGPT uses under the hood. 🔥
|
||||
|
||||
```ts
|
||||
import { ChatGPTAPI } from 'chatgpt'
|
||||
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY
|
||||
})
|
||||
|
||||
const res = await api.sendMessage('Hello World!')
|
||||
console.log(res.text)
|
||||
```
|
||||
|
||||
Please upgrade to `chatgpt@latest` (at least [v4.0.0](https://github.com/transitive-bullshit/chatgpt-api/releases/tag/v4.0.0)). The updated version is **significantly more lightweight and robust** compared with previous versions. You also don't have to worry about IP issues or rate limiting.
|
||||
|
||||
Huge shoutout to [@waylaidwanderer](https://github.com/waylaidwanderer) for discovering the leaked chat model!
|
||||
|
||||
</details>
|
||||
</details>
|
||||
|
||||
If you run into any issues, we do have a pretty active [ChatGPT Hackers Discord](https://www.chatgpthackers.dev/) with over 8k developers from the Node.js & Python communities.
|
||||
|
||||
Lastly, please consider starring this repo and <a href="https://twitter.com/transitive_bs">following me on twitter <img src="https://storage.googleapis.com/saasify-assets/twitter-logo.svg" alt="twitter" height="24px" align="center"></a> to help support the project.
|
||||
|
||||
Thanks && cheers,
|
||||
[Travis](https://twitter.com/transitive_bs)
|
||||
|
||||
## CLI
|
||||
|
||||
To run the CLI, you'll need an [OpenAI API key](https://platform.openai.com/overview):
|
||||
|
||||
```bash
|
||||
export OPENAI_API_KEY="sk-TODO"
|
||||
npx chatgpt "your prompt here"
|
||||
```
|
||||
|
||||
By default, the response is streamed to stdout, the results are stored in a local config file, and every invocation starts a new conversation. You can use `-c` to continue the previous conversation and `--no-stream` to disable streaming.
|
||||
|
||||
```
|
||||
Usage:
|
||||
$ chatgpt <prompt>
|
||||
|
||||
Commands:
|
||||
<prompt> Ask ChatGPT a question
|
||||
rm-cache Clears the local message cache
|
||||
ls-cache Prints the local message cache path
|
||||
|
||||
For more info, run any command with the `--help` flag:
|
||||
$ chatgpt --help
|
||||
$ chatgpt rm-cache --help
|
||||
$ chatgpt ls-cache --help
|
||||
|
||||
Options:
|
||||
-c, --continue Continue last conversation (default: false)
|
||||
-d, --debug Enables debug logging (default: false)
|
||||
-s, --stream Streams the response (default: true)
|
||||
-s, --store Enables the local message cache (default: true)
|
||||
-t, --timeout Timeout in milliseconds
|
||||
-k, --apiKey OpenAI API key
|
||||
-o, --apiOrg OpenAI API organization
|
||||
-n, --conversationName Unique name for the conversation
|
||||
-h, --help Display this message
|
||||
-v, --version Display version number
|
||||
```
|
||||
|
||||
If you have access to the `gpt-4` model, you can run the following to test out the CLI with GPT-4:
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/552829/229368245-d22fbac7-4b56-4a5e-810b-5ac5793b6ac3.png" width="600px" alt="Using the chatgpt CLI with gpt-4">
|
||||
</p>
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
npm install chatgpt
|
||||
```
|
||||
|
||||
Make sure you're using `node >= 18` so `fetch` is available (or `node >= 14` if you install a [fetch polyfill](https://github.com/developit/unfetch#usage-as-a-polyfill)).
|
||||
|
||||
## Usage
|
||||
|
||||
To use this module from Node.js, you need to pick between two methods:
|
||||
|
||||
| Method | Free? | Robust? | Quality? |
|
||||
| --------------------------- | ------ | ------- | ------------------------------- |
|
||||
| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅️ Real ChatGPT models + GPT-4 |
|
||||
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ❌ No️ | ✅ Real ChatGPT webapp |
|
||||
|
||||
1. `ChatGPTAPI` - Uses the `gpt-3.5-turbo` model with the official OpenAI chat completions API (official, robust approach, but it's not free). You can override the model, completion params, and system message to fully customize your assistant.
|
||||
|
||||
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
||||
|
||||
Both approaches have very similar APIs, so it should be simple to swap between them.
|
||||
|
||||
**Note**: We strongly recommend using `ChatGPTAPI` since it uses the officially supported API from OpenAI and it also supports `gpt-4`. We will likely remove support for `ChatGPTUnofficialProxyAPI` in a future release.
|
||||
|
||||
### Usage - ChatGPTAPI
|
||||
|
||||
Sign up for an [OpenAI API key](https://platform.openai.com/overview) and store it in your environment.
|
||||
|
||||
```ts
|
||||
import { ChatGPTAPI } from 'chatgpt'
|
||||
|
||||
async function example() {
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY
|
||||
})
|
||||
|
||||
const res = await api.sendMessage('Hello World!')
|
||||
console.log(res.text)
|
||||
}
|
||||
```
|
||||
|
||||
You can override the default `model` (`gpt-3.5-turbo`) and any [OpenAI chat completion params](https://platform.openai.com/docs/api-reference/chat/create) using `completionParams`:
|
||||
|
||||
```ts
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
completionParams: {
|
||||
model: 'gpt-4',
|
||||
temperature: 0.5,
|
||||
top_p: 0.8
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
If you want to track the conversation, you'll need to pass the `parentMessageId` like this:
|
||||
|
||||
```ts
|
||||
const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })
|
||||
|
||||
// send a message and wait for the response
|
||||
let res = await api.sendMessage('What is OpenAI?')
|
||||
console.log(res.text)
|
||||
|
||||
// send a follow-up
|
||||
res = await api.sendMessage('Can you expand on that?', {
|
||||
parentMessageId: res.id
|
||||
})
|
||||
console.log(res.text)
|
||||
|
||||
// send another follow-up
|
||||
res = await api.sendMessage('What were we talking about?', {
|
||||
parentMessageId: res.id
|
||||
})
|
||||
console.log(res.text)
|
||||
```
|
||||
|
||||
You can add streaming via the `onProgress` handler:
|
||||
|
||||
```ts
|
||||
const res = await api.sendMessage('Write a 500 word essay on frogs.', {
|
||||
// print the partial response as the AI is "typing"
|
||||
onProgress: (partialResponse) => console.log(partialResponse.text)
|
||||
})
|
||||
|
||||
// print the full text at the end
|
||||
console.log(res.text)
|
||||
```
|
||||
|
||||
You can add a timeout using the `timeoutMs` option:
|
||||
|
||||
```ts
|
||||
// timeout after 2 minutes (which will also abort the underlying HTTP request)
|
||||
const response = await api.sendMessage(
|
||||
'write me a really really long essay on frogs',
|
||||
{
|
||||
timeoutMs: 2 * 60 * 1000
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
If you want to see more info about what's actually being sent to [OpenAI's chat completions API](https://platform.openai.com/docs/api-reference/chat/create), set the `debug: true` option in the `ChatGPTAPI` constructor:
|
||||
|
||||
```ts
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
debug: true
|
||||
})
|
||||
```
|
||||
|
||||
We default to a basic `systemMessage`. You can override this in either the `ChatGPTAPI` constructor or `sendMessage`:
|
||||
|
||||
```ts
|
||||
const res = await api.sendMessage('what is the answer to the universe?', {
|
||||
  systemMessage: `You are ChatGPT, a large language model trained by OpenAI. You answer as concisely as possible for each response. If you are generating a list, do not have too many items.
|
||||
Current date: ${new Date().toISOString()}\n\n`
|
||||
})
|
||||
```
|
||||
|
||||
Note that we automatically handle appending the previous messages to the prompt and attempt to optimize for the available tokens (which defaults to `4096`).
|
||||
|
||||
<details>
|
||||
<summary>Usage in CommonJS (Dynamic import)</summary>
|
||||
|
||||
```js
|
||||
async function example() {
|
||||
// To use ESM in CommonJS, you can use a dynamic import like this:
|
||||
const { ChatGPTAPI } = await import('chatgpt')
|
||||
// You can also try dynamic importing like this:
|
||||
// const importDynamic = new Function('modulePath', 'return import(modulePath)')
|
||||
// const { ChatGPTAPI } = await importDynamic('chatgpt')
|
||||
|
||||
const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })
|
||||
|
||||
const res = await api.sendMessage('Hello World!')
|
||||
console.log(res.text)
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Usage - ChatGPTUnofficialProxyAPI
|
||||
|
||||
The API for `ChatGPTUnofficialProxyAPI` is almost exactly the same. You just need to provide a ChatGPT `accessToken` instead of an OpenAI API key.
|
||||
|
||||
```ts
|
||||
import { ChatGPTUnofficialProxyAPI } from 'chatgpt'
|
||||
|
||||
async function example() {
|
||||
const api = new ChatGPTUnofficialProxyAPI({
|
||||
accessToken: process.env.OPENAI_ACCESS_TOKEN
|
||||
})
|
||||
|
||||
const res = await api.sendMessage('Hello World!')
|
||||
console.log(res.text)
|
||||
}
|
||||
```
|
||||
|
||||
See [demos/demo-reverse-proxy](./demos/demo-reverse-proxy.ts) for a full example:
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo-reverse-proxy.ts
|
||||
```
|
||||
|
||||
`ChatGPTUnofficialProxyAPI` messages also contain a `conversationId` in addition to `parentMessageId`, since the ChatGPT webapp can't reference messages across different accounts & conversations.
|
||||
|
||||
#### Reverse Proxy
|
||||
|
||||
You can override the reverse proxy by passing `apiReverseProxyUrl`:
|
||||
|
||||
```ts
|
||||
const api = new ChatGPTUnofficialProxyAPI({
|
||||
accessToken: process.env.OPENAI_ACCESS_TOKEN,
|
||||
apiReverseProxyUrl: 'https://your-example-server.com/api/conversation'
|
||||
})
|
||||
```
|
||||
|
||||
Known reverse proxies run by community members include:
|
||||
|
||||
| Reverse Proxy URL | Author | Rate Limits | Last Checked |
|
||||
| ------------------------------------------------- | -------------------------------------------- | ---------------------------- | ------------ |
|
||||
| `https://ai.fakeopen.com/api/conversation` | [@pengzhile](https://github.com/pengzhile) | 5 req / 10 seconds by IP | 4/18/2023 |
|
||||
| `https://api.pawan.krd/backend-api/conversation` | [@PawanOsman](https://github.com/PawanOsman) | 50 req / 15 seconds (~3 r/s) | 3/23/2023 |
|
||||
|
||||
Note: info on how the reverse proxies work is not being published at this time in order to prevent OpenAI from disabling access.
|
||||
|
||||
#### Access Token
|
||||
|
||||
To use `ChatGPTUnofficialProxyAPI`, you'll need an OpenAI access token from the ChatGPT webapp. To do this, you can use any of the following methods which take an `email` and `password` and return an access token:
|
||||
|
||||
- Node.js libs
|
||||
- [ericlewis/openai-authenticator](https://github.com/ericlewis/openai-authenticator)
|
||||
- [michael-dm/openai-token](https://github.com/michael-dm/openai-token)
|
||||
- [allanoricil/chat-gpt-authenticator](https://github.com/AllanOricil/chat-gpt-authenticator)
|
||||
- Python libs
|
||||
- [acheong08/OpenAIAuth](https://github.com/acheong08/OpenAIAuth)
|
||||
|
||||
These libraries work with email + password accounts (e.g., they do not support accounts where you auth via Microsoft / Google).
|
||||
|
||||
Alternatively, you can manually get an `accessToken` by logging in to the ChatGPT webapp and then opening `https://chat.openai.com/api/auth/session`, which will return a JSON object containing your `accessToken` string.
|
||||
|
||||
Access tokens last for days.
|
||||
|
||||
**Note**: using a reverse proxy will expose your access token to a third-party. There shouldn't be any adverse effects possible from this, but please consider the risks before using this method.
|
||||
|
||||
## Docs
|
||||
|
||||
See the [auto-generated docs](./docs/classes/ChatGPTAPI.md) for more info on methods and parameters.
|
||||
|
||||
## Demos
|
||||
|
||||
Most of the demos use `ChatGPTAPI`. It should be pretty easy to convert them to use `ChatGPTUnofficialProxyAPI` if you'd rather use that approach. The only thing that needs to change is how you initialize the api with an `accessToken` instead of an `apiKey`.
|
||||
|
||||
To run the included demos:
|
||||
|
||||
1. clone repo
|
||||
2. install node deps
|
||||
3. set `OPENAI_API_KEY` in .env
|
||||
|
||||
A [basic demo](./demos/demo.ts) is included for testing purposes:
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo.ts
|
||||
```
|
||||
|
||||
A [demo showing on progress handler](./demos/demo-on-progress.ts):
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo-on-progress.ts
|
||||
```
|
||||
|
||||
The on progress demo uses the optional `onProgress` parameter to `sendMessage` to receive intermediary results as ChatGPT is "typing".
|
||||
|
||||
A [conversation demo](./demos/demo-conversation.ts):
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo-conversation.ts
|
||||
```
|
||||
|
||||
A [persistence demo](./demos/demo-persistence.ts) shows how to store messages in Redis for persistence:
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo-persistence.ts
|
||||
```
|
||||
|
||||
Any [keyv adaptor](https://github.com/jaredwray/keyv) is supported for persistence, and there are overrides if you'd like to use a different way of storing / retrieving messages.
|
||||
|
||||
Note that persisting messages is required for remembering the context of previous conversations beyond the scope of the current Node.js process, since by default, we only store messages in memory. Here's an [external demo](https://github.com/transitive-bullshit/chatgpt-twitter-bot/blob/main/src/index.ts#L86-L95) of using a completely custom database solution to persist messages.
|
||||
|
||||
**Note**: Persistence is handled automatically when using `ChatGPTUnofficialProxyAPI` because it is connecting indirectly to ChatGPT.
|
||||
|
||||
## Projects
|
||||
|
||||
All of these awesome projects are built using the `chatgpt` package. 🤯
|
||||
|
||||
- [Twitter Bot](https://github.com/transitive-bullshit/chatgpt-twitter-bot) powered by ChatGPT ✨
|
||||
- Mention [@ChatGPTBot](https://twitter.com/ChatGPTBot) on Twitter with your prompt to try it out
|
||||
- [ChatGPT API Server](https://github.com/waylaidwanderer/node-chatgpt-api) - API server for this package with support for multiple OpenAI accounts, proxies, and load-balancing requests between accounts.
|
||||
- [ChatGPT Prompts](https://github.com/pacholoamit/chatgpt-prompts) - A collection of 140+ of the best ChatGPT prompts from the community.
|
||||
- [Lovelines.xyz](https://lovelines.xyz?ref=chatgpt-api)
|
||||
- [Chrome Extension](https://github.com/gragland/chatgpt-everywhere) ([demo](https://twitter.com/gabe_ragland/status/1599466486422470656))
|
||||
- [VSCode Extension #1](https://github.com/mpociot/chatgpt-vscode) ([demo](https://twitter.com/marcelpociot/status/1599180144551526400), [updated version](https://github.com/timkmecl/chatgpt-vscode), [marketplace](https://marketplace.visualstudio.com/items?itemName=timkmecl.chatgpt))
|
||||
- [VSCode Extension #2](https://github.com/barnesoir/chatgpt-vscode-plugin) ([marketplace](https://marketplace.visualstudio.com/items?itemName=JayBarnes.chatgpt-vscode-plugin))
|
||||
- [VSCode Extension #3](https://github.com/gencay/vscode-chatgpt) ([marketplace](https://marketplace.visualstudio.com/items?itemName=gencay.vscode-chatgpt))
|
||||
- [VSCode Extension #4](https://github.com/dogukanakkaya/chatgpt-code-vscode-extension) ([marketplace](https://marketplace.visualstudio.com/items?itemName=dogukanakkaya.chatgpt-code))
|
||||
- [Raycast Extension #1](https://github.com/abielzulio/chatgpt-raycast) ([demo](https://twitter.com/abielzulio/status/1600176002042191875))
|
||||
- [Raycast Extension #2](https://github.com/domnantas/raycast-chatgpt)
|
||||
- [Telegram Bot #1](https://github.com/realies/chatgpt-telegram-bot)
|
||||
- [Telegram Bot #2](https://github.com/dawangraoming/chatgpt-telegram-bot)
|
||||
- [Telegram Bot #3](https://github.com/RainEggplant/chatgpt-telegram-bot) (group privacy mode, ID-based auth)
|
||||
- [Telegram Bot #4](https://github.com/ArdaGnsrn/chatgpt-telegram) (queue system, ID-based chat thread)
|
||||
- [Telegram Bot #5](https://github.com/azoway/chatgpt-telegram-bot) (group privacy mode, ID-based chat thread)
|
||||
- [Deno Telegram Bot](https://github.com/Ciyou/chatbot-telegram)
|
||||
- [Go Telegram Bot](https://github.com/m1guelpf/chatgpt-telegram)
|
||||
- [Telegram Bot for YouTube Summaries](https://github.com/codextde/youtube-summary)
|
||||
- [GitHub ProBot](https://github.com/oceanlvr/ChatGPTBot)
|
||||
- [Discord Bot #1](https://github.com/onury5506/Discord-ChatGPT-Bot)
|
||||
- [Discord Bot #2](https://github.com/Nageld/ChatGPT-Bot)
|
||||
- [Discord Bot #3](https://github.com/leinstay/gptbot)
|
||||
- [Discord Bot #4 (selfbot)](https://github.com/0x7030676e31/cumsocket)
|
||||
- [Discord Bot #5](https://github.com/itskdhere/ChatGPT-Discord-BOT)
|
||||
- [Discord Bot #6 (Shakespeare bot)](https://gist.github.com/TheBrokenRail/4b37e7c44e8f721d8bd845050d034c16)
|
||||
- [Discord Bot #7](https://github.com/Elitezen/discordjs-chatgpt)
|
||||
- [Zoom Chat](https://github.com/shixin-guo/my-bot)
|
||||
- [WeChat Bot #1](https://github.com/AutumnWhj/ChatGPT-wechat-bot)
|
||||
- [WeChat Bot #2](https://github.com/fuergaosi233/wechat-chatgpt)
|
||||
- [WeChat Bot #3](https://github.com/wangrongding/wechat-bot) (
|
||||
- [WeChat Bot #4](https://github.com/darknightlab/wechat-bot)
|
||||
- [WeChat Bot #5](https://github.com/sunshanpeng/wechaty-chatgpt)
|
||||
- [WeChat Bot #6](https://github.com/formulahendry/chatgpt-wechat-bot)
|
||||
- [WeChat Bot #7](https://github.com/gfl94/Chatbot004)
|
||||
- [QQ Bot (plugin for Yunzai-bot)](https://github.com/ikechan8370/chatgpt-plugin)
|
||||
- [QQ Bot (plugin for KiviBot)](https://github.com/KiviBotLab/kivibot-plugin-chatgpt)
|
||||
- [QQ Bot (oicq)](https://github.com/easydu2002/chat_gpt_oicq)
|
||||
- [QQ Bot (oicq + RabbitMQ)](https://github.com/linsyking/ChatGPT-QQBot)
|
||||
- [QQ Bot (go-cqhttp)](https://github.com/PairZhu/ChatGPT-QQRobot)
|
||||
- [EXM smart contracts](https://github.com/decentldotland/molecule)
|
||||
- [Flutter ChatGPT API](https://github.com/coskuncay/flutter_chatgpt_api)
|
||||
- [Carik Bot](https://github.com/luridarmawan/Carik)
|
||||
- [Github Action for reviewing PRs](https://github.com/kxxt/chatgpt-action/)
|
||||
- [WhatsApp Bot #1](https://github.com/askrella/whatsapp-chatgpt) (DALL-E + Whisper support 💪)
|
||||
- [WhatsApp Bot #2](https://github.com/amosayomide05/chatgpt-whatsapp-bot)
|
||||
- [WhatsApp Bot #3](https://github.com/pascalroget/whatsgpt) (multi-user support)
|
||||
- [WhatsApp Bot #4](https://github.com/noelzappy/chatgpt-whatsapp) (schedule periodic messages)
|
||||
- [WhatsApp Bot #5](https://github.com/hujanais/bs-chat-gpt3-api) (RaspberryPi + ngrok + Twilio)
|
||||
- [WhatsApp Bot #6](https://github.com/dannysantino/whatsgpt) (Session and chat history storage with MongoStore)
|
||||
- [Matrix Bot](https://github.com/matrixgpt/matrix-chatgpt-bot)
|
||||
- [Rental Cover Letter Generator](https://sharehouse.app/ai)
|
||||
- [Assistant CLI](https://github.com/diciaup/assistant-cli)
|
||||
- [Teams Bot](https://github.com/formulahendry/chatgpt-teams-bot)
|
||||
- [Askai](https://github.com/yudax42/askai)
|
||||
- [TalkGPT](https://github.com/ShadovvBeast/TalkGPT)
|
||||
- [ChatGPT With Voice](https://github.com/thanhsonng/chatgpt-voice)
|
||||
- [iOS Shortcut](https://github.com/leecobaby/shortcuts/blob/master/other/ChatGPT_EN.md)
|
||||
- [Slack Bot #1](https://github.com/trietphm/chatgpt-slackbot/)
|
||||
- [Slack Bot #2](https://github.com/lokwkin/chatgpt-slackbot-node/) (with queueing mechanism)
|
||||
- [Slack Bot #3](https://github.com/NessunKim/slack-chatgpt/)
|
||||
- [Slack Bot #4](https://github.com/MarkusGalant/chatgpt-slackbot-serverless/) ( Serverless AWS Lambda )
|
||||
- [Slack Bot #5](https://github.com/benjiJanssens/SlackGPT) (Hosted)
|
||||
- [Add to Slack](https://slackgpt.benji.sh/slack/install)
|
||||
- [Electron Bot](https://github.com/ShiranAbir/chaty)
|
||||
- [Kodyfire CLI](https://github.com/nooqta/chatgpt-kodyfire)
|
||||
- [Twitch Bot](https://github.com/BennyDeeDev/chatgpt-twitch-bot)
|
||||
- [Continuous Conversation](https://github.com/DanielTerletzkiy/chat-gtp-assistant)
|
||||
- [Figma plugin](https://github.com/frederickk/chatgpt-figma-plugin)
|
||||
- [NestJS server](https://github.com/RusDyn/chatgpt_nestjs_server)
|
||||
- [NestJS ChatGPT Starter Boilerplate](https://github.com/mitkodkn/nestjs-chatgpt-starter)
|
||||
- [Wordsmith: Add-in for Microsoft Word](https://github.com/xtremehpx/Wordsmith)
|
||||
- [QuizGPT: Create Kahoot quizzes with ChatGPT](https://github.com/Kladdy/quizgpt)
|
||||
- [openai-chatgpt: Talk to ChatGPT from the terminal](https://github.com/gmpetrov/openai-chatgpt)
|
||||
- [Clippy the Saleforce chatbot](https://github.com/sebas00/chatgptclippy) ClippyJS joke bot
|
||||
- [ai-assistant](https://github.com/youking-lib/ai-assistant) Chat assistant
|
||||
- [Feishu Bot](https://github.com/linjungz/feishu-chatgpt-bot)
|
||||
- [DomainGPT: Discover available domain names](https://github.com/billylo1/DomainGPT)
|
||||
- [AI Poem Generator](https://aipoemgenerator.com/)
|
||||
- [Next.js ChatGPT With Firebase](https://github.com/youngle316/chatgpt)
|
||||
- [ai-commit – GPT-3 Commit Message Generator](https://github.com/insulineru/ai-commit)
|
||||
- [AItinerary – ChatGPT itinerary Generator](https://aitinerary.ai)
|
||||
- [wechaty-chatgpt - A chatbot based on Wechaty & ChatGPT](https://github.com/zhengxs2018/wechaty-chatgpt)
|
||||
- [Julius GPT](https://github.com/christophebe/julius-gpt) - Generate and publish your content from the CLI
|
||||
- [OpenAI-API-Service](https://github.com/Jarvan-via/api-service) - Provides OpenAI related APIs for businesses
|
||||
- [Discord Daily News Bot](https://github.com/ZirionNeft/chatgpt-discord-daily-news-bot) - Discord bot that generate funny daily news
|
||||
|
||||
If you create a cool integration, feel free to open a PR and add it to the list.
|
||||
|
||||
## Compatibility
|
||||
|
||||
- This package is ESM-only.
|
||||
- This package supports `node >= 14`.
|
||||
- This module assumes that `fetch` is installed.
|
||||
- In `node >= 18`, it's installed by default.
|
||||
- In `node < 18`, you need to install a polyfill like `unfetch/polyfill` ([guide](https://github.com/developit/unfetch#usage-as-a-polyfill)) or `isomorphic-fetch` ([guide](https://github.com/matthew-andrews/isomorphic-fetch#readme)).
|
||||
- If you want to build a website using `chatgpt`, we recommend using it only from your backend API
|
||||
|
||||
## Credits
|
||||
|
||||
- Huge thanks to [@waylaidwanderer](https://github.com/waylaidwanderer), [@abacaj](https://github.com/abacaj), [@wong2](https://github.com/wong2), [@simon300000](https://github.com/simon300000), [@RomanHotsiy](https://github.com/RomanHotsiy), [@ElijahPepe](https://github.com/ElijahPepe), and all the other contributors 💪
|
||||
- [OpenAI](https://openai.com) for creating [ChatGPT](https://openai.com/blog/chatgpt/) 🔥
|
||||
- I run the [ChatGPT Hackers Discord](https://www.chatgpthackers.dev/) with over 8k developers – come join us!
|
||||
|
||||
## License
|
||||
|
||||
MIT © [Travis Fischer](https://transitivebullsh.it)
|
||||
|
||||
If you found this project interesting, please consider [sponsoring me](https://github.com/sponsors/transitive-bullshit) or <a href="https://twitter.com/transitive_bs">following me on twitter <img src="https://storage.googleapis.com/saasify-assets/twitter-logo.svg" alt="twitter" height="24px" align="center"></a>
|
21
license
21
license
|
@ -1,21 +0,0 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2023 Travis Fischer
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
BIN
media/demo.gif
BIN
media/demo.gif
Plik binarny nie jest wyświetlany.
Przed Szerokość: | Wysokość: | Rozmiar: 122 KiB |
|
@ -1,76 +0,0 @@
|
|||
# VHS documentation
|
||||
#
|
||||
# @see https://github.com/charmbracelet/vhs
|
||||
#
|
||||
# ```
|
||||
# vhs < media/demo.tape
|
||||
# ```
|
||||
#
|
||||
# Output:
|
||||
# Output <path>.gif Create a GIF output at the given <path>
|
||||
# Output <path>.mp4 Create an MP4 output at the given <path>
|
||||
# Output <path>.webm Create a WebM output at the given <path>
|
||||
#
|
||||
# Require:
|
||||
# Require <string> Ensure a program is on the $PATH to proceed
|
||||
#
|
||||
# Settings:
|
||||
# Set FontSize <number> Set the font size of the terminal
|
||||
# Set FontFamily <string> Set the font family of the terminal
|
||||
# Set Height <number> Set the height of the terminal
|
||||
# Set Width <number> Set the width of the terminal
|
||||
# Set LetterSpacing <float> Set the font letter spacing (tracking)
|
||||
# Set LineHeight <float> Set the font line height
|
||||
# Set LoopOffset <float>% Set the starting frame offset for the GIF loop
|
||||
# Set Theme <json|string> Set the theme of the terminal
|
||||
# Set Padding <number> Set the padding of the terminal
|
||||
# Set Framerate <number> Set the framerate of the recording
|
||||
# Set PlaybackSpeed <float> Set the playback speed of the recording
|
||||
#
|
||||
# Sleep:
|
||||
# Sleep <time> Sleep for a set amount of <time> in seconds
|
||||
#
|
||||
# Type:
|
||||
# Type[@<time>] "<characters>" Type <characters> into the terminal with a
|
||||
# <time> delay between each character
|
||||
#
|
||||
# Keys:
|
||||
# Backspace[@<time>] [number] Press the Backspace key
|
||||
# Down[@<time>] [number] Press the Down key
|
||||
# Enter[@<time>] [number] Press the Enter key
|
||||
# Space[@<time>] [number] Press the Space key
|
||||
# Tab[@<time>] [number] Press the Tab key
|
||||
# Left[@<time>] [number] Press the Left Arrow key
|
||||
# Right[@<time>] [number] Press the Right Arrow key
|
||||
# Up[@<time>] [number] Press the Up Arrow key
|
||||
# Down[@<time>] [number] Press the Down Arrow key
|
||||
# Ctrl+<key> Press the Control key + <key> (e.g. Ctrl+C)
|
||||
#
|
||||
# Display:
|
||||
# Hide Hide the subsequent commands from the output
|
||||
# Show Show the subsequent commands in the output
|
||||
|
||||
Output media/demo.gif
|
||||
|
||||
Hide
|
||||
Require npx
|
||||
|
||||
#Set Shell bash
|
||||
Set FontSize 22
|
||||
Set Width 1200
|
||||
Set Height 600
|
||||
Set Padding 24
|
||||
Set LoopOffset 75%
|
||||
|
||||
Type " "
|
||||
|
||||
Sleep 980ms
|
||||
Backspace 1
|
||||
Show
|
||||
Type "npx tsx src/demo.ts"
|
||||
Sleep 500ms
|
||||
Enter
|
||||
Show
|
||||
|
||||
Sleep 7s
|
||||
Sleep 8s
|
Plik binarny nie jest wyświetlany.
Przed Szerokość: | Wysokość: | Rozmiar: 207 KiB |
Plik diff jest za duży
Load Diff
89
package.json
89
package.json
|
@ -1,89 +0,0 @@
|
|||
{
|
||||
"name": "chatgpt",
|
||||
"version": "5.2.5",
|
||||
"description": "Node.js client for the official ChatGPT API.",
|
||||
"author": "Travis Fischer <travis@transitivebullsh.it>",
|
||||
"repository": "transitive-bullshit/chatgpt-api",
|
||||
"license": "MIT",
|
||||
"type": "module",
|
||||
"source": "./src/index.ts",
|
||||
"types": "./build/index.d.ts",
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./build/index.d.ts",
|
||||
"import": "./build/index.js",
|
||||
"default": "./build/index.js"
|
||||
}
|
||||
},
|
||||
"files": [
|
||||
"build",
|
||||
"bin"
|
||||
],
|
||||
"bin": "./bin/cli.js",
|
||||
"engines": {
|
||||
"node": ">=14"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "tsup",
|
||||
"dev": "tsup --watch",
|
||||
"clean": "del build",
|
||||
"prebuild": "run-s clean",
|
||||
"predev": "run-s clean",
|
||||
"pretest": "run-s build",
|
||||
"docs": "typedoc",
|
||||
"prepare": "husky install",
|
||||
"pre-commit": "lint-staged",
|
||||
"test": "run-p test:*",
|
||||
"test:prettier": "prettier '**/*.{js,jsx,ts,tsx}' --check"
|
||||
},
|
||||
"dependencies": {
|
||||
"cac": "^6.7.14",
|
||||
"conf": "^11.0.1",
|
||||
"eventsource-parser": "^1.0.0",
|
||||
"js-tiktoken": "^1.0.5",
|
||||
"keyv": "^4.5.2",
|
||||
"p-timeout": "^6.1.1",
|
||||
"quick-lru": "^6.1.1",
|
||||
"read-pkg-up": "^9.1.0",
|
||||
"uuid": "^9.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@keyv/redis": "^2.5.7",
|
||||
"@trivago/prettier-plugin-sort-imports": "^4.1.1",
|
||||
"@types/node": "^18.16.3",
|
||||
"@types/uuid": "^9.0.1",
|
||||
"del-cli": "^5.0.0",
|
||||
"dotenv-safe": "^8.2.0",
|
||||
"husky": "^8.0.3",
|
||||
"lint-staged": "^13.2.2",
|
||||
"npm-run-all": "^4.1.5",
|
||||
"ora": "^6.3.0",
|
||||
"prettier": "^2.8.8",
|
||||
"tsup": "^6.7.0",
|
||||
"tsx": "^3.12.7",
|
||||
"typedoc": "^0.24.6",
|
||||
"typedoc-plugin-markdown": "^3.15.3",
|
||||
"typescript": "^5.0.4"
|
||||
},
|
||||
"lint-staged": {
|
||||
"*.{ts,tsx}": [
|
||||
"prettier --write"
|
||||
]
|
||||
},
|
||||
"keywords": [
|
||||
"openai",
|
||||
"chatgpt",
|
||||
"chat",
|
||||
"gpt",
|
||||
"gpt-3",
|
||||
"gpt3",
|
||||
"gpt4",
|
||||
"chatbot",
|
||||
"machine learning",
|
||||
"conversation",
|
||||
"conversational ai",
|
||||
"ai",
|
||||
"ml",
|
||||
"bot"
|
||||
]
|
||||
}
|
3167
pnpm-lock.yaml
3167
pnpm-lock.yaml
Plik diff jest za duży
Load Diff
565
readme.md
565
readme.md
|
@ -1,565 +0,0 @@
|
|||
# ChatGPT API <!-- omit in toc -->
|
||||
|
||||
> Node.js client for the official [ChatGPT](https://openai.com/blog/chatgpt/) API.
|
||||
|
||||
[](https://www.npmjs.com/package/chatgpt) [](https://github.com/transitive-bullshit/chatgpt-api/actions/workflows/test.yml) [](https://github.com/transitive-bullshit/chatgpt-api/blob/main/license) [](https://prettier.io)
|
||||
|
||||
- [Intro](#intro)
|
||||
- [Updates](#updates)
|
||||
- [CLI](#cli)
|
||||
- [Install](#install)
|
||||
- [Usage](#usage)
|
||||
- [Usage - ChatGPTAPI](#usage---chatgptapi)
|
||||
- [Usage - ChatGPTUnofficialProxyAPI](#usage---chatgptunofficialproxyapi)
|
||||
- [Reverse Proxy](#reverse-proxy)
|
||||
- [Access Token](#access-token)
|
||||
- [Docs](#docs)
|
||||
- [Demos](#demos)
|
||||
- [Projects](#projects)
|
||||
- [Compatibility](#compatibility)
|
||||
- [Credits](#credits)
|
||||
- [License](#license)
|
||||
|
||||
## Intro
|
||||
|
||||
This package is a Node.js wrapper around [ChatGPT](https://openai.com/blog/chatgpt) by [OpenAI](https://openai.com). TS batteries included. ✨
|
||||
|
||||
<p align="center">
|
||||
<img alt="Example usage" src="/media/demo.gif">
|
||||
</p>
|
||||
|
||||
## Updates
|
||||
|
||||
<details open>
|
||||
<summary><strong>April 10, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
This package now **fully supports GPT-4**! 🔥
|
||||
|
||||
We also just released a [TypeScript chatgpt-plugin package](https://github.com/transitive-bullshit/chatgpt-plugin-ts) which contains helpers and examples to make it as easy as possible to start building your own ChatGPT Plugins in JS/TS. Even if you don't have developer access to ChatGPT Plugins yet, you can still use the [chatgpt-plugin](https://github.com/transitive-bullshit/chatgpt-plugin-ts) repo to get a head start on building your own plugins locally.
|
||||
|
||||
If you have access to the `gpt-4` model, you can run the following to test out the CLI with GPT-4:
|
||||
|
||||
```bash
|
||||
npx chatgpt@latest --model gpt-4 "Hello world"
|
||||
```
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/552829/229368245-d22fbac7-4b56-4a5e-810b-5ac5793b6ac3.png" width="600px" alt="Using the chatgpt CLI with gpt-4">
|
||||
</p>
|
||||
|
||||
We still support both the official ChatGPT API and the unofficial proxy API, but we now recommend using the official API since it's significantly more robust and supports **GPT-4**.
|
||||
|
||||
| Method | Free? | Robust? | Quality? |
|
||||
| --------------------------- | ------ | ------- | ------------------------------- |
|
||||
| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅️ Real ChatGPT models + GPT-4 |
|
||||
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ❌ No️ | ✅ ChatGPT webapp |
|
||||
|
||||
**Note**: We strongly recommend using `ChatGPTAPI` since it uses the officially supported API from OpenAI. We will likely remove support for `ChatGPTUnofficialProxyAPI` in a future release.
|
||||
|
||||
1. `ChatGPTAPI` - Uses the `gpt-3.5-turbo` model with the official OpenAI chat completions API (official, robust approach, but it's not free)
|
||||
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Previous Updates</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
<details>
|
||||
<summary><strong>March 1, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
The [official OpenAI chat completions API](https://platform.openai.com/docs/guides/chat) has been released, and it is now the default for this package! 🔥
|
||||
|
||||
| Method | Free? | Robust? | Quality? |
|
||||
| --------------------------- | ------ | -------- | ----------------------- |
|
||||
| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅️ Real ChatGPT models |
|
||||
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ☑️ Maybe | ✅ Real ChatGPT |
|
||||
|
||||
**Note**: We strongly recommend using `ChatGPTAPI` since it uses the officially supported API from OpenAI. We may remove support for `ChatGPTUnofficialProxyAPI` in a future release.
|
||||
|
||||
1. `ChatGPTAPI` - Uses the `gpt-3.5-turbo` model with the official OpenAI chat completions API (official, robust approach, but it's not free)
|
||||
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Feb 19, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
We now provide three ways of accessing the unofficial ChatGPT API, all of which have tradeoffs:
|
||||
|
||||
| Method | Free? | Robust? | Quality? |
|
||||
| --------------------------- | ------ | -------- | ----------------- |
|
||||
| `ChatGPTAPI` | ❌ No | ✅ Yes | ☑️ Mimics ChatGPT |
|
||||
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ☑️ Maybe | ✅ Real ChatGPT |
|
||||
| `ChatGPTAPIBrowser` (v3) | ✅ Yes | ❌ No | ✅ Real ChatGPT |
|
||||
|
||||
**Note**: I recommend that you use either `ChatGPTAPI` or `ChatGPTUnofficialProxyAPI`.
|
||||
|
||||
1. `ChatGPTAPI` - (Used to use) `text-davinci-003` to mimic ChatGPT via the official OpenAI completions API (most robust approach, but it's not free and doesn't use a model fine-tuned for chat)
|
||||
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
||||
3. `ChatGPTAPIBrowser` - (_deprecated_; v3.5.1 of this package) Uses Puppeteer to access the official ChatGPT webapp (uses the real ChatGPT, but very flaky, heavyweight, and error prone)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Feb 5, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
OpenAI has disabled the leaked chat model we were previously using, so we're now defaulting to `text-davinci-003`, which is not free.
|
||||
|
||||
We've found several other hidden, fine-tuned chat models, but OpenAI keeps disabling them, so we're searching for alternative workarounds.
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><strong>Feb 1, 2023</strong></summary>
|
||||
|
||||
<br/>
|
||||
|
||||
This package no longer requires any browser hacks – **it is now using the official OpenAI completions API** with a leaked model that ChatGPT uses under the hood. 🔥
|
||||
|
||||
```ts
|
||||
import { ChatGPTAPI } from 'chatgpt'
|
||||
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY
|
||||
})
|
||||
|
||||
const res = await api.sendMessage('Hello World!')
|
||||
console.log(res.text)
|
||||
```
|
||||
|
||||
Please upgrade to `chatgpt@latest` (at least [v4.0.0](https://github.com/transitive-bullshit/chatgpt-api/releases/tag/v4.0.0)). The updated version is **significantly more lightweight and robust** compared with previous versions. You also don't have to worry about IP issues or rate limiting.
|
||||
|
||||
Huge shoutout to [@waylaidwanderer](https://github.com/waylaidwanderer) for discovering the leaked chat model!
|
||||
|
||||
</details>
|
||||
</details>
|
||||
|
||||
If you run into any issues, we do have a pretty active [ChatGPT Hackers Discord](https://www.chatgpthackers.dev/) with over 8k developers from the Node.js & Python communities.
|
||||
|
||||
Lastly, please consider starring this repo and <a href="https://twitter.com/transitive_bs">following me on twitter <img src="https://storage.googleapis.com/saasify-assets/twitter-logo.svg" alt="twitter" height="24px" align="center"></a> to help support the project.
|
||||
|
||||
Thanks && cheers,
|
||||
[Travis](https://twitter.com/transitive_bs)
|
||||
|
||||
## CLI
|
||||
|
||||
To run the CLI, you'll need an [OpenAI API key](https://platform.openai.com/overview):
|
||||
|
||||
```bash
|
||||
export OPENAI_API_KEY="sk-TODO"
|
||||
npx chatgpt "your prompt here"
|
||||
```
|
||||
|
||||
By default, the response is streamed to stdout, the results are stored in a local config file, and every invocation starts a new conversation. You can use `-c` to continue the previous conversation and `--no-stream` to disable streaming.
|
||||
|
||||
```
|
||||
Usage:
|
||||
$ chatgpt <prompt>
|
||||
|
||||
Commands:
|
||||
<prompt> Ask ChatGPT a question
|
||||
rm-cache Clears the local message cache
|
||||
ls-cache Prints the local message cache path
|
||||
|
||||
For more info, run any command with the `--help` flag:
|
||||
$ chatgpt --help
|
||||
$ chatgpt rm-cache --help
|
||||
$ chatgpt ls-cache --help
|
||||
|
||||
Options:
|
||||
-c, --continue Continue last conversation (default: false)
|
||||
-d, --debug Enables debug logging (default: false)
|
||||
-s, --stream Streams the response (default: true)
|
||||
-s, --store Enables the local message cache (default: true)
|
||||
-t, --timeout Timeout in milliseconds
|
||||
-k, --apiKey OpenAI API key
|
||||
-o, --apiOrg OpenAI API organization
|
||||
-n, --conversationName Unique name for the conversation
|
||||
-h, --help Display this message
|
||||
-v, --version Display version number
|
||||
```
|
||||
|
||||
If you have access to the `gpt-4` model, you can run the following to test out the CLI with GPT-4:
|
||||
|
||||
<p align="center">
|
||||
<img src="https://user-images.githubusercontent.com/552829/229368245-d22fbac7-4b56-4a5e-810b-5ac5793b6ac3.png" width="600px" alt="Using the chatgpt CLI with gpt-4">
|
||||
</p>
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
npm install chatgpt
|
||||
```
|
||||
|
||||
Make sure you're using `node >= 18` so `fetch` is available (or `node >= 14` if you install a [fetch polyfill](https://github.com/developit/unfetch#usage-as-a-polyfill)).
|
||||
|
||||
## Usage
|
||||
|
||||
To use this module from Node.js, you need to pick between two methods:
|
||||
|
||||
| Method | Free? | Robust? | Quality? |
|
||||
| --------------------------- | ------ | ------- | ------------------------------- |
|
||||
| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅️ Real ChatGPT models + GPT-4 |
|
||||
| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ❌ No️ | ✅ Real ChatGPT webapp |
|
||||
|
||||
1. `ChatGPTAPI` - Uses the `gpt-3.5-turbo` model with the official OpenAI chat completions API (official, robust approach, but it's not free). You can override the model, completion params, and system message to fully customize your assistant.
|
||||
|
||||
2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but relies on a third-party server and is rate-limited)
|
||||
|
||||
Both approaches have very similar APIs, so it should be simple to swap between them.
|
||||
|
||||
**Note**: We strongly recommend using `ChatGPTAPI` since it uses the officially supported API from OpenAI and it also supports `gpt-4`. We will likely remove support for `ChatGPTUnofficialProxyAPI` in a future release.
|
||||
|
||||
### Usage - ChatGPTAPI
|
||||
|
||||
Sign up for an [OpenAI API key](https://platform.openai.com/overview) and store it in your environment.
|
||||
|
||||
```ts
|
||||
import { ChatGPTAPI } from 'chatgpt'
|
||||
|
||||
async function example() {
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY
|
||||
})
|
||||
|
||||
const res = await api.sendMessage('Hello World!')
|
||||
console.log(res.text)
|
||||
}
|
||||
```
|
||||
|
||||
You can override the default `model` (`gpt-3.5-turbo`) and any [OpenAI chat completion params](https://platform.openai.com/docs/api-reference/chat/create) using `completionParams`:
|
||||
|
||||
```ts
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
completionParams: {
|
||||
model: 'gpt-4',
|
||||
temperature: 0.5,
|
||||
top_p: 0.8
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
If you want to track the conversation, you'll need to pass the `parentMessageId` like this:
|
||||
|
||||
```ts
|
||||
const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })
|
||||
|
||||
// send a message and wait for the response
|
||||
let res = await api.sendMessage('What is OpenAI?')
|
||||
console.log(res.text)
|
||||
|
||||
// send a follow-up
|
||||
res = await api.sendMessage('Can you expand on that?', {
|
||||
parentMessageId: res.id
|
||||
})
|
||||
console.log(res.text)
|
||||
|
||||
// send another follow-up
|
||||
res = await api.sendMessage('What were we talking about?', {
|
||||
parentMessageId: res.id
|
||||
})
|
||||
console.log(res.text)
|
||||
```
|
||||
|
||||
You can add streaming via the `onProgress` handler:
|
||||
|
||||
```ts
|
||||
const res = await api.sendMessage('Write a 500 word essay on frogs.', {
|
||||
// print the partial response as the AI is "typing"
|
||||
onProgress: (partialResponse) => console.log(partialResponse.text)
|
||||
})
|
||||
|
||||
// print the full text at the end
|
||||
console.log(res.text)
|
||||
```
|
||||
|
||||
You can add a timeout using the `timeoutMs` option:
|
||||
|
||||
```ts
|
||||
// timeout after 2 minutes (which will also abort the underlying HTTP request)
|
||||
const response = await api.sendMessage(
|
||||
'write me a really really long essay on frogs',
|
||||
{
|
||||
timeoutMs: 2 * 60 * 1000
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
If you want to see more info about what's actually being sent to [OpenAI's chat completions API](https://platform.openai.com/docs/api-reference/chat/create), set the `debug: true` option in the `ChatGPTAPI` constructor:
|
||||
|
||||
```ts
|
||||
const api = new ChatGPTAPI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
debug: true
|
||||
})
|
||||
```
|
||||
|
||||
We default to a basic `systemMessage`. You can override this in either the `ChatGPTAPI` constructor or `sendMessage`:
|
||||
|
||||
```ts
|
||||
const res = await api.sendMessage('what is the answer to the universe?', {
|
||||
systemMessage: `You are ChatGPT, a large language model trained by OpenAI. You answer as concisely as possible for each responseIf you are generating a list, do not have too many items.
|
||||
Current date: ${new Date().toISOString()}\n\n`
|
||||
})
|
||||
```
|
||||
|
||||
Note that we automatically handle appending the previous messages to the prompt and attempt to optimize for the available tokens (which defaults to `4096`).
|
||||
|
||||
<details>
|
||||
<summary>Usage in CommonJS (Dynamic import)</summary>
|
||||
|
||||
```js
|
||||
async function example() {
|
||||
// To use ESM in CommonJS, you can use a dynamic import like this:
|
||||
const { ChatGPTAPI } = await import('chatgpt')
|
||||
// You can also try dynamic importing like this:
|
||||
// const importDynamic = new Function('modulePath', 'return import(modulePath)')
|
||||
// const { ChatGPTAPI } = await importDynamic('chatgpt')
|
||||
|
||||
const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })
|
||||
|
||||
const res = await api.sendMessage('Hello World!')
|
||||
console.log(res.text)
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Usage - ChatGPTUnofficialProxyAPI
|
||||
|
||||
The API for `ChatGPTUnofficialProxyAPI` is almost exactly the same. You just need to provide a ChatGPT `accessToken` instead of an OpenAI API key.
|
||||
|
||||
```ts
|
||||
import { ChatGPTUnofficialProxyAPI } from 'chatgpt'
|
||||
|
||||
async function example() {
|
||||
const api = new ChatGPTUnofficialProxyAPI({
|
||||
accessToken: process.env.OPENAI_ACCESS_TOKEN
|
||||
})
|
||||
|
||||
const res = await api.sendMessage('Hello World!')
|
||||
console.log(res.text)
|
||||
}
|
||||
```
|
||||
|
||||
See [demos/demo-reverse-proxy](./demos/demo-reverse-proxy.ts) for a full example:
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo-reverse-proxy.ts
|
||||
```
|
||||
|
||||
`ChatGPTUnofficialProxyAPI` messages also contain a `conversationid` in addition to `parentMessageId`, since the ChatGPT webapp can't reference messages across different accounts & conversations.
|
||||
|
||||
#### Reverse Proxy
|
||||
|
||||
You can override the reverse proxy by passing `apiReverseProxyUrl`:
|
||||
|
||||
```ts
|
||||
const api = new ChatGPTUnofficialProxyAPI({
|
||||
accessToken: process.env.OPENAI_ACCESS_TOKEN,
|
||||
apiReverseProxyUrl: 'https://your-example-server.com/api/conversation'
|
||||
})
|
||||
```
|
||||
|
||||
Known reverse proxies run by community members include:
|
||||
|
||||
| Reverse Proxy URL | Author | Rate Limits | Last Checked |
|
||||
| ------------------------------------------------- | -------------------------------------------- | ---------------------------- | ------------ |
|
||||
| `https://ai.fakeopen.com/api/conversation` | [@pengzhile](https://github.com/pengzhile) | 5 req / 10 seconds by IP | 4/18/2023 |
|
||||
| `https://api.pawan.krd/backend-api/conversation` | [@PawanOsman](https://github.com/PawanOsman) | 50 req / 15 seconds (~3 r/s) | 3/23/2023 |
|
||||
|
||||
Note: info on how the reverse proxies work is not being published at this time in order to prevent OpenAI from disabling access.
|
||||
|
||||
#### Access Token
|
||||
|
||||
To use `ChatGPTUnofficialProxyAPI`, you'll need an OpenAI access token from the ChatGPT webapp. To do this, you can use any of the following methods which take an `email` and `password` and return an access token:
|
||||
|
||||
- Node.js libs
|
||||
- [ericlewis/openai-authenticator](https://github.com/ericlewis/openai-authenticator)
|
||||
- [michael-dm/openai-token](https://github.com/michael-dm/openai-token)
|
||||
- [allanoricil/chat-gpt-authenticator](https://github.com/AllanOricil/chat-gpt-authenticator)
|
||||
- Go libs
|
||||
- [acheong08/OpenAIAuth](https://github.com/acheong08/OpenAIAuth)
|
||||
|
||||
These libraries work with email + password accounts (e.g., they do not support accounts where you auth via Microsoft / Google).
|
||||
|
||||
Alternatively, you can manually get an `accessToken` by logging in to the ChatGPT webapp and then opening `https://chat.openai.com/api/auth/session`, which will return a JSON object containing your `accessToken` string.
|
||||
|
||||
Access tokens last for days.
|
||||
|
||||
**Note**: using a reverse proxy will expose your access token to a third-party. There shouldn't be any adverse effects possible from this, but please consider the risks before using this method.
|
||||
|
||||
## Docs
|
||||
|
||||
See the [auto-generated docs](./docs/classes/ChatGPTAPI.md) for more info on methods and parameters.
|
||||
|
||||
## Demos
|
||||
|
||||
Most of the demos use `ChatGPTAPI`. It should be pretty easy to convert them to use `ChatGPTUnofficialProxyAPI` if you'd rather use that approach. The only thing that needs to change is how you initialize the api with an `accessToken` instead of an `apiKey`.
|
||||
|
||||
To run the included demos:
|
||||
|
||||
1. clone repo
|
||||
2. install node deps
|
||||
3. set `OPENAI_API_KEY` in .env
|
||||
|
||||
A [basic demo](./demos/demo.ts) is included for testing purposes:
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo.ts
|
||||
```
|
||||
|
||||
A [demo showing on progress handler](./demos/demo-on-progress.ts):
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo-on-progress.ts
|
||||
```
|
||||
|
||||
The on progress demo uses the optional `onProgress` parameter to `sendMessage` to receive intermediary results as ChatGPT is "typing".
|
||||
|
||||
A [conversation demo](./demos/demo-conversation.ts):
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo-conversation.ts
|
||||
```
|
||||
|
||||
A [persistence demo](./demos/demo-persistence.ts) shows how to store messages in Redis for persistence:
|
||||
|
||||
```bash
|
||||
npx tsx demos/demo-persistence.ts
|
||||
```
|
||||
|
||||
Any [keyv adaptor](https://github.com/jaredwray/keyv) is supported for persistence, and there are overrides if you'd like to use a different way of storing / retrieving messages.
|
||||
|
||||
Note that persisting message is required for remembering the context of previous conversations beyond the scope of the current Node.js process, since by default, we only store messages in memory. Here's an [external demo](https://github.com/transitive-bullshit/chatgpt-twitter-bot/blob/main/src/index.ts#L86-L95) of using a completely custom database solution to persist messages.
|
||||
|
||||
**Note**: Persistence is handled automatically when using `ChatGPTUnofficialProxyAPI` because it is connecting indirectly to ChatGPT.
|
||||
|
||||
## Projects
|
||||
|
||||
All of these awesome projects are built using the `chatgpt` package. 🤯
|
||||
|
||||
- [Twitter Bot](https://github.com/transitive-bullshit/chatgpt-twitter-bot) powered by ChatGPT ✨
|
||||
- Mention [@ChatGPTBot](https://twitter.com/ChatGPTBot) on Twitter with your prompt to try it out
|
||||
- [ChatGPT API Server](https://github.com/waylaidwanderer/node-chatgpt-api) - API server for this package with support for multiple OpenAI accounts, proxies, and load-balancing requests between accounts.
|
||||
- [ChatGPT Prompts](https://github.com/pacholoamit/chatgpt-prompts) - A collection of 140+ of the best ChatGPT prompts from the community.
|
||||
- [Lovelines.xyz](https://lovelines.xyz?ref=chatgpt-api)
|
||||
- [Chrome Extension](https://github.com/gragland/chatgpt-everywhere) ([demo](https://twitter.com/gabe_ragland/status/1599466486422470656))
|
||||
- [VSCode Extension #1](https://github.com/mpociot/chatgpt-vscode) ([demo](https://twitter.com/marcelpociot/status/1599180144551526400), [updated version](https://github.com/timkmecl/chatgpt-vscode), [marketplace](https://marketplace.visualstudio.com/items?itemName=timkmecl.chatgpt))
|
||||
- [VSCode Extension #2](https://github.com/barnesoir/chatgpt-vscode-plugin) ([marketplace](https://marketplace.visualstudio.com/items?itemName=JayBarnes.chatgpt-vscode-plugin))
|
||||
- [VSCode Extension #3](https://github.com/gencay/vscode-chatgpt) ([marketplace](https://marketplace.visualstudio.com/items?itemName=gencay.vscode-chatgpt))
|
||||
- [VSCode Extension #4](https://github.com/dogukanakkaya/chatgpt-code-vscode-extension) ([marketplace](https://marketplace.visualstudio.com/items?itemName=dogukanakkaya.chatgpt-code))
|
||||
- [Raycast Extension #1](https://github.com/abielzulio/chatgpt-raycast) ([demo](https://twitter.com/abielzulio/status/1600176002042191875))
|
||||
- [Raycast Extension #2](https://github.com/domnantas/raycast-chatgpt)
|
||||
- [Telegram Bot #1](https://github.com/realies/chatgpt-telegram-bot)
|
||||
- [Telegram Bot #2](https://github.com/dawangraoming/chatgpt-telegram-bot)
|
||||
- [Telegram Bot #3](https://github.com/RainEggplant/chatgpt-telegram-bot) (group privacy mode, ID-based auth)
|
||||
- [Telegram Bot #4](https://github.com/ArdaGnsrn/chatgpt-telegram) (queue system, ID-based chat thread)
|
||||
- [Telegram Bot #5](https://github.com/azoway/chatgpt-telegram-bot) (group privacy mode, ID-based chat thread)
|
||||
- [Deno Telegram Bot](https://github.com/Ciyou/chatbot-telegram)
|
||||
- [Go Telegram Bot](https://github.com/m1guelpf/chatgpt-telegram)
|
||||
- [Telegram Bot for YouTube Summaries](https://github.com/codextde/youtube-summary)
|
||||
- [GitHub ProBot](https://github.com/oceanlvr/ChatGPTBot)
|
||||
- [Discord Bot #1](https://github.com/onury5506/Discord-ChatGPT-Bot)
|
||||
- [Discord Bot #2](https://github.com/Nageld/ChatGPT-Bot)
|
||||
- [Discord Bot #3](https://github.com/leinstay/gptbot)
|
||||
- [Discord Bot #4 (selfbot)](https://github.com/0x7030676e31/cumsocket)
|
||||
- [Discord Bot #5](https://github.com/itskdhere/ChatGPT-Discord-BOT)
|
||||
- [Discord Bot #6 (Shakespeare bot)](https://gist.github.com/TheBrokenRail/4b37e7c44e8f721d8bd845050d034c16)
|
||||
- [Discord Bot #7](https://github.com/Elitezen/discordjs-chatgpt)
|
||||
- [Zoom Chat](https://github.com/shixin-guo/my-bot)
|
||||
- [WeChat Bot #1](https://github.com/AutumnWhj/ChatGPT-wechat-bot)
|
||||
- [WeChat Bot #2](https://github.com/fuergaosi233/wechat-chatgpt)
|
||||
- [WeChat Bot #3](https://github.com/wangrongding/wechat-bot) (
|
||||
- [WeChat Bot #4](https://github.com/darknightlab/wechat-bot)
|
||||
- [WeChat Bot #5](https://github.com/sunshanpeng/wechaty-chatgpt)
|
||||
- [WeChat Bot #6](https://github.com/formulahendry/chatgpt-wechat-bot)
|
||||
- [WeChat Bot #7](https://github.com/gfl94/Chatbot004)
|
||||
- [QQ Bot (plugin for Yunzai-bot)](https://github.com/ikechan8370/chatgpt-plugin)
|
||||
- [QQ Bot (plugin for KiviBot)](https://github.com/KiviBotLab/kivibot-plugin-chatgpt)
|
||||
- [QQ Bot (oicq)](https://github.com/easydu2002/chat_gpt_oicq)
|
||||
- [QQ Bot (oicq + RabbitMQ)](https://github.com/linsyking/ChatGPT-QQBot)
|
||||
- [QQ Bot (go-cqhttp)](https://github.com/PairZhu/ChatGPT-QQRobot)
|
||||
- [QQ Bot (plugin for Yunzai-Bot + Bull)](https://github.com/Micuks/chatGPT-yunzai) (Lightweight, Google Bard support 💪)
|
||||
- [EXM smart contracts](https://github.com/decentldotland/molecule)
|
||||
- [Flutter ChatGPT API](https://github.com/coskuncay/flutter_chatgpt_api)
|
||||
- [Carik Bot](https://github.com/luridarmawan/Carik)
|
||||
- [Github Action for reviewing PRs](https://github.com/kxxt/chatgpt-action/)
|
||||
- [WhatsApp Bot #1](https://github.com/askrella/whatsapp-chatgpt) (DALL-E + Whisper support 💪)
|
||||
- [WhatsApp Bot #2](https://github.com/amosayomide05/chatgpt-whatsapp-bot)
|
||||
- [WhatsApp Bot #3](https://github.com/pascalroget/whatsgpt) (multi-user support)
|
||||
- [WhatsApp Bot #4](https://github.com/noelzappy/chatgpt-whatsapp) (schedule periodic messages)
|
||||
- [WhatsApp Bot #5](https://github.com/hujanais/bs-chat-gpt3-api) (RaspberryPi + ngrok + Twilio)
|
||||
- [WhatsApp Bot #6](https://github.com/dannysantino/whatsgpt) (Session and chat history storage with MongoStore)
|
||||
- [Matrix Bot](https://github.com/matrixgpt/matrix-chatgpt-bot)
|
||||
- [Rental Cover Letter Generator](https://sharehouse.app/ai)
|
||||
- [Assistant CLI](https://github.com/diciaup/assistant-cli)
|
||||
- [Teams Bot](https://github.com/formulahendry/chatgpt-teams-bot)
|
||||
- [Askai](https://github.com/yudax42/askai)
|
||||
- [TalkGPT](https://github.com/ShadovvBeast/TalkGPT)
|
||||
- [ChatGPT With Voice](https://github.com/thanhsonng/chatgpt-voice)
|
||||
- [iOS Shortcut](https://github.com/leecobaby/shortcuts/blob/master/other/ChatGPT_EN.md)
|
||||
- [Slack Bot #1](https://github.com/trietphm/chatgpt-slackbot/)
|
||||
- [Slack Bot #2](https://github.com/lokwkin/chatgpt-slackbot-node/) (with queueing mechanism)
|
||||
- [Slack Bot #3](https://github.com/NessunKim/slack-chatgpt/)
|
||||
- [Slack Bot #4](https://github.com/MarkusGalant/chatgpt-slackbot-serverless/) ( Serverless AWS Lambda )
|
||||
- [Slack Bot #5](https://github.com/benjiJanssens/SlackGPT) (Hosted)
|
||||
- [Add to Slack](https://slackgpt.benji.sh/slack/install)
|
||||
- [Electron Bot](https://github.com/ShiranAbir/chaty)
|
||||
- [Kodyfire CLI](https://github.com/nooqta/chatgpt-kodyfire)
|
||||
- [Twitch Bot](https://github.com/BennyDeeDev/chatgpt-twitch-bot)
|
||||
- [Continuous Conversation](https://github.com/DanielTerletzkiy/chat-gtp-assistant)
|
||||
- [Figma plugin](https://github.com/frederickk/chatgpt-figma-plugin)
|
||||
- [NestJS server](https://github.com/RusDyn/chatgpt_nestjs_server)
|
||||
- [NestJS ChatGPT Starter Boilerplate](https://github.com/mitkodkn/nestjs-chatgpt-starter)
|
||||
- [Wordsmith: Add-in for Microsoft Word](https://github.com/xtremehpx/Wordsmith)
|
||||
- [QuizGPT: Create Kahoot quizzes with ChatGPT](https://github.com/Kladdy/quizgpt)
|
||||
- [openai-chatgpt: Talk to ChatGPT from the terminal](https://github.com/gmpetrov/openai-chatgpt)
|
||||
- [Clippy the Saleforce chatbot](https://github.com/sebas00/chatgptclippy) ClippyJS joke bot
|
||||
- [ai-assistant](https://github.com/youking-lib/ai-assistant) Chat assistant
|
||||
- [Feishu Bot](https://github.com/linjungz/feishu-chatgpt-bot)
|
||||
- [DomainGPT: Discover available domain names](https://github.com/billylo1/DomainGPT)
|
||||
- [AI Poem Generator](https://aipoemgenerator.com/)
|
||||
- [Next.js ChatGPT With Firebase](https://github.com/youngle316/chatgpt)
|
||||
- [ai-commit – GPT-3 Commit Message Generator](https://github.com/insulineru/ai-commit)
|
||||
- [AItinerary – ChatGPT itinerary Generator](https://aitinerary.ai)
|
||||
- [wechaty-chatgpt - A chatbot based on Wechaty & ChatGPT](https://github.com/zhengxs2018/wechaty-chatgpt)
|
||||
- [Julius GPT](https://github.com/christophebe/julius-gpt) - Generate and publish your content from the CLI
|
||||
- [OpenAI-API-Service](https://github.com/Jarvan-via/api-service) - Provides OpenAI related APIs for businesses
|
||||
- [Discord Daily News Bot](https://github.com/ZirionNeft/chatgpt-discord-daily-news-bot) - Discord bot that generate funny daily news
|
||||
- [ai-assistant](https://github.com/chenweiyi/ai-assistant) - Create a chat website similar to ChatGPT
|
||||
- [Tulsk – AI-Driven Project Management](https://tulsk.io)
|
||||
|
||||
If you create a cool integration, feel free to open a PR and add it to the list.
|
||||
|
||||
## Compatibility
|
||||
|
||||
- This package is ESM-only.
|
||||
- This package supports `node >= 14`.
|
||||
- This module assumes that `fetch` is installed.
|
||||
- In `node >= 18`, it's installed by default.
|
||||
- In `node < 18`, you need to install a polyfill like `unfetch/polyfill` ([guide](https://github.com/developit/unfetch#usage-as-a-polyfill)) or `isomorphic-fetch` ([guide](https://github.com/matthew-andrews/isomorphic-fetch#readme)).
|
||||
- If you want to build a website using `chatgpt`, we recommend using it only from your backend API
|
||||
|
||||
## Credits
|
||||
|
||||
- Huge thanks to [@waylaidwanderer](https://github.com/waylaidwanderer), [@abacaj](https://github.com/abacaj), [@wong2](https://github.com/wong2), [@simon300000](https://github.com/simon300000), [@RomanHotsiy](https://github.com/RomanHotsiy), [@ElijahPepe](https://github.com/ElijahPepe), and all the other contributors 💪
|
||||
- [OpenAI](https://openai.com) for creating [ChatGPT](https://openai.com/blog/chatgpt/) 🔥
|
||||
- I run the [ChatGPT Hackers Discord](https://www.chatgpthackers.dev/) with over 8k developers – come join us!
|
||||
|
||||
## License
|
||||
|
||||
MIT © [Travis Fischer](https://transitivebullsh.it)
|
||||
|
||||
If you found this project interesting, please consider [sponsoring me](https://github.com/sponsors/transitive-bullshit) or <a href="https://twitter.com/transitive_bs">following me on twitter <img src="https://storage.googleapis.com/saasify-assets/twitter-logo.svg" alt="twitter" height="24px" align="center"></a>
|
|
@ -1,470 +0,0 @@
|
|||
import Keyv from 'keyv'
|
||||
import pTimeout from 'p-timeout'
|
||||
import QuickLRU from 'quick-lru'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
|
||||
import * as tokenizer from './tokenizer'
|
||||
import * as types from './types'
|
||||
import { fetch as globalFetch } from './fetch'
|
||||
import { fetchSSE } from './fetch-sse'
|
||||
|
||||
const CHATGPT_MODEL = 'gpt-3.5-turbo'
|
||||
|
||||
const USER_LABEL_DEFAULT = 'User'
|
||||
const ASSISTANT_LABEL_DEFAULT = 'ChatGPT'
|
||||
|
||||
export class ChatGPTAPI {
|
||||
protected _apiKey: string
|
||||
protected _apiBaseUrl: string
|
||||
protected _apiOrg?: string
|
||||
protected _debug: boolean
|
||||
|
||||
protected _systemMessage: string
|
||||
protected _completionParams: Omit<
|
||||
types.openai.CreateChatCompletionRequest,
|
||||
'messages' | 'n'
|
||||
>
|
||||
protected _maxModelTokens: number
|
||||
protected _maxResponseTokens: number
|
||||
protected _fetch: types.FetchFn
|
||||
|
||||
protected _getMessageById: types.GetMessageByIdFunction
|
||||
protected _upsertMessage: types.UpsertMessageFunction
|
||||
|
||||
protected _messageStore: Keyv<types.ChatMessage>
|
||||
|
||||
/**
|
||||
* Creates a new client wrapper around OpenAI's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.
|
||||
*
|
||||
* @param apiKey - OpenAI API key (required).
|
||||
* @param apiOrg - Optional OpenAI API organization (optional).
|
||||
* @param apiBaseUrl - Optional override for the OpenAI API base URL.
|
||||
* @param debug - Optional enables logging debugging info to stdout.
|
||||
* @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
|
||||
* @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4096.
|
||||
* @param maxResponseTokens - Optional override for the minimum number of tokens allowed for the model's response. Defaults to 1000.
|
||||
* @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
|
||||
* @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
|
||||
* @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
|
||||
* @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
|
||||
*/
|
||||
constructor(opts: types.ChatGPTAPIOptions) {
|
||||
const {
|
||||
apiKey,
|
||||
apiOrg,
|
||||
apiBaseUrl = 'https://api.openai.com/v1',
|
||||
debug = false,
|
||||
messageStore,
|
||||
completionParams,
|
||||
systemMessage,
|
||||
maxModelTokens = 4000,
|
||||
maxResponseTokens = 1000,
|
||||
getMessageById,
|
||||
upsertMessage,
|
||||
fetch = globalFetch
|
||||
} = opts
|
||||
|
||||
this._apiKey = apiKey
|
||||
this._apiOrg = apiOrg
|
||||
this._apiBaseUrl = apiBaseUrl
|
||||
this._debug = !!debug
|
||||
this._fetch = fetch
|
||||
|
||||
this._completionParams = {
|
||||
model: CHATGPT_MODEL,
|
||||
temperature: 0.8,
|
||||
top_p: 1.0,
|
||||
presence_penalty: 1.0,
|
||||
...completionParams
|
||||
}
|
||||
|
||||
this._systemMessage = systemMessage
|
||||
|
||||
if (this._systemMessage === undefined) {
|
||||
const currentDate = new Date().toISOString().split('T')[0]
|
||||
this._systemMessage = `You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ${currentDate}`
|
||||
}
|
||||
|
||||
this._maxModelTokens = maxModelTokens
|
||||
this._maxResponseTokens = maxResponseTokens
|
||||
|
||||
this._getMessageById = getMessageById ?? this._defaultGetMessageById
|
||||
this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage
|
||||
|
||||
if (messageStore) {
|
||||
this._messageStore = messageStore
|
||||
} else {
|
||||
this._messageStore = new Keyv<types.ChatMessage, any>({
|
||||
store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
|
||||
})
|
||||
}
|
||||
|
||||
if (!this._apiKey) {
|
||||
throw new Error('OpenAI missing required apiKey')
|
||||
}
|
||||
|
||||
if (!this._fetch) {
|
||||
throw new Error('Invalid environment; fetch is not defined')
|
||||
}
|
||||
|
||||
if (typeof this._fetch !== 'function') {
|
||||
throw new Error('Invalid "fetch" is not a function')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends a message to the OpenAI chat completions endpoint, waits for the response
|
||||
* to resolve, and returns the response.
|
||||
*
|
||||
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
|
||||
*
|
||||
* If you want to receive a stream of partial responses, use `opts.onProgress`.
|
||||
*
|
||||
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
|
||||
*
|
||||
* @param message - The prompt message to send
|
||||
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
|
||||
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
|
||||
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
|
||||
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
|
||||
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
|
||||
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
|
||||
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
|
||||
* @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
|
||||
*
|
||||
* @returns The response from ChatGPT
|
||||
*/
|
||||
async sendMessage(
|
||||
text: string,
|
||||
opts: types.SendMessageOptions = {}
|
||||
): Promise<types.ChatMessage> {
|
||||
const {
|
||||
parentMessageId,
|
||||
messageId = uuidv4(),
|
||||
timeoutMs,
|
||||
onProgress,
|
||||
stream = onProgress ? true : false,
|
||||
completionParams,
|
||||
conversationId
|
||||
} = opts
|
||||
|
||||
let { abortSignal } = opts
|
||||
|
||||
let abortController: AbortController = null
|
||||
if (timeoutMs && !abortSignal) {
|
||||
abortController = new AbortController()
|
||||
abortSignal = abortController.signal
|
||||
}
|
||||
|
||||
const message: types.ChatMessage = {
|
||||
role: 'user',
|
||||
id: messageId,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
text
|
||||
}
|
||||
|
||||
const latestQuestion = message
|
||||
|
||||
const { messages, maxTokens, numTokens } = await this._buildMessages(
|
||||
text,
|
||||
opts
|
||||
)
|
||||
|
||||
const result: types.ChatMessage = {
|
||||
role: 'assistant',
|
||||
id: uuidv4(),
|
||||
conversationId,
|
||||
parentMessageId: messageId,
|
||||
text: ''
|
||||
}
|
||||
|
||||
const responseP = new Promise<types.ChatMessage>(
|
||||
async (resolve, reject) => {
|
||||
const url = `${this._apiBaseUrl}/chat/completions`
|
||||
const headers = {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${this._apiKey}`
|
||||
}
|
||||
const body = {
|
||||
max_tokens: maxTokens,
|
||||
...this._completionParams,
|
||||
...completionParams,
|
||||
messages,
|
||||
stream
|
||||
}
|
||||
|
||||
// Support multiple organizations
|
||||
// See https://platform.openai.com/docs/api-reference/authentication
|
||||
if (this._apiOrg) {
|
||||
headers['OpenAI-Organization'] = this._apiOrg
|
||||
}
|
||||
|
||||
if (this._debug) {
|
||||
console.log(`sendMessage (${numTokens} tokens)`, body)
|
||||
}
|
||||
|
||||
if (stream) {
|
||||
fetchSSE(
|
||||
url,
|
||||
{
|
||||
method: 'POST',
|
||||
headers,
|
||||
body: JSON.stringify(body),
|
||||
signal: abortSignal,
|
||||
onMessage: (data: string) => {
|
||||
if (data === '[DONE]') {
|
||||
result.text = result.text.trim()
|
||||
return resolve(result)
|
||||
}
|
||||
|
||||
try {
|
||||
const response: types.openai.CreateChatCompletionDeltaResponse =
|
||||
JSON.parse(data)
|
||||
|
||||
if (response.id) {
|
||||
result.id = response.id
|
||||
}
|
||||
|
||||
if (response.choices?.length) {
|
||||
const delta = response.choices[0].delta
|
||||
result.delta = delta.content
|
||||
if (delta?.content) result.text += delta.content
|
||||
|
||||
if (delta.role) {
|
||||
result.role = delta.role
|
||||
}
|
||||
|
||||
result.detail = response
|
||||
onProgress?.(result)
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn('OpenAI stream SEE event unexpected error', err)
|
||||
return reject(err)
|
||||
}
|
||||
}
|
||||
},
|
||||
this._fetch
|
||||
).catch(reject)
|
||||
} else {
|
||||
try {
|
||||
const res = await this._fetch(url, {
|
||||
method: 'POST',
|
||||
headers,
|
||||
body: JSON.stringify(body),
|
||||
signal: abortSignal
|
||||
})
|
||||
|
||||
if (!res.ok) {
|
||||
const reason = await res.text()
|
||||
const msg = `OpenAI error ${
|
||||
res.status || res.statusText
|
||||
}: ${reason}`
|
||||
const error = new types.ChatGPTError(msg, { cause: res })
|
||||
error.statusCode = res.status
|
||||
error.statusText = res.statusText
|
||||
return reject(error)
|
||||
}
|
||||
|
||||
const response: types.openai.CreateChatCompletionResponse =
|
||||
await res.json()
|
||||
if (this._debug) {
|
||||
console.log(response)
|
||||
}
|
||||
|
||||
if (response?.id) {
|
||||
result.id = response.id
|
||||
}
|
||||
|
||||
if (response?.choices?.length) {
|
||||
const message = response.choices[0].message
|
||||
result.text = message.content
|
||||
if (message.role) {
|
||||
result.role = message.role
|
||||
}
|
||||
} else {
|
||||
const res = response as any
|
||||
return reject(
|
||||
new Error(
|
||||
`OpenAI error: ${
|
||||
res?.detail?.message || res?.detail || 'unknown'
|
||||
}`
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
result.detail = response
|
||||
|
||||
return resolve(result)
|
||||
} catch (err) {
|
||||
return reject(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
).then(async (message) => {
|
||||
if (message.detail && !message.detail.usage) {
|
||||
try {
|
||||
const promptTokens = numTokens
|
||||
const completionTokens = await this._getTokenCount(message.text)
|
||||
message.detail.usage = {
|
||||
prompt_tokens: promptTokens,
|
||||
completion_tokens: completionTokens,
|
||||
total_tokens: promptTokens + completionTokens,
|
||||
estimated: true
|
||||
}
|
||||
} catch (err) {
|
||||
// TODO: this should really never happen, but if it does,
|
||||
// we should handle notify the user gracefully
|
||||
}
|
||||
}
|
||||
|
||||
return Promise.all([
|
||||
this._upsertMessage(latestQuestion),
|
||||
this._upsertMessage(message)
|
||||
]).then(() => message)
|
||||
})
|
||||
|
||||
if (timeoutMs) {
|
||||
if (abortController) {
|
||||
// This will be called when a timeout occurs in order for us to forcibly
|
||||
// ensure that the underlying HTTP request is aborted.
|
||||
;(responseP as any).cancel = () => {
|
||||
abortController.abort()
|
||||
}
|
||||
}
|
||||
|
||||
return pTimeout(responseP, {
|
||||
milliseconds: timeoutMs,
|
||||
message: 'OpenAI timed out waiting for response'
|
||||
})
|
||||
} else {
|
||||
return responseP
|
||||
}
|
||||
}
|
||||
|
||||
get apiKey(): string {
|
||||
return this._apiKey
|
||||
}
|
||||
|
||||
set apiKey(apiKey: string) {
|
||||
this._apiKey = apiKey
|
||||
}
|
||||
|
||||
get apiOrg(): string {
|
||||
return this._apiOrg
|
||||
}
|
||||
|
||||
set apiOrg(apiOrg: string) {
|
||||
this._apiOrg = apiOrg
|
||||
}
|
||||
|
||||
protected async _buildMessages(text: string, opts: types.SendMessageOptions) {
|
||||
const { systemMessage = this._systemMessage } = opts
|
||||
let { parentMessageId } = opts
|
||||
|
||||
const userLabel = USER_LABEL_DEFAULT
|
||||
const assistantLabel = ASSISTANT_LABEL_DEFAULT
|
||||
|
||||
const maxNumTokens = this._maxModelTokens - this._maxResponseTokens
|
||||
let messages: types.openai.ChatCompletionRequestMessage[] = []
|
||||
|
||||
if (systemMessage) {
|
||||
messages.push({
|
||||
role: 'system',
|
||||
content: systemMessage
|
||||
})
|
||||
}
|
||||
|
||||
const systemMessageOffset = messages.length
|
||||
let nextMessages = text
|
||||
? messages.concat([
|
||||
{
|
||||
role: 'user',
|
||||
content: text,
|
||||
name: opts.name
|
||||
}
|
||||
])
|
||||
: messages
|
||||
let numTokens = 0
|
||||
|
||||
do {
|
||||
const prompt = nextMessages
|
||||
.reduce((prompt, message) => {
|
||||
switch (message.role) {
|
||||
case 'system':
|
||||
return prompt.concat([`Instructions:\n${message.content}`])
|
||||
case 'user':
|
||||
return prompt.concat([`${userLabel}:\n${message.content}`])
|
||||
default:
|
||||
return prompt.concat([`${assistantLabel}:\n${message.content}`])
|
||||
}
|
||||
}, [] as string[])
|
||||
.join('\n\n')
|
||||
|
||||
const nextNumTokensEstimate = await this._getTokenCount(prompt)
|
||||
const isValidPrompt = nextNumTokensEstimate <= maxNumTokens
|
||||
|
||||
if (prompt && !isValidPrompt) {
|
||||
break
|
||||
}
|
||||
|
||||
messages = nextMessages
|
||||
numTokens = nextNumTokensEstimate
|
||||
|
||||
if (!isValidPrompt) {
|
||||
break
|
||||
}
|
||||
|
||||
if (!parentMessageId) {
|
||||
break
|
||||
}
|
||||
|
||||
const parentMessage = await this._getMessageById(parentMessageId)
|
||||
if (!parentMessage) {
|
||||
break
|
||||
}
|
||||
|
||||
const parentMessageRole = parentMessage.role || 'user'
|
||||
|
||||
nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
|
||||
{
|
||||
role: parentMessageRole,
|
||||
content: parentMessage.text,
|
||||
name: parentMessage.name
|
||||
},
|
||||
...nextMessages.slice(systemMessageOffset)
|
||||
])
|
||||
|
||||
parentMessageId = parentMessage.parentMessageId
|
||||
} while (true)
|
||||
|
||||
// Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
|
||||
// for the response.
|
||||
const maxTokens = Math.max(
|
||||
1,
|
||||
Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
|
||||
)
|
||||
|
||||
return { messages, maxTokens, numTokens }
|
||||
}
|
||||
|
||||
protected async _getTokenCount(text: string) {
|
||||
// TODO: use a better fix in the tokenizer
|
||||
text = text.replace(/<\|endoftext\|>/g, '')
|
||||
|
||||
return tokenizer.encode(text).length
|
||||
}
|
||||
|
||||
protected async _defaultGetMessageById(
|
||||
id: string
|
||||
): Promise<types.ChatMessage> {
|
||||
const res = await this._messageStore.get(id)
|
||||
return res
|
||||
}
|
||||
|
||||
protected async _defaultUpsertMessage(
|
||||
message: types.ChatMessage
|
||||
): Promise<void> {
|
||||
await this._messageStore.set(message.id, message)
|
||||
}
|
||||
}
|
|
@ -1,268 +0,0 @@
|
|||
import pTimeout from 'p-timeout'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
|
||||
import * as types from './types'
|
||||
import { fetch as globalFetch } from './fetch'
|
||||
import { fetchSSE } from './fetch-sse'
|
||||
import { isValidUUIDv4 } from './utils'
|
||||
|
||||
/**
 * Client for the unofficial ChatGPT backend API, accessed through a reverse
 * proxy using a ChatGPT `accessToken`.
 *
 * Unlike the official-API client, responses are streamed as server-sent
 * events from the proxy and accumulated into a single `ChatMessage`.
 */
export class ChatGPTUnofficialProxyAPI {
  protected _accessToken: string
  protected _apiReverseProxyUrl: string
  protected _debug: boolean
  protected _model: string
  protected _headers: Record<string, string>
  protected _fetch: types.FetchFn

  /**
   * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
   */
  constructor(opts: {
    accessToken: string

    /** @defaultValue `https://bypass.duti.tech/api/conversation` **/
    apiReverseProxyUrl?: string

    /** @defaultValue `text-davinci-002-render-sha` **/
    model?: string

    /** @defaultValue `false` **/
    debug?: boolean

    /** @defaultValue `undefined` **/
    headers?: Record<string, string>

    fetch?: types.FetchFn
  }) {
    const {
      accessToken,
      apiReverseProxyUrl = 'https://bypass.duti.tech/api/conversation',
      model = 'text-davinci-002-render-sha',
      debug = false,
      headers,
      fetch = globalFetch
    } = opts

    this._accessToken = accessToken
    this._apiReverseProxyUrl = apiReverseProxyUrl
    this._debug = !!debug
    this._model = model
    this._fetch = fetch
    this._headers = headers

    // Fail fast on a missing token or an unusable fetch implementation so
    // misconfiguration surfaces at construction time, not mid-request.
    if (!this._accessToken) {
      throw new Error('ChatGPT invalid accessToken')
    }

    if (!this._fetch) {
      throw new Error('Invalid environment; fetch is not defined')
    }

    if (typeof this._fetch !== 'function') {
      throw new Error('Invalid "fetch" is not a function')
    }
  }

  get accessToken(): string {
    return this._accessToken
  }

  set accessToken(value: string) {
    this._accessToken = value
  }

  /**
   * Sends a message to ChatGPT, waits for the response to resolve, and returns
   * the response.
   *
   * If you want your response to have historical context, you must provide a valid `parentMessageId`.
   *
   * If you want to receive a stream of partial responses, use `opts.onProgress`.
   * If you want to receive the full response, including message and conversation IDs,
   * you can use `opts.onConversationResponse` or use the `ChatGPTAPI.getConversation`
   * helper.
   *
   * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI completions API. You can override the `promptPrefix` and `promptSuffix` in `opts` to customize the prompt.
   *
   * @param message - The prompt message to send
   * @param opts.conversationId - Optional ID of a conversation to continue (defaults to a random UUID)
   * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
   * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
   * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
   * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
   * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
   *
   * @returns The response from ChatGPT
   */
  async sendMessage(
    text: string,
    opts: types.SendMessageBrowserOptions = {}
  ): Promise<types.ChatMessage> {
    // conversationId and parentMessageId must be provided together: the
    // backend needs both to attach the message to an existing conversation.
    if (!!opts.conversationId !== !!opts.parentMessageId) {
      throw new Error(
        'ChatGPTUnofficialProxyAPI.sendMessage: conversationId and parentMessageId must both be set or both be undefined'
      )
    }

    if (opts.conversationId && !isValidUUIDv4(opts.conversationId)) {
      throw new Error(
        'ChatGPTUnofficialProxyAPI.sendMessage: conversationId is not a valid v4 UUID'
      )
    }

    if (opts.parentMessageId && !isValidUUIDv4(opts.parentMessageId)) {
      throw new Error(
        'ChatGPTUnofficialProxyAPI.sendMessage: parentMessageId is not a valid v4 UUID'
      )
    }

    if (opts.messageId && !isValidUUIDv4(opts.messageId)) {
      throw new Error(
        'ChatGPTUnofficialProxyAPI.sendMessage: messageId is not a valid v4 UUID'
      )
    }

    const {
      conversationId,
      parentMessageId = uuidv4(),
      messageId = uuidv4(),
      action = 'next',
      timeoutMs,
      onProgress
    } = opts

    let { abortSignal } = opts

    // Only create our own AbortController when the caller asked for a timeout
    // but didn't supply their own signal; see the `pTimeout` wiring below.
    let abortController: AbortController = null
    if (timeoutMs && !abortSignal) {
      abortController = new AbortController()
      abortSignal = abortController.signal
    }

    const body: types.ConversationJSONBody = {
      action,
      messages: [
        {
          id: messageId,
          role: 'user',
          content: {
            content_type: 'text',
            parts: [text]
          }
        }
      ],
      model: this._model,
      parent_message_id: parentMessageId
    }

    if (conversationId) {
      body.conversation_id = conversationId
    }

    // `result` is mutated in place as SSE chunks arrive and is the value the
    // promise ultimately resolves with (also passed to `onProgress`).
    const result: types.ChatMessage = {
      role: 'assistant',
      id: uuidv4(),
      parentMessageId: messageId,
      conversationId,
      text: ''
    }

    const responseP = new Promise<types.ChatMessage>((resolve, reject) => {
      const url = this._apiReverseProxyUrl
      const headers = {
        ...this._headers,
        Authorization: `Bearer ${this._accessToken}`,
        Accept: 'text/event-stream',
        'Content-Type': 'application/json'
      }

      if (this._debug) {
        console.log('POST', url, { body, headers })
      }

      fetchSSE(
        url,
        {
          method: 'POST',
          headers,
          body: JSON.stringify(body),
          signal: abortSignal,
          onMessage: (data: string) => {
            // The stream is terminated by a literal `[DONE]` sentinel.
            if (data === '[DONE]') {
              return resolve(result)
            }

            try {
              const convoResponseEvent: types.ConversationResponseEvent =
                JSON.parse(data)
              if (convoResponseEvent.conversation_id) {
                result.conversationId = convoResponseEvent.conversation_id
              }

              if (convoResponseEvent.message?.id) {
                result.id = convoResponseEvent.message.id
              }

              const message = convoResponseEvent.message
              // console.log('event', JSON.stringify(convoResponseEvent, null, 2))

              if (message) {
                let text = message?.content?.parts?.[0]

                if (text) {
                  // Each event carries the full text so far, not a delta;
                  // overwrite rather than append.
                  result.text = text

                  if (onProgress) {
                    onProgress(result)
                  }
                }
              }
            } catch (err) {
              // Malformed chunks are ignored (best-effort streaming); they are
              // only surfaced when debugging.
              if (this._debug) {
                console.warn('chatgpt unexpected JSON error', err)
              }
              // reject(err)
            }
          },
          onError: (err) => {
            reject(err)
          }
        },
        this._fetch
      ).catch((err) => {
        const errMessageL = err.toString().toLowerCase()

        if (
          result.text &&
          (errMessageL === 'error: typeerror: terminated' ||
            errMessageL === 'typeerror: terminated')
        ) {
          // OpenAI sometimes forcefully terminates the socket from their end before
          // the HTTP request has resolved cleanly. In my testing, these cases tend to
          // happen when OpenAI has already sent the last `response`, so we can ignore
          // the `fetch` error in this case.
          return resolve(result)
        } else {
          return reject(err)
        }
      })
    })

    if (timeoutMs) {
      if (abortController) {
        // This will be called when a timeout occurs in order for us to forcibly
        // ensure that the underlying HTTP request is aborted.
        ;(responseP as any).cancel = () => {
          abortController.abort()
        }
      }

      return pTimeout(responseP, {
        milliseconds: timeoutMs,
        message: 'ChatGPT timed out waiting for response'
      })
    } else {
      return responseP
    }
  }
}
|
|
@ -1,89 +0,0 @@
|
|||
import { createParser } from 'eventsource-parser'
|
||||
|
||||
import * as types from './types'
|
||||
import { fetch as globalFetch } from './fetch'
|
||||
import { streamAsyncIterable } from './stream-async-iterable'
|
||||
|
||||
/**
 * Performs a `fetch` against a server-sent-events endpoint and invokes
 * `options.onMessage` with the raw `data` payload of each SSE event as it
 * arrives.
 *
 * Non-2xx responses are turned into a thrown `ChatGPTError` (with
 * `statusCode`/`statusText` populated from the HTTP response). In-band
 * `invalid_request_error` payloads are routed to `options.onError` (or
 * logged) instead of being fed to the SSE parser.
 *
 * @param url - endpoint to POST/GET against (per `options.method`)
 * @param options - standard `fetch` options plus `onMessage`/`onError`
 * @param fetch - the `fetch` implementation to use (defaults to the global)
 */
export async function fetchSSE(
  url: string,
  options: Parameters<typeof fetch>[1] & {
    onMessage: (data: string) => void
    onError?: (error: any) => void
  },
  fetch: types.FetchFn = globalFetch
) {
  const { onMessage, onError, ...fetchOptions } = options
  const res = await fetch(url, fetchOptions)
  if (!res.ok) {
    let reason: string

    // Prefer the response body as the error reason; fall back to the HTTP
    // status text if the body can't be read.
    try {
      reason = await res.text()
    } catch (err) {
      reason = res.statusText
    }

    const msg = `ChatGPT error ${res.status}: ${reason}`
    const error = new types.ChatGPTError(msg, { cause: res })
    error.statusCode = res.status
    error.statusText = res.statusText
    throw error
  }

  const parser = createParser((event) => {
    if (event.type === 'event') {
      onMessage(event.data)
    }
  })

  // handle special response errors
  const feed = (chunk: string) => {
    let response = null

    // Chunks are normally SSE-framed text, not bare JSON, so this parse is
    // expected to fail for ordinary chunks and is only a probe for in-band
    // error objects.
    try {
      response = JSON.parse(chunk)
    } catch {
      // ignore
    }

    if (response?.detail?.type === 'invalid_request_error') {
      const msg = `ChatGPT error ${response.detail.message}: ${response.detail.code} (${response.detail.type})`
      const error = new types.ChatGPTError(msg, { cause: response })
      error.statusCode = response.detail.code
      error.statusText = response.detail.message

      if (onError) {
        onError(error)
      } else {
        console.error(error)
      }

      // don't feed to the event parser
      return
    }

    parser.feed(chunk)
  }

  if (!res.body.getReader) {
    // Vercel polyfills `fetch` with `node-fetch`, which doesn't conform to
    // web standards, so this is a workaround...
    const body: NodeJS.ReadableStream = res.body as any

    if (!body.on || !body.read) {
      throw new types.ChatGPTError('unsupported "fetch" implementation')
    }

    body.on('readable', () => {
      let chunk: string | Buffer
      while (null !== (chunk = body.read())) {
        feed(chunk.toString())
      }
    })
  } else {
    // Standard web-streams path: decode each chunk and feed it to the parser.
    // NOTE(review): a fresh TextDecoder per chunk means a multi-byte UTF-8
    // sequence split across chunks could be mis-decoded — confirm upstream
    // chunking before relying on non-ASCII content.
    for await (const chunk of streamAsyncIterable(res.body)) {
      const str = new TextDecoder().decode(chunk)
      feed(str)
    }
  }
}
|
|
@ -1,5 +0,0 @@
|
|||
/// <reference lib="dom" />
|
||||
|
||||
const fetch = globalThis.fetch
|
||||
|
||||
export { fetch }
|
|
@ -1,3 +0,0 @@
|
|||
// Public entry point: re-export both client implementations and the shared
// type definitions so consumers can import everything from the package root.
export * from './chatgpt-api'
export * from './chatgpt-unofficial-proxy-api'
export * from './types'
|
|
@ -1,14 +0,0 @@
|
|||
export async function* streamAsyncIterable<T>(stream: ReadableStream<T>) {
|
||||
const reader = stream.getReader()
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) {
|
||||
return
|
||||
}
|
||||
yield value
|
||||
}
|
||||
} finally {
|
||||
reader.releaseLock()
|
||||
}
|
||||
}
|
|
@ -1,8 +0,0 @@
|
|||
import { getEncoding } from 'js-tiktoken'
|
||||
|
||||
// TODO: make this configurable
|
||||
const tokenizer = getEncoding('cl100k_base')
|
||||
|
||||
export function encode(input: string): Uint32Array {
|
||||
return new Uint32Array(tokenizer.encode(input))
|
||||
}
|
446
src/types.ts
446
src/types.ts
|
@ -1,446 +0,0 @@
|
|||
import Keyv from 'keyv'
|
||||
|
||||
/** Author role attached to every chat message. */
export type Role = 'user' | 'assistant' | 'system'

/** Signature of the `fetch` implementation used for all HTTP requests. */
export type FetchFn = typeof fetch
|
||||
|
||||
/** Constructor options for the official-API `ChatGPTAPI` client. */
export type ChatGPTAPIOptions = {
  // OpenAI API key used for authentication.
  apiKey: string

  /** @defaultValue `'https://api.openai.com'` **/
  apiBaseUrl?: string

  // Optional OpenAI organization id.
  apiOrg?: string

  /** @defaultValue `false` **/
  debug?: boolean

  // Default parameters merged into every chat-completion request
  // (`messages`, `n`, and `stream` are managed by the client itself).
  completionParams?: Partial<
    Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
  >

  // Default system message prepended to conversations.
  systemMessage?: string

  /** @defaultValue `4096` **/
  maxModelTokens?: number

  /** @defaultValue `1000` **/
  maxResponseTokens?: number

  // Storage and persistence hooks for conversation history; when omitted,
  // built-in defaults backed by `messageStore` are used.
  messageStore?: Keyv
  getMessageById?: GetMessageByIdFunction
  upsertMessage?: UpsertMessageFunction

  // Optional override for the `fetch` implementation.
  fetch?: FetchFn
}
|
||||
|
||||
/** Per-call options for `ChatGPTAPI.sendMessage` (official API). */
export type SendMessageOptions = {
  /** The name of a user in a multi-user chat. */
  name?: string
  // Link this message into an existing conversation thread.
  parentMessageId?: string
  conversationId?: string
  messageId?: string
  // When true, partial responses are streamed via `onProgress`.
  stream?: boolean
  // Per-call override of the client's default system message.
  systemMessage?: string
  // Abort the request after this many milliseconds.
  timeoutMs?: number
  onProgress?: (partialResponse: ChatMessage) => void
  abortSignal?: AbortSignal
  // Per-call overrides of the client's default completion parameters.
  completionParams?: Partial<
    Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
  >
}
|
||||
|
||||
/** Action sent to the unofficial backend: new message vs. regenerated variant. */
export type MessageActionType = 'next' | 'variant'

/** Per-call options for `ChatGPTUnofficialProxyAPI.sendMessage`. */
export type SendMessageBrowserOptions = {
  // Both must be set together to continue an existing conversation.
  conversationId?: string
  parentMessageId?: string
  messageId?: string
  action?: MessageActionType
  // Abort the request after this many milliseconds.
  timeoutMs?: number
  onProgress?: (partialResponse: ChatMessage) => void
  abortSignal?: AbortSignal
}
|
||||
|
||||
/** A single chat message, as returned by both clients. */
export interface ChatMessage {
  id: string
  text: string
  role: Role
  // Optional user name (multi-user chat; official API only).
  name?: string
  // Most recent streamed text delta, when streaming.
  delta?: string
  // Raw response payload from the official API (full or streamed form).
  detail?:
    | openai.CreateChatCompletionResponse
    | CreateChatCompletionStreamResponse

  // relevant for both ChatGPTAPI and ChatGPTUnofficialProxyAPI
  parentMessageId?: string

  // only relevant for ChatGPTUnofficialProxyAPI (optional for ChatGPTAPI)
  conversationId?: string
}
|
||||
|
||||
/**
 * Error type thrown for all ChatGPT request failures. The optional fields
 * carry HTTP/response context when available.
 */
export class ChatGPTError extends Error {
  // HTTP status code (or in-band error code) of the failed request.
  statusCode?: number
  // HTTP status text (or in-band error message) of the failed request.
  statusText?: string
  isFinal?: boolean
  accountId?: string
}
|
||||
|
||||
/** Returns a chat message from a store by its ID (or null if not found). */
// NOTE(review): the signature promises `Promise<ChatMessage>` but the doc
// allows null — callers should expect null/undefined for missing ids; confirm
// against the default store-backed implementation.
export type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>

/** Upserts a chat message to a store. */
export type UpsertMessageFunction = (message: ChatMessage) => Promise<void>
|
||||
|
||||
/** A streamed chat-completion chunk augmented with (estimated) token usage. */
export interface CreateChatCompletionStreamResponse
  extends openai.CreateChatCompletionDeltaResponse {
  usage: CreateCompletionStreamResponseUsage
}

/** Token usage for streamed responses; always estimated client-side. */
export interface CreateCompletionStreamResponseUsage
  extends openai.CreateCompletionResponseUsage {
  estimated: true
}
|
||||
|
||||
/**
 * Request body for the unofficial backend endpoint:
 * https://chat.openai.com/backend-api/conversation
 */
export type ConversationJSONBody = {
  /**
   * The action to take
   */
  action: string

  /**
   * The ID of the conversation
   */
  conversation_id?: string

  /**
   * Prompts to provide
   */
  messages: Prompt[]

  /**
   * The model to use
   */
  model: string

  /**
   * The parent message ID
   */
  parent_message_id: string
}

/** A single prompt entry in a `ConversationJSONBody`. */
export type Prompt = {
  /**
   * The content of the prompt
   */
  content: PromptContent

  /**
   * The ID of the prompt
   */
  id: string

  /**
   * The role played in the prompt
   */
  role: Role
}

/** Content types accepted by the unofficial backend (text only). */
export type ContentType = 'text'

export type PromptContent = {
  /**
   * The content type of the prompt
   */
  content_type: ContentType

  /**
   * The parts to the prompt
   */
  parts: string[]
}

/** One server-sent event payload from the unofficial backend. */
export type ConversationResponseEvent = {
  message?: Message
  conversation_id?: string
  error?: string | null
}

/** A message object as returned by the unofficial backend. */
export type Message = {
  id: string
  content: MessageContent
  role: Role
  user: string | null
  create_time: string | null
  update_time: string | null
  end_turn: null
  weight: number
  recipient: string
  metadata: MessageMetadata
}

export type MessageContent = {
  content_type: string
  parts: string[]
}

// Opaque backend metadata; shape is not modeled here.
export type MessageMetadata = any
|
||||
|
||||
// Inlined type definitions for the OpenAI chat-completion request/response
// shapes, so the library needs no type-level dependency on the `openai`
// package. NOTE(review): these appear to mirror the generated OpenAI SDK
// types and are maintained by hand — keep in sync with the upstream API.
export namespace openai {
  /** Shape of one streamed (`stream: true`) chat-completion chunk. */
  export interface CreateChatCompletionDeltaResponse {
    id: string
    object: 'chat.completion.chunk'
    created: number
    model: string
    choices: [
      {
        delta: {
          role: Role
          content?: string
        }
        index: number
        finish_reason: string | null
      }
    ]
  }

  /**
   *
   * @export
   * @interface ChatCompletionRequestMessage
   */
  export interface ChatCompletionRequestMessage {
    /**
     * The role of the author of this message.
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    role: ChatCompletionRequestMessageRoleEnum
    /**
     * The contents of the message
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    content: string
    /**
     * The name of the user in a multi-user chat
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    name?: string
  }
  export declare const ChatCompletionRequestMessageRoleEnum: {
    readonly System: 'system'
    readonly User: 'user'
    readonly Assistant: 'assistant'
  }
  export declare type ChatCompletionRequestMessageRoleEnum =
    (typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]
  /**
   *
   * @export
   * @interface ChatCompletionResponseMessage
   */
  export interface ChatCompletionResponseMessage {
    /**
     * The role of the author of this message.
     * @type {string}
     * @memberof ChatCompletionResponseMessage
     */
    role: ChatCompletionResponseMessageRoleEnum
    /**
     * The contents of the message
     * @type {string}
     * @memberof ChatCompletionResponseMessage
     */
    content: string
  }
  export declare const ChatCompletionResponseMessageRoleEnum: {
    readonly System: 'system'
    readonly User: 'user'
    readonly Assistant: 'assistant'
  }
  export declare type ChatCompletionResponseMessageRoleEnum =
    (typeof ChatCompletionResponseMessageRoleEnum)[keyof typeof ChatCompletionResponseMessageRoleEnum]
  /**
   *
   * @export
   * @interface CreateChatCompletionRequest
   */
  export interface CreateChatCompletionRequest {
    /**
     * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
     * @type {string}
     * @memberof CreateChatCompletionRequest
     */
    model: string
    /**
     * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
     * @type {Array<ChatCompletionRequestMessage>}
     * @memberof CreateChatCompletionRequest
     */
    messages: Array<ChatCompletionRequestMessage>
    /**
     * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    temperature?: number | null
    /**
     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    top_p?: number | null
    /**
     * How many chat completion choices to generate for each input message.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    n?: number | null
    /**
     * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
     * @type {boolean}
     * @memberof CreateChatCompletionRequest
     */
    stream?: boolean | null
    /**
     *
     * @type {CreateChatCompletionRequestStop}
     * @memberof CreateChatCompletionRequest
     */
    stop?: CreateChatCompletionRequestStop
    /**
     * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    max_tokens?: number
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    presence_penalty?: number | null
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    frequency_penalty?: number | null
    /**
     * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
     * @type {object}
     * @memberof CreateChatCompletionRequest
     */
    logit_bias?: object | null
    /**
     * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
     * @type {string}
     * @memberof CreateChatCompletionRequest
     */
    user?: string
  }
  /**
   * @type CreateChatCompletionRequestStop
   * Up to 4 sequences where the API will stop generating further tokens.
   * @export
   */
  export declare type CreateChatCompletionRequestStop = Array<string> | string
  /**
   *
   * @export
   * @interface CreateChatCompletionResponse
   */
  export interface CreateChatCompletionResponse {
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    id: string
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    object: string
    /**
     *
     * @type {number}
     * @memberof CreateChatCompletionResponse
     */
    created: number
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    model: string
    /**
     *
     * @type {Array<CreateChatCompletionResponseChoicesInner>}
     * @memberof CreateChatCompletionResponse
     */
    choices: Array<CreateChatCompletionResponseChoicesInner>
    /**
     *
     * @type {CreateCompletionResponseUsage}
     * @memberof CreateChatCompletionResponse
     */
    usage?: CreateCompletionResponseUsage
  }
  /**
   *
   * @export
   * @interface CreateChatCompletionResponseChoicesInner
   */
  export interface CreateChatCompletionResponseChoicesInner {
    /**
     *
     * @type {number}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    index?: number
    /**
     *
     * @type {ChatCompletionResponseMessage}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    message?: ChatCompletionResponseMessage
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    finish_reason?: string
  }
  /**
   *
   * @export
   * @interface CreateCompletionResponseUsage
   */
  export interface CreateCompletionResponseUsage {
    /**
     *
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    prompt_tokens: number
    /**
     *
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    completion_tokens: number
    /**
     *
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    total_tokens: number
  }
}
|
|
@ -1,6 +0,0 @@
|
|||
const uuidv4Re =
|
||||
/^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i
|
||||
|
||||
export function isValidUUIDv4(str: string): boolean {
|
||||
return str && uuidv4Re.test(str)
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
{
|
||||
"compilerOptions": {
|
||||
"target": "es2020",
|
||||
"lib": ["esnext"],
|
||||
"allowJs": true,
|
||||
"skipLibCheck": true,
|
||||
"strict": false,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"esModuleInterop": true,
|
||||
"module": "esnext",
|
||||
"moduleResolution": "node",
|
||||
"resolveJsonModule": true,
|
||||
"isolatedModules": true,
|
||||
"baseUrl": ".",
|
||||
"outDir": "build",
|
||||
"noEmit": true
|
||||
},
|
||||
"exclude": ["node_modules", "build"],
|
||||
"include": ["**/*.ts"]
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
import { defineConfig } from 'tsup'
|
||||
|
||||
// Build configuration: bundle the library entry point and the CLI as ESM for
// Node 16, with sourcemaps and type declarations emitted into `build/`.
export default defineConfig([
  {
    entry: ['src/index.ts', 'src/cli.ts'],
    outDir: 'build',
    target: 'node16',
    platform: 'node',
    format: ['esm'],
    splitting: false,
    sourcemap: true,
    minify: false,
    // `shims` injects ESM-compatible replacements for CJS globals.
    shims: true,
    dts: true
  }
])
|
14
typedoc.json
14
typedoc.json
|
@ -1,14 +0,0 @@
|
|||
{
|
||||
"$schema": "https://typedoc.org/schema.json",
|
||||
"entryPoints": ["./src/index.ts"],
|
||||
"exclude": ["**/*.test.ts"],
|
||||
"plugin": ["typedoc-plugin-markdown"],
|
||||
"out": "docs",
|
||||
"hideBreadcrumbs": false,
|
||||
"hideInPageTOC": false,
|
||||
"excludePrivate": true,
|
||||
"excludeProtected": true,
|
||||
"excludeExternals": true,
|
||||
"excludeInternal": true,
|
||||
"entryDocument": "readme.md"
|
||||
}
|
Ładowanie…
Reference in New Issue