diff --git a/legacy/demos/demo-reverse-proxy.ts b/legacy/demos/demo-reverse-proxy.ts
index 325766b9..c4ecbe94 100644
--- a/legacy/demos/demo-reverse-proxy.ts
+++ b/legacy/demos/demo-reverse-proxy.ts
@@ -1,13 +1,13 @@
import dotenv from 'dotenv-safe'
import { oraPromise } from 'ora'
-import { ChatGPTAPI } from '../src'
+import { ChatGPTUnofficialProxyAPI } from '../src'
dotenv.config()
/**
- * Demo CLI for testing conversation support using a reverse proxy that mimic's.
- * OpenAI's completions API ChatGPT's unofficial API.
+ * Demo for testing conversation support using a reverse proxy which provides
+ * access to the unofficial ChatGPT API.
*
* ```
* npx tsx demos/demo-reverse-proxy.ts
@@ -16,21 +16,19 @@ dotenv.config()
async function main() {
// WARNING: this method will expose your access token to a third-party. Please be
// aware of the risks before using this method.
- const api = new ChatGPTAPI({
- // TODO: this is a placeholder URL; there are several available reverse proxies,
- // but we're not including them here out of an abundance of caution.
- // More info on proxy servers in Discord: https://discord.gg/v9gERj825w
- apiReverseProxyUrl: 'https://your-secret-proxy-url.com/completions',
+ const api = new ChatGPTUnofficialProxyAPI({
+ // optionally override the default reverse proxy URL (or use one of your own...)
+ // apiReverseProxyUrl: 'https://chat.duti.tech/api/conversation',
+ // apiReverseProxyUrl: 'https://gpt.pawan.krd/backend-api/conversation',
// change this to an `accessToken` extracted from the ChatGPT site's `https://chat.openai.com/api/auth/session` response
- apiKey: process.env.OPENAI_ACCESS_TOKEN,
- completionParams: {
- // override this depending on the ChatGPT model you want to use
- // NOTE: if you are on a paid plan, you can't use the free model and vice-versa
- // model: 'text-davinci-002-render' // free, default model
- model: 'text-davinci-002-render-sha' // paid, default model (turbo)
- // model: 'text-davinci-002-render-paid' // paid, legacy model
- },
+    // or use https://github.com/acheong08/OpenAIAuth to get the token programmatically (Python)
+ accessToken: process.env.OPENAI_ACCESS_TOKEN,
+
+ // optionally override the default model (this must be a chatgpt model; not an OpenAI model)
+ // model: 'text-davinci-002-render-sha' // default model for free and paid users (used to be called turbo in the UI)
+ // model: 'text-davinci-002-render-paid' // legacy paid model
+
debug: false
})
diff --git a/legacy/readme.md b/legacy/readme.md
index e9b43fd6..400bb1d9 100644
--- a/legacy/readme.md
+++ b/legacy/readme.md
@@ -1,4 +1,45 @@
-# Update February 1, 2023
+
+
+
+
+## Updates
+
+
+Feb 19, 2023
+
+We now provide three ways of accessing the unofficial ChatGPT API, all of which have tradeoffs:
+
+1. `ChatGPTAPI` - Uses `text-davinci-003` to mimic ChatGPT via the official OpenAI completions API (most robust approach, but it's not free and doesn't use a model fine-tuned for chat)
+2. `ChatGPTUnofficialProxyAPI` - Uses an unofficial proxy server to access ChatGPT's backend API in a way that circumvents Cloudflare (uses the real ChatGPT and is pretty lightweight, but can be less robust and is pretty rate-limited)
+3. `ChatGPTAPIBrowser` - (v3.5.1 of this package) Uses Puppeteer to access the official ChatGPT webapp (uses the real ChatGPT, but very flaky, heavyweight, and error prone)
+
+| Method | Free? | Robust? | Lightweight? | Quality? |
+| --------------------------- | ------ | -------- | ------------ | ----------------- |
+| `ChatGPTAPI` | ❌ No | ✅ Yes | ✅ Yes | ☑️ Mimics ChatGPT |
+| `ChatGPTUnofficialProxyAPI` | ✅ Yes | ☑️ Maybe | ✅ Yes | ✅ Real ChatGPT |
+| `ChatGPTAPIBrowser` (v3)    | ✅ Yes | ❌ No    | ❌ No        | ✅ Real ChatGPT   |
+
+_Note_: all three methods share a very similar API that is designed to mimic the upcoming official ChatGPT API once it's released.
+
+_Note_: I do not plan on continuing support for the browser-based version, because it is flaky, a pain to maintain, and because I want to focus on APIs which are closer to the upcoming official API.
+
+
+
+Previous Updates
+
+
+
+
+Feb 5, 2023
+
+OpenAI has disabled the leaked chat model we were previously using, so we're now defaulting to `text-davinci-003`, which is not free.
+
+We've found several other hidden, fine-tuned chat models, but OpenAI keeps disabling them, so we're searching for alternative workarounds.
+
+
+
+
+Feb 1, 2023
This package no longer requires any browser hacks – **it is now using the official OpenAI completions API** with a leaked model that ChatGPT uses under the hood. 🔥
@@ -17,6 +58,9 @@ Please upgrade to `chatgpt@latest` (at least [v4.0.0](https://github.com/transit
Huge shoutout to [@waylaidwanderer](https://github.com/waylaidwanderer) for discovering the leaked chat model!
+
+
+
If you run into any issues, we do have a pretty active [Discord](https://discord.gg/v9gERj825w) with a bunch of ChatGPT hackers from the Node.js & Python communities.
Lastly, please consider starring this repo and following me on twitter
to help support the project.
@@ -24,12 +68,6 @@ Lastly, please consider starring this repo and
-
-
-
# ChatGPT API
> Node.js client for the unofficial [ChatGPT](https://openai.com/blog/chatgpt/) API.
@@ -38,7 +76,9 @@ Thanks && cheers,
- [Intro](#intro)
- [Install](#install)
-- [Usage](#usage)
+- [Usage (ChatGPTAPI)](#usage-chatgptapi)
+- [Usage (ChatGPTUnofficialProxyAPI)](#usage-chatgptunofficialproxyapi)
+ - [Access Token](#access-token)
- [Docs](#docs)
- [Demos](#demos)
- [Projects](#projects)
@@ -60,7 +100,7 @@ npm install chatgpt
Make sure you're using `node >= 18` so `fetch` is available (or `node >= 14` if you install a [fetch polyfill](https://github.com/developit/unfetch#usage-as-a-polyfill)).
-## Usage
+## Usage (ChatGPTAPI)
Sign up for an [OpenAI API key](https://platform.openai.com/overview) and store it in your environment.
@@ -162,6 +202,46 @@ async function example() {
+## Usage (ChatGPTUnofficialProxyAPI)
+
+The API is almost exactly the same for the `ChatGPTUnofficialProxyAPI`; you just need to provide a ChatGPT `accessToken` instead of an OpenAI API key.
+
+```ts
+import { ChatGPTUnofficialProxyAPI } from 'chatgpt'
+
+async function example() {
+ const api = new ChatGPTUnofficialProxyAPI({
+ accessToken: process.env.OPENAI_ACCESS_TOKEN
+ })
+
+ const res = await api.sendMessage('Hello World!')
+ console.log(res.text)
+}
+```
+
+See [demos/demo-reverse-proxy](./demos/demo-reverse-proxy.ts) for a full example:
+
+```bash
+npx tsx demos/demo-reverse-proxy.ts
+```
+
+Known reverse proxies include:
+
+| Reverse Proxy URL | Author | Rate Limits | Last Checked |
+| ------------------------------------------------ | -------------------------------------------- | ----------- | ------------ |
+| `https://chat.duti.tech/api/conversation` | [@acheong08](https://github.com/acheong08) | 50 req/min | 2/19/2023 |
+| `https://gpt.pawan.krd/backend-api/conversation` | [@PawanOsman](https://github.com/PawanOsman) | ? | 2/19/2023 |
+
+**Note**: using a reverse proxy will expose your access token to a third-party. There shouldn't be any adverse effects possible from this, but please be aware of the risks before using this method.
+
+### Access Token
+
+To use `ChatGPTUnofficialProxyAPI`, you'll need a ChatGPT access token. You can either:
+
+1. Use [acheong08/OpenAIAuth](https://github.com/acheong08/OpenAIAuth), which is a Python script to log in and get an access token automatically. This works with email + password accounts (i.e., it does not support accounts where you auth using Microsoft / Google).
+
+2. You can manually get an `accessToken` by logging in to the ChatGPT webapp, opening up the Network tab of devtools, refreshing the page, and then looking at the JSON response to `https://chat.openai.com/api/auth/session`, which will have your `accessToken` string.
+
### Docs
See the [auto-generated docs](./docs/classes/ChatGPTAPI.md) for more info on methods and parameters.
@@ -286,7 +366,7 @@ If you create a cool integration, feel free to open a PR and add it to the list.
- This package supports `node >= 14`.
- This module assumes that `fetch` is installed.
- In `node >= 18`, it's installed by default.
- - In `node < 18`, you need to install a polyfill like `unfetch/polyfill` ([guide](https://github.com/developit/unfetch#usage-as-a-polyfill)) or `isomorphic-fetch` ([guide](https://github.com/matthew-andrews/isomorphic-fetch#readme)).
+ - In `node < 18`, you need to install a polyfill like `unfetch/polyfill` ([guide](https://github.com/developit/unfetch#usage-as-a-polyfill)) or `isomorphic-fetch` ([guide](https://github.com/matthew-andrews/isomorphic-fetch#readme)).
- If you want to build a website using `chatgpt`, we recommend using it only from your backend API
## Credits
diff --git a/legacy/src/chatgpt-unofficial-proxy-api.ts b/legacy/src/chatgpt-unofficial-proxy-api.ts
new file mode 100644
index 00000000..d73a0d96
--- /dev/null
+++ b/legacy/src/chatgpt-unofficial-proxy-api.ts
@@ -0,0 +1,234 @@
+import pTimeout from 'p-timeout'
+import { v4 as uuidv4 } from 'uuid'
+
+import * as types from './types'
+import { fetch as globalFetch } from './fetch'
+import { fetchSSE } from './fetch-sse'
+
+export class ChatGPTUnofficialProxyAPI {
+ protected _accessToken: string
+ protected _apiReverseProxyUrl: string
+ protected _debug: boolean
+ protected _model: string
+  protected _headers: Record<string, string>
+ protected _fetch: types.FetchFn
+
+ /**
+ * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
+ */
+ constructor(opts: {
+ accessToken: string
+
+ /** @defaultValue `https://chat.openai.com/backend-api/conversation` **/
+ apiReverseProxyUrl?: string
+
+ /** @defaultValue `text-davinci-002-render-sha` **/
+ model?: string
+
+ /** @defaultValue `false` **/
+ debug?: boolean
+
+ /** @defaultValue `undefined` **/
+    headers?: Record<string, string>
+
+ fetch?: types.FetchFn
+ }) {
+ const {
+ accessToken,
+ apiReverseProxyUrl = 'https://chat.duti.tech/api/conversation',
+ model = 'text-davinci-002-render-sha',
+ debug = false,
+ headers,
+ fetch = globalFetch
+ } = opts
+
+ this._accessToken = accessToken
+ this._apiReverseProxyUrl = apiReverseProxyUrl
+ this._debug = !!debug
+ this._model = model
+ this._fetch = fetch
+ this._headers = headers
+
+ if (!this._accessToken) {
+ throw new Error('ChatGPT invalid accessToken')
+ }
+
+ if (!this._fetch) {
+ throw new Error('Invalid environment; fetch is not defined')
+ }
+
+ if (typeof this._fetch !== 'function') {
+ throw new Error('Invalid "fetch" is not a function')
+ }
+ }
+
+ get accessToken(): string {
+ return this._accessToken
+ }
+
+ set accessToken(value: string) {
+ this._accessToken = value
+ }
+
+ /**
+ * Sends a message to ChatGPT, waits for the response to resolve, and returns
+ * the response.
+ *
+ * If you want your response to have historical context, you must provide a valid `parentMessageId`.
+ *
+ * If you want to receive a stream of partial responses, use `opts.onProgress`.
+ * If you want to receive the full response, including message and conversation IDs,
+ * you can use `opts.onConversationResponse` or use the `ChatGPTAPI.getConversation`
+ * helper.
+ *
+ * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI completions API. You can override the `promptPrefix` and `promptSuffix` in `opts` to customize the prompt.
+ *
+ * @param message - The prompt message to send
+ * @param opts.conversationId - Optional ID of a conversation to continue (defaults to a random UUID)
+ * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
+ * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
+ * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
+ * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
+ * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
+ *
+ * @returns The response from ChatGPT
+ */
+ async sendMessage(
+ text: string,
+ opts: types.SendMessageBrowserOptions = {}
+  ): Promise<types.ChatMessage> {
+ const {
+ conversationId,
+ parentMessageId = uuidv4(),
+ messageId = uuidv4(),
+ action = 'next',
+ timeoutMs,
+ onProgress
+ } = opts
+
+ let { abortSignal } = opts
+
+ let abortController: AbortController = null
+ if (timeoutMs && !abortSignal) {
+ abortController = new AbortController()
+ abortSignal = abortController.signal
+ }
+
+ const body: types.ConversationJSONBody = {
+ action,
+ messages: [
+ {
+ id: messageId,
+ role: 'user',
+ content: {
+ content_type: 'text',
+ parts: [text]
+ }
+ }
+ ],
+ model: this._model,
+ parent_message_id: parentMessageId
+ }
+
+ if (conversationId) {
+ body.conversation_id = conversationId
+ }
+
+ const result: types.ChatMessage = {
+ role: 'assistant',
+ id: uuidv4(),
+ parentMessageId: messageId,
+ conversationId,
+ text: ''
+ }
+
+    const responseP = new Promise<types.ChatMessage>((resolve, reject) => {
+ const url = this._apiReverseProxyUrl
+ const headers = {
+ ...this._headers,
+ Authorization: `Bearer ${this._accessToken}`,
+ Accept: 'text/event-stream',
+ 'Content-Type': 'application/json'
+ }
+
+ if (this._debug) {
+ console.log('POST', url, { body, headers })
+ }
+
+ fetchSSE(url, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify(body),
+ signal: abortSignal,
+ onMessage: (data: string) => {
+ if (data === '[DONE]') {
+ return resolve(result)
+ }
+
+ try {
+ const convoResponseEvent: types.ConversationResponseEvent =
+ JSON.parse(data)
+ if (convoResponseEvent.conversation_id) {
+ result.conversationId = convoResponseEvent.conversation_id
+ }
+
+ if (convoResponseEvent.message?.id) {
+ result.id = convoResponseEvent.message.id
+ }
+
+ const message = convoResponseEvent.message
+ // console.log('event', JSON.stringify(convoResponseEvent, null, 2))
+
+ if (message) {
+ let text = message?.content?.parts?.[0]
+
+ if (text) {
+ result.text = text
+
+ if (onProgress) {
+ onProgress(result)
+ }
+ }
+ }
+ } catch (err) {
+ // ignore for now; there seem to be some non-json messages
+ // console.warn('fetchSSE onMessage unexpected error', err)
+ }
+ }
+ }).catch((err) => {
+ const errMessageL = err.toString().toLowerCase()
+
+ if (
+ result.text &&
+ (errMessageL === 'error: typeerror: terminated' ||
+ errMessageL === 'typeerror: terminated')
+ ) {
+ // OpenAI sometimes forcefully terminates the socket from their end before
+ // the HTTP request has resolved cleanly. In my testing, these cases tend to
+          // happen when OpenAI has already sent the last `response`, so we can ignore
+ // the `fetch` error in this case.
+ return resolve(result)
+ } else {
+ return reject(err)
+ }
+ })
+ })
+
+ if (timeoutMs) {
+ if (abortController) {
+ // This will be called when a timeout occurs in order for us to forcibly
+ // ensure that the underlying HTTP request is aborted.
+ ;(responseP as any).cancel = () => {
+ abortController.abort()
+ }
+ }
+
+ return pTimeout(responseP, {
+ milliseconds: timeoutMs,
+ message: 'ChatGPT timed out waiting for response'
+ })
+ } else {
+ return responseP
+ }
+ }
+}
diff --git a/legacy/src/index.ts b/legacy/src/index.ts
index 82ff2ad4..f8acafe2 100644
--- a/legacy/src/index.ts
+++ b/legacy/src/index.ts
@@ -1,2 +1,3 @@
export * from './chatgpt-api'
+export * from './chatgpt-unofficial-proxy-api'
export * from './types'
diff --git a/legacy/src/types.ts b/legacy/src/types.ts
index d5e1e6db..abb531c6 100644
--- a/legacy/src/types.ts
+++ b/legacy/src/types.ts
@@ -14,6 +14,18 @@ export type SendMessageOptions = {
abortSignal?: AbortSignal
}
+export type MessageActionType = 'next' | 'variant'
+
+export type SendMessageBrowserOptions = {
+ conversationId?: string
+ parentMessageId?: string
+ messageId?: string
+ action?: MessageActionType
+ timeoutMs?: number
+ onProgress?: (partialResponse: ChatMessage) => void
+ abortSignal?: AbortSignal
+}
+
export interface ChatMessage {
id: string
text: string
@@ -23,9 +35,21 @@ export interface ChatMessage {
detail?: any
}
+export type ChatGPTErrorType =
+ | 'unknown'
+ | 'chatgpt:pool:account-on-cooldown'
+ | 'chatgpt:pool:account-not-found'
+ | 'chatgpt:pool:no-accounts'
+ | 'chatgpt:pool:timeout'
+ | 'chatgpt:pool:rate-limit'
+ | 'chatgpt:pool:unavailable'
+
export class ChatGPTError extends Error {
statusCode?: number
statusText?: string
+ isFinal?: boolean
+ accountId?: string
+ type?: ChatGPTErrorType
}
/** Returns a chat message from a store by it's ID (or null if not found). */
@@ -117,6 +141,10 @@ export namespace openai {
// 'n'?: number | null;
}
+ export type ReverseProxyCompletionParams = CompletionParams & {
+ paid?: boolean
+ }
+
export type CompletionResponse = {
id: string
object: string
@@ -144,3 +172,100 @@ export namespace openai {
total_tokens: number
}
}
+
+/**
+ * https://chat.openai.com/backend-api/conversation
+ */
+export type ConversationJSONBody = {
+ /**
+ * The action to take
+ */
+ action: string
+
+ /**
+ * The ID of the conversation
+ */
+ conversation_id?: string
+
+ /**
+ * Prompts to provide
+ */
+ messages: Prompt[]
+
+ /**
+ * The model to use
+ */
+ model: string
+
+ /**
+ * The parent message ID
+ */
+ parent_message_id: string
+}
+
+export type Prompt = {
+ /**
+ * The content of the prompt
+ */
+ content: PromptContent
+
+ /**
+ * The ID of the prompt
+ */
+ id: string
+
+ /**
+ * The role played in the prompt
+ */
+ role: Role
+}
+
+export type ContentType = 'text'
+
+export type PromptContent = {
+ /**
+ * The content type of the prompt
+ */
+ content_type: ContentType
+
+ /**
+ * The parts to the prompt
+ */
+ parts: string[]
+}
+
+export type ConversationResponseEvent = {
+ message?: Message
+ conversation_id?: string
+ error?: string | null
+}
+
+export type Message = {
+ id: string
+ content: MessageContent
+ role: Role
+ user: string | null
+ create_time: string | null
+ update_time: string | null
+ end_turn: null
+ weight: number
+ recipient: string
+ metadata: MessageMetadata
+}
+
+export type MessageContent = {
+ content_type: string
+ parts: string[]
+}
+
+export type MessageMetadata = any
+
+export type GetAccessTokenFn = ({
+ email,
+ password,
+ sessionToken
+}: {
+ email: string
+ password: string
+ sessionToken?: string
+}) => string | Promise<string>