chatgpt-api/src/chatgpt-api.ts

import ExpiryMap from 'expiry-map'
import pTimeout from 'p-timeout'
import { v4 as uuidv4 } from 'uuid'
import * as types from './types'
import { ChatGPTConversation } from './chatgpt-conversation'
import { fetch } from './fetch'
import { fetchSSE } from './fetch-sse'
import { markdownToText } from './utils'
const KEY_ACCESS_TOKEN = 'accessToken'
const USER_AGENT =
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
export class ChatGPTAPI {
protected _sessionToken: string
protected _markdown: boolean
protected _apiBaseUrl: string
protected _backendApiBaseUrl: string
protected _userAgent: string
protected _headers: Record<string, string>

// Stores access tokens for `accessTokenTTL` milliseconds before needing to refresh
// (defaults to 60 seconds)
protected _accessTokenCache: ExpiryMap<string, string>

protected _user: types.User | null = null
/**
* Creates a new client wrapper around the unofficial ChatGPT REST API.
*
* @param opts.sessionToken - **Required** OpenAI session token which can be found in a valid session's cookies (see readme for instructions)
* @param opts.apiBaseUrl - Optional override; the base URL for the ChatGPT webapp's API (`/api`)
* @param opts.backendApiBaseUrl - Optional override; the base URL for the ChatGPT backend API (`/backend-api`)
* @param opts.userAgent - Optional override; the `user-agent` header to use with ChatGPT requests
* @param opts.accessTokenTTL - Optional override; how long in milliseconds access tokens should last before being forcefully refreshed
* @param opts.accessToken - Optional pre-fetched access token used to seed the internal token cache
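*
* A minimal usage sketch (`SESSION_TOKEN` is an assumed placeholder for a valid
* `__Secure-next-auth.session-token` cookie value taken from the ChatGPT webapp):
*
* @example
* ```ts
* const api = new ChatGPTAPI({ sessionToken: process.env.SESSION_TOKEN! })
* await api.ensureAuth()
* ```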
*/
constructor(opts: {
sessionToken: string

/** @defaultValue `true` **/
markdown?: boolean

/** @defaultValue `'https://chat.openai.com/api'` **/
apiBaseUrl?: string

/** @defaultValue `'https://chat.openai.com/backend-api'` **/
backendApiBaseUrl?: string

/** @defaultValue `'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'` **/
userAgent?: string

/** @defaultValue 60000 (60 seconds) */
accessTokenTTL?: number

accessToken?: string
}) {
const {
sessionToken,
markdown = true,
apiBaseUrl = 'https://chat.openai.com/api',
backendApiBaseUrl = 'https://chat.openai.com/backend-api',
userAgent = USER_AGENT,
accessTokenTTL = 60000, // 60 seconds
accessToken
} = opts

this._sessionToken = sessionToken
this._markdown = !!markdown
this._apiBaseUrl = apiBaseUrl
this._backendApiBaseUrl = backendApiBaseUrl
this._userAgent = userAgent

this._headers = {
'User-Agent': this._userAgent,
'x-openai-assistant-app-id': '',
'accept-language': 'en-US,en;q=0.9',
origin: 'https://chat.openai.com',
referer: 'https://chat.openai.com/chat'
}

this._accessTokenCache = new ExpiryMap<string, string>(accessTokenTTL)
this._accessTokenCache.set(KEY_ACCESS_TOKEN, accessToken ?? '')

if (!this._sessionToken) {
throw new types.ChatGPTError('ChatGPT invalid session token')
}
}
/**
* Gets the currently signed-in user, if authenticated, `null` otherwise.
*/
get user() {
return this._user
}
/**
* Sends a message to ChatGPT, waits for the response to resolve, and returns
* the response.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
* If you want to receive the full response, including message and conversation IDs,
* you can use `opts.onConversationResponse` or use the `ChatGPTAPI.getConversation`
* helper.
*
* @param message - The prompt message to send
* @param opts.conversationId - Optional ID of a conversation to continue
* @param opts.parentMessageId - Optional ID of the previous message in the conversation
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.onConversationResponse - Optional callback which will be invoked on every conversation response event, including message and conversation IDs
* @param opts.abortSignal - Optional signal used to abort the underlying `fetch` call via an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
*
* @returns The response from ChatGPT
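*
* A minimal usage sketch (assumes `api` is an authenticated `ChatGPTAPI` instance):
*
* @example
* ```ts
* const response = await api.sendMessage('Write a haiku about TypeScript.', {
*   timeoutMs: 2 * 60 * 1000,
*   onProgress: (partialResponse) => console.log(partialResponse)
* })
* console.log(response)
* ```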
*/
async sendMessage(
message: string,
opts: types.SendMessageOptions = {}
): Promise<string> {
const {
conversationId,
parentMessageId = uuidv4(),
timeoutMs,
onProgress,
onConversationResponse
} = opts

let { abortSignal } = opts
let abortController: AbortController | null = null
if (timeoutMs && !abortSignal) {
abortController = new AbortController()
abortSignal = abortController.signal
}

const accessToken = await this.refreshAccessToken()
const body: types.ConversationJSONBody = {
action: 'next',
messages: [
{
id: uuidv4(),
role: 'user',
content: {
content_type: 'text',
parts: [message]
}
}
],
model: 'text-davinci-002-render',
parent_message_id: parentMessageId
}

if (conversationId) {
body.conversation_id = conversationId
}

const url = `${this._backendApiBaseUrl}/conversation`
let response = ''

const responseP = new Promise<string>((resolve, reject) => {
fetchSSE(url, {
method: 'POST',
headers: {
...this._headers,
Authorization: `Bearer ${accessToken}`,
Accept: 'text/event-stream',
'Content-Type': 'application/json'
},
body: JSON.stringify(body),
signal: abortSignal,
onMessage: (data: string) => {
if (data === '[DONE]') {
return resolve(response)
}

try {
const parsedData: types.ConversationResponseEvent = JSON.parse(data)
if (onConversationResponse) {
onConversationResponse(parsedData)
}

const message = parsedData.message
// console.log('event', JSON.stringify(parsedData, null, 2))
if (message) {
let text = message?.content?.parts?.[0]
if (text) {
if (!this._markdown) {
text = markdownToText(text)
}
response = text
if (onProgress) {
onProgress(text)
}
}
}
} catch (err) {
console.warn('fetchSSE onMessage unexpected error', err)
reject(err)
}
}
}).catch(reject)
})
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;(responseP as any).cancel = () => {
abortController.abort()
}
}
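// Note (assumed behavior of the p-timeout library): if the promise exposes a
// `cancel()` method, p-timeout will call it when the timeout fires, which in
// this case aborts the in-flight HTTP request via the AbortController above.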
return pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'ChatGPT timed out waiting for response'
})
} else {
return responseP
}
}
/**
* @returns `true` if the client has a valid access token or `false` if refreshing
* the token fails.
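*
* A minimal usage sketch (assumes `api` is a constructed `ChatGPTAPI` instance):
*
* @example
* ```ts
* if (!(await api.getIsAuthenticated())) {
*   throw new Error('ChatGPT session token is invalid or expired')
* }
* ```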
*/
async getIsAuthenticated() {
try {
void (await this.refreshAccessToken())
return true
} catch (err) {
return false
}
}
/**
* Refreshes the client's access token which will succeed only if the session
* is still valid.
*/
async ensureAuth() {
return await this.refreshAccessToken()
}
/**
* Attempts to refresh the current access token using the ChatGPT
* `sessionToken` cookie.
*
* Access tokens will be cached for up to `accessTokenTTL` milliseconds to
* prevent refreshing access tokens too frequently.
*
* @returns A valid access token
* @throws An error if refreshing the access token fails.
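*
* A minimal usage sketch (assumes `api` is a constructed `ChatGPTAPI` instance):
*
* @example
* ```ts
* const accessToken = await api.refreshAccessToken()
* // Calls within the next `accessTokenTTL` milliseconds return the cached token
* ```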
*/
async refreshAccessToken(): Promise<string> {
const cachedAccessToken = this._accessTokenCache.get(KEY_ACCESS_TOKEN)
if (cachedAccessToken) {
return cachedAccessToken
}
let response: Response
try {
const res = await fetch(`${this._apiBaseUrl}/auth/session`, {
headers: {
...this._headers,
cookie: `__Secure-next-auth.session-token=${this._sessionToken}`
}
}).then((r) => {
response = r
if (!r.ok) {
const error = new types.ChatGPTError(`${r.status} ${r.statusText}`)
error.response = r
error.statusCode = r.status
error.statusText = r.statusText
throw error
}
return r.json() as any as types.SessionResult
})
const accessToken = res?.accessToken
if (!accessToken) {
const error = new types.ChatGPTError('Unauthorized')
error.response = response
error.statusCode = response?.status
error.statusText = response?.statusText
throw error
}
const appError = res?.error
if (appError) {
if (appError === 'RefreshAccessTokenError') {
const error = new types.ChatGPTError('session token may have expired')
error.response = response
error.statusCode = response?.status
error.statusText = response?.statusText
throw error
} else {
const error = new types.ChatGPTError(appError)
error.response = response
error.statusCode = response?.status
error.statusText = response?.statusText
throw error
}
}
if (res.user) {
this._user = res.user
}
this._accessTokenCache.set(KEY_ACCESS_TOKEN, accessToken)
return accessToken
} catch (err: any) {
const error = new types.ChatGPTError(
`ChatGPT failed to refresh auth token. ${err.toString()}`
)
error.response = response
error.statusCode = response?.status
error.statusText = response?.statusText
error.originalError = err
throw error
}
}
/**
* Gets a new ChatGPTConversation instance, which can be used to send multiple
* messages as part of a single conversation.
*
* @param opts.conversationId - Optional ID of a conversation to continue
* @param opts.parentMessageId - Optional ID of the previous message in the conversation
* @returns The new conversation instance
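*
* A minimal usage sketch (assumes `api` is an authenticated instance; the returned
* conversation tracks `conversationId` and `parentMessageId` across calls):
*
* @example
* ```ts
* const conversation = api.getConversation()
* const answer = await conversation.sendMessage('What is 2 + 2?')
* const followUp = await conversation.sendMessage('Now multiply that by 10.')
* ```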
*/
getConversation(
opts: { conversationId?: string; parentMessageId?: string } = {}
) {
return new ChatGPTConversation(this, opts)
}
}