From a7035cabbe98d04596c64c7cc3d498879d8dd395 Mon Sep 17 00:00:00 2001 From: Travis Fischer Date: Tue, 6 Dec 2022 22:07:14 -0600 Subject: [PATCH] feat: fixes and documenting methods --- legacy/package.json | 9 +-- legacy/pnpm-lock.yaml | 7 +++ legacy/src/chatgpt-api.test.ts | 51 ++++++++++++++++- legacy/src/chatgpt-api.ts | 102 +++++++++++++++++++++++++-------- legacy/src/fetch.ts | 5 +- legacy/src/types.ts | 1 + 6 files changed, 141 insertions(+), 34 deletions(-) diff --git a/legacy/package.json b/legacy/package.json index 3094656a..100a49ff 100644 --- a/legacy/package.json +++ b/legacy/package.json @@ -40,6 +40,7 @@ "dependencies": { "eventsource-parser": "^0.0.5", "expiry-map": "^2.0.0", + "p-timeout": "^6.0.0", "remark": "^14.0.2", "strip-markdown": "^5.0.0", "uuid": "^9.0.0" @@ -62,6 +63,9 @@ "typedoc-plugin-markdown": "^3.13.6", "typescript": "^4.9.3" }, + "optionalDependencies": { + "undici": "^5.13.0" + }, "lint-staged": { "*.{ts,tsx}": [ "prettier --write" @@ -89,8 +93,5 @@ "ai", "ml", "bot" - ], - "optionalDependencies": { - "undici": "^5.13.0" - } + ] } diff --git a/legacy/pnpm-lock.yaml b/legacy/pnpm-lock.yaml index 4ddaac7e..8bb76f74 100644 --- a/legacy/pnpm-lock.yaml +++ b/legacy/pnpm-lock.yaml @@ -13,6 +13,7 @@ specifiers: lint-staged: ^13.0.3 npm-run-all: ^4.1.5 ora: ^6.1.2 + p-timeout: ^6.0.0 prettier: ^2.8.0 remark: ^14.0.2 strip-markdown: ^5.0.0 @@ -27,6 +28,7 @@ specifiers: dependencies: eventsource-parser: 0.0.5 expiry-map: 2.0.0 + p-timeout: 6.0.0 remark: 14.0.2 strip-markdown: 5.0.0 uuid: 9.0.0 @@ -2651,6 +2653,11 @@ packages: engines: {node: '>=12'} dev: true + /p-timeout/6.0.0: + resolution: {integrity: sha512-5iS61MOdUMemWH9CORQRxVXTp9g5K8rPnI9uQpo97aWgsH3vVXKjkIhDi+OgIDmN3Ly9+AZ2fZV01Wut1yzfKA==} + engines: {node: '>=14.16'} + dev: false + /parse-json/4.0.0: resolution: {integrity: sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==} engines: {node: '>=4'} diff --git 
a/legacy/src/chatgpt-api.test.ts b/legacy/src/chatgpt-api.test.ts index 090afba9..fd4bdd6a 100644 --- a/legacy/src/chatgpt-api.test.ts +++ b/legacy/src/chatgpt-api.test.ts @@ -36,12 +36,14 @@ test('ChatGPTAPI valid session token', async (t) => { await t.notThrowsAsync( (async () => { - const api = new ChatGPTAPI({ sessionToken: process.env.SESSION_TOKEN }) + const chatgpt = new ChatGPTAPI({ + sessionToken: process.env.SESSION_TOKEN + }) // Don't make any real API calls using our session token if we're running on CI if (!isCI) { - await api.ensureAuth() - const response = await api.sendMessage('test') + await chatgpt.ensureAuth() + const response = await chatgpt.sendMessage('test') console.log('chatgpt response', response) t.truthy(response) @@ -68,3 +70,46 @@ if (!isCI) { ) }) } + +if (!isCI) { + test('ChatGPTAPI timeout', async (t) => { + t.timeout(30 * 1000) // 30 seconds + + await t.throwsAsync( + async () => { + const chatgpt = new ChatGPTAPI({ + sessionToken: process.env.SESSION_TOKEN + }) + + await chatgpt.sendMessage('test', { + timeoutMs: 1 + }) + }, + { + message: 'ChatGPT timed out waiting for response' + } + ) + }) + + test('ChatGPTAPI abort', async (t) => { + t.timeout(30 * 1000) // 30 seconds + + await t.throwsAsync( + async () => { + const chatgpt = new ChatGPTAPI({ + sessionToken: process.env.SESSION_TOKEN + }) + + const abortController = new AbortController() + setTimeout(() => abortController.abort(new Error('testing abort')), 10) + + await chatgpt.sendMessage('test', { + abortSignal: abortController.signal + }) + }, + { + message: 'testing abort' + } + ) + }) +} diff --git a/legacy/src/chatgpt-api.ts b/legacy/src/chatgpt-api.ts index bca3515f..e02c811f 100644 --- a/legacy/src/chatgpt-api.ts +++ b/legacy/src/chatgpt-api.ts @@ -1,4 +1,5 @@ import ExpiryMap from 'expiry-map' +import pTimeout, { TimeoutError } from 'p-timeout' import { v4 as uuidv4 } from 'uuid' import * as types from './types' @@ -18,8 +19,9 @@ export class ChatGPTAPI { protected 
_backendApiBaseUrl: string protected _userAgent: string - // stores access tokens for up to 10 seconds before needing to refresh - protected _accessTokenCache = new ExpiryMap(10 * 1000) + // Stores access tokens for `accessTokenTTL` milliseconds before needing to refresh + // (defaults to 60 seconds) + protected _accessTokenCache: ExpiryMap /** * Creates a new client wrapper around the unofficial ChatGPT REST API. @@ -28,6 +30,7 @@ export class ChatGPTAPI { * @param apiBaseUrl - Optional override; the base URL for ChatGPT webapp's API (`/api`) * @param backendApiBaseUrl - Optional override; the base URL for the ChatGPT backend API (`/backend-api`) * @param userAgent - Optional override; the `user-agent` header to use with ChatGPT requests + * @param accessTokenTTL - Optional override; how long in milliseconds access tokens should last before being forcefully refreshed */ constructor(opts: { sessionToken: string @@ -43,13 +46,17 @@ export class ChatGPTAPI { /** @defaultValue `'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'` **/ userAgent?: string + + /** @defaultValue 60000 (60 seconds) */ + accessTokenTTL?: number }) { const { sessionToken, markdown = true, apiBaseUrl = 'https://chat.openai.com/api', backendApiBaseUrl = 'https://chat.openai.com/backend-api', - userAgent = USER_AGENT + userAgent = USER_AGENT, + accessTokenTTL = 60000 // 60 seconds } = opts this._sessionToken = sessionToken @@ -58,31 +65,26 @@ export class ChatGPTAPI { this._backendApiBaseUrl = backendApiBaseUrl this._userAgent = userAgent + this._accessTokenCache = new ExpiryMap(accessTokenTTL) + if (!this._sessionToken) { throw new Error('ChatGPT invalid session token') } } - async getIsAuthenticated() { - try { - void (await this.refreshAccessToken()) - return true - } catch (err) { - return false - } - } - - async ensureAuth() { - return await this.refreshAccessToken() - } - /** * Sends a message to ChatGPT, waits for the 
response to resolve, and returns * the response. * + * If you want to receive a stream of partial responses, use `opts.onProgress`. + * If you want to receive the full response, including message and conversation IDs, + * you can use `opts.onConversationResponse` or use the `ChatGPTAPI.getConversation` + * helper. + * * @param message - The prompt message to send * @param opts.conversationId - Optional ID of a conversation to continue * @param opts.parentMessageId - Optional ID of the previous message in the conversation + * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout) * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated * @param opts.onConversationResponse - Optional callback which will be invoked every time the partial response is updated with the full conversation response * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController) @@ -96,11 +98,19 @@ export class ChatGPTAPI { const { conversationId, parentMessageId = uuidv4(), + timeoutMs, onProgress, - onConversationResponse, - abortSignal + onConversationResponse } = opts + let { abortSignal } = opts + + let abortController: AbortController = null + if (timeoutMs && !abortSignal) { + abortController = new AbortController() + abortSignal = abortController.signal + } + const accessToken = await this.refreshAccessToken() const body: types.ConversationJSONBody = { @@ -124,14 +134,9 @@ export class ChatGPTAPI { } const url = `${this._backendApiBaseUrl}/conversation` - - // TODO: What's the best way to differentiate btwn wanting just the response text - // versus wanting the full response message, so you can extract the ID and other - // metadata? 
- // let fullResponse: types.Message = null let response = '' - return new Promise<string>((resolve, reject) => { + const responseP = new Promise<string>((resolve, reject) => { fetchSSE(url, { method: 'POST', headers: { @@ -164,7 +169,6 @@ } response = text - // fullResponse = message if (onProgress) { onProgress(text) @@ -178,8 +182,56 @@ } }).catch(reject) }) + + if (timeoutMs) { + if (abortController) { + // This will be called when a timeout occurs in order for us to forcibly + // ensure that the underlying HTTP request is aborted. + ;(responseP as any).cancel = () => { + abortController.abort() + } + } + + return pTimeout(responseP, { + milliseconds: timeoutMs, + message: 'ChatGPT timed out waiting for response' + }) + } else { + return responseP + } } + /** + * @returns `true` if the client has a valid access token or `false` if refreshing + * the token fails. + */ + async getIsAuthenticated() { + try { + void (await this.refreshAccessToken()) + return true + } catch (err) { + return false + } + } + + /** + * Refreshes the client's access token which will succeed only if the session + * is still valid. + */ + async ensureAuth() { + return await this.refreshAccessToken() + } + + /** + * Attempts to refresh the current access token using the ChatGPT + * `sessionToken` cookie. + * + * Access tokens will be cached for up to `accessTokenTTL` milliseconds to + * prevent refreshing access tokens too frequently. + * + * @returns A valid access token + * @throws An error if refreshing the access token fails. 
*/ async refreshAccessToken(): Promise<string> { const cachedAccessToken = this._accessTokenCache.get(KEY_ACCESS_TOKEN) if (cachedAccessToken) { diff --git a/legacy/src/fetch.ts b/legacy/src/fetch.ts index 308761c9..84480101 100644 --- a/legacy/src/fetch.ts +++ b/legacy/src/fetch.ts @@ -2,8 +2,9 @@ // Use `undici` for node.js 16 and 17 // Use `fetch` for node.js >= 18 -// Use `fetch` for browsers -// Use `fetch` for all other environments +// Use `fetch` for all other environments, including browsers +// NOTE: The top-level await is removed in a `postbuild` npm script for the +// browser build const fetch = globalThis.fetch ?? ((await import('undici')).fetch as unknown as typeof globalThis.fetch) diff --git a/legacy/src/types.ts b/legacy/src/types.ts index 80e70040..ae73f6c2 100644 --- a/legacy/src/types.ts +++ b/legacy/src/types.ts @@ -277,6 +277,7 @@ export type MessageMetadata = any export type SendMessageOptions = { conversationId?: string parentMessageId?: string + timeoutMs?: number onProgress?: (partialResponse: string) => void onConversationResponse?: (response: ConversationResponseEvent) => void abortSignal?: AbortSignal