export type Role = 'user' | 'assistant'
export type FetchFn = typeof fetch
export type SendMessageOptions = {
  conversationId?: string
  parentMessageId?: string
  messageId?: string
  stream?: boolean
  promptPrefix?: string
  promptSuffix?: string
  timeoutMs?: number
  onProgress?: (partialResponse: ChatMessage) => void
  abortSignal?: AbortSignal
}
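
// A minimal usage sketch of these options. The `ChatGPTAPI` class name and its
// constructor options below are assumptions for illustration; only the
// `sendMessage(text, opts)` call referenced later in this file is implied by
// the types themselves:
//
//   const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY! })
//   const res = await api.sendMessage('Write a haiku about types.', {
//     timeoutMs: 2 * 60 * 1000,
//     // stream partial tokens as they arrive
//     onProgress: (partial: ChatMessage) => console.log(partial.text)
//   })
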
export interface ChatMessage {
  id: string
  text: string
  role: Role
  parentMessageId?: string
  conversationId?: string
  detail?: any
}
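
// Example shape (illustrative values): a follow-up user message threads onto an
// earlier assistant reply by pointing `parentMessageId` at that reply's `id`:
//
//   const followUp: ChatMessage = {
//     id: 'msg-3',
//     text: 'Can you shorten that?',
//     role: 'user',
//     parentMessageId: 'msg-2', // the assistant message being replied to
//     conversationId: 'conv-1'
//   }
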
export class ChatGPTError extends Error {
  statusCode?: number
  statusText?: string
}
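
// Error-handling sketch (assumes failed HTTP requests surface as `ChatGPTError`
// and reuses the hypothetical `api` handle from the sketch above):
//
//   try {
//     await api.sendMessage('hello')
//   } catch (err) {
//     if (err instanceof ChatGPTError && err.statusCode === 429) {
//       console.warn('rate limited:', err.statusText)
//     }
//   }
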
/** Returns a chat message from a store by its ID (or null if not found). */
export type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>
/** Upserts a chat message to a store. */
export type UpsertMessageFunction = (message: ChatMessage) => Promise<void>
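
// Sketch of an in-memory store satisfying both function types (any async
// key-value store would do; the Map and the non-null assertion for the
// not-found case are illustrative shortcuts, not the library's default):
//
//   const store = new Map<string, ChatMessage>()
//   const getMessageById: GetMessageByIdFunction = async (id) => store.get(id)!
//   const upsertMessage: UpsertMessageFunction = async (message) => {
//     store.set(message.id, message)
//   }
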
export namespace openai {
  export type CompletionParams = {
    /** ID of the model to use. */
    model: string

    /** The string prompt to generate a completion for. */
    prompt: string

    /**
     * The suffix that comes after a completion of inserted text.
     */
    suffix?: string

    /**
     * The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
     */
    max_tokens?: number

    /**
     * What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or `top_p` but not both.
     */
    temperature?: number

    /**
     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
     */
    top_p?: number

    /**
     * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.
     */
    logprobs?: number

    /**
     * Echo back the prompt in addition to the completion
     */
    echo?: boolean

    /**
     * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
     */
    stop?: string[]

    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     */
    presence_penalty?: number

    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     */
    frequency_penalty?: number

    /**
     * Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
     */
    best_of?: number

    /**
     * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated.
     */
    logit_bias?: Record<string, number>

    /**
     * A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](/docs/usage-policies/end-user-ids).
     */
    user?: string

    /* NOTE: this is handled by the `sendMessage` function.
     *
     * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
     */
    // stream?: boolean | null

    /**
     * NOT SUPPORTED
     */
    /**
     * How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
     */
    // 'n'?: number | null;
  }
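
  // Illustrative parameter set (the model name is an assumption; any
  // completions model accepted by the API can be substituted):
  //
  //   const params: CompletionParams = {
  //     model: 'text-davinci-003',
  //     prompt: 'Say this is a test',
  //     max_tokens: 64,
  //     temperature: 0.7,
  //     stop: ['\n\n']
  //   }
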
  export type CompletionResponse = {
    id: string
    object: string
    created: number
    model: string
    choices: CompletionResponseChoices
    usage?: CompletionResponseUsage
  }

  export type CompletionResponseChoices = {
    text?: string
    index?: number
    logprobs?: {
      tokens?: Array<string>
      token_logprobs?: Array<number>
      top_logprobs?: Array<object>
      text_offset?: Array<number>
    } | null
    finish_reason?: string
  }[]

  export type CompletionResponseUsage = {
    prompt_tokens: number
    completion_tokens: number
    total_tokens: number
  }
}
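
// Sketch of sending `openai.CompletionParams` to the public completions
// endpoint and reading the typed response (plain `fetch` against
// https://api.openai.com/v1/completions; error handling omitted, and this is
// not necessarily how the library itself issues the request):
//
//   async function complete(
//     params: openai.CompletionParams,
//     apiKey: string,
//     fetchFn: FetchFn = fetch
//   ): Promise<openai.CompletionResponse> {
//     const res = await fetchFn('https://api.openai.com/v1/completions', {
//       method: 'POST',
//       headers: {
//         'Content-Type': 'application/json',
//         Authorization: `Bearer ${apiKey}`
//       },
//       body: JSON.stringify(params)
//     })
//     return (await res.json()) as openai.CompletionResponse
//   }
//
//   // const { choices, usage } = await complete(params, process.env.OPENAI_API_KEY!)
//   // console.log(choices[0]?.text, usage?.total_tokens)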