mirror of https://github.com/transitive-bullshit/chatgpt-api
refactor: add docs and minor code improvement
parent 0099d86da5
commit 9d3a7bc05f
@@ -18,6 +18,23 @@ export type NovuTriggerEventResponse = {
   }
 }
 
+export type NovuTriggerOptions = {
+  /**
+   * Name of the event to trigger. This should match the name of an existing notification template in Novu.
+   */
+  name: string
+
+  /**
+   * Payload to use for the event. This will be used to populate any handlebars placeholders in the notification template.
+   */
+  payload: Record<string, unknown>
+
+  /**
+   * List of subscribers to send the notification to.
+   */
+  to: NovuSubscriber[]
+}
+
 export class NovuClient {
   api: typeof defaultKy
 
@@ -45,15 +62,12 @@ export class NovuClient {
     })
   }
 
-  async triggerEvent({
-    name,
-    payload,
-    to
-  }: {
-    name: string
-    payload: Record<string, unknown>
-    to: NovuSubscriber[]
-  }) {
+  /**
+   * Triggers an event in Novu.
+   *
+   * @returns response from the Novu API containing details about the triggered event.
+   */
+  async triggerEvent({ name, payload, to }: NovuTriggerOptions) {
     return this.api
       .post('events/trigger', {
         headers: {
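For context, here is a minimal sketch of how the refactored signature reads at a call site. The NovuClient constructor arguments and the NovuSubscriber fields are assumptions (the diff shows neither); the option names come from NovuTriggerOptions above.

// Hypothetical call site for the new triggerEvent signature.
const novu = new NovuClient()

const res = await novu.triggerEvent({
  // must match the name of an existing notification template in Novu
  name: 'welcome-email',
  // fills handlebars placeholders such as {{firstName}} in the template
  payload: { firstName: 'Ada' },
  // subscribers to send the notification to; fields are assumed here
  to: [{ subscriberId: 'user-42', email: 'ada@example.com' }]
})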
@@ -90,10 +90,15 @@ export async function getTokenizerForModel(
   return getTokenizerForEncoding(encoding, options)
 }
 
+/**
+ * Returns the Tiktoken model name for an OpenAI model name.
+ *
+ * @param modelName - full OpenAI model name
+ * @returns Tiktoken model name
+ */
 export function getModelNameForTiktoken(modelName: string): TiktokenModel {
   if (modelName.startsWith('gpt-3.5-turbo-16k-')) {
-    // TODO: remove this once the model is added to tiktoken
-    return 'gpt-3.5-turbo-16k' as TiktokenModel
+    return 'gpt-3.5-turbo-16k'
   }
 
   if (modelName.startsWith('gpt-3.5-turbo-')) {
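The startsWith branches exist to normalize dated model variants to base names that tiktoken recognizes. A sketch of the intended behavior; the second branch's return value is inferred from its prefix check, since its body lies outside this hunk:

getModelNameForTiktoken('gpt-3.5-turbo-16k-0613') // => 'gpt-3.5-turbo-16k'
getModelNameForTiktoken('gpt-3.5-turbo-0301')     // => presumably 'gpt-3.5-turbo'
getModelNameForTiktoken('gpt-4')                  // => 'gpt-4' (falls through to the final cast)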
@@ -111,6 +116,12 @@ export function getModelNameForTiktoken(modelName: string): TiktokenModel {
   return modelName as TiktokenModel
 }
 
+/**
+ * Returns the context size for a given embedding model.
+ *
+ * @param modelName - optional name of the embedding model. If not provided, returns a default context size.
+ * @returns context size for the given embedding model
+ */
 export function getContextSizeForEmbedding(modelName?: string): number {
   switch (modelName) {
     case 'text-embedding-ada-002':
@@ -120,6 +131,12 @@ export function getContextSizeForEmbedding(modelName?: string): number {
   }
 }
 
+/**
+ * Returns the context size for a given large language model (LLM).
+ *
+ * @param model - name of the model
+ * @returns context size for the model
+ */
 export function getContextSizeForModel(model: string): number {
   const modelName = getModelNameForTiktoken(model)
 
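Both lookup functions follow the same shape: switch on a (normalized) model name and return a token budget, with a default fallback. The case bodies are elided from this diff, so the sketch below only illustrates the call pattern, not concrete return values:

// Hypothetical calls; the concrete sizes come from switch bodies this diff elides.
const embeddingCtx = getContextSizeForEmbedding('text-embedding-ada-002')
const chatCtx = getContextSizeForModel('gpt-3.5-turbo-0613') // normalized to 'gpt-3.5-turbo' first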
@@ -159,6 +176,13 @@ export function getContextSizeForModel(model: string): number {
   }
 }
 
+/**
+ * Calculates the maximum number of tokens that can be added to a prompt for a given LLM without exceeding the context size limit.
+ *
+ * @param prompt - prompt string
+ * @param modelName - name of the model
+ * @returns maximum number of tokens that can be added to the prompt
+ */
 export async function calculateMaxTokens({
   prompt,
   modelName
@@ -166,9 +190,7 @@ export async function calculateMaxTokens({
   prompt: string
   modelName: string
 }) {
-  // fallback to approximate calculation if tiktoken is not available
-  let numTokens = Math.ceil(prompt.length / 4)
-
+  let numTokens: number
   try {
     const tokenizer = await getTokenizerForModel(modelName)
     numTokens = tokenizer.encode(prompt).length
@@ -177,6 +199,8 @@ export async function calculateMaxTokens({
       `calculateMaxTokens error for model "${modelName}", falling back to approximate count`,
       err.toString()
     )
+    // Fallback to approximate calculation if tiktoken is not available:
+    numTokens = Math.ceil(prompt.length / 4)
   }
 
   const maxTokens = getContextSizeForModel(modelName)
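Net effect of the last two hunks: numTokens is declared up front, counted exactly with tiktoken when the tokenizer loads, and only approximated, at roughly four characters per token, when it does not. A usage sketch; the numeric return value (context size minus prompt tokens) is inferred from the surrounding code rather than shown by this diff:

// With the fallback, a 1,000-character prompt counts as Math.ceil(1000 / 4) = 250 tokens.
const available = await calculateMaxTokens({
  prompt: 'Summarize the following article: ...',
  modelName: 'gpt-3.5-turbo'
})
// `available` is presumably how many tokens remain for the model's completion.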