import Mustache from 'mustache'
import type { SetRequired } from 'type-fest'
import { ZodRawShape, ZodTypeAny, z } from 'zod'
import { printNode, zodToTs } from 'zod-to-ts'

import * as types from './types'

const defaultOpenAIModel = 'gpt-3.5-turbo'

export class Agentic {
  _client: types.openai.OpenAIClient
  _verbosity: number
  _defaults: Pick<
    types.BaseLLMOptions,
    'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
  >

  constructor(opts: {
    openai: types.openai.OpenAIClient
    verbosity?: number
    defaults?: Pick<
      types.BaseLLMOptions,
      'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
    >
  }) {
    this._client = opts.openai
    this._verbosity = opts.verbosity ?? 0
    this._defaults = {
      provider: 'openai',
      model: defaultOpenAIModel,
      modelParams: {},
      timeoutMs: 30000,
      retryConfig: {
        attempts: 3,
        strategy: 'heal',
        ...opts.defaults?.retryConfig
      },
      ...opts.defaults
    }
  }

  gpt4(
    promptOrChatCompletionParams: string | types.openai.ChatCompletionParams
  ) {
    let options: Omit<types.openai.ChatCompletionParams, 'model'>

    if (typeof promptOrChatCompletionParams === 'string') {
      options = {
        messages: [
          {
            role: 'user',
            content: promptOrChatCompletionParams
          }
        ]
      }
    } else {
      options = promptOrChatCompletionParams

      if (!options.messages) {
        throw new Error('messages must be provided')
      }
    }

    return new OpenAIChatModelBuilder(this._client, {
      ...(this._defaults as any), // TODO
      model: 'gpt-4',
      ...options
    })
  }
}

export abstract class BaseLLMCallBuilder<
  TInput extends ZodRawShape | ZodTypeAny = ZodTypeAny,
  TOutput extends ZodRawShape | ZodTypeAny = z.ZodType<string>,
  TModelParams extends Record<string, any> = Record<string, any>
> {
  _options: types.BaseLLMOptions<TInput, TOutput, TModelParams>

  constructor(options: types.BaseLLMOptions<TInput, TOutput, TModelParams>) {
    this._options = options
  }

  input<U extends ZodRawShape | ZodTypeAny>(
    inputSchema: U
  ): BaseLLMCallBuilder<U, TOutput, TModelParams> {
    ;(
      this as unknown as BaseLLMCallBuilder<U, TOutput, TModelParams>
    )._options.input = inputSchema
    return this as unknown as BaseLLMCallBuilder<U, TOutput, TModelParams>
  }

  output<U extends ZodRawShape | ZodTypeAny>(
    outputSchema: U
  ): BaseLLMCallBuilder<TInput, U, TModelParams> {
    ;(
      this as unknown as BaseLLMCallBuilder<TInput, U, TModelParams>
    )._options.output = outputSchema
    return this as unknown as BaseLLMCallBuilder<TInput, U, TModelParams>
  }

  examples(examples: types.LLMExample[]) {
    this._options.examples = examples
    return this
  }

  retry(retryConfig: types.LLMRetryConfig) {
    this._options.retryConfig = retryConfig
    return this
  }

  abstract call(
    input?: types.ParsedData<TInput>
  ): Promise<types.ParsedData<TOutput>>

  // TODO
  // abstract stream({
  //   input: TInput,
  //   onProgress: types.ProgressFunction
  // }): Promise<types.ParsedData<TOutput>>
}

export abstract class ChatModelBuilder<
  TInput extends ZodRawShape | ZodTypeAny = ZodTypeAny,
  TOutput extends ZodRawShape | ZodTypeAny = z.ZodType<string>,
  TModelParams extends Record<string, any> = Record<string, any>
> extends BaseLLMCallBuilder<TInput, TOutput, TModelParams> {
  _messages: types.ChatMessage[]

  constructor(options: types.ChatModelOptions<TInput, TOutput, TModelParams>) {
    super(options)

    this._messages = options.messages
  }
}

export class OpenAIChatModelBuilder<
  TInput extends ZodRawShape | ZodTypeAny = ZodTypeAny,
  TOutput extends ZodRawShape | ZodTypeAny = z.ZodType<string>
> extends ChatModelBuilder<
  TInput,
  TOutput,
  SetRequired<Omit<types.openai.ChatCompletionParams, 'messages'>, 'model'>
> {
  _client: types.openai.OpenAIClient

  constructor(
    client: types.openai.OpenAIClient,
    options: types.ChatModelOptions<
      TInput,
      TOutput,
      Omit<types.openai.ChatCompletionParams, 'messages'>
    >
  ) {
    super({
      provider: 'openai',
      model: defaultOpenAIModel,
      ...options
    })

    this._client = client
  }

  override async call(
    input?: types.ParsedData<TInput>
  ): Promise<types.ParsedData<TOutput>> {
    if (this._options.input) {
      const inputSchema =
        this._options.input instanceof z.ZodType
          ? this._options.input
          : z.object(this._options.input)
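      // A sketch of the graceful error handling the TODO below asks for,
      // using zod's safeParse (the error message format is an assumption):
      //
      //   const result = inputSchema.safeParse(input)
      //   if (!result.success) {
      //     throw new Error(`Invalid input: ${result.error.message}`)
      //   }
      //   input = result.data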
      // TODO: handle errors gracefully
      input = inputSchema.parse(input)
    }

    // TODO: construct messages (presumably by rendering them as Mustache
    // templates with the parsed `input`)
    const messages = this._messages

    const completion = await this._client.createChatCompletion({
      model: defaultOpenAIModel, // TODO: this shouldn't be necessary, but TS is complaining
      ...this._options.modelParams,
      messages
    })

    if (this._options.output) {
      const outputSchema =
        this._options.output instanceof z.ZodType
          ? this._options.output
          : z.object(this._options.output)

      // TODO: convert string => object if necessary
      // TODO: handle errors, retry logic, and self-healing
      return outputSchema.parse(completion.message.content)
    } else {
      return completion.message.content as any
    }
  }

  // NOTE: this method references `_systemMessage`, `_maxModelTokens`,
  // `_maxResponseTokens`, `_getMessageById`, `USER_LABEL_DEFAULT`,
  // `ASSISTANT_LABEL_DEFAULT`, and `tokenizer`, none of which are defined in
  // this file yet; they are assumed to be added alongside it.
  protected async _buildMessages(text: string, opts: types.SendMessageOptions) {
    const { systemMessage = this._systemMessage } = opts
    let { parentMessageId } = opts

    const userLabel = USER_LABEL_DEFAULT
    const assistantLabel = ASSISTANT_LABEL_DEFAULT

    const maxNumTokens = this._maxModelTokens - this._maxResponseTokens
    let messages: types.openai.ChatCompletionRequestMessage[] = []

    if (systemMessage) {
      messages.push({
        role: 'system',
        content: systemMessage
      })
    }

    const systemMessageOffset = messages.length
    let nextMessages = text
      ? messages.concat([
          {
            role: 'user',
            content: text,
            name: opts.name
          }
        ])
      : messages
    let numTokens = 0

    do {
      const prompt = nextMessages
        .reduce((prompt, message) => {
          switch (message.role) {
            case 'system':
              return prompt.concat([`Instructions:\n${message.content}`])
            case 'user':
              return prompt.concat([`${userLabel}:\n${message.content}`])
            default:
              return prompt.concat([`${assistantLabel}:\n${message.content}`])
          }
        }, [] as string[])
        .join('\n\n')

      const nextNumTokensEstimate = await this._getTokenCount(prompt)
      const isValidPrompt = nextNumTokensEstimate <= maxNumTokens

      if (prompt && !isValidPrompt) {
        break
      }

      messages = nextMessages
      numTokens = nextNumTokensEstimate

      if (!isValidPrompt) {
        break
      }

      if (!parentMessageId) {
        break
      }

      const parentMessage = await this._getMessageById(parentMessageId)
      if (!parentMessage) {
        break
      }

      const parentMessageRole = parentMessage.role || 'user'

      nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
        {
          role: parentMessageRole,
          content: parentMessage.text,
          name: parentMessage.name
        },
        ...nextMessages.slice(systemMessageOffset)
      ])

      parentMessageId = parentMessage.parentMessageId
    } while (true)

    // Use up to `_maxModelTokens` tokens (prompt + response), but try to
    // leave `_maxResponseTokens` tokens for the response.
    const maxTokens = Math.max(
      1,
      Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
    )

    return { messages, maxTokens, numTokens }
  }

  protected async _getTokenCount(text: string) {
    // TODO: use a better fix in the tokenizer
    // (`tokenizer` is assumed to be a GPT-style BPE encoder, e.g. gpt-3-encoder)
    text = text.replace(/<\|endoftext\|>/g, '')

    return tokenizer.encode(text).length
  }
}
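/**
 * Example usage (a sketch, not part of this module; assumes `client` is a
 * pre-configured `types.openai.OpenAIClient` and that the string => object
 * output conversion TODO above is resolved):
 *
 *   const agentic = new Agentic({ openai: client })
 *
 *   const result = await agentic
 *     .gpt4('List three synonyms for "happy".')
 *     .output(z.object({ synonyms: z.array(z.string()) }))
 *     .call()
 */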