diff --git a/examples/hf0-demo.ts b/examples/hf0-demo.ts index ae9b46d..e66f10d 100644 --- a/examples/hf0-demo.ts +++ b/examples/hf0-demo.ts @@ -57,12 +57,14 @@ async function main() { messages: [ { role: 'system', - content: `You are a McKinsey analyst who is an expert at writing executive summaries.` + content: `You are a McKinsey analyst who is an expert at writing executive summaries. Be sure to cite your sources using markdown.` }, { role: 'user', - content: `Write a thorough executive summary on this topic: {{topic}}. - In order to do this, you will need to answer the following questions: \n{{#questions}}- {{.}}\n{{/questions}}` + content: (input) => + `Write a thorough executive summary on this topic: ${topic}. In order to do this, you will need to answer the following questions: \n${input.questions + .map((q) => `- ${q}`) + .join('\n')}` } ], model: 'gpt-4-32k' diff --git a/examples/search-and-crawl.ts b/examples/search-and-crawl.ts index 4f0a071..fe8d527 100644 --- a/examples/search-and-crawl.ts +++ b/examples/search-and-crawl.ts @@ -30,7 +30,7 @@ async function main() { topic: z.string() }) ) - .callWithMetadata({ topic }) + .call({ topic }) console.log('\n\n\n') console.log(res) diff --git a/examples/summarize-news.ts b/examples/summarize-news.ts index 55ebe0d..0c5fcab 100644 --- a/examples/summarize-news.ts +++ b/examples/summarize-news.ts @@ -11,7 +11,7 @@ async function main() { const topic = process.argv[2] || 'HF0 accelerator' const res = await agentic - .gpt4(`Summarize the latest news on {{topic}} using markdown.`) + .gpt3(`Summarize the latest news on {{topic}} using markdown.`) .tools([new SerpAPITool()]) .input( z.object({ diff --git a/src/agentic.ts b/src/agentic.ts index 9b448fb..08165af 100644 --- a/src/agentic.ts +++ b/src/agentic.ts @@ -7,7 +7,7 @@ import { HumanFeedbackOptions, HumanFeedbackType } from './human-feedback' import { HumanFeedbackMechanismCLI } from './human-feedback/cli' import { OpenAIChatCompletion } from './llms/openai' 
import { defaultLogger } from './logger' -import { defaultIDGeneratorFn } from './utils' +import { defaultIDGeneratorFn, isFunction, isString } from './utils' export class Agentic { // _taskMap: WeakMap> @@ -103,16 +103,24 @@ export class Agentic { return this._idGeneratorFn } - openaiChatCompletion( - promptOrChatCompletionParams: string | types.openai.ChatCompletionParams, // TODO: make more strict? + openaiChatCompletion( + promptOrChatCompletionParams: + | types.ChatMessageContent + | SetOptional, 'model'>, modelParams?: SetOptional< - types.openai.ChatCompletionParams, + types.OpenAIChatCompletionParamsInput, 'model' | 'messages' > ) { - let options: Partial + let options: SetOptional< + types.OpenAIChatCompletionParamsInput, + 'model' + > - if (typeof promptOrChatCompletionParams === 'string') { + if ( + isString(promptOrChatCompletionParams) || + isFunction(promptOrChatCompletionParams) + ) { options = { ...modelParams, messages: [ @@ -130,9 +138,9 @@ export class Agentic { } } - return new OpenAIChatCompletion({ + return new OpenAIChatCompletion({ + ...this._openaiModelDefaults, agentic: this, - ...(this._openaiModelDefaults as any), // TODO ...options }) } @@ -140,18 +148,24 @@ export class Agentic { /** * Shortcut for creating an OpenAI chat completion call with the `gpt-3.5-turbo` model. 
*/ - gpt3( + gpt3( promptOrChatCompletionParams: - | string - | SetOptional, + | types.ChatMessageContent + | SetOptional, 'model'>, modelParams?: SetOptional< - types.openai.ChatCompletionParams, + types.OpenAIChatCompletionParamsInput, 'model' | 'messages' > ) { - let options: SetOptional + let options: SetOptional< + types.OpenAIChatCompletionParamsInput, + 'model' + > - if (typeof promptOrChatCompletionParams === 'string') { + if ( + isString(promptOrChatCompletionParams) || + isFunction(promptOrChatCompletionParams) + ) { options = { ...modelParams, messages: [ @@ -169,9 +183,9 @@ export class Agentic { } } - return new OpenAIChatCompletion({ + return new OpenAIChatCompletion({ + ...this._openaiModelDefaults, agentic: this, - ...(this._openaiModelDefaults as any), // TODO model: 'gpt-3.5-turbo', ...options }) @@ -180,18 +194,24 @@ export class Agentic { /** * Shortcut for creating an OpenAI chat completion call with the `gpt-4` model. */ - gpt4( + gpt4( promptOrChatCompletionParams: - | string - | SetOptional, + | types.ChatMessageContent + | SetOptional, 'model'>, modelParams?: SetOptional< - types.openai.ChatCompletionParams, + types.OpenAIChatCompletionParamsInput, 'model' | 'messages' > ) { - let options: SetOptional + let options: SetOptional< + types.OpenAIChatCompletionParamsInput, + 'model' + > - if (typeof promptOrChatCompletionParams === 'string') { + if ( + isString(promptOrChatCompletionParams) || + isFunction(promptOrChatCompletionParams) + ) { options = { ...modelParams, messages: [ @@ -209,9 +229,9 @@ export class Agentic { } } - return new OpenAIChatCompletion({ + return new OpenAIChatCompletion({ + ...this._openaiModelDefaults, agentic: this, - ...(this._openaiModelDefaults as any), // TODO model: 'gpt-4', ...options }) diff --git a/src/types.ts b/src/types.ts index 07499f2..3f8ecd9 100644 --- a/src/types.ts +++ b/src/types.ts @@ -80,9 +80,17 @@ export type ChatMessageContent = export type ChatMessageInput = | ChatMessage | { + role: 
ChatMessageRole + name?: string + function_call?: any content: ChatMessageContent } +export type OpenAIChatCompletionParamsInput = + Omit & { + messages: ChatMessageInput[] + } + export interface ChatModelOptions< TInput extends TaskInput = void, TOutput extends TaskOutput = string, diff --git a/test/utils.test.ts b/test/utils.test.ts index d664000..641a56c 100644 --- a/test/utils.test.ts +++ b/test/utils.test.ts @@ -35,6 +35,8 @@ test('sleep should delay execution', async (t) => { const start = Date.now() await sleep(1000) // for example, 1000ms / 1sec const end = Date.now() + + // NOTE (travis): I was seeing sporadic failures on CI here, so I added a 10ms buffer const duration = end - start t.true(duration >= 1000 - 10) })