old-agentic-v1^2
Travis Fischer 2023-06-19 17:29:40 -07:00
rodzic 44d251e6de
commit d8fb25f9c2
6 zmienionych plików z 61 dodań i 29 usunięć

Wyświetl plik

@ -57,12 +57,14 @@ async function main() {
messages: [
{
role: 'system',
content: `You are a McKinsey analyst who is an expert at writing executive summaries.`
content: `You are a McKinsey analyst who is an expert at writing executive summaries. Be sure to cite your sources using markdown.`
},
{
role: 'user',
content: `Write a thorough executive summary on this topic: {{topic}}.
In order to do this, you will need to answer the following questions: \n{{#questions}}- {{.}}\n{{/questions}}`
content: (input) =>
`Write a thorough executive summary on this topic: ${topic}. In order to do this, you will need to answer the following questions: \n${input.questions
.map((q) => `- ${q}`)
.join('\n')}`
}
],
model: 'gpt-4-32k'

Wyświetl plik

@ -30,7 +30,7 @@ async function main() {
topic: z.string()
})
)
.callWithMetadata({ topic })
.call({ topic })
console.log('\n\n\n')
console.log(res)

Wyświetl plik

@ -11,7 +11,7 @@ async function main() {
const topic = process.argv[2] || 'HF0 accelerator'
const res = await agentic
.gpt4(`Summarize the latest news on {{topic}} using markdown.`)
.gpt3(`Summarize the latest news on {{topic}} using markdown.`)
.tools([new SerpAPITool()])
.input(
z.object({

Wyświetl plik

@ -7,7 +7,7 @@ import { HumanFeedbackOptions, HumanFeedbackType } from './human-feedback'
import { HumanFeedbackMechanismCLI } from './human-feedback/cli'
import { OpenAIChatCompletion } from './llms/openai'
import { defaultLogger } from './logger'
import { defaultIDGeneratorFn } from './utils'
import { defaultIDGeneratorFn, isFunction, isString } from './utils'
export class Agentic {
// _taskMap: WeakMap<string, BaseTask<any, any>>
@ -103,16 +103,24 @@ export class Agentic {
return this._idGeneratorFn
}
openaiChatCompletion(
promptOrChatCompletionParams: string | types.openai.ChatCompletionParams, // TODO: make more strict?
openaiChatCompletion<TInput extends types.TaskInput = any>(
promptOrChatCompletionParams:
| types.ChatMessageContent<TInput>
| SetOptional<types.OpenAIChatCompletionParamsInput<TInput>, 'model'>,
modelParams?: SetOptional<
types.openai.ChatCompletionParams,
types.OpenAIChatCompletionParamsInput,
'model' | 'messages'
>
) {
let options: Partial<types.openai.ChatCompletionParams>
let options: SetOptional<
types.OpenAIChatCompletionParamsInput<TInput>,
'model'
>
if (typeof promptOrChatCompletionParams === 'string') {
if (
isString(promptOrChatCompletionParams) ||
isFunction(promptOrChatCompletionParams)
) {
options = {
...modelParams,
messages: [
@ -130,9 +138,9 @@ export class Agentic {
}
}
return new OpenAIChatCompletion({
return new OpenAIChatCompletion<TInput>({
...this._openaiModelDefaults,
agentic: this,
...(this._openaiModelDefaults as any), // TODO
...options
})
}
@ -140,18 +148,24 @@ export class Agentic {
/**
* Shortcut for creating an OpenAI chat completion call with the `gpt-3.5-turbo` model.
*/
gpt3(
gpt3<TInput extends types.TaskInput = any>(
promptOrChatCompletionParams:
| string
| SetOptional<types.openai.ChatCompletionParams, 'model'>,
| types.ChatMessageContent<TInput>
| SetOptional<types.OpenAIChatCompletionParamsInput<TInput>, 'model'>,
modelParams?: SetOptional<
types.openai.ChatCompletionParams,
types.OpenAIChatCompletionParamsInput,
'model' | 'messages'
>
) {
let options: SetOptional<types.openai.ChatCompletionParams, 'model'>
let options: SetOptional<
types.OpenAIChatCompletionParamsInput<TInput>,
'model'
>
if (typeof promptOrChatCompletionParams === 'string') {
if (
isString(promptOrChatCompletionParams) ||
isFunction(promptOrChatCompletionParams)
) {
options = {
...modelParams,
messages: [
@ -169,9 +183,9 @@ export class Agentic {
}
}
return new OpenAIChatCompletion({
return new OpenAIChatCompletion<TInput>({
...this._openaiModelDefaults,
agentic: this,
...(this._openaiModelDefaults as any), // TODO
model: 'gpt-3.5-turbo',
...options
})
@ -180,18 +194,24 @@ export class Agentic {
/**
* Shortcut for creating an OpenAI chat completion call with the `gpt-4` model.
*/
gpt4(
gpt4<TInput extends types.TaskInput = any>(
promptOrChatCompletionParams:
| string
| SetOptional<types.openai.ChatCompletionParams, 'model'>,
| types.ChatMessageContent<TInput>
| SetOptional<types.OpenAIChatCompletionParamsInput<TInput>, 'model'>,
modelParams?: SetOptional<
types.openai.ChatCompletionParams,
types.OpenAIChatCompletionParamsInput,
'model' | 'messages'
>
) {
let options: SetOptional<types.openai.ChatCompletionParams, 'model'>
let options: SetOptional<
types.OpenAIChatCompletionParamsInput<TInput>,
'model'
>
if (typeof promptOrChatCompletionParams === 'string') {
if (
isString(promptOrChatCompletionParams) ||
isFunction(promptOrChatCompletionParams)
) {
options = {
...modelParams,
messages: [
@ -209,9 +229,9 @@ export class Agentic {
}
}
return new OpenAIChatCompletion({
return new OpenAIChatCompletion<TInput>({
...this._openaiModelDefaults,
agentic: this,
...(this._openaiModelDefaults as any), // TODO
model: 'gpt-4',
...options
})

Wyświetl plik

@ -80,9 +80,17 @@ export type ChatMessageContent<TInput extends TaskInput = void> =
export type ChatMessageInput<TInput extends TaskInput = void> =
| ChatMessage
| {
role: ChatMessageRole
name?: string
function_call?: any
content: ChatMessageContent<TInput>
}
export type OpenAIChatCompletionParamsInput<TInput extends TaskInput = void> =
Omit<openai.ChatCompletionParams, 'messages'> & {
messages: ChatMessageInput<TInput>[]
}
export interface ChatModelOptions<
TInput extends TaskInput = void,
TOutput extends TaskOutput = string,

2
test/utils.test.ts vendored
Wyświetl plik

@ -35,6 +35,8 @@ test('sleep should delay execution', async (t) => {
const start = Date.now()
await sleep(1000) // for example, 1000ms / 1sec
const end = Date.now()
// NOTE (travis): I was seeing sporadic failures on CI here, so I added a 10ms buffer
const duration = end - start
t.true(duration >= 1000 - 10)
})