feat: add initial support for OpenAI functions w/ chat completion

old-agentic-v1^2
Travis Fischer 2023-06-13 19:49:54 -07:00
rodzic f648f3216b
commit 7afbc0d259
29 zmienionych plików z 284 dodań i 56 usunięć

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic } from '@/agentic'

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic } from '@/agentic'

Wyświetl plik

@ -0,0 +1,19 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { z } from 'zod'
import { Agentic, CalculatorTool } from '@/index'
/**
 * Example: ask GPT-4 a math question, allow it to invoke the calculator
 * tool, and validate the structured result against a zod output schema.
 */
async function main() {
  // Non-null assertion: the example intentionally fails fast if the key is unset.
  const client = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! })
  const agentic = new Agentic({ openai: client })

  // Build the task fluently, then execute it.
  const task = agentic
    .gpt4('What is 5 * 50?')
    .tools([new CalculatorTool({ agentic })])
    .output(z.object({ answer: z.number() }))

  const result = await task.call()
  console.log(result)
}

main()

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic, MetaphorSearchTool } from '@/index'

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic } from '@/agentic'

Wyświetl plik

@ -38,11 +38,13 @@
"test:eslint": "eslint \"**/*.ts\""
},
"dependencies": {
"@agentic/openai-fetch": "^1.5.3",
"@anthropic-ai/sdk": "^0.4.4",
"@inquirer/checkbox": "^1.3.1",
"@inquirer/editor": "^1.2.0",
"@inquirer/input": "^1.2.1",
"@inquirer/select": "^1.2.1",
"@types/json-schema": "^7.0.12",
"debug": "^4.3.4",
"expr-eval": "^2.0.2",
"handlebars": "^4.7.7",
@ -52,7 +54,6 @@
"ky": "^0.33.3",
"nanoid": "^4.0.2",
"normalize-url": "^8.0.0",
"openai-fetch": "^1.5.1",
"p-map": "^6.0.0",
"p-retry": "^5.1.2",
"p-timeout": "^6.1.2",
@ -60,6 +61,7 @@
"ts-dedent": "^2.2.0",
"uuid": "^9.0.0",
"zod": "^3.21.4",
"zod-to-json-schema": "^3.21.1",
"zod-to-ts": "^1.1.4",
"zod-validation-error": "^1.3.0"
},

Wyświetl plik

@ -1,10 +1,13 @@
lockfileVersion: '6.1'
lockfileVersion: '6.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
dependencies:
'@agentic/openai-fetch':
specifier: ^1.5.3
version: 1.5.3
'@anthropic-ai/sdk':
specifier: ^0.4.4
version: 0.4.4
@ -20,6 +23,9 @@ dependencies:
'@inquirer/select':
specifier: ^1.2.1
version: 1.2.1
'@types/json-schema':
specifier: ^7.0.12
version: 7.0.12
debug:
specifier: ^4.3.4
version: 4.3.4
@ -47,9 +53,6 @@ dependencies:
normalize-url:
specifier: ^8.0.0
version: 8.0.0
openai-fetch:
specifier: ^1.5.1
version: 1.5.1
p-map:
specifier: ^6.0.0
version: 6.0.0
@ -71,6 +74,9 @@ dependencies:
zod:
specifier: ^3.21.4
version: 3.21.4
zod-to-json-schema:
specifier: ^3.21.1
version: 3.21.1(zod@3.21.4)
zod-to-ts:
specifier: ^1.1.4
version: 1.1.4(typescript@5.1.3)(zod@3.21.4)
@ -163,6 +169,13 @@ devDependencies:
packages:
/@agentic/openai-fetch@1.5.3:
resolution: {integrity: sha512-4c5YWz6jQdGxxM+SVhf0XW3mKYnFr56hntPep+y7wRfkjUl6lgZiuU3J61esQ8bj8vSFkgSfwjf3DeZIi/IEsg==}
dependencies:
ky: 0.33.3
zod: 3.21.4
dev: false
/@anthropic-ai/sdk@0.4.4:
resolution: {integrity: sha512-Z/39nQi1sSUCeLII3lsAbL1u+0JF6cR2XmUEX9sLH0VtxmIjY6cjOUYjCkYh4oapTxOkhAFnVSAFJ6cxml2qXg==}
dependencies:
@ -756,7 +769,6 @@ packages:
/@types/json-schema@7.0.12:
resolution: {integrity: sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==}
dev: true
/@types/minimist@1.2.2:
resolution: {integrity: sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==}
@ -2989,13 +3001,6 @@ packages:
mimic-fn: 4.0.0
dev: true
/openai-fetch@1.5.1:
resolution: {integrity: sha512-LDSsXTFa2ssjYTZY51+B/69wXg8/UteqKyPtuFa+bMFRav7ACQXi3AJl+gieh3BF8La95NHCE0FS8t0F8fRHwA==}
dependencies:
ky: 0.33.3
zod: 3.21.4
dev: false
/optionator@0.9.1:
resolution: {integrity: sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==}
engines: {node: '>= 0.8.0'}
@ -4147,6 +4152,14 @@ packages:
engines: {node: '>=12.20'}
dev: true
/zod-to-json-schema@3.21.1(zod@3.21.4):
resolution: {integrity: sha512-y5g0MPxDq+YG/T+cHGPYH4PcBpyCqwK6wxeJ76MR563y0gk/14HKfebq8xHiItY7lkc9GDFygCnkvNDTvAhYAg==}
peerDependencies:
zod: ^3.21.4
dependencies:
zod: 3.21.4
dev: false
/zod-to-ts@1.1.4(typescript@5.1.3)(zod@3.21.4):
resolution: {integrity: sha512-jsCg+pTNxLAdJOfW4ul+SpechdGYEJPPnssSbqWdR2LSIkotT22k+UvqPb1nEHwe/YbEcbUOlZUfGM0npgR+Jg==}
peerDependencies:

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic } from '@/agentic'

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic } from '@/agentic'

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic } from '@/agentic'

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic, HumanFeedbackSelect } from '@/index'

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic, HumanFeedbackSingle } from '@/index'

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic } from '@/agentic'

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic } from '@/agentic'

Wyświetl plik

@ -1,5 +1,5 @@
import { OpenAIClient } from '@agentic/openai-fetch'
import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { Agentic, MetaphorSearchTool } from '@/index'

Wyświetl plik

@ -0,0 +1,74 @@
import { z } from 'zod'

/**
 * Runtime zod schema for the roles an OpenAI chat message may carry,
 * including the `function` role introduced by function calling.
 */
export const ChatMessageRoleSchema = z.union([
  z.literal('user'),
  z.literal('system'),
  z.literal('assistant'),
  z.literal('function')
])

/** `'user' | 'system' | 'assistant' | 'function'`, inferred from the schema above. */
export type ChatMessageRole = z.infer<typeof ChatMessageRoleSchema>

/**
 * Fields common to every chat message variant.
 *
 * NOTE(review): `content` is typed `string`, but the OpenAI API returns
 * `content: null` on assistant messages that contain a `function_call`
 * (the test suite casts with `as any` to assert this) — consider
 * `string | null`; confirm against the API before widening.
 */
export interface ChatMessageBase {
  role: ChatMessageRole
  content: string
  // Optional author name; required on `function`-role messages (see ChatMessageFunction).
  name?: string
}

/** A message authored by the end user. */
export interface ChatMessageUser extends ChatMessageBase {
  role: 'user'
}

/** A system prompt message. */
export interface ChatMessageSystem extends ChatMessageBase {
  role: 'system'
}

/** A plain assistant reply (no function call). */
export interface ChatMessageAssistant extends ChatMessageBase {
  role: 'assistant'
}

/** An assistant reply that requests a function invocation instead of (or alongside) text. */
export interface ChatMessageFunctionCall extends ChatMessageBase {
  role: 'assistant'
  function_call: FunctionCall
}

/** The function invocation requested by the model. */
export interface FunctionCall {
  name: string
  // Raw JSON-encoded argument string; callers must JSON.parse and validate it.
  arguments: string
}

/** A message carrying the result of a function call back to the model. */
export interface ChatMessageFunction extends ChatMessageBase {
  role: 'function'
  // Name of the function whose result this message contains (required for this role).
  name: string
}

/** Discriminated union (on `role` / presence of `function_call`) of all message variants. */
export type ChatMessage =
  | ChatMessageUser
  | ChatMessageSystem
  | ChatMessageAssistant
  | ChatMessageFunctionCall
  | ChatMessageFunction

/** Declaration of a function the model is allowed to call. */
export interface FunctionDefinition {
  /**
   * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
   */
  name: string

  /**
   * The description of what the function does.
   */
  description?: string

  /**
   * The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
   */
  parameters?: { [key: string]: any }
}

/**
 * Controls whether the model may call a function:
 * `'none'` forbids calls, `'auto'` lets the model decide,
 * and `{ name }` forces a call to that specific function.
 */
export type FunctionCallOptions =
  | 'none'
  | 'auto'
  | {
      name: string
    }

Wyświetl plik

@ -47,6 +47,14 @@ export class AnthropicChatModel<
}
}
public override get nameForModel(): string {
return 'anthropic_chat'
}
public override get nameForHuman(): string {
return 'AnthropicChatModel'
}
protected override async _createChatCompletion(
messages: types.ChatMessage[]
): Promise<types.BaseChatCompletionResponse<anthropic.CompletionResponse>> {
@ -62,6 +70,7 @@ export class AnthropicChatModel<
return message.content
}
})
.filter(Boolean)
.join('') + anthropic.AI_PROMPT
// TODO: support streaming

Wyświetl plik

@ -3,6 +3,7 @@ import pMap from 'p-map'
import { dedent } from 'ts-dedent'
import { type SetRequired } from 'type-fest'
import { ZodType, z } from 'zod'
import { zodToJsonSchema } from 'zod-to-json-schema'
import { printNode, zodToTs } from 'zod-to-ts'
import * as errors from '@/errors'
@ -14,6 +15,7 @@ import {
extractJSONObjectFromString
} from '@/utils'
import { BaseTask } from '../task'
import { BaseLLM } from './llm'
export abstract class BaseChatModel<
@ -22,7 +24,8 @@ export abstract class BaseChatModel<
TModelParams extends Record<string, any> = Record<string, any>,
TChatCompletionResponse extends Record<string, any> = Record<string, any>
> extends BaseLLM<TInput, TOutput, TModelParams> {
_messages: types.ChatMessage[]
protected _messages: types.ChatMessage[]
protected _tools?: BaseTask<any, any>[]
constructor(
options: SetRequired<
@ -33,6 +36,7 @@ export abstract class BaseChatModel<
super(options)
this._messages = options.messages
this._tools = options.tools
}
// TODO: use polymorphic `this` type to return correct BaseLLM subclass type
@ -57,6 +61,11 @@ export abstract class BaseChatModel<
return refinedInstance
}
tools(tools: BaseTask<any, any>[]): this {
this._tools = tools
return this
}
protected abstract _createChatCompletion(
messages: types.ChatMessage[]
): Promise<types.BaseChatCompletionResponse<TChatCompletionResponse>>
@ -70,9 +79,6 @@ export abstract class BaseChatModel<
input = this.inputSchema.parse(input)
}
// TODO: validate input message variables against input schema
console.log({ input })
const messages = this._messages
.map((message) => {
return {
@ -263,9 +269,15 @@ export abstract class BaseChatModel<
const numTokensPerMessage = await pMap(
messages,
async (message) => {
let content = message.content || ''
if (message.function_call) {
// TODO: this case needs testing
content = message.function_call.arguments
}
const [numTokensContent, numTokensRole, numTokensName] =
await Promise.all([
this.getNumTokens(message.content),
this.getNumTokens(content),
this.getNumTokens(message.role),
message.name
? this.getNumTokens(message.name).then((n) => n + tokensPerName)

Wyświetl plik

@ -69,7 +69,11 @@ export abstract class BaseLLM<
}
}
public override get name(): string {
public override get nameForModel(): string {
return `${this._provider}_chat`
}
public override get nameForHuman(): string {
return `${this._provider}:chat:${this._model}`
}

Wyświetl plik

@ -38,16 +38,26 @@ export class OpenAIChatModel<
}
}
public override get nameForModel(): string {
return 'openai_chat'
}
public override get nameForHuman(): string {
return 'OpenAIChatModel'
}
protected override async _createChatCompletion(
messages: types.ChatMessage[]
): Promise<
types.BaseChatCompletionResponse<types.openai.ChatCompletionResponse>
> {
return this._client.createChatCompletion({
const res = await this._client.createChatCompletion({
...this._modelParams,
model: this._model,
messages
})
return res
}
public override clone(): OpenAIChatModel<TInput, TOutput> {
@ -61,6 +71,7 @@ export class OpenAIChatModel<
model: this._model,
examples: this._examples,
messages: this._messages,
tools: this._tools,
...this._modelParams
})
}

Wyświetl plik

@ -46,12 +46,20 @@ export abstract class BaseTask<TInput = void, TOutput = string> {
public abstract get inputSchema(): ZodType<TInput>
public abstract get outputSchema(): ZodType<TOutput>
public abstract get name(): string
public abstract get nameForModel(): string
public get nameForHuman(): string {
return this.nameForModel
}
public get descForModel(): string {
return ''
}
// TODO: is this really necessary?
public clone(): BaseTask<TInput, TOutput> {
// TODO: override in subclass if needed
throw new Error(`clone not implemented for task "${this.name}"`)
throw new Error(`clone not implemented for task "${this.nameForModel}"`)
}
public retryConfig(retryConfig: types.RetryConfig): this {
@ -81,7 +89,7 @@ export abstract class BaseTask<TInput = void, TOutput = string> {
input,
attemptNumber: 0,
metadata: {
taskName: this.name,
taskName: this.nameForModel,
taskId: this.id,
callId: this._agentic.idGeneratorFn()
}

Wyświetl plik

@ -91,6 +91,11 @@ export async function getTokenizerForModel(
}
export function getModelNameForTiktoken(modelName: string): TiktokenModel {
if (modelName.startsWith('gpt-3.5-turbo-16k-')) {
// TODO: remove this once the model is added to tiktoken
return 'gpt-3.5-turbo-16k' as TiktokenModel
}
if (modelName.startsWith('gpt-3.5-turbo-')) {
return 'gpt-3.5-turbo'
}
@ -119,6 +124,9 @@ export function getContextSizeForModel(model: string): number {
const modelName = getModelNameForTiktoken(model)
switch (modelName) {
case 'gpt-3.5-turbo-16k' as TiktokenModel:
return 16384
case 'gpt-3.5-turbo':
return 4096

Wyświetl plik

@ -29,11 +29,15 @@ export class CalculatorTool extends BaseTask<
return CalculatorOutputSchema
}
public override get name(): string {
public override get nameForModel(): string {
return 'calculator'
}
public get descriptionForModel(): string {
public override get nameForHuman(): string {
return 'Calculator'
}
public override get descForModel(): string {
return 'Useful for getting the result of a math expression. The input to this tool should be a valid mathematical expression that could be executed by a simple calculator.'
}

Wyświetl plik

@ -33,8 +33,8 @@ export class MetaphorSearchTool extends BaseTask<
return metaphor.MetaphorSearchOutputSchema
}
public override get name(): string {
return 'metaphor-search'
public override get nameForModel(): string {
return 'metaphor_web_search'
}
protected override async _call(

Wyświetl plik

@ -63,8 +63,8 @@ export class NovuNotificationTool extends BaseTask<
return NovuNotificationToolOutputSchema
}
public override get name(): string {
return 'novu'
public override get nameForModel(): string {
return 'novu_send_notification'
}
protected override async _call(

Wyświetl plik

@ -1,10 +1,11 @@
import * as openai from '@agentic/openai-fetch'
import * as anthropic from '@anthropic-ai/sdk'
import * as openai from 'openai-fetch'
import type { Options as RetryOptions } from 'p-retry'
import type { JsonObject } from 'type-fest'
import { SafeParseReturnType, ZodType, ZodTypeAny, output, z } from 'zod'
import type { Agentic } from './agentic'
import type { BaseTask } from './task'
export { openai }
export { anthropic }
@ -54,19 +55,8 @@ export interface LLMOptions<
promptSuffix?: string
}
// export type ChatMessageRole = 'user' | 'system' | 'assistant'
export const ChatMessageRoleSchema = z.union([
z.literal('user'),
z.literal('system'),
z.literal('assistant')
])
export type ChatMessageRole = z.infer<typeof ChatMessageRoleSchema>
export interface ChatMessage {
role: ChatMessageRole
content: string
name?: string
}
export type ChatMessage = openai.ChatMessage
export type ChatMessageRole = openai.ChatMessageRole
export interface ChatModelOptions<
TInput = void,
@ -74,6 +64,7 @@ export interface ChatModelOptions<
TModelParams extends Record<string, any> = Record<string, any>
> extends BaseLLMOptions<TInput, TOutput, TModelParams> {
messages: ChatMessage[]
tools?: BaseTask<any, any>[]
}
export interface BaseChatCompletionResponse<

Wyświetl plik

@ -1,11 +1,11 @@
import * as anthropic from '@anthropic-ai/sdk'
import { OpenAIClient } from '@agentic/openai-fetch'
import KeyvRedis from '@keyv/redis'
import 'dotenv/config'
import hashObject from 'hash-obj'
import Redis from 'ioredis'
import Keyv from 'keyv'
import defaultKy from 'ky'
import { OpenAIClient } from 'openai-fetch'
import pMemoize from 'p-memoize'
import { Agentic } from '@/agentic'

Wyświetl plik

@ -0,0 +1,72 @@
import test from 'ava'

import * as types from '@/types'
import { createOpenAITestClient } from '../_utils'

// End-to-end test of the OpenAI function-calling round trip against the live API:
// (1) the model should emit a `function_call` for the weather question,
// (2) feeding a mocked function result back should yield a plain text answer.
test('OpenAIClient - createChatCompletion - functions', async (t) => {
  const openai = createOpenAITestClient()
  // Pinned to the first snapshot that supports function calling.
  const model = 'gpt-3.5-turbo-0613'

  const messages: types.ChatMessage[] = [
    {
      role: 'user',
      content: 'Whats the weather like in Boston right now?'
    }
  ]

  // JSON-Schema declaration of the single callable function exposed to the model.
  const functions = [
    {
      name: 'get_current_weather',
      description: 'Get the current weather in a given location',
      parameters: {
        type: 'object',
        properties: {
          location: {
            type: 'string',
            description: 'The city and state, e.g. San Francisco, CA'
          },
          unit: {
            type: 'string',
            enum: ['celsius', 'fahrenheit']
          }
        },
        required: ['location']
      }
    }
  ]

  // Step 1: the model is expected to request a call instead of answering directly.
  const res0 = await openai.createChatCompletion({
    model,
    messages,
    functions
  })
  // console.log(JSON.stringify(res0, null, 2))

  t.is(res0.message.role, 'assistant')
  // API returns null content alongside a function_call; the ChatMessage type
  // declares `content: string`, hence the `as any` cast. TODO: widen the type.
  t.is(res0.message.content as any, null)
  t.is(res0.message.function_call!.name, 'get_current_weather')
  // `arguments` is a raw JSON string produced by the model; parse before comparing.
  const args = JSON.parse(res0.message.function_call!.arguments)
  t.deepEqual(args, { location: 'Boston' })

  // Step 2: echo the assistant's call plus a mocked function result back to the model.
  const weatherMock = { temperature: 22, unit: 'celsius', description: 'Sunny' }
  const res1 = await openai.createChatCompletion({
    model,
    messages: [
      ...messages,
      res0.message,
      {
        role: 'function',
        name: 'get_current_weather',
        content: JSON.stringify(weatherMock)
      }
    ],
    functions
  })
  // console.log(JSON.stringify(res1, null, 2))

  // With the function result available, the model should answer in plain text.
  t.is(res1.message.role, 'assistant')
  t.true(res1.message.content.length > 0)
  t.is(res1.message.function_call, undefined)
})

Wyświetl plik

@ -7,6 +7,7 @@ import './_utils'
const models = [
'gpt-3.5-turbo',
'gpt-4',
'gpt-4-0613',
'text-davinci-003',
'code-davinci-002'
]