Mirror of https://github.com/transitive-bullshit/chatgpt-api

feat: add human feedback skeleton and couple Task to Agentic runtime

parent 1a283ef573
commit 3c27840337
@@ -5,11 +5,9 @@ import { z } from 'zod'
 import { Agentic, MetaphorSearchTool } from '../src'
 
 async function main() {
-  const metaphorSearch = new MetaphorSearchTool()
-
   const openai = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! })
 
   const $ = new Agentic({ openai })
+  const metaphorSearch = new MetaphorSearchTool({ agentic: $ })
 
   const { results: searchResults } = await metaphorSearch.call({
     query: 'news from today, 2023',
@@ -0,0 +1,51 @@
+/**
+export type Metadata = Record<string, unknown>;
+
+export abstract class BaseTask<
+  TInput extends ZodRawShape | ZodTypeAny = ZodTypeAny,
+  TOutput extends ZodRawShape | ZodTypeAny = ZodTypeAny
+> {
+  // ...
+
+  private _preHooks: ((input?: types.ParsedData<TInput>) => void | Promise<void>)[] = [];
+  private _postHooks: ((result: types.ParsedData<TOutput>, metadata: types.Metadata) => void | Promise<void>)[] = [];
+
+  public registerPreHook(hook: (input?: types.ParsedData<TInput>) => void | Promise<void>): this {
+    this._preHooks.push(hook);
+    return this;
+  }
+
+  public registerPostHook(hook: (result: types.ParsedData<TOutput>) => void | Promise<void>): this {
+    this._postHooks.push(hook);
+    return this;
+  }
+
+  public async callWithMetadata(
+    input?: types.ParsedData<TInput>,
+    options: { dryRun?: boolean } = {}
+  ): Promise<{ result: types.ParsedData<TOutput> | undefined; metadata: types.Metadata }> {
+    const metadata: types.Metadata = {};
+
+    if (options.dryRun) {
+      // TODO: implement dry-run preview of the underlying call
+      return { result: undefined, metadata };
+    }
+
+    for (const hook of this._preHooks) {
+      await hook(input);
+    }
+
+    const result = await this._call(input);
+
+    for (const hook of this._postHooks) {
+      await hook(result, metadata);
+    }
+
+    return { result, metadata };
+  }
+}
+**/
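The file above is still entirely commented out, but it already sketches the hook API this commit is working toward. A hedged usage sketch, assuming registerPreHook, registerPostHook, and callWithMetadata eventually land on BaseTask as written; the runtime and tool construction mirror the example earlier in this diff:

import { OpenAIClient } from 'openai-fetch'
import { Agentic, MetaphorSearchTool } from '../src'

const openai = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! })
const $ = new Agentic({ openai })
const metaphorSearch = new MetaphorSearchTool({ agentic: $ })

// Pre-hooks run before the underlying task call; post-hooks run after it.
metaphorSearch
  .registerPreHook((input) => console.log('about to search:', input))
  .registerPostHook((result) => console.log('search finished:', result))

// callWithMetadata wraps the normal call and returns the per-call metadata
// object (currently left empty by the sketch) alongside the result.
const { result, metadata } = await metaphorSearch.callWithMetadata({
  query: 'news from today, 2023'
})
console.log(result, metadata)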
@@ -1,26 +1,41 @@
 import * as types from './types'
 import { defaultOpenAIModel } from './constants'
+// import { BaseTask } from './task'
+import {
+  HumanFeedbackMechanism,
+  HumanFeedbackMechanismCLI
+} from './human-feedback'
 import { OpenAIChatModel } from './openai'
 
 export class Agentic {
-  _client: types.openai.OpenAIClient
-  _verbosity: number
-  _defaults: Pick<
+  // _taskMap: WeakMap<string, BaseTask<any, any>>
+
+  protected _openai?: types.openai.OpenAIClient
+  protected _anthropic?: types.anthropic.Client
+
+  protected _verbosity: number
+  protected _openaiModelDefaults: Pick<
     types.BaseLLMOptions,
     'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
   >
+  protected _defaultHumanFeedbackMechamism?: HumanFeedbackMechanism
 
   constructor(opts: {
-    openai: types.openai.OpenAIClient
+    openai?: types.openai.OpenAIClient
+    anthropic?: types.anthropic.Client
     verbosity?: number
     defaults?: Pick<
       types.BaseLLMOptions,
       'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
     >
+    defaultHumanFeedbackMechanism?: HumanFeedbackMechanism
   }) {
-    this._client = opts.openai
+    this._openai = opts.openai
+    this._anthropic = opts.anthropic
+
     this._verbosity = opts.verbosity ?? 0
-    this._defaults = {
+    this._openaiModelDefaults = {
       provider: 'openai',
       model: defaultOpenAIModel,
       modelParams: {},
@@ -32,6 +47,25 @@ export class Agentic {
       },
       ...opts.defaults
     }
+
+    // TODO
+    // this._anthropicModelDefaults = {}
+
+    this._defaultHumanFeedbackMechamism =
+      opts.defaultHumanFeedbackMechanism ??
+      new HumanFeedbackMechanismCLI({ agentic: this })
   }
+
+  public get openai(): types.openai.OpenAIClient {
+    return this._openai!
+  }
+
+  public get anthropic(): types.anthropic.Client {
+    return this._anthropic!
+  }
+
+  public get defaultHumanFeedbackMechamism() {
+    return this._defaultHumanFeedbackMechamism
+  }
 
   llm(
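For orientation, a minimal sketch of constructing the runtime under the new constructor signature shown above; both clients are now optional, and when no defaultHumanFeedbackMechanism is supplied the constructor falls back to the CLI-based mechanism:

import { OpenAIClient } from 'openai-fetch'
import { Agentic } from '../src'

const openai = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! })

// `anthropic` and `defaultHumanFeedbackMechanism` are optional; omitting the
// latter means feedback requests go through HumanFeedbackMechanismCLI.
const agentic = new Agentic({ openai })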
@@ -58,8 +92,9 @@
       }
     }
 
-    return new OpenAIChatModel(this._client, {
-      ...(this._defaults as any), // TODO
+    return new OpenAIChatModel({
+      agentic: this,
+      ...(this._openaiModelDefaults as any), // TODO
       ...options
     })
   }
@@ -88,8 +123,9 @@
       }
     }
 
-    return new OpenAIChatModel(this._client, {
-      ...(this._defaults as any), // TODO
+    return new OpenAIChatModel({
+      agentic: this,
+      ...(this._openaiModelDefaults as any), // TODO
       model: 'gpt-3.5-turbo',
       ...options
     })
@@ -119,8 +155,9 @@
       }
     }
 
-    return new OpenAIChatModel(this._client, {
-      ...(this._defaults as any), // TODO
+    return new OpenAIChatModel({
+      agentic: this,
+      ...(this._openaiModelDefaults as any), // TODO
       model: 'gpt-4',
       ...options
     })
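Note the spread order in these factory methods: the runtime defaults come first, the per-method model pin second, and caller options last, so callers can still override the pinned model. A small illustration of that precedence with plain object spreads, independent of this codebase:

const defaults = { model: 'gpt-3.5-turbo', temperature: 0 }
const callerOptions = { model: 'gpt-4-0613' }

// Later spreads win: the caller's model overrides the 'gpt-4' pin.
const resolved = { ...defaults, model: 'gpt-4', ...callerOptions }
// => { model: 'gpt-4-0613', temperature: 0 }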
@@ -23,7 +23,6 @@ export class AnthropicChatModel<
   _client: anthropic.Client
 
   constructor(
-    client: anthropic.Client,
     options: types.ChatModelOptions<
       TInput,
       TOutput,
@@ -39,7 +38,7 @@ export class AnthropicChatModel<
       ...options
     })
 
-    this._client = client
+    this._client = this._agentic.anthropic
   }
 
   protected override async _createChatCompletion(
@@ -0,0 +1,56 @@
+import { ZodRawShape, ZodTypeAny } from 'zod'
+
+import { Agentic } from './agentic'
+import { BaseTask } from './task'
+
+export type HumanFeedbackType = 'confirm' | 'selectOne' | 'selectN'
+
+export type HumanFeedbackOptions = {
+  type: HumanFeedbackType
+
+  /**
+   * Whether to allow exiting
+   */
+  bail?: boolean
+
+  editing?: boolean
+
+  annotations?: boolean
+
+  feedbackMechanism?: HumanFeedbackMechanism
+}
+
+export abstract class HumanFeedbackMechanism {
+  protected _agentic: Agentic
+
+  constructor({ agentic }: { agentic: Agentic }) {
+    this._agentic = agentic
+  }
+  // TODO
+}
+
+export class HumanFeedbackMechanismCLI extends HumanFeedbackMechanism {
+  // TODO
+  constructor(opts: { agentic: Agentic }) {
+    super(opts)
+  }
+}
+
+export function withHumanFeedback<
+  TInput extends ZodRawShape | ZodTypeAny = ZodTypeAny,
+  TOutput extends ZodRawShape | ZodTypeAny = ZodTypeAny
+>(
+  task: BaseTask<TInput, TOutput>,
+  options: HumanFeedbackOptions = {
+    type: 'confirm',
+    bail: false,
+    editing: false,
+    annotations: false
+  }
+) {
+  const { feedbackMechanism = task.agentic.defaultHumanFeedbackMechamism } =
+    options
+
+  // TODO
+  return task
+}
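withHumanFeedback currently just resolves a feedback mechanism and returns the task unchanged, but its intended call shape is already fixed by the signature above. A hedged sketch of wrapping a task, assuming `metaphorSearch` is the tool instance built against the runtime earlier in this diff:

import { withHumanFeedback } from '../src'

// Ask a human (via the runtime's default CLI mechanism) to confirm results
// before they are used; `bail` is meant to allow aborting instead of accepting.
const reviewedSearch = withHumanFeedback(metaphorSearch, {
  type: 'confirm',
  bail: true
})

const { results } = await reviewedSearch.call({ query: 'news from today, 2023' })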
@@ -4,7 +4,8 @@ export * from './llm'
 export * from './openai'
 export * from './anthropic'
 export * from './tokenizer'
+export * from './human-feedback'
 
 export * from './services/metaphor'
 export * from './services/serpapi'
 export * from './tools/metaphor'
+export * from './tools/feedback'
@@ -17,7 +17,6 @@ export class OpenAIChatModel<
   _client: types.openai.OpenAIClient
 
   constructor(
-    client: types.openai.OpenAIClient,
     options: types.ChatModelOptions<
       TInput,
       TOutput,
@@ -30,7 +29,7 @@ export class OpenAIChatModel<
       ...options
     })
 
-    this._client = client
+    this._client = this._agentic.openai
   }
 
   protected override async _createChatCompletion(
src/task.ts

@@ -1,6 +1,7 @@
-import { ZodRawShape, ZodTypeAny, z } from 'zod'
+import { ZodRawShape, ZodTypeAny } from 'zod'
 
 import * as types from './types'
+import { Agentic } from './agentic'
 
 /**
  * A `Task` is a typed, async function call that may be non-deterministic.
@@ -13,16 +14,22 @@
  */
 export abstract class BaseTask<
   TInput extends ZodRawShape | ZodTypeAny = ZodTypeAny,
-  TOutput extends ZodRawShape | ZodTypeAny = z.ZodTypeAny
+  TOutput extends ZodRawShape | ZodTypeAny = ZodTypeAny
 > {
+  protected _agentic: Agentic
   protected _timeoutMs: number | undefined
   protected _retryConfig: types.RetryConfig | undefined
 
-  constructor(options: types.BaseTaskOptions = {}) {
+  constructor(options: types.BaseTaskOptions) {
+    this._agentic = options.agentic
     this._timeoutMs = options.timeoutMs
     this._retryConfig = options.retryConfig
   }
 
+  public get agentic(): Agentic {
+    return this._agentic
+  }
+
   public abstract get inputSchema(): TInput
   public abstract get outputSchema(): TOutput
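Because BaseTaskOptions now requires an `agentic` reference (see the types.ts hunk further down), every concrete task threads the runtime through to super, as MetaphorSearchTool does later in this diff. A minimal hedged sketch of a custom task under the new contract; EchoTask and its schemas are hypothetical and only illustrate the wiring:

import { z } from 'zod'

import { Agentic } from './agentic'
import { BaseTask } from './task'

// Hypothetical task: only the constructor contract and the abstract schema
// getters shown in this diff are exercised here.
export class EchoTask extends BaseTask<
  z.ZodObject<{ text: z.ZodString }>,
  z.ZodObject<{ text: z.ZodString }>
> {
  constructor({ agentic }: { agentic: Agentic }) {
    super({ agentic }) // `agentic` is now required by BaseTaskOptions
  }

  public get inputSchema() {
    return z.object({ text: z.string() })
  }

  public get outputSchema() {
    return z.object({ text: z.string() })
  }

  // ...the task body itself (e.g. the underlying _call implementation) is
  // elided; only the runtime wiring is shown in this sketch.
}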
@@ -1,5 +1,6 @@
 import { z } from 'zod'
 
+import { Agentic } from '../agentic'
 import { MetaphorClient } from '../services/metaphor'
 import { BaseTask } from '../task'
@@ -35,12 +36,14 @@ export class MetaphorSearchTool extends BaseTask<
   _metaphorClient: MetaphorClient
 
   constructor({
+    agentic,
     metaphorClient = new MetaphorClient()
   }: {
+    agentic: Agentic
     metaphorClient?: MetaphorClient
-  } = {}) {
+  }) {
     super({
-      // TODO
+      agentic
     })
 
     this._metaphorClient = metaphorClient
@@ -1,3 +1,4 @@
+import * as anthropic from '@anthropic-ai/sdk'
 import * as openai from 'openai-fetch'
 import {
   SafeParseReturnType,
@@ -8,7 +9,10 @@ import {
   z
 } from 'zod'
 
+import type { Agentic } from './agentic'
+
 export { openai }
+export { anthropic }
 
 export type ParsedData<T extends ZodRawShape | ZodTypeAny> =
   T extends ZodTypeAny
@@ -25,13 +29,15 @@ export type SafeParsedData<T extends ZodRawShape | ZodTypeAny> =
     : never
 
 export interface BaseTaskOptions {
+  agentic: Agentic
+
   timeoutMs?: number
   retryConfig?: RetryConfig
 
   // TODO
   // caching config
   // logging config
-  // reference to agentic context
+  // human feedback config
 }
 
 export interface BaseLLMOptions<
@@ -7,6 +7,8 @@ import Keyv from 'keyv'
 import { OpenAIClient } from 'openai-fetch'
 import pMemoize from 'p-memoize'
 
+import { Agentic } from '../src'
+
 export const fakeOpenAIAPIKey = 'fake-openai-api-key'
 export const fakeAnthropicAPIKey = 'fake-anthropic-api-key'
@@ -86,3 +88,11 @@ export function createAnthropicTestClient() {
 
   return new AnthropicTestClient(apiKey)
 }
+
+export function createTestAgenticRuntime() {
+  const openai = createOpenAITestClient()
+  const anthropic = createAnthropicTestClient()
+
+  const agentic = new Agentic({ openai, anthropic })
+  return agentic
+}
@@ -1,14 +1,15 @@
 import test from 'ava'
 import { expectTypeOf } from 'expect-type'
 
-import { AnthropicChatModel } from '../src/anthropic'
-import { createAnthropicTestClient } from './_utils'
+import { AnthropicChatModel } from '../src'
+import { createTestAgenticRuntime } from './_utils'
 
 test('AnthropicChatModel ⇒ string output', async (t) => {
   t.timeout(2 * 60 * 1000)
-  const client = createAnthropicTestClient()
+  const agentic = createTestAgenticRuntime()
 
-  const builder = new AnthropicChatModel(client, {
+  const builder = new AnthropicChatModel({
+    agentic,
     modelParams: {
       temperature: 0,
       max_tokens_to_sample: 30
@@ -2,14 +2,15 @@ import test from 'ava'
 import { expectTypeOf } from 'expect-type'
 import { z } from 'zod'
 
-import { OpenAIChatModel } from '../src/openai'
-import { createOpenAITestClient } from './_utils'
+import { OpenAIChatModel } from '../src'
+import { createTestAgenticRuntime } from './_utils'
 
 test('OpenAIChatModel ⇒ string output', async (t) => {
   t.timeout(2 * 60 * 1000)
-  const client = createOpenAITestClient()
+  const agentic = createTestAgenticRuntime()
 
-  const builder = new OpenAIChatModel(client, {
+  const builder = new OpenAIChatModel({
+    agentic,
     modelParams: {
       temperature: 0,
       max_tokens: 30
@@ -40,9 +41,10 @@ test('OpenAIChatModel ⇒ string output', async (t) => {
 
 test('OpenAIChatModel ⇒ json output', async (t) => {
   t.timeout(2 * 60 * 1000)
-  const client = createOpenAITestClient()
+  const agentic = createTestAgenticRuntime()
 
-  const builder = new OpenAIChatModel(client, {
+  const builder = new OpenAIChatModel({
+    agentic,
     modelParams: {
       temperature: 0.5
     },
@@ -65,9 +67,10 @@ test('OpenAIChatModel ⇒ json output', async (t) => {
 
 test('OpenAIChatModel ⇒ boolean output', async (t) => {
   t.timeout(2 * 60 * 1000)
-  const client = createOpenAITestClient()
+  const agentic = createTestAgenticRuntime()
 
-  const builder = new OpenAIChatModel(client, {
+  const builder = new OpenAIChatModel({
+    agentic,
     modelParams: {
       temperature: 0,
       max_tokens: 30
@@ -12,6 +12,6 @@ test('SerpAPIClient.search', async (t) => {
   const client = new SerpAPIClient()
 
   const result = await client.search('coffee')
-  console.log(result)
+  // console.log(result)
   t.truthy(result)
 })