Mirror of https://github.com/transitive-bullshit/chatgpt-api
commit af85ede1ff
Merge branch 'main' of https://github.com/transitive-bullshit/agentic
@@ -5,11 +5,9 @@ import { z } from 'zod'
 import { Agentic, MetaphorSearchTool } from '../src'

 async function main() {
-  const metaphorSearch = new MetaphorSearchTool()
-
   const openai = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! })
-
   const $ = new Agentic({ openai })
+  const metaphorSearch = new MetaphorSearchTool({ agentic: $ })

   const { results: searchResults } = await metaphorSearch.call({
     query: 'news from today, 2023',
@@ -0,0 +1,51 @@
+/**
+
+export type Metadata = Record<string, unknown>;
+
+
+export abstract class BaseTask<
+  TInput extends ZodRawShape | ZodTypeAny = ZodTypeAny,
+  TOutput extends ZodRawShape | ZodTypeAny = ZodTypeAny
+> {
+
+  // ...
+
+  private _preHooks: ((input?: types.ParsedData<TInput>) => void | Promise<void>)[] = [];
+  private _postHooks: ((result: types.ParsedData<TOutput>, metadata: types.Metadata) => void | Promise<void>)[] = [];
+
+
+  public registerPreHook(hook: (input?: types.ParsedData<TInput>) => void | Promise<void>): this {
+    this._preHooks.push(hook);
+    return this;
+  }
+
+  public registerPostHook(hook: (result: types.ParsedData<TOutput>) => void | Promise<void>): this {
+    this._postHooks.push(hook);
+    return this;
+  }
+
+  public async callWithMetadata(
+    input?: types.ParsedData<TInput>,
+    options: { dryRun?: boolean } = {}
+  ): Promise<{ result: types.ParsedData<TOutput> | undefined, metadata: types.Metadata }> {
+    const metadata: types.Metadata = {};
+
+    if (options.dryRun) {
+      return { result: undefined, metadata }; // TODO: implement dry-run preview
+    }
+
+    for (const hook of this._preHooks) {
+      await hook(input);
+    }
+
+    const result = await this._call(input);
+
+    for (const hook of this._postHooks) {
+      await hook(result, metadata);
+    }
+
+    return { result, metadata };
+  }
+}
+
+**/
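The block above ships commented out (note the surrounding /** ... **/), so none of it executes yet; it drafts a pre/post-hook API for tasks. Purely as an illustration of how the drafted API might be used once it lands, and assuming a concrete BaseTask subclass `MyTask` and an existing `agentic` runtime (neither is part of this commit):

// Hypothetical usage of the drafted hook API; MyTask and `agentic` are assumed.
const task = new MyTask({ agentic })
  .registerPreHook((input) => {
    console.log('about to call task with', input)
  })
  .registerPostHook((result) => {
    console.log('task produced', result)
  })

// callWithMetadata returns both the parsed result and the metadata record
// that post-hooks receive internally.
const { result, metadata } = await task.callWithMetadata({ query: 'example' })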
@ -1,26 +1,41 @@
|
||||||
import * as types from './types'
|
import * as types from './types'
|
||||||
import { defaultOpenAIModel } from './constants'
|
import { defaultOpenAIModel } from './constants'
|
||||||
|
// import { BaseTask } from './task'
|
||||||
|
import {
|
||||||
|
HumanFeedbackMechanism,
|
||||||
|
HumanFeedbackMechanismCLI
|
||||||
|
} from './human-feedback'
|
||||||
import { OpenAIChatModel } from './openai'
|
import { OpenAIChatModel } from './openai'
|
||||||
|
|
||||||
export class Agentic {
|
export class Agentic {
|
||||||
_client: types.openai.OpenAIClient
|
// _taskMap: WeakMap<string, BaseTask<any, any>>
|
||||||
_verbosity: number
|
|
||||||
_defaults: Pick<
|
protected _openai?: types.openai.OpenAIClient
|
||||||
|
protected _anthropic?: types.anthropic.Client
|
||||||
|
|
||||||
|
protected _verbosity: number
|
||||||
|
protected _openaiModelDefaults: Pick<
|
||||||
types.BaseLLMOptions,
|
types.BaseLLMOptions,
|
||||||
'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
|
'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
|
||||||
>
|
>
|
||||||
|
protected _defaultHumanFeedbackMechamism?: HumanFeedbackMechanism
|
||||||
|
|
||||||
constructor(opts: {
|
constructor(opts: {
|
||||||
openai: types.openai.OpenAIClient
|
openai?: types.openai.OpenAIClient
|
||||||
|
anthropic?: types.anthropic.Client
|
||||||
verbosity?: number
|
verbosity?: number
|
||||||
defaults?: Pick<
|
openaiModelDefaults?: Pick<
|
||||||
types.BaseLLMOptions,
|
types.BaseLLMOptions,
|
||||||
'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
|
'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
|
||||||
>
|
>
|
||||||
|
defaultHumanFeedbackMechanism?: HumanFeedbackMechanism
|
||||||
}) {
|
}) {
|
||||||
this._client = opts.openai
|
this._openai = opts.openai
|
||||||
|
this._anthropic = opts.anthropic
|
||||||
|
|
||||||
this._verbosity = opts.verbosity ?? 0
|
this._verbosity = opts.verbosity ?? 0
|
||||||
this._defaults = {
|
|
||||||
|
this._openaiModelDefaults = {
|
||||||
provider: 'openai',
|
provider: 'openai',
|
||||||
model: defaultOpenAIModel,
|
model: defaultOpenAIModel,
|
||||||
modelParams: {},
|
modelParams: {},
|
||||||
|
@ -28,10 +43,29 @@ export class Agentic {
|
||||||
retryConfig: {
|
retryConfig: {
|
||||||
attempts: 3,
|
attempts: 3,
|
||||||
strategy: 'heal',
|
strategy: 'heal',
|
||||||
...opts.defaults?.retryConfig
|
...opts.openaiModelDefaults?.retryConfig
|
||||||
},
|
},
|
||||||
...opts.defaults
|
...opts.openaiModelDefaults
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO
|
||||||
|
// this._anthropicModelDefaults = {}
|
||||||
|
|
||||||
|
this._defaultHumanFeedbackMechamism =
|
||||||
|
opts.defaultHumanFeedbackMechanism ??
|
||||||
|
new HumanFeedbackMechanismCLI({ agentic: this })
|
||||||
|
}
|
||||||
|
|
||||||
|
public get openai(): types.openai.OpenAIClient | undefined {
|
||||||
|
return this._openai
|
||||||
|
}
|
||||||
|
|
||||||
|
public get anthropic(): types.anthropic.Client | undefined {
|
||||||
|
return this._anthropic
|
||||||
|
}
|
||||||
|
|
||||||
|
public get defaultHumanFeedbackMechamism() {
|
||||||
|
return this._defaultHumanFeedbackMechamism
|
||||||
}
|
}
|
||||||
|
|
||||||
llm(
|
llm(
|
||||||
|
@ -58,8 +92,9 @@ export class Agentic {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return new OpenAIChatModel(this._client, {
|
return new OpenAIChatModel({
|
||||||
...(this._defaults as any), // TODO
|
agentic: this,
|
||||||
|
...(this._openaiModelDefaults as any), // TODO
|
||||||
...options
|
...options
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -88,8 +123,9 @@ export class Agentic {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return new OpenAIChatModel(this._client, {
|
return new OpenAIChatModel({
|
||||||
...(this._defaults as any), // TODO
|
agentic: this,
|
||||||
|
...(this._openaiModelDefaults as any), // TODO
|
||||||
model: 'gpt-3.5-turbo',
|
model: 'gpt-3.5-turbo',
|
||||||
...options
|
...options
|
||||||
})
|
})
|
||||||
|
@ -119,8 +155,9 @@ export class Agentic {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return new OpenAIChatModel(this._client, {
|
return new OpenAIChatModel({
|
||||||
...(this._defaults as any), // TODO
|
agentic: this,
|
||||||
|
...(this._openaiModelDefaults as any), // TODO
|
||||||
model: 'gpt-4',
|
model: 'gpt-4',
|
||||||
...options
|
...options
|
||||||
})
|
})
|
||||||
|
|
|
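Taken together, the runtime now accepts optional openai and anthropic clients (the old `defaults` option becomes `openaiModelDefaults`), and chat models pull their client from the runtime via the new getters. A minimal construction sketch; the Anthropic constructor shown assumes the v0 `@anthropic-ai/sdk` `Client` API:

import * as anthropic from '@anthropic-ai/sdk'
import { OpenAIClient } from 'openai-fetch'

import { Agentic } from './agentic'

// Either client may now be omitted; a chat model that needs the missing one
// throws at construction time (see the Anthropic/OpenAI changes below).
const agentic = new Agentic({
  openai: new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! }),
  anthropic: new anthropic.Client(process.env.ANTHROPIC_API_KEY!)
})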
@@ -23,7 +23,6 @@ export class AnthropicChatModel<
   _client: anthropic.Client

   constructor(
-    client: anthropic.Client,
     options: types.ChatModelOptions<
       TInput,
       TOutput,
@@ -39,7 +38,13 @@ export class AnthropicChatModel<
       ...options
     })

-    this._client = client
+    if (this._agentic.anthropic) {
+      this._client = this._agentic.anthropic
+    } else {
+      throw new Error(
+        'AnthropicChatModel requires an Anthropic client to be configured on the Agentic runtime'
+      )
+    }
   }

   protected override async _createChatCompletion(
@@ -0,0 +1,56 @@
+import { ZodRawShape, ZodTypeAny } from 'zod'
+
+import { Agentic } from './agentic'
+import { BaseTask } from './task'
+
+export type HumanFeedbackType = 'confirm' | 'selectOne' | 'selectN'
+
+export type HumanFeedbackOptions = {
+  type: HumanFeedbackType
+
+  /**
+   * Whether to allow exiting
+   */
+  bail?: boolean
+
+  editing?: boolean
+
+  annotations?: boolean
+
+  feedbackMechanism?: HumanFeedbackMechanism
+}
+
+export abstract class HumanFeedbackMechanism {
+  protected _agentic: Agentic
+
+  constructor({ agentic }: { agentic: Agentic }) {
+    this._agentic = agentic
+  }
+  // TODO
+}
+
+export class HumanFeedbackMechanismCLI extends HumanFeedbackMechanism {
+  // TODO
+  constructor(opts: { agentic: Agentic }) {
+    super(opts)
+  }
+}
+
+export function withHumanFeedback<
+  TInput extends ZodRawShape | ZodTypeAny = ZodTypeAny,
+  TOutput extends ZodRawShape | ZodTypeAny = ZodTypeAny
+>(
+  task: BaseTask<TInput, TOutput>,
+  options: HumanFeedbackOptions = {
+    type: 'confirm',
+    bail: false,
+    editing: false,
+    annotations: false
+  }
+) {
+  const { feedbackMechanism = task.agentic.defaultHumanFeedbackMechamism } =
+    options
+
+  // TODO
+  return task
+}
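withHumanFeedback is still a stub: it resolves a feedback mechanism (defaulting to the runtime's CLI mechanism) and returns the task unchanged, but the intended call shape is already visible. A sketch, assuming an existing Agentic runtime named `agentic`; the import paths are illustrative:

import { withHumanFeedback } from './human-feedback'
import { MetaphorSearchTool } from './tools/metaphor'

// Wrap an existing task with (future) human-in-the-loop confirmation; today
// this only picks a feedback mechanism and returns the task as-is.
const search = new MetaphorSearchTool({ agentic })

const reviewedSearch = withHumanFeedback(search, {
  type: 'confirm',
  bail: true
})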
@@ -4,7 +4,8 @@ export * from './llm'
 export * from './openai'
 export * from './anthropic'
 export * from './tokenizer'
+export * from './human-feedback'

 export * from './services/metaphor'
+export * from './services/serpapi'
 export * from './tools/metaphor'
-export * from './tools/feedback'
@@ -17,7 +17,6 @@ export class OpenAIChatModel<
   _client: types.openai.OpenAIClient

   constructor(
-    client: types.openai.OpenAIClient,
     options: types.ChatModelOptions<
       TInput,
       TOutput,
@@ -30,7 +29,13 @@ export class OpenAIChatModel<
       ...options
     })

-    this._client = client
+    if (this._agentic.openai) {
+      this._client = this._agentic.openai
+    } else {
+      throw new Error(
+        'OpenAIChatModel requires an OpenAI client to be configured on the Agentic runtime'
+      )
+    }
   }

   protected override async _createChatCompletion(
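For callers, the effect of this change (and the matching AnthropicChatModel change above) is that the explicit client argument disappears; the model now receives the runtime and resolves its client from it, or throws if that client was never configured. Roughly, assuming an `agentic` runtime with an OpenAI client:

// before: client passed explicitly
// const model = new OpenAIChatModel(openaiClient, { modelParams: { temperature: 0 } })

// after: the runtime is passed instead, and the client is looked up on it
const model = new OpenAIChatModel({
  agentic, // assumed Agentic runtime with an OpenAI client configured
  modelParams: { temperature: 0 }
  // ...the remaining ChatModelOptions are unchanged by this commit
})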
src/task.ts (13 changed lines)

@@ -1,6 +1,7 @@
-import { ZodRawShape, ZodTypeAny, z } from 'zod'
+import { ZodRawShape, ZodTypeAny } from 'zod'

 import * as types from './types'
+import { Agentic } from './agentic'

 /**
  * A `Task` is a typed, async function call that may be non-deterministic.
@@ -13,16 +14,22 @@ import * as types from './types'
  */
 export abstract class BaseTask<
   TInput extends ZodRawShape | ZodTypeAny = ZodTypeAny,
-  TOutput extends ZodRawShape | ZodTypeAny = z.ZodTypeAny
+  TOutput extends ZodRawShape | ZodTypeAny = ZodTypeAny
 > {
+  protected _agentic: Agentic
   protected _timeoutMs: number | undefined
   protected _retryConfig: types.RetryConfig | undefined

-  constructor(options: types.BaseTaskOptions = {}) {
+  constructor(options: types.BaseTaskOptions) {
+    this._agentic = options.agentic
     this._timeoutMs = options.timeoutMs
     this._retryConfig = options.retryConfig
   }

+  public get agentic(): Agentic {
+    return this._agentic
+  }
+
   public abstract get inputSchema(): TInput
   public abstract get outputSchema(): TOutput

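Because `agentic` is now required on BaseTaskOptions, every BaseTask subclass has to accept the runtime and forward it to super(), as MetaphorSearchTool does in the next hunk. A hypothetical minimal subclass, only to show the constructor contract:

import { z } from 'zod'

import * as types from './types'
import { BaseTask } from './task'

// Illustrative only; EchoTask is not part of this commit.
export class EchoTask extends BaseTask<z.ZodString, z.ZodString> {
  constructor(options: types.BaseTaskOptions) {
    super(options) // options.agentic is now mandatory
  }

  public get inputSchema() {
    return z.string()
  }

  public get outputSchema() {
    return z.string()
  }

  // ...the task's execution method and any other abstract members of
  // BaseTask (not shown in this diff) would also be implemented here.
}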
@@ -1,5 +1,6 @@
 import { z } from 'zod'

+import { Agentic } from '../agentic'
 import { MetaphorClient } from '../services/metaphor'
 import { BaseTask } from '../task'

@@ -35,12 +36,14 @@ export class MetaphorSearchTool extends BaseTask<
   _metaphorClient: MetaphorClient

   constructor({
+    agentic,
     metaphorClient = new MetaphorClient()
   }: {
+    agentic: Agentic
     metaphorClient?: MetaphorClient
-  } = {}) {
+  }) {
     super({
-      // TODO
+      agentic
     })

     this._metaphorClient = metaphorClient
@@ -1,3 +1,4 @@
+import * as anthropic from '@anthropic-ai/sdk'
 import * as openai from 'openai-fetch'
 import {
   SafeParseReturnType,
@@ -8,7 +9,10 @@ import {
   z
 } from 'zod'

+import type { Agentic } from './agentic'
+
 export { openai }
+export { anthropic }

 export type ParsedData<T extends ZodRawShape | ZodTypeAny> =
   T extends ZodTypeAny
@@ -25,13 +29,15 @@ export type SafeParsedData<T extends ZodRawShape | ZodTypeAny> =
     : never

 export interface BaseTaskOptions {
+  agentic: Agentic
+
   timeoutMs?: number
   retryConfig?: RetryConfig

   // TODO
   // caching config
   // logging config
-  // reference to agentic context
+  // human feedback config
 }

 export interface BaseLLMOptions<
@@ -7,6 +7,8 @@ import Keyv from 'keyv'
 import { OpenAIClient } from 'openai-fetch'
 import pMemoize from 'p-memoize'

+import { Agentic } from '../src'
+
 export const fakeOpenAIAPIKey = 'fake-openai-api-key'
 export const fakeAnthropicAPIKey = 'fake-anthropic-api-key'

@@ -86,3 +88,11 @@ export function createAnthropicTestClient() {

   return new AnthropicTestClient(apiKey)
 }
+
+export function createTestAgenticRuntime() {
+  const openai = createOpenAITestClient()
+  const anthropic = createAnthropicTestClient()
+
+  const agentic = new Agentic({ openai, anthropic })
+  return agentic
+}
@@ -1,14 +1,15 @@
 import test from 'ava'
 import { expectTypeOf } from 'expect-type'

-import { AnthropicChatModel } from '../src/anthropic'
-import { createAnthropicTestClient } from './_utils'
+import { AnthropicChatModel } from '../src'
+import { createTestAgenticRuntime } from './_utils'

 test('AnthropicChatModel ⇒ string output', async (t) => {
   t.timeout(2 * 60 * 1000)
-  const client = createAnthropicTestClient()
+  const agentic = createTestAgenticRuntime()

-  const builder = new AnthropicChatModel(client, {
+  const builder = new AnthropicChatModel({
+    agentic,
     modelParams: {
       temperature: 0,
       max_tokens_to_sample: 30
@@ -2,14 +2,15 @@ import test from 'ava'
 import { expectTypeOf } from 'expect-type'
 import { z } from 'zod'

-import { OpenAIChatModel } from '../src/openai'
-import { createOpenAITestClient } from './_utils'
+import { OpenAIChatModel } from '../src'
+import { createTestAgenticRuntime } from './_utils'

 test('OpenAIChatModel ⇒ string output', async (t) => {
   t.timeout(2 * 60 * 1000)
-  const client = createOpenAITestClient()
+  const agentic = createTestAgenticRuntime()

-  const builder = new OpenAIChatModel(client, {
+  const builder = new OpenAIChatModel({
+    agentic,
     modelParams: {
       temperature: 0,
       max_tokens: 30
@@ -40,9 +41,10 @@ test('OpenAIChatModel ⇒ string output', async (t) => {

 test('OpenAIChatModel ⇒ json output', async (t) => {
   t.timeout(2 * 60 * 1000)
-  const client = createOpenAITestClient()
+  const agentic = createTestAgenticRuntime()

-  const builder = new OpenAIChatModel(client, {
+  const builder = new OpenAIChatModel({
+    agentic,
     modelParams: {
       temperature: 0.5
     },
@@ -65,9 +67,10 @@ test('OpenAIChatModel ⇒ json output', async (t) => {

 test('OpenAIChatModel ⇒ boolean output', async (t) => {
   t.timeout(2 * 60 * 1000)
-  const client = createOpenAITestClient()
+  const agentic = createTestAgenticRuntime()

-  const builder = new OpenAIChatModel(client, {
+  const builder = new OpenAIChatModel({
+    agentic,
     modelParams: {
       temperature: 0,
       max_tokens: 30
@@ -12,6 +12,6 @@ test('SerpAPIClient.search', async (t) => {
   const client = new SerpAPIClient()

   const result = await client.search('coffee')
-  console.log(result)
+  // console.log(result)
   t.truthy(result)
 })