feat: WIP core typed llm fluent interface

old-agentic-v1^2
Travis Fischer 2023-05-01 21:32:03 -05:00
rodzic 427b2783af
commit a2b20ad8b5
9 zmienionych plików z 2902 dodań i 1 usunięć

8
.env.example 100644
Wyświetl plik

@ -0,0 +1,8 @@
# ------------------------------------------------------------------------------
# This is an example .env file.
#
# All of these environment vars must be defined either in your environment or in
# a local .env file in order to run the demo for this project.
# ------------------------------------------------------------------------------
OPENAI_API_KEY=

Wyświetl plik

@ -54,5 +54,13 @@
"openapi",
"guardrails",
"plugins"
]
],
"dependencies": {
"dotenv-safe": "^8.2.0",
"openai-fetch": "^1.2.0",
"p-map": "^6.0.0",
"parse-json": "^7.0.0",
"zod": "^3.21.4",
"zod-validation-error": "^1.3.0"
}
}

2539
pnpm-lock.yaml 100644

Diff is too large to display — use "Load Diff" to view it.

Wyświetl plik

@ -15,6 +15,43 @@
TODO
## Use Cases
https://platform.openai.com/examples
- text completion
- text generation
- text classification
- https://platform.openai.com/docs/guides/completion/classification
- https://docs.cohere.com/docs/text-classification-with-classify
- special cases
- content moderation
- https://platform.openai.com/docs/guides/moderation/overview
- language detection
- conversation
- transformation
- translation
- conversion
- summarization
- completion
- factual responses
- chat completion
- entity extraction
- reranking
// take in a query and a list of texts and produces an ordered array with each text assigned a relevance score.
// generate JSON conforming to a zod schema
// generate a string conforming to a zod schema
// generate TS code and ensure it is valid syntax + valid exports
// generate HTML and ensure it parses correctly
// primitives (boolean, number, z.coerce.date, string)
// classifier (enum)
// CSV
// retry strategies
// separate the prompt formatting from the inference call?
## License
MIT © [Travis Fischer](https://transitivebullsh.it)

Wyświetl plik

@ -0,0 +1,77 @@
import { z } from 'zod'
// WIP design sketch for the typed fluent LLM interface. `$` stands in for a
// future client (see the Agentic class in src/llm.ts); none of these calls
// are executable yet — `$` is an empty object cast to `any`.
async function main() {
  const $ = {} as any

  // work with a single input or an array of inputs using p-map under the hood
  // ex0: coerce a free-form completion into a zod-validated boolean, with retries.
  const ex0 = await $.gpt4(`give me a single boolean value: `)
    .output(z.boolean())
    .retry({ attempts: 3 })
    .call()
  // LLM
  // give me a single boolean value
  // given an output as a boolean.
  // true/false

  // ex1: array-of-nonempty-strings output schema.
  // NOTE(review): no trailing .call() here — presumably the builder is meant to
  // be awaitable/thenable, or this line is unfinished; confirm the design.
  const ex1 = await $.gpt4(
    'give me a list of character names from star wars'
  ).output(z.array(z.string().nonempty()))

  // ex2: templated prompt whose {{text}} placeholder is validated by an input
  // schema and supplied at .call() time.
  const ex2 = await $.gpt4(`Summarize the following text: {{text}}`)
    .output(z.string().nonempty())
    .input(z.object({ text: z.string().nonempty() }))
    .call({
      text: 'The quick brown fox jumps over the lazy dog.'
    })

  // ext22: raw model params up front, messages supplied at call time.
  const ext22 = await $.gpt4({ temperature: 0 }).call({
    messages: [
      // TEST
    ]
  })

  // ex3: few-shot entity extraction — system/user messages plus .examples()
  // providing input/output demonstration pairs, validated as a nonempty string.
  const ex3 = await $.gpt4({
    temperature: 0,
    messages: [
      {
        role: 'system',
        content: 'You extract movie titles from text.'
      },
      {
        role: 'user',
        content: `Extract the movie title from the following text or return 'none' if no movie title is found.`
      }
    ]
  })
    .examples([
      {
        input: `Deadpool 2 | Official HD Deadpool's "Wet on Wet" Teaser | 2018`,
        output: `Deadpool 2`
      },
      {
        input: `Jordan Peele Just Became the First Black Writer-Director With a $100M Movie Debut`,
        output: 'none'
      },
      {
        input: 'Joker Officially Rated “R”',
        output: 'Joker'
      },
      {
        input: `Ryan Reynolds 'Free Guy' Receives July 3, 2020 Release Date - About a bank teller stuck in his routine that discovers hes an NPC character in a brutal open world game.`,
        output: 'Free Guy'
      },
      {
        input: 'James Cameron congratulates Kevin Feige and Marvel!',
        output: 'none'
      },
      {
        input:
          'The Cast of Guardians of the Galaxy release statement on James Gunn',
        output: 'Guardians of the Galaxy'
      }
    ])
    .output(z.string().nonempty())
    .call()
}

1
src/index.ts 100644
Wyświetl plik

@ -0,0 +1 @@
export * from './utils'

139
src/llm.ts 100644
Wyświetl plik

@ -0,0 +1,139 @@
import type { ZodType } from 'zod'
import * as types from './types'
/**
 * Entry point for the fluent LLM interface. Wraps an OpenAI client together
 * with default call options and hands out model-specific call builders.
 */
export class Agentic {
  _client: types.openai.OpenAIClient
  _verbosity: number
  _defaults: Pick<
    types.BaseLLMOptions,
    'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
  >

  /**
   * @param client - OpenAI client used by every builder created here
   * @param opts.verbosity - logging verbosity (default 0)
   * @param opts.defaults - partial overrides for the built-in call defaults
   */
  constructor(
    client: types.openai.OpenAIClient,
    opts: {
      verbosity?: number
      defaults?: Pick<
        types.BaseLLMOptions,
        'provider' | 'model' | 'modelParams' | 'timeoutMs' | 'retryConfig'
      >
    } = {}
  ) {
    this._client = client
    this._verbosity = opts.verbosity ?? 0

    // Spread opts.defaults BEFORE the retryConfig merge: previously the outer
    // `...opts.defaults` came last and clobbered the merged retryConfig, so a
    // partial override like `{ retryConfig: { attempts: 5 } }` silently dropped
    // the default `strategy`.
    this._defaults = {
      provider: 'openai',
      model: 'gpt-3.5-turbo',
      modelParams: {},
      timeoutMs: 30000,
      ...opts.defaults,
      retryConfig: {
        attempts: 3,
        strategy: 'heal',
        ...opts.defaults?.retryConfig
      }
    }
  }

  /**
   * Creates a GPT-4 chat call builder from either a plain prompt string
   * (wrapped as a single user message) or full chat-completion params.
   *
   * @throws {Error} if chat-completion params are passed without `messages`
   */
  gpt4(
    promptOrChatCompletionParams: string | types.openai.ChatCompletionParams
  ) {
    let options: Omit<types.openai.ChatCompletionParams, 'model'>

    if (typeof promptOrChatCompletionParams === 'string') {
      options = {
        messages: [
          {
            role: 'user',
            content: promptOrChatCompletionParams
          }
        ]
      }
    } else {
      options = promptOrChatCompletionParams

      if (!options.messages) {
        // fail loudly instead of the previous bare `throw new Error()`
        throw new Error('gpt4 chat completion params must include messages')
      }
    }

    return new OpenAIChatModelBuilder(this._client, {
      ...(this._defaults as any), // TODO: tighten typing so this cast isn't needed
      model: 'gpt-4',
      ...options
    })
  }
}
/**
 * Base class for the fluent LLM call builders. Accumulates input/output
 * schemas, few-shot examples, and retry configuration ahead of the actual
 * model invocation, returning `this` from each setter for chaining.
 */
export abstract class BaseLLMCallBuilder<TInput, TOutput, TModelParams> {
  _options: types.BaseLLMOptions<TInput, TOutput, TModelParams>

  constructor(options: types.BaseLLMOptions<TInput, TOutput, TModelParams>) {
    this._options = options
  }

  /** Declares the zod schema used to validate values passed to `call`. */
  input(schema: ZodType<TInput>) {
    this._options.input = schema
    return this
  }

  /** Declares the zod schema the model's response must conform to. */
  output(schema: ZodType<TOutput>) {
    this._options.output = schema
    return this
  }

  /** Supplies few-shot input/output demonstration pairs. */
  examples(pairs: types.LLMExample[]) {
    this._options.examples = pairs
    return this
  }

  /** Overrides the retry behavior for this call. */
  retry(config: types.LLMRetryConfig) {
    this._options.retryConfig = config
    return this
  }

  /** Executes the underlying LLM call, resolving to the validated output. */
  abstract call(input?: TInput): Promise<TOutput>
}
/**
 * Intermediate builder for chat-based models: layers the conversation
 * message list on top of the base builder state.
 */
export abstract class ChatModelBuilder<
  TInput,
  TOutput,
  TModelParams
> extends BaseLLMCallBuilder<TInput, TOutput, TModelParams> {
  _messages: types.ChatMessage[]

  constructor(opts: types.ChatModelOptions<TInput, TOutput, TModelParams>) {
    super(opts)
    this._messages = opts.messages
  }
}
/**
 * OpenAI-backed chat model builder. Holds the OpenAI client alongside the
 * chat-completion params (minus `messages`, which live on ChatModelBuilder).
 */
export class OpenAIChatModelBuilder<TInput, TOutput> extends ChatModelBuilder<
  TInput,
  TOutput,
  Omit<types.openai.ChatCompletionParams, 'messages'>
> {
  _client: types.openai.OpenAIClient

  constructor(
    client: types.openai.OpenAIClient,
    options: types.ChatModelOptions<
      TInput,
      TOutput,
      Omit<types.openai.ChatCompletionParams, 'messages'>
    >
  ) {
    super({
      provider: 'openai',
      ...options
    })

    this._client = client
  }

  /**
   * Executes the chat completion.
   *
   * @throws {Error} always — not implemented yet. The previous empty body
   *   silently resolved `undefined` while the signature promised `TOutput`;
   *   failing loudly is safer until the real implementation lands.
   */
  override async call(input?: TInput): Promise<TOutput> {
    // TODO: invoke this._client with this._messages + modelParams, apply
    // `input` to the prompt, and validate the response against
    // this._options.output.
    throw new Error('OpenAIChatModelBuilder.call is not implemented yet')
  }
}

53
src/types.ts 100644
Wyświetl plik

@ -0,0 +1,53 @@
import * as openai from 'openai-fetch'
import type { ZodType } from 'zod'
export { openai }
/** Common options shared by every LLM call builder. */
export interface BaseLLMOptions<
  TInput = any,
  TOutput = any,
  TModelParams = Record<string, any>
> {
  // Model provider id; 'openai' is the only value used so far.
  provider?: string
  // Provider-specific model name, e.g. 'gpt-3.5-turbo' or 'gpt-4'.
  model?: string
  // Extra params forwarded to the underlying model API (temperature, etc.).
  modelParams?: TModelParams
  // Per-call timeout in milliseconds (Agentic defaults this to 30000).
  timeoutMs?: number
  // Optional zod schema validating the value passed to `call`.
  input?: ZodType<TInput>
  // Optional zod schema the model's response must satisfy.
  output?: ZodType<TOutput>
  // Few-shot demonstration pairs.
  examples?: LLMExample[]
  // Retry behavior when a call fails.
  retryConfig?: LLMRetryConfig
}

/** Options for prompt-template-based (non-chat) LLM calls. */
export interface LLMOptions<
  TInput = any,
  TOutput = any,
  TModelParams = Record<string, any>
> extends BaseLLMOptions<TInput, TOutput, TModelParams> {
  // Prompt template; NOTE(review): the examples file uses {{variable}}
  // placeholders — confirm that is the intended template syntax here.
  promptTemplate?: string
  // Text prepended / appended to the rendered template.
  promptPrefix?: string
  promptSuffix?: string
}

/** A single message in a chat-completion conversation. */
export interface ChatMessage {
  role: 'user' | 'system' | 'assistant' | 'tool'
  content: string
}

/** Options for chat-based LLM calls: base options plus the message list. */
export interface ChatModelOptions<
  TInput = any,
  TOutput = any,
  TModelParams = Record<string, any>
> extends BaseLLMOptions<TInput, TOutput, TModelParams> {
  messages: ChatMessage[]
}

/** One few-shot demonstration pair, used by `.examples()`. */
export interface LLMExample {
  input: string
  output: string
}

/** Retry policy for LLM calls. */
export interface LLMRetryConfig {
  // Maximum number of attempts (Agentic defaults this to 3).
  attempts: number
  // Strategy name; 'heal' is the only value used in this commit — presumably
  // more strategies are planned (see README "retry strategies" note).
  strategy: string
}

39
src/utils.ts 100644
Wyświetl plik

@ -0,0 +1,39 @@
import dotenv from 'dotenv-safe'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'
import { fromZodError } from 'zod-validation-error'
// Load env vars via dotenv-safe — per .env.example, OPENAI_API_KEY must be
// defined (in the environment or a local .env) for this demo to run.
dotenv.config()

// NOTE(review): unused placeholder — presumably an early sketch of a typed
// output wrapper; dead code, candidate for removal.
interface Temp {
  contentType: string
}
// Scratch driver exercising the raw OpenAI client and the (unimplemented)
// `infer` helper end to end.
async function main() {
  const openai = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY })

  // NOTE(review): declared but never used below.
  const outputSchema = z.object({})

  // Raw chat-completion smoke test; the result is currently unused.
  const res = await openai.createChatCompletion({
    model: 'gpt-4',
    messages: [
      {
        role: 'system',
        content: ''
      }
    ]
  })

  // Result unused; `infer` has no implementation yet.
  const out = await infer('give me a single boolean value', z.boolean(), {})
}
/**
 * Planned helper: prompt the model and coerce its response into `schema`.
 *
 * @param prompt - user prompt sent to the model
 * @param schema - zod schema the response must satisfy
 * @param opts - call options; `retry` controls the retry policy (previously an
 *   untyped `{ retry }` destructure with an implicit `any`)
 * @throws always — not implemented yet. The previous empty body silently
 *   resolved `undefined` while the signature promised `T`.
 */
async function infer<T = any>(
  prompt: string,
  schema: z.ZodType<T>,
  opts: { retry?: { attempts: number; strategy?: string } } = {}
): Promise<T> {
  throw new Error('infer is not implemented yet')
}
// Entry point: any rejection from main() aborts the process with exit code 1.
main().catch((err) => {
  console.error('error', err)
  process.exit(1)
})