Mirror of https://github.com/transitive-bullshit/chatgpt-api
docs: add new feedback example
parent
e6548f037b
commit
ee0c3cf967
@@ -0,0 +1,39 @@
+import 'dotenv/config'
+import { OpenAIClient } from 'openai-fetch'
+import { z } from 'zod'
+
+import { HumanFeedbackMechanismCLI } from '@/human-feedback'
+import { Agentic, withHumanFeedback } from '@/index'
+
+async function main() {
+  const openai = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! })
+  const ai = new Agentic({ openai })
+
+  const topicFacts = ai
+    .gpt3(`Tell me {{num}} jokes about {{topic}}`)
+    .input(
+      z.object({
+        topic: z.string(),
+        num: z.number().int().default(5).optional()
+      })
+    )
+    .output(z.array(z.string()))
+    .modelParams({ temperature: 0.9 })
+
+  const topicFactsFeedback = withHumanFeedback(topicFacts, {
+    type: 'selectN',
+    annotations: false,
+    bail: false,
+    editing: true,
+    mechanism: HumanFeedbackMechanismCLI
+  })
+
+  const out = await topicFactsFeedback.callWithMetadata({
+    topic: 'politicians',
+    num: 5
+  })
+  const feedback = out.metadata.feedback
+  console.log(JSON.stringify(feedback, null, 2))
+}
+
+main()
@ -1,32 +0,0 @@
|
|||
import { OpenAIClient } from '@agentic/openai-fetch'
|
||||
import 'dotenv/config'
|
||||
import { z } from 'zod'
|
||||
|
||||
import { Agentic, HumanFeedbackSelect } from '@/index'
|
||||
|
||||
async function main() {
|
||||
const openai = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! })
|
||||
const ai = new Agentic({ openai })
|
||||
|
||||
const jokes = ai
|
||||
.gpt3(`Tell me {{num}} jokes about {{topic}}`)
|
||||
.input(
|
||||
z.object({
|
||||
topic: z.string(),
|
||||
num: z.number().int().default(5).optional()
|
||||
})
|
||||
)
|
||||
.output(z.array(z.string()))
|
||||
.modelParams({ temperature: 0.9 })
|
||||
|
||||
const feedback = new HumanFeedbackSelect(z.string())
|
||||
let out = await jokes.call({ topic: 'statisticians' })
|
||||
let hf = await feedback.call(out)
|
||||
while (!hf.accepted) {
|
||||
out = await jokes.call({ topic: 'statisticians' })
|
||||
hf = await feedback.call(out)
|
||||
}
|
||||
console.log(hf.results)
|
||||
}
|
||||
|
||||
main()
|
|
@ -1,33 +0,0 @@
|
|||
import { OpenAIClient } from '@agentic/openai-fetch'
|
||||
import 'dotenv/config'
|
||||
import { z } from 'zod'
|
||||
|
||||
import { Agentic, HumanFeedbackSingle } from '@/index'
|
||||
|
||||
async function main() {
|
||||
const openai = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! })
|
||||
const ai = new Agentic({ openai })
|
||||
|
||||
const topicFacts = ai
|
||||
.gpt3(`Give me {{numFacts}} random facts about {{topic}}`)
|
||||
.input(
|
||||
z.object({
|
||||
topic: z.string(),
|
||||
numFacts: z.number().int().default(5).optional()
|
||||
})
|
||||
)
|
||||
.output(z.object({ facts: z.array(z.string()) }))
|
||||
.modelParams({ temperature: 0.9 })
|
||||
|
||||
const feedback = new HumanFeedbackSingle(topicFacts.outputSchema)
|
||||
|
||||
let out = await topicFacts.call({ topic: 'cats' })
|
||||
let hf = await feedback.call(out)
|
||||
while (!hf.accepted) {
|
||||
out = await topicFacts.call({ topic: 'cats' })
|
||||
hf = await feedback.call(out)
|
||||
}
|
||||
console.log(hf.result)
|
||||
}
|
||||
|
||||
main()
|
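Note for readers of this commit (not part of the diff itself): the removed examples accepted or retried output through an explicit while (!hf.accepted) loop around HumanFeedbackSelect / HumanFeedbackSingle, whereas the added example configures review declaratively with withHumanFeedback and reads the collected feedback from the call's metadata. Below is an annotated sketch of the new pattern, assembled only from code that appears in the added file above; the comments are editorial, not the author's.

import 'dotenv/config'
import { OpenAIClient } from 'openai-fetch'
import { z } from 'zod'

import { HumanFeedbackMechanismCLI } from '@/human-feedback'
import { Agentic, withHumanFeedback } from '@/index'

async function main() {
  const openai = new OpenAIClient({ apiKey: process.env.OPENAI_API_KEY! })
  const ai = new Agentic({ openai })

  // Same joke-telling task as in the added example.
  const jokes = ai
    .gpt3(`Tell me {{num}} jokes about {{topic}}`)
    .input(
      z.object({
        topic: z.string(),
        num: z.number().int().default(5).optional()
      })
    )
    .output(z.array(z.string()))
    .modelParams({ temperature: 0.9 })

  // The removed examples retried in a loop until a human accepted the
  // output. Here the task is wrapped once, and feedback is gathered via
  // the CLI mechanism during the call itself.
  const jokesWithFeedback = withHumanFeedback(jokes, {
    type: 'selectN',
    annotations: false,
    bail: false,
    editing: true,
    mechanism: HumanFeedbackMechanismCLI
  })

  // callWithMetadata returns the task output along with metadata, which
  // the example uses to access the recorded feedback.
  const out = await jokesWithFeedback.callWithMetadata({
    topic: 'politicians',
    num: 5
  })
  console.log(JSON.stringify(out.metadata.feedback, null, 2))
}

main()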