fix: llamaindex examples

pull/710/head
Travis Fischer 2025-05-26 22:03:04 +07:00
rodzic 67080c38a0
commit cc8401e464
3 zmienionych plików z 54 dodań i 7 usunięć

Wyświetl plik

@ -2,23 +2,25 @@ import 'dotenv/config'
import { createLlamaIndexTools } from '@agentic/llamaindex'
import { WeatherClient } from '@agentic/stdlib'
import { OpenAI, OpenAIAgent } from 'llamaindex'
import { openai } from '@llamaindex/openai'
import { agent } from '@llamaindex/workflow'
/**
 * Example: expose Agentic's WeatherClient as LlamaIndex tools and ask a
 * weather question via the new `@llamaindex/workflow` agent API.
 *
 * NOTE(review): this span is a diff with its +/- markers stripped — old
 * `OpenAIAgent`/`.chat()` lines and new `agent()`/`.run()` lines were
 * interleaved, producing invalid JS (duplicate `const response`, orphaned
 * object properties). Reconstructed here as the post-commit version the
 * commit message ("fix: llamaindex examples") describes.
 */
async function main() {
  const weather = new WeatherClient();
  const tools = createLlamaIndexTools(weather);

  // New workflow-style agent: a plain factory call instead of `new OpenAIAgent`.
  const weatherAgent = agent({
    name: 'Weather Agent',
    llm: openai({ model: 'gpt-4o-mini', temperature: 0 }),
    systemPrompt: 'You are a helpful assistant. Be as concise as possible.',
    tools
  });

  // `.run()` replaces the old `.chat({ message })` call.
  const response = await weatherAgent.run(
    'What is the weather in San Francisco?'
  );

  // Workflow agents return the result under `response.data.result`
  // (old API exposed it as `response.message.content`).
  console.log(response.data.result);
}

await main();

Wyświetl plik

@ -11,6 +11,8 @@
"@agentic/core": "workspace:*",
"@agentic/llamaindex": "workspace:*",
"@agentic/stdlib": "workspace:*",
"@llamaindex/openai": "^0.4.1",
"@llamaindex/workflow": "^1.1.4",
"llamaindex": "catalog:",
"zod": "catalog:"
}

Wyświetl plik

@ -365,6 +365,12 @@ importers:
'@agentic/stdlib':
specifier: workspace:*
version: link:../../packages/stdlib
'@llamaindex/openai':
specifier: ^0.4.1
version: 0.4.1(@llamaindex/core@0.6.7(gpt-tokenizer@2.8.1))(@llamaindex/env@0.1.30(gpt-tokenizer@2.8.1))(ws@8.18.0)(zod@3.25.28)
'@llamaindex/workflow':
specifier: ^1.1.4
version: 1.1.4(@llamaindex/core@0.6.7(gpt-tokenizer@2.8.1))(@llamaindex/env@0.1.30(gpt-tokenizer@2.8.1))(@modelcontextprotocol/sdk@1.12.0)(hono@4.7.6)(p-retry@6.2.1)(zod@3.25.28)
llamaindex:
specifier: 'catalog:'
version: 0.11.3(@llama-flow/core@0.4.2(@modelcontextprotocol/sdk@1.12.0)(hono@4.7.6)(p-retry@6.2.1)(zod@3.25.28))(gpt-tokenizer@2.8.1)(tree-sitter@0.22.4)(web-tree-sitter@0.24.7)(zod@3.25.28)
@ -2030,6 +2036,12 @@ packages:
tree-sitter: ^0.22.0
web-tree-sitter: ^0.24.3
'@llamaindex/openai@0.4.1':
resolution: {integrity: sha512-l3y4qNCkO2FMyRaWdAXs5d/o7eduXrrYFwaEm1ohjb6YxtAlZpW9M8hTQhz+nP59llQtgLov+4hL+nE0lh8PkQ==}
peerDependencies:
'@llamaindex/core': 0.6.7
'@llamaindex/env': 0.1.30
'@llamaindex/workflow@1.0.3':
resolution: {integrity: sha512-GzYzLfn12BTQiLVwFr9tGl1Sa7PPVErLLQAJMgvfjUK8cv764SpJGqln8iKTxnKF05HcRrmJeE7ZD9Lzpf7UrA==}
peerDependencies:
@ -2037,6 +2049,13 @@ packages:
'@llamaindex/env': 0.1.29
zod: ^3.23.8
'@llamaindex/workflow@1.1.4':
resolution: {integrity: sha512-PkT4kS7VlqcgXs6s6wpXmZW7oTWf/ambVofKfGk4qdCrXr9kN4i+qtCtVKI3eYIveV0UabbHwcVq9CTz9Yu9jg==}
peerDependencies:
'@llamaindex/core': 0.6.7
'@llamaindex/env': 0.1.30
zod: ^3.23.8
'@mastra/core@0.10.0':
resolution: {integrity: sha512-vu5Y21qUz0yzejLnQIH6ZwpEi6Nk0UypwpY9bYwHcyxtdUGkpexnJvZLSfmEiKAiLcMGSxyPTUh120JMweX/CA==}
engines: {node: '>=20'}
@ -5341,6 +5360,7 @@ packages:
node-domexception@1.0.0:
resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==}
engines: {node: '>=10.5.0'}
deprecated: Use your platform's native DOMException instead
node-fetch-native@1.6.6:
resolution: {integrity: sha512-8Mc2HhqPdlIfedsuZoc3yioPuzp6b+L5jRCRY1QzuWZh2EGJVQrGppC6V6cF0bLdbW0+O2YpqCA25aF/1lvipQ==}
@ -7710,12 +7730,35 @@ snapshots:
tree-sitter: 0.22.4
web-tree-sitter: 0.24.7
'@llamaindex/openai@0.4.1(@llamaindex/core@0.6.7(gpt-tokenizer@2.8.1))(@llamaindex/env@0.1.30(gpt-tokenizer@2.8.1))(ws@8.18.0)(zod@3.25.28)':
dependencies:
'@llamaindex/core': 0.6.7(gpt-tokenizer@2.8.1)
'@llamaindex/env': 0.1.30(gpt-tokenizer@2.8.1)
openai: 4.103.0(encoding@0.1.13)(ws@8.18.0)(zod@3.25.28)
transitivePeerDependencies:
- encoding
- ws
- zod
'@llamaindex/workflow@1.0.3(@llamaindex/core@0.6.7(gpt-tokenizer@2.8.1))(@llamaindex/env@0.1.30(gpt-tokenizer@2.8.1))(zod@3.25.28)':
dependencies:
'@llamaindex/core': 0.6.7(gpt-tokenizer@2.8.1)
'@llamaindex/env': 0.1.30(gpt-tokenizer@2.8.1)
zod: 3.25.28
'@llamaindex/workflow@1.1.4(@llamaindex/core@0.6.7(gpt-tokenizer@2.8.1))(@llamaindex/env@0.1.30(gpt-tokenizer@2.8.1))(@modelcontextprotocol/sdk@1.12.0)(hono@4.7.6)(p-retry@6.2.1)(zod@3.25.28)':
dependencies:
'@llama-flow/core': 0.4.2(@modelcontextprotocol/sdk@1.12.0)(hono@4.7.6)(p-retry@6.2.1)(zod@3.25.28)
'@llamaindex/core': 0.6.7(gpt-tokenizer@2.8.1)
'@llamaindex/env': 0.1.30(gpt-tokenizer@2.8.1)
zod: 3.25.28
transitivePeerDependencies:
- '@modelcontextprotocol/sdk'
- hono
- next
- p-retry
- rxjs
'@mastra/core@0.10.0(encoding@0.1.13)(openapi-types@12.1.3)(react@18.3.1)(zod@3.25.28)':
dependencies:
'@opentelemetry/api': 1.9.0