Mirror of https://github.com/kartoza/docker-osm
Updated all but database_agent. Audio is broken for now.
parent
fd92654074
commit
ea5ed6e9e0
@@ -27,4 +27,7 @@ settings/clip/clip.shx
!/geoserver-data/workspaces
!/geoserver-data/styles

__pycache__
__pycache__

notebooks/.ipynb_checkpoints
@@ -1,15 +1,18 @@
|
|||
select_layer_name = {
|
||||
"name": "select_layer_name",
|
||||
"description": "Gets a layer name from the text of a given task related to selecting layers on a map.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer.",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "select_layer_name",
|
||||
"description": "Gets a layer name from the text of a given task related to selecting layers on a map.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer.",
|
||||
},
|
||||
},
|
||||
"required": ["layer_name"],
|
||||
},
|
||||
"required": ["layer_name"],
|
||||
},
|
||||
}
|
||||
|
||||
|
|
|
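For reference, the hunk above moves the old functions-style description into the tools schema used by openai>=1.x. A minimal sketch of the resulting entry, reassembled from the added lines; the final list name is an assumption based on the import in the MapInfoAgent hunk further down:

select_layer_name = {
    "type": "function",
    "function": {
        "name": "select_layer_name",
        "description": "Gets a layer name from the text of a given task related to selecting layers on a map.",
        "parameters": {
            "type": "object",
            "properties": {
                "layer_name": {
                    "type": "string",
                    "description": "The name of the layer.",
                },
            },
            "required": ["layer_name"],
        },
    },
}

# Assumption: collected into the list imported by MapInfoAgent.
map_info_function_descriptions = [select_layer_name]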
@@ -1,69 +1,81 @@
|
|||
go_to_location = {
|
||||
"name": "go_to_location",
|
||||
"description": "Go to a given location.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"latitude": {
|
||||
"type": "number",
|
||||
"description": "The latitude to go to.",
|
||||
},
|
||||
"longitude": {
|
||||
"type": "number",
|
||||
"description": "The longitude to go to.",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "go_to_location",
|
||||
"description": "Go to a given location.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"latitude": {
|
||||
"type": "number",
|
||||
"description": "The latitude to go to.",
|
||||
},
|
||||
"longitude": {
|
||||
"type": "number",
|
||||
"description": "The longitude to go to.",
|
||||
},
|
||||
},
|
||||
"required": ["latitude", "longitude"],
|
||||
},
|
||||
"required": ["latitude", "longitude"],
|
||||
},
|
||||
}
|
||||
|
||||
pan_in_direction = {
|
||||
"name": "pan_in_direction",
|
||||
"description": "Pan in a given direction and distance in kilometers.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"direction": {
|
||||
"type": "string",
|
||||
"description": "The direction to pan in. One of 'north', 'south', 'east', 'west', 'northwest', 'northeast', 'southwest', 'southeast'.",
|
||||
},
|
||||
"distance_in_kilometers": {
|
||||
"type": "number",
|
||||
"description": "The distance to pan in kilometers. If not provided, defaults to 1.",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "pan_in_direction",
|
||||
"description": "Pan in a given direction and distance in kilometers.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"direction": {
|
||||
"type": "string",
|
||||
"description": "The direction to pan in. One of 'north', 'south', 'east', 'west', 'northwest', 'northeast', 'southwest', 'southeast'.",
|
||||
},
|
||||
"distance_in_kilometers": {
|
||||
"type": "number",
|
||||
"description": "The distance to pan in kilometers. If not provided, defaults to 1.",
|
||||
},
|
||||
},
|
||||
"required": ["direction"],
|
||||
},
|
||||
"required": ["direction"],
|
||||
},
|
||||
}
|
||||
|
||||
zoom_in = {
|
||||
"name": "zoom_in",
|
||||
"description": "Zoom in by a given number of zoom levels.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"zoom_levels": {
|
||||
"type": "number",
|
||||
"description": "The number of zoom levels to zoom in. If not provided, defaults to 1.",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "zoom_in",
|
||||
"description": "Zoom in by a given number of zoom levels.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"zoom_levels": {
|
||||
"type": "number",
|
||||
"description": "The number of zoom levels to zoom in. If not provided, defaults to 1.",
|
||||
},
|
||||
},
|
||||
"required": ["zoom_levels"],
|
||||
},
|
||||
},
|
||||
"required": ["zoom_levels"],
|
||||
}
|
||||
|
||||
zoom_out = {
|
||||
"name": "zoom_out",
|
||||
"description": "Zoom out by a given number of zoom levels.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"zoom_levels": {
|
||||
"type": "number",
|
||||
"description": "The number of zoom levels to zoom out. If not provided, defaults to 1.",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "zoom_out",
|
||||
"description": "Zoom out by a given number of zoom levels.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"zoom_levels": {
|
||||
"type": "number",
|
||||
"description": "The number of zoom levels to zoom out. If not provided, defaults to 1.",
|
||||
},
|
||||
},
|
||||
"required": ["zoom_levels"],
|
||||
},
|
||||
},
|
||||
"required": ["zoom_levels"],
|
||||
}
|
||||
|
||||
navigation_function_descriptions = [
|
||||
|
|
|
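A minimal, hedged sketch of how one of these tools-format descriptions is consumed by the 1.x client; the agents later in this diff do the equivalent inside listen(), and the message text here is just an example:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="gpt-3.5-turbo-0613",
    messages=[{"role": "user", "content": "zoom in two levels"}],
    tools=[zoom_in],      # any of the descriptions defined above
    tool_choice="auto",
)
for tool_call in response.choices[0].message.tool_calls or []:
    print(tool_call.function.name, tool_call.function.arguments)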
@@ -1,77 +1,89 @@
|
|||
|
||||
set_color = {
|
||||
"name": "set_color",
|
||||
"description": "Set the maplibre paint property color of a layer.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer.",
|
||||
},
|
||||
"color": {
|
||||
"type": "string",
|
||||
"description": "The color to set in hex format.",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "set_color",
|
||||
"description": "Set the maplibre paint property color of a layer.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer.",
|
||||
},
|
||||
"color": {
|
||||
"type": "string",
|
||||
"description": "The color to set in hex format.",
|
||||
},
|
||||
},
|
||||
"required": ["layer_name", "color"],
|
||||
},
|
||||
"required": ["layer_name", "color"],
|
||||
},
|
||||
}
|
||||
|
||||
set_opacity = {
|
||||
"name": "set_opacity",
|
||||
"description": "Set the maplibre paint property opacity of a layer.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer.",
|
||||
},
|
||||
"opacity": {
|
||||
"type": "number",
|
||||
"description": "The opacity to set between 0 and 1.",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "set_opacity",
|
||||
"description": "Set the maplibre paint property opacity of a layer.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer.",
|
||||
},
|
||||
"opacity": {
|
||||
"type": "number",
|
||||
"description": "The opacity to set between 0 and 1.",
|
||||
},
|
||||
},
|
||||
"required": ["layer_name", "opacity"],
|
||||
},
|
||||
"required": ["layer_name", "opacity"],
|
||||
},
|
||||
}
|
||||
|
||||
set_width = {
|
||||
"name": "set_width",
|
||||
"description": "Set the maplibre paint property width.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer to get the paint property for.",
|
||||
},
|
||||
"width": {
|
||||
"type": "number",
|
||||
"description": "The width to set.",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "set_width",
|
||||
"description": "Set the maplibre paint property width.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer to get the paint property for.",
|
||||
},
|
||||
"width": {
|
||||
"type": "number",
|
||||
"description": "The width to set.",
|
||||
},
|
||||
},
|
||||
"required": ["layer_name", "width"],
|
||||
},
|
||||
"required": ["layer_name", "width"],
|
||||
},
|
||||
}
|
||||
|
||||
set_visibility = {
|
||||
"name": "set_visibility",
|
||||
"description": "Set the visibility of a layer (turning it on or off).",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer to get the layout property for.",
|
||||
},
|
||||
"visibility": {
|
||||
"type": "string",
|
||||
"description": "Either 'visible' or 'none'. Set to 'none' to hide the layer.",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "set_visibility",
|
||||
"description": "Set the visibility of a layer (turning it on or off).",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"layer_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the layer to get the layout property for.",
|
||||
},
|
||||
"visibility": {
|
||||
"type": "string",
|
||||
"description": "Either 'visible' or 'none'. Set to 'none' to hide the layer.",
|
||||
},
|
||||
},
|
||||
"required": ["layer_name", "visible"],
|
||||
},
|
||||
"required": ["layer_name", "visible"],
|
||||
},
|
||||
}
|
||||
|
||||
|
|
|
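Each of these descriptions pairs with a small handler on StyleAgent later in this diff that echoes the tool name and arguments back to the caller. An illustration only, with a made-up layer name:

import json

# Arguments the model might return for the set_visibility tool above
# (the layer name is made up for illustration):
args = json.loads('{"layer_name": "buildings", "visibility": "none"}')

# StyleAgent.set_visibility(**args), shown further down, would then return:
# {"name": "set_visibility", "layer_name": "buildings", "visibility": "none"}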
@@ -1,4 +1,3 @@
|
|||
import openai
|
||||
import json
|
||||
from .function_descriptions.map_info_function_descriptions import map_info_function_descriptions
|
||||
import logging
|
||||
|
@@ -11,9 +10,10 @@ class MapInfoAgent:
|
|||
def select_layer_name(self, layer_name):
|
||||
return {"name": "select_layer_name", "layer_name": layer_name}
|
||||
|
||||
def __init__(self, model_version="gpt-3.5-turbo-0613", layers=[]):
|
||||
def __init__(self, openai, model_version="gpt-3.5-turbo-0613"):
|
||||
self.openai = openai
|
||||
self.model_version = model_version
|
||||
self.function_descriptions = map_info_function_descriptions
|
||||
self.tools = map_info_function_descriptions
|
||||
self.messages = [
|
||||
{
|
||||
"role": "system",
|
||||
|
@@ -36,64 +36,49 @@ class MapInfoAgent:
|
|||
}
|
||||
|
||||
def listen(self, message, layer_names=[]):
|
||||
logger.info(f"In MapInfoAgent.listen()...message is: {message}")
|
||||
"""Listen to a message from the user."""
|
||||
#map_context = f"The following layers are in the map: {layer_names}"
|
||||
#remove the last item in self.messages
|
||||
if len(self.messages) > 1:
|
||||
self.messages.pop()
|
||||
logger.info(f"In MapInfoAgent...message is: {message}")
|
||||
|
||||
self.messages.append({
|
||||
"role": "user",
|
||||
"content": message,
|
||||
})
|
||||
# self.messages.append({
|
||||
# "role": "user",
|
||||
# "content": map_context,
|
||||
# })
|
||||
logger.info(f"MapInfoAgent self.messages: {self.messages}")
|
||||
|
||||
# this will be the function gpt will call if it
|
||||
# determines that the user wants to call a function
|
||||
function_response = None
|
||||
|
||||
try:
|
||||
response = openai.ChatCompletion.create(
|
||||
response = self.openai.chat.completions.create(
|
||||
model=self.model_version,
|
||||
messages=self.messages,
|
||||
functions=self.function_descriptions,
|
||||
function_call="auto",
|
||||
temperature=0.1,
|
||||
max_tokens=256,
|
||||
top_p=1,
|
||||
frequency_penalty=0,
|
||||
presence_penalty=0
|
||||
tools=self.tools,
|
||||
tool_choice={"type": "function", "function": {"name": "select_layer_name"}},
|
||||
)
|
||||
response_message = response.choices[0].message
|
||||
logger.info(f"Response from OpenAI in MapInfoAgent: {response_message}")
|
||||
tool_calls = response_message.tool_calls
|
||||
|
||||
response_message = response["choices"][0]["message"]
|
||||
|
||||
logger.info(f"First response from OpenAI in MapInfoAgent: {response_message}")
|
||||
|
||||
if response_message.get("function_call"):
|
||||
function_name = response_message["function_call"]["name"]
|
||||
logger.info(f"Type of function_name: {type(function_name)}")
|
||||
function_name = function_name.strip()
|
||||
logger.info(f"Function name: {function_name}")
|
||||
function_to_call = self.available_functions[function_name]
|
||||
logger.info(f"Function to call: {function_to_call}")
|
||||
function_args = json.loads(response_message["function_call"]["arguments"])
|
||||
logger.info(f"Function args: {function_args}")
|
||||
# determine the function to call
|
||||
function_response = function_to_call(**function_args)
|
||||
if tool_calls:
|
||||
self.messages.append(response_message)
|
||||
self.messages.append({
|
||||
"role": "function",
|
||||
"name": function_name,
|
||||
"content": function_response,
|
||||
})
|
||||
logger.info(f"Function response: {function_response}")
|
||||
for tool_call in tool_calls:
|
||||
function_name = tool_call.function.name
|
||||
function_to_call = self.available_functions[function_name]
|
||||
function_args = json.loads(tool_call.function.arguments)
|
||||
function_response = function_to_call(**function_args)
|
||||
self.messages.append(
|
||||
{
|
||||
"tool_call_id": tool_call.id,
|
||||
"role": "tool",
|
||||
"name": function_name,
|
||||
"content": json.dumps(function_response),
|
||||
}
|
||||
)
|
||||
|
||||
# if the function name is select_layer_name, we need OpenAI to select the proper layer name instead of whatever the user said.
|
||||
# They may have used lower case or a descriptive designation instead of the actual layer name. We hope OpenAI can figure out
|
||||
# what they meant.
|
||||
# what they meant. In general, it's pretty good at this unless there are multiple layers with similar names, in which case
|
||||
# it just chooses one.
|
||||
if function_name == "select_layer_name":
|
||||
logger.info(f"Sending layer name retrieval request to OpenAI...")
|
||||
prompt = f"Please select a layer name from the following list that is closest to the text '{function_response['layer_name']}': {str(layer_names)}\n Only state the layer name in your response."
|
||||
|
@@ -104,12 +89,12 @@ class MapInfoAgent:
|
|||
"content": prompt,
|
||||
},
|
||||
]
|
||||
second_response = openai.ChatCompletion.create(
|
||||
second_response = self.openai.chat.completions.create(
|
||||
model=self.model_version,
|
||||
messages=messages,
|
||||
)
|
||||
logger.info(f"Second response from OpenAI in MapInfoAgent: {second_response}")
|
||||
second_response_message = second_response["choices"][0]["message"]["content"]
|
||||
second_response_message = second_response.choices[0].message.content
|
||||
logger.info(f"Second response message from OpenAI in MapInfoAgent: {second_response_message}")
|
||||
logger.info(f"Function Response bofore setting the layer name: {function_response}")
|
||||
function_response['layer_name'] = second_response_message
|
||||
|
|
|
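MapInfoAgent.listen() above resolves each tool call through self.available_functions, which is not shown in this hunk. A hedged sketch of what that mapping presumably looks like, given the select_layer_name method defined earlier in the file:

# Presumably set in MapInfoAgent.__init__ (not shown in this diff):
self.available_functions = {
    "select_layer_name": self.select_layer_name,
}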
@@ -1,36 +1,34 @@
|
|||
import json
|
||||
import openai
|
||||
#import openai
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class MarshallAgent:
|
||||
"""A Marshall agent that has function descriptions for choosing the appropriate agent for a specified task."""
|
||||
def __init__(self, model_version="gpt-3.5-turbo-0613"):
|
||||
def __init__(self, openai, model_version="gpt-3.5-turbo-0613"):
|
||||
self.model_version = model_version
|
||||
|
||||
#we only need one function description for this agent
|
||||
function_description = {
|
||||
"name": "choose_agent",
|
||||
"description": """Chooses an appropriate agent for a given task.""",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"agent_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the agent to choose. One of 'NavigationAgent', 'StyleAgent', 'MapInfoAgent', 'DatabaseAgent'.",
|
||||
self.openai = openai
|
||||
self.tools = [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "choose_agent",
|
||||
"description": """Chooses an appropriate agent for a given task.""",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"agent_name": {
|
||||
"type": "string",
|
||||
"description": "The name of the agent to choose. One of 'NavigationAgent', 'StyleAgent', 'MapInfoAgent', 'DatabaseAgent'.",
|
||||
},
|
||||
},
|
||||
"required": ["agent_name"],
|
||||
},
|
||||
},
|
||||
"required": ["agent_name"],
|
||||
},
|
||||
}
|
||||
|
||||
self.function_descriptions = [function_description]
|
||||
|
||||
self.messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": """You are a helpful assistant that decides which agent to use for a specified task.
|
||||
]
|
||||
self.system_message = """You are a helpful assistant that decides which agent to use for a specified task.
|
||||
|
||||
For tasks related to adding layers and other geospatial data to the map, use the DatabaseAgent.
|
||||
Examples include 'add buildings to the map' and 'get landuse polygons within this extent'.
|
||||
|
@@ -47,9 +45,13 @@ class MarshallAgent:
|
|||
'go to Paris', 'show me the Statue of Liberty', and 'where is Houston, Texas?'
|
||||
|
||||
If you can't find the appropriate agent, say that you didn't understand and ask
|
||||
for a more specific description of the task.""",
|
||||
},
|
||||
]
|
||||
for a more specific description of the task."""
|
||||
|
||||
#initialize the messages queue with the system message
|
||||
self.messages = [{"role": "system", "content": self.system_message}]
|
||||
self.available_functions = {
|
||||
"choose_agent": self.choose_agent,
|
||||
}
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
def choose_agent(self, agent_name):
|
||||
|
@@ -59,9 +61,9 @@ class MarshallAgent:
|
|||
self.logger.info(f"In MarshallAgent.listen()...message is: {message}")
|
||||
"""Listen to a message from the user."""
|
||||
|
||||
# Remove the last item in self.messages. Our agent has no memory
|
||||
if len(self.messages) > 1:
|
||||
self.messages.pop()
|
||||
# # Remove the last item in self.messages. Our agent has no memory
|
||||
# if len(self.messages) > 1:
|
||||
# self.messages.pop()
|
||||
|
||||
self.messages.append({
|
||||
"role": "user",
|
||||
|
@@ -73,34 +75,37 @@ class MarshallAgent:
|
|||
function_response = None
|
||||
|
||||
try:
|
||||
response = openai.ChatCompletion.create(
|
||||
response = self.openai.chat.completions.create(
|
||||
model=self.model_version,
|
||||
messages=self.messages,
|
||||
functions=self.function_descriptions,
|
||||
function_call={"name": "choose_agent"},
|
||||
temperature=0,
|
||||
max_tokens=256,
|
||||
top_p=1,
|
||||
frequency_penalty=0,
|
||||
presence_penalty=0
|
||||
tools=self.tools,
|
||||
tool_choice={"type": "function", "function": {"name": "choose_agent"}},
|
||||
)
|
||||
response_message = response["choices"][0]["message"]
|
||||
|
||||
self.logger.info(f"Response from OpenAI in MarshallAgent: {response_message}")
|
||||
|
||||
if response_message.get("function_call"):
|
||||
function_args = json.loads(response_message["function_call"]["arguments"])
|
||||
self.logger.info(f"Function args: {function_args}")
|
||||
|
||||
# call choose agent
|
||||
function_response = self.choose_agent(**function_args)
|
||||
self.logger.info(f"Function response: {function_response}")
|
||||
response_message = response.choices[0].message
|
||||
tool_calls = response_message.tool_calls
|
||||
|
||||
if tool_calls:
|
||||
available_functions = self.available_functions
|
||||
self.messages.append(response_message)
|
||||
for tool_call in tool_calls:
|
||||
function_name = tool_call.function.name
|
||||
function_to_call = available_functions[function_name]
|
||||
function_args = json.loads(tool_call.function.arguments)
|
||||
function_response = function_to_call(**function_args)
|
||||
self.messages.append(
|
||||
{
|
||||
"tool_call_id": tool_call.id,
|
||||
"role": "tool",
|
||||
"name": function_name,
|
||||
"content": json.dumps(function_response),
|
||||
}
|
||||
)
|
||||
# second_response = self.openai.chat.completions.create(
|
||||
# model=self.model_version,
|
||||
# messages=self.messages,
|
||||
# )
|
||||
logger.info(f"Sucessful MarallAgent task completion: {function_response}")
|
||||
return {"response": function_response}
|
||||
elif response_message.get("content"):
|
||||
return {"response": response_message["content"]}
|
||||
else:
|
||||
return {"response": "I'm sorry, I don't understand."}
|
||||
|
||||
except Exception as e:
|
||||
return {"error": "Failed to get response from OpenAI in MarshallAgent: " + str(e)}, 500
|
||||
|
|
|
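For context, a hedged sketch of how the marshall's choice could be routed to the other agents. The routing dict and the shape of choose_agent's return value are assumptions for illustration, not part of this commit; the agent instances come from app.py further down:

agents = {
    "NavigationAgent": navigation_agent,
    "StyleAgent": style_agent,
    "MapInfoAgent": map_info_agent,
}

result = marshall_agent.listen("pan north by 30km")["response"]
# choose_agent presumably returns something like
# {"name": "choose_agent", "agent_name": "NavigationAgent"}
agent = agents.get(result.get("agent_name"))
if agent is not None:
    reply = agent.listen("pan north by 30km")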
@@ -1,6 +1,6 @@
|
|||
import logging
|
||||
from .function_descriptions.navigation_function_descriptions import navigation_function_descriptions
|
||||
import openai
|
||||
#import openai
|
||||
import json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
@@ -20,9 +20,9 @@ class NavigationAgent:
|
|||
def zoom_out(self, zoom_levels=1):
|
||||
return {"name": "zoom_out", "zoom_levels": zoom_levels}
|
||||
|
||||
def __init__(self, model_version="gpt-3.5-turbo-0613"):
|
||||
def __init__(self, openai, model_version="gpt-3.5-turbo-0613"):
|
||||
self.openai = openai
|
||||
self.model_version = model_version
|
||||
self.function_descriptions = navigation_function_descriptions
|
||||
self.messages = [
|
||||
{
|
||||
"role": "system",
|
||||
|
@@ -41,13 +41,15 @@ class NavigationAgent:
|
|||
"zoom_in": self.zoom_in,
|
||||
"zoom_out": self.zoom_out,
|
||||
}
|
||||
self.tools = navigation_function_descriptions
|
||||
logger.info(f"self.tools in NavigationAgent is: {self.tools}")
|
||||
|
||||
def listen(self, message):
|
||||
logging.info(f"In NavigationAgent.listen()...message is: {message}")
|
||||
logging.info(f"In NavigationAgent...message is: {message}")
|
||||
"""Listen to a message from the user."""
|
||||
#remove the last item in self.messages
|
||||
if len(self.messages) > 1:
|
||||
self.messages.pop()
|
||||
# #remove the last item in self.messages
|
||||
# if len(self.messages) > 1:
|
||||
# self.messages.pop()
|
||||
self.messages.append({
|
||||
"role": "user",
|
||||
"content": message,
|
||||
|
@@ -58,38 +60,35 @@ class NavigationAgent:
|
|||
function_response = None
|
||||
|
||||
try:
|
||||
response = openai.ChatCompletion.create(
|
||||
logger.info("Calling OpenAI API in NavigationAgent...")
|
||||
response = self.openai.chat.completions.create(
|
||||
model=self.model_version,
|
||||
messages=self.messages,
|
||||
functions=self.function_descriptions,
|
||||
function_call="auto",
|
||||
temperature=0.1,
|
||||
max_tokens=256,
|
||||
top_p=1,
|
||||
frequency_penalty=0,
|
||||
presence_penalty=0
|
||||
tools=self.tools,
|
||||
tool_choice="auto",
|
||||
)
|
||||
logger.info(f"response in NavigationAgent is: {response}")
|
||||
response_message = response.choices[0].message
|
||||
logger.info(f"response_message in NavigationAgent is: {response_message}")
|
||||
tool_calls = response_message.tool_calls
|
||||
|
||||
response_message = response["choices"][0]["message"]
|
||||
|
||||
logging.info(f"Response from OpenAI in NavigationAgent: {response_message}")
|
||||
|
||||
if response_message.get("function_call"):
|
||||
function_name = response_message["function_call"]["name"]
|
||||
logging.info(f"Function name: {function_name}")
|
||||
function_to_call = self.available_functions[function_name]
|
||||
logging.info(f"Function to call: {function_to_call}")
|
||||
function_args = json.loads(response_message["function_call"]["arguments"])
|
||||
logging.info(f"Function args: {function_args}")
|
||||
# determine the function to call
|
||||
function_response = function_to_call(**function_args)
|
||||
logging.info(f"Function response: {function_response}")
|
||||
|
||||
if tool_calls:
|
||||
self.messages.append(response_message)
|
||||
for tool_call in tool_calls:
|
||||
function_name = tool_call.function.name
|
||||
function_to_call = self.available_functions[function_name]
|
||||
function_args = json.loads(tool_call.function.arguments)
|
||||
function_response = function_to_call(**function_args)
|
||||
self.messages.append(
|
||||
{
|
||||
"tool_call_id": tool_call.id,
|
||||
"role": "tool",
|
||||
"name": function_name,
|
||||
"content": json.dumps(function_response),
|
||||
}
|
||||
)
|
||||
logger.info("Sucessful NavigationAgent task completion.")
|
||||
return {"response": function_response}
|
||||
elif response_message.get("content"):
|
||||
return {"response": response_message["content"]}
|
||||
else:
|
||||
return {"response": "I'm sorry, I don't understand."}
|
||||
|
||||
except Exception as e:
|
||||
return {"error": "Failed to get response from OpenAI in NavigationAgent: " + str(e)}, 500
|
||||
|
|
|
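A short usage sketch of the updated agent: with the 1.x SDK the response message is an object, so fields are attributes (response_message.content, tool_call.function.arguments) rather than dict keys. The wiring here mirrors app.py further down; the expected result shape follows the zoom_in handler shown at the top of this file:

# Hedged usage sketch (the openai client instance comes from app.py below):
navigation_agent = NavigationAgent(openai, model_version="gpt-3.5-turbo-0613")
result = navigation_agent.listen("zoom in 2 levels")
# Expected shape, via the zoom_in handler shown above:
# {"response": {"name": "zoom_in", "zoom_levels": 2}}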
@@ -1,7 +1,7 @@
|
|||
import logging
|
||||
from .function_descriptions.style_function_descriptions import style_function_descriptions
|
||||
import json
|
||||
import openai
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@@ -19,10 +19,11 @@ class StyleAgent:
|
|||
def set_visibility(self, layer_name, visibility):
|
||||
return {"name": "set_visibility", "layer_name": layer_name, "visibility": visibility}
|
||||
|
||||
def __init__(self, model_version="gpt-3.5-turbo-0613"):
|
||||
def __init__(self, openai, model_version="gpt-3.5-turbo-0613"):
|
||||
self.openai = openai
|
||||
self.model_version = model_version
|
||||
|
||||
self.function_descriptions = style_function_descriptions
|
||||
self.tools = style_function_descriptions
|
||||
|
||||
self.messages = [
|
||||
{
|
||||
|
@@ -42,12 +43,9 @@ class StyleAgent:
|
|||
}
|
||||
|
||||
def listen(self, message):
|
||||
logging.info(f"In StyleAgent.listen()...message is: {message}")
|
||||
|
||||
"""Listen to a message from the user."""
|
||||
#remove the last item in self.messages
|
||||
if len(self.messages) > 1:
|
||||
self.messages.pop()
|
||||
logging.info(f"In StyleAgent...message is: {message}")
|
||||
|
||||
self.messages.append({
|
||||
"role": "user",
|
||||
"content": message,
|
||||
|
@@ -58,38 +56,35 @@ class StyleAgent:
|
|||
function_response = None
|
||||
|
||||
try:
|
||||
response = openai.ChatCompletion.create(
|
||||
logger.info("Calling OpenAI API in StyleAgent...")
|
||||
response = self.openai.chat.completions.create(
|
||||
model=self.model_version,
|
||||
messages=self.messages,
|
||||
functions=self.function_descriptions,
|
||||
function_call="auto",
|
||||
temperature=0.1,
|
||||
max_tokens=256,
|
||||
top_p=1,
|
||||
frequency_penalty=0,
|
||||
presence_penalty=0
|
||||
tools=self.tools,
|
||||
tool_choice="auto",
|
||||
)
|
||||
logger.info(f"response in StyleAgent is: {response}")
|
||||
response_message = response.choices[0].message
|
||||
logger.info(f"response_message in StyleAgent is: {response_message}")
|
||||
tool_calls = response_message.tool_calls
|
||||
|
||||
response_message = response["choices"][0]["message"]
|
||||
|
||||
logging.info(f"Response from OpenAI in StyleAgent: {response_message}")
|
||||
|
||||
if response_message.get("function_call"):
|
||||
function_name = response_message["function_call"]["name"]
|
||||
logging.info(f"Function name: {function_name}")
|
||||
function_to_call = self.available_functions[function_name]
|
||||
logging.info(f"Function to call: {function_to_call}")
|
||||
function_args = json.loads(response_message["function_call"]["arguments"])
|
||||
logging.info(f"Function args: {function_args}")
|
||||
# determine the function to call
|
||||
function_response = function_to_call(**function_args)
|
||||
logging.info(f"Function response: {function_response}")
|
||||
|
||||
if tool_calls:
|
||||
self.messages.append(response_message)
|
||||
for tool_call in tool_calls:
|
||||
function_name = tool_call.function.name
|
||||
function_to_call = self.available_functions[function_name]
|
||||
function_args = json.loads(tool_call.function.arguments)
|
||||
function_response = function_to_call(**function_args)
|
||||
self.messages.append(
|
||||
{
|
||||
"tool_call_id": tool_call.id,
|
||||
"role": "tool",
|
||||
"name": function_name,
|
||||
"content": json.dumps(function_response),
|
||||
}
|
||||
)
|
||||
logger.info("Sucessful StyleAgent task completion.")
|
||||
return {"response": function_response}
|
||||
elif response_message.get("content"):
|
||||
return {"response": response_message["content"]}
|
||||
else:
|
||||
return {"response": "I'm sorry, I don't understand."}
|
||||
|
||||
except Exception as e:
|
||||
return {"error": "Failed to get response from OpenAI in StyleAgent: " + str(e)}, 500
|
||||
|
|
|
@@ -1,7 +1,7 @@
|
|||
from flask import Flask, render_template, request, jsonify
|
||||
from flask_cors import CORS
|
||||
import os
|
||||
import openai
|
||||
from openai import OpenAI, NotFoundError
|
||||
from dotenv import load_dotenv
|
||||
import json
|
||||
import psycopg2
|
||||
|
@@ -22,15 +22,14 @@ load_dotenv()
|
|||
app = Flask(__name__)
|
||||
CORS(app)
|
||||
|
||||
#openai.organization = os.getenv("OPENAI_ORGANIZATION")
|
||||
openai.api_key = os.getenv("OPENAI_API_KEY")
|
||||
openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
||||
model_version = os.getenv("OPENAI_MODEL_VERSION")
|
||||
UPLOAD_FOLDER = 'uploads/audio'
|
||||
|
||||
navigation_agent = NavigationAgent(model_version=model_version)
|
||||
marshall_agent = MarshallAgent(model_version=model_version)
|
||||
style_agent = StyleAgent(model_version=model_version)
|
||||
map_info_agent = MapInfoAgent(model_version=model_version)
|
||||
navigation_agent = NavigationAgent(openai, model_version=model_version)
|
||||
marshall_agent = MarshallAgent(openai, model_version=model_version)
|
||||
style_agent = StyleAgent(openai, model_version=model_version)
|
||||
map_info_agent = MapInfoAgent(openai, model_version=model_version)
|
||||
|
||||
def get_database_schema():
|
||||
db = Database(
|
||||
|
@@ -164,7 +163,7 @@ def upload_audio():
|
|||
audio_file = request.files['audio']
|
||||
audio_file.save(os.path.join(UPLOAD_FOLDER, "user_audio.webm"))
|
||||
audio_file=open(os.path.join(UPLOAD_FOLDER, "user_audio.webm"), 'rb')
|
||||
transcript = openai.Audio.transcribe("whisper-1", audio_file)
|
||||
transcript = openai.audio.transcribe("whisper-1", audio_file)
|
||||
logging.info(f"Received transcript: {transcript}")
|
||||
message = transcript['text']
|
||||
#delete the audio
|
||||
|
|
|
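The commit message notes that audio is broken for now; the hunk above still calls openai.audio.transcribe, which is not how the 1.x client exposes Whisper. A minimal hedged sketch of the 1.x transcription call, assuming the same upload path as above:

# "openai" here is the OpenAI client instance created earlier in app.py
with open(os.path.join(UPLOAD_FOLDER, "user_audio.webm"), "rb") as audio_file:
    transcript = openai.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
    )
message = transcript.text  # 1.x returns an object, not a dict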
@@ -0,0 +1,261 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 163,
|
||||
"id": "aa531c59-0557-4348-bef6-0a614ee39d0b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import html\n",
|
||||
"import json\n",
|
||||
"import requests"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 164,
|
||||
"id": "90b627b6-fc63-4a26-b3c6-0594f847fe78",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#URL = 'http://localhost:5001/api/v1/generate'\n",
|
||||
"URL = 'http://localhost:5001/api/v1/chat'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 165,
|
||||
"id": "ee38c219-22d8-4d84-b125-4a1b3ac8561f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"available_functions = \"\"\"\n",
|
||||
"{\n",
|
||||
" \"name\": \"go_to_location\",\n",
|
||||
" \"description\": \"Go to a given location.\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"latitude\": {\n",
|
||||
" \"type\": \"number\",\n",
|
||||
" \"description\": \"The latitude to go to.\",\n",
|
||||
" },\n",
|
||||
" \"longitude\": {\n",
|
||||
" \"type\": \"number\",\n",
|
||||
" \"description\": \"The longitude to go to.\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"latitude\", \"longitude\"],\n",
|
||||
" },\n",
|
||||
"},\n",
|
||||
"{\n",
|
||||
" \"name\": \"pan_in_direction\",\n",
|
||||
" \"description\": \"Pan in a given direction and distance in kilometers.\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"direction\": {\n",
|
||||
" \"type\": \"string\",\n",
|
||||
" \"description\": \"The direction to pan in. One of 'north', 'south', 'east', 'west', 'northwest', 'northeast', 'southwest', 'southeast'.\",\n",
|
||||
" },\n",
|
||||
" \"distance_in_kilometers\": {\n",
|
||||
" \"type\": \"number\",\n",
|
||||
" \"description\": \"The distance to pan in kilometers. If not provided, defaults to 1.\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"direction\"],\n",
|
||||
" },\n",
|
||||
"},\n",
|
||||
"{\n",
|
||||
" \"name\": \"zoom_in\",\n",
|
||||
" \"description\": \"Zoom in by a given number of zoom levels.\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"zoom_levels\": {\n",
|
||||
" \"type\": \"number\",\n",
|
||||
" \"description\": \"The number of zoom levels to zoom in. If not provided, defaults to 1.\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"zoom_levels\"],\n",
|
||||
"},\n",
|
||||
"{\n",
|
||||
" \"name\": \"zoom_out\",\n",
|
||||
" \"description\": \"Zoom out by a given number of zoom levels.\",\n",
|
||||
" \"parameters\": {\n",
|
||||
" \"type\": \"object\",\n",
|
||||
" \"properties\": {\n",
|
||||
" \"zoom_levels\": {\n",
|
||||
" \"type\": \"number\",\n",
|
||||
" \"description\": \"The number of zoom levels to zoom out. If not provided, defaults to 1.\",\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" },\n",
|
||||
" \"required\": [\"zoom_levels\"],\n",
|
||||
"}\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 166,
|
||||
"id": "d007f9f6-c76e-4237-bd85-5cd6a0e03521",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"system_message = f\"\"\"As an AI assistant, please select the most suitable function and parameters \n",
|
||||
"from the list of available functions below, based on the user's input. Provide your response in JSON format.\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 167,
|
||||
"id": "8f7e36b3-ce1f-4c6b-ba81-9c5a884b4e93",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def send_message(message, system_message, function_descriptions, temperature=0.0):\n",
|
||||
" payload = f\"\"\"{system_message}\\nInput: {message}\\nAvailable functions:\\n{function_descriptions}\"\"\"\n",
|
||||
" history = {'internal': [], 'visible': []}\n",
|
||||
" request = {\n",
|
||||
" 'user_input': payload,\n",
|
||||
" 'history': history,\n",
|
||||
" 'temperature': temperature,\n",
|
||||
" }\n",
|
||||
" response = requests.post(URL, json=request)\n",
|
||||
" if response.status_code == 200:\n",
|
||||
" result = response.json()['results'][0]['history']\n",
|
||||
" response_data = html.unescape(result['visible'][-1][1])\n",
|
||||
" response_message = json.loads(response_data.strip('\\n'))\n",
|
||||
" response = {\"response\": response_message}\n",
|
||||
" return response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 168,
|
||||
"id": "82831633-bd48-4e22-9b09-80a686eadd3d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"message = \"go to paris\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 169,
|
||||
"id": "5ee7f345-2e3a-4ac6-8601-c0fdc6d49b62",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'response': {'function': 'go_to_location',\n",
|
||||
" 'parameters': {'latitude': 48.8567, 'longitude': 2.3522}}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 169,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"send_message(message, system_message, available_functions)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 170,
|
||||
"id": "f2b4aee0-f2e2-4783-bb33-76f9906dbb66",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"message = \"zoom in\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 171,
|
||||
"id": "bad94859-7014-4abe-bf1e-22303634eecf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'response': {'function': 'zoom_in', 'parameters': {'zoom_levels': 2}}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 171,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"send_message(message, system_message, available_functions)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 172,
|
||||
"id": "8f755da8-0ce8-4ed5-bae5-21712bc604bf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"message = \"pan north by 30km\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 173,
|
||||
"id": "5efddf84-6bea-457d-91b7-ab70f5f4909f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'response': {'function': 'pan_in_direction',\n",
|
||||
" 'parameters': {'direction': 'north', 'distance_in_kilometers': 30}}}"
|
||||
]
|
||||
},
|
||||
"execution_count": 173,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"send_message(message, system_message, available_functions)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2ba2bbc0-7d27-48ed-bd0a-b11dfa099098",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.17"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|