updated all but database_agent. Audio is broke for now

pull/137/head
shanergi 2023-11-08 20:02:43 -05:00
parent fd92654074
commit ea5ed6e9e0
10 changed files with 553 additions and 279 deletions
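Every agent touched by this commit makes the same migration: the module-level `openai` import and `openai.ChatCompletion.create(..., functions=..., function_call=...)` are replaced by an injected openai-python v1 client and `chat.completions.create(..., tools=..., tool_choice=...)`, with tool results appended back as `role: "tool"` messages. A minimal sketch of that recurring pattern, not the project's exact code (the `zoom_in` tool is taken from the diffs below; the client setup and user message are assumptions):

```python
# Sketch of the migration pattern applied in this commit
# (assumes openai-python >= 1.0 and OPENAI_API_KEY in the environment).
import json
from openai import OpenAI

client = OpenAI()

# Old-style function descriptions are wrapped in the new "tools" envelope.
tools = [{
    "type": "function",
    "function": {
        "name": "zoom_in",
        "description": "Zoom in by a given number of zoom levels.",
        "parameters": {
            "type": "object",
            "properties": {"zoom_levels": {"type": "number"}},
            "required": ["zoom_levels"],
        },
    },
}]

messages = [{"role": "user", "content": "zoom in a couple of levels"}]
response = client.chat.completions.create(
    model="gpt-3.5-turbo-0613",
    messages=messages,
    tools=tools,          # was: functions=function_descriptions
    tool_choice="auto",   # was: function_call="auto"
)
response_message = response.choices[0].message   # was: response["choices"][0]["message"]
messages.append(response_message)
for tool_call in response_message.tool_calls or []:
    function_args = json.loads(tool_call.function.arguments)
    # echo the parsed call back to the model as a "tool" message
    messages.append({
        "tool_call_id": tool_call.id,
        "role": "tool",
        "name": tool_call.function.name,
        "content": json.dumps(function_args),
    })
```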

.gitignore
View file

@@ -27,4 +27,7 @@ settings/clip/clip.shx
 !/geoserver-data/workspaces
 !/geoserver-data/styles
 __pycache__
+notebooks/.ipynb_checkpoints

View file

@@ -1,15 +1,18 @@
 select_layer_name = {
-    "name": "select_layer_name",
-    "description": "Gets a layer name from the text of a given task related to selecting layers on a map.",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "layer_name": {
-                "type": "string",
-                "description": "The name of the layer.",
-            },
-        },
-        "required": ["layer_name"],
-    },
+    "type": "function",
+    "function": {
+        "name": "select_layer_name",
+        "description": "Gets a layer name from the text of a given task related to selecting layers on a map.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "layer_name": {
+                    "type": "string",
+                    "description": "The name of the layer.",
+                },
+            },
+            "required": ["layer_name"],
+        },
+    },
 }

View file

@@ -1,69 +1,81 @@
 go_to_location = {
-    "name": "go_to_location",
-    "description": "Go to a given location.",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "latitude": {
-                "type": "number",
-                "description": "The latitude to go to.",
-            },
-            "longitude": {
-                "type": "number",
-                "description": "The longitude to go to.",
-            },
-        },
-        "required": ["latitude", "longitude"],
-    },
+    "type": "function",
+    "function": {
+        "name": "go_to_location",
+        "description": "Go to a given location.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "latitude": {
+                    "type": "number",
+                    "description": "The latitude to go to.",
+                },
+                "longitude": {
+                    "type": "number",
+                    "description": "The longitude to go to.",
+                },
+            },
+            "required": ["latitude", "longitude"],
+        },
+    },
 }
 
 pan_in_direction = {
-    "name": "pan_in_direction",
-    "description": "Pan in a given direction and distance in kilometers.",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "direction": {
-                "type": "string",
-                "description": "The direction to pan in. One of 'north', 'south', 'east', 'west', 'northwest', 'northeast', 'southwest', 'southeast'.",
-            },
-            "distance_in_kilometers": {
-                "type": "number",
-                "description": "The distance to pan in kilometers. If not provided, defaults to 1.",
-            },
-        },
-        "required": ["direction"],
-    },
+    "type": "function",
+    "function": {
+        "name": "pan_in_direction",
+        "description": "Pan in a given direction and distance in kilometers.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "direction": {
+                    "type": "string",
+                    "description": "The direction to pan in. One of 'north', 'south', 'east', 'west', 'northwest', 'northeast', 'southwest', 'southeast'.",
+                },
+                "distance_in_kilometers": {
+                    "type": "number",
+                    "description": "The distance to pan in kilometers. If not provided, defaults to 1.",
+                },
+            },
+            "required": ["direction"],
+        },
+    },
 }
 
 zoom_in = {
-    "name": "zoom_in",
-    "description": "Zoom in by a given number of zoom levels.",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "zoom_levels": {
-                "type": "number",
-                "description": "The number of zoom levels to zoom in. If not provided, defaults to 1.",
-            },
-        },
-    },
-    "required": ["zoom_levels"],
+    "type": "function",
+    "function": {
+        "name": "zoom_in",
+        "description": "Zoom in by a given number of zoom levels.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "zoom_levels": {
+                    "type": "number",
+                    "description": "The number of zoom levels to zoom in. If not provided, defaults to 1.",
+                },
+            },
+            "required": ["zoom_levels"],
+        },
+    },
 }
 
 zoom_out = {
-    "name": "zoom_out",
-    "description": "Zoom out by a given number of zoom levels.",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "zoom_levels": {
-                "type": "number",
-                "description": "The number of zoom levels to zoom out. If not provided, defaults to 1.",
-            },
-        },
-    },
-    "required": ["zoom_levels"],
+    "type": "function",
+    "function": {
+        "name": "zoom_out",
+        "description": "Zoom out by a given number of zoom levels.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "zoom_levels": {
+                    "type": "number",
+                    "description": "The number of zoom levels to zoom out. If not provided, defaults to 1.",
+                },
+            },
+            "required": ["zoom_levels"],
+        },
+    },
 }
 
 navigation_function_descriptions = [

View file

@@ -1,77 +1,89 @@
 set_color = {
-    "name": "set_color",
-    "description": "Set the maplibre paint property color of a layer.",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "layer_name": {
-                "type": "string",
-                "description": "The name of the layer.",
-            },
-            "color": {
-                "type": "string",
-                "description": "The color to set in hex format.",
-            },
-        },
-        "required": ["layer_name", "color"],
-    },
+    "type": "function",
+    "function": {
+        "name": "set_color",
+        "description": "Set the maplibre paint property color of a layer.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "layer_name": {
+                    "type": "string",
+                    "description": "The name of the layer.",
+                },
+                "color": {
+                    "type": "string",
+                    "description": "The color to set in hex format.",
+                },
+            },
+            "required": ["layer_name", "color"],
+        },
+    },
 }
 
 set_opacity = {
-    "name": "set_opacity",
-    "description": "Set the maplibre paint property opacity of a layer.",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "layer_name": {
-                "type": "string",
-                "description": "The name of the layer.",
-            },
-            "opacity": {
-                "type": "number",
-                "description": "The opacity to set between 0 and 1.",
-            },
-        },
-        "required": ["layer_name", "opacity"],
-    },
+    "type": "function",
+    "function": {
+        "name": "set_opacity",
+        "description": "Set the maplibre paint property opacity of a layer.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "layer_name": {
+                    "type": "string",
+                    "description": "The name of the layer.",
+                },
+                "opacity": {
+                    "type": "number",
+                    "description": "The opacity to set between 0 and 1.",
+                },
+            },
+            "required": ["layer_name", "opacity"],
+        },
+    },
 }
 
 set_width = {
-    "name": "set_width",
-    "description": "Set the maplibre paint property width.",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "layer_name": {
-                "type": "string",
-                "description": "The name of the layer to get the paint property for.",
-            },
-            "width": {
-                "type": "number",
-                "description": "The width to set.",
-            },
-        },
-        "required": ["layer_name", "width"],
-    },
+    "type": "function",
+    "function": {
+        "name": "set_width",
+        "description": "Set the maplibre paint property width.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "layer_name": {
+                    "type": "string",
+                    "description": "The name of the layer to get the paint property for.",
+                },
+                "width": {
+                    "type": "number",
+                    "description": "The width to set.",
+                },
+            },
+            "required": ["layer_name", "width"],
+        },
+    },
 }
 
 set_visibility = {
-    "name": "set_visibility",
-    "description": "Set the visibility of a layer (turning it on or off).",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "layer_name": {
-                "type": "string",
-                "description": "The name of the layer to get the layout property for.",
-            },
-            "visibility": {
-                "type": "string",
-                "description": "Either 'visible' or 'none'. Set to 'none' to hide the layer.",
-            },
-        },
-        "required": ["layer_name", "visible"],
-    },
+    "type": "function",
+    "function": {
+        "name": "set_visibility",
+        "description": "Set the visibility of a layer (turning it on or off).",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "layer_name": {
+                    "type": "string",
+                    "description": "The name of the layer to get the layout property for.",
+                },
+                "visibility": {
+                    "type": "string",
+                    "description": "Either 'visible' or 'none'. Set to 'none' to hide the layer.",
+                },
+            },
+            "required": ["layer_name", "visible"],
+        },
+    },
 }

View file

@@ -1,4 +1,3 @@
-import openai
 import json
 from .function_descriptions.map_info_function_descriptions import map_info_function_descriptions
 import logging
@@ -11,9 +10,10 @@ class MapInfoAgent:
     def select_layer_name(self, layer_name):
         return {"name": "select_layer_name", "layer_name": layer_name}
 
-    def __init__(self, model_version="gpt-3.5-turbo-0613", layers=[]):
+    def __init__(self, openai, model_version="gpt-3.5-turbo-0613"):
+        self.openai = openai
         self.model_version = model_version
-        self.function_descriptions = map_info_function_descriptions
+        self.tools = map_info_function_descriptions
         self.messages = [
             {
                 "role": "system",
@@ -36,64 +36,49 @@ class MapInfoAgent:
         }
 
     def listen(self, message, layer_names=[]):
-        logger.info(f"In MapInfoAgent.listen()...message is: {message}")
         """Listen to a message from the user."""
-        #map_context = f"The following layers are in the map: {layer_names}"
-        #remove the last item in self.messages
-        if len(self.messages) > 1:
-            self.messages.pop()
+        logger.info(f"In MapInfoAgent...message is: {message}")
         self.messages.append({
             "role": "user",
             "content": message,
         })
-        # self.messages.append({
-        #     "role": "user",
-        #     "content": map_context,
-        # })
-        logger.info(f"MapInfoAgent self.messages: {self.messages}")
         # this will be the function gpt will call if it
         # determines that the user wants to call a function
         function_response = None
         try:
-            response = openai.ChatCompletion.create(
+            response = self.openai.chat.completions.create(
                 model=self.model_version,
                 messages=self.messages,
-                functions=self.function_descriptions,
-                function_call="auto",
-                temperature=0.1,
-                max_tokens=256,
-                top_p=1,
-                frequency_penalty=0,
-                presence_penalty=0
+                tools=self.tools,
+                tool_choice={"type": "function", "function": {"name": "select_layer_name"}},
             )
+            response_message = response.choices[0].message
+            logger.info(f"Response from OpenAI in MapInfoAgent: {response_message}")
+            tool_calls = response_message.tool_calls
 
-            response_message = response["choices"][0]["message"]
-            logger.info(f"First response from OpenAI in MapInfoAgent: {response_message}")
-            if response_message.get("function_call"):
-                function_name = response_message["function_call"]["name"]
-                logger.info(f"Type of function_name: {type(function_name)}")
-                function_name = function_name.strip()
-                logger.info(f"Function name: {function_name}")
-                function_to_call = self.available_functions[function_name]
-                logger.info(f"Function to call: {function_to_call}")
-                function_args = json.loads(response_message["function_call"]["arguments"])
-                logger.info(f"Function args: {function_args}")
-                # determine the function to call
-                function_response = function_to_call(**function_args)
+            if tool_calls:
                 self.messages.append(response_message)
-                self.messages.append({
-                    "role": "function",
-                    "name": function_name,
-                    "content": function_response,
-                })
-                logger.info(f"Function response: {function_response}")
+                for tool_call in tool_calls:
+                    function_name = tool_call.function.name
+                    function_to_call = self.available_functions[function_name]
+                    function_args = json.loads(tool_call.function.arguments)
+                    function_response = function_to_call(**function_args)
+                    self.messages.append(
+                        {
+                            "tool_call_id": tool_call.id,
+                            "role": "tool",
+                            "name": function_name,
+                            "content": json.dumps(function_response),
+                        }
+                    )
 
                 # if the function name is select_layer_name, we need OpenAI to select the proper layer name instead of whatever the user said.
                 # They may have used lower case or a descriptive designation instead of the actual layer name. We hope OpenAI can figure out
-                # what they meant.
+                # what they meant. In general, it's pretty good at this unless there are multiple layers with similar names, in which case
+                # it just chooses one.
                 if function_name == "select_layer_name":
                     logger.info(f"Sending layer name retrieval request to OpenAI...")
                     prompt = f"Please select a layer name from the following list that is closest to the text '{function_response['layer_name']}': {str(layer_names)}\n Only state the layer name in your response."
@@ -104,12 +89,12 @@ class MapInfoAgent:
                             "content": prompt,
                         },
                     ]
-                    second_response = openai.ChatCompletion.create(
+                    second_response = self.openai.chat.completions.create(
                         model=self.model_version,
                         messages=messages,
                     )
                     logger.info(f"Second response from OpenAI in MapInfoAgent: {second_response}")
-                    second_response_message = second_response["choices"][0]["message"]["content"]
+                    second_response_message = second_response.choices[0].message.content
                     logger.info(f"Second response message from OpenAI in MapInfoAgent: {second_response_message}")
                     logger.info(f"Function Response before setting the layer name: {function_response}")
                     function_response['layer_name'] = second_response_message

View file

@@ -1,36 +1,34 @@
 import json
-import openai
+#import openai
 import logging
 
 logger = logging.getLogger(__name__)
 
 class MarshallAgent:
     """A Marshall agent that has function descriptions for choosing the appropriate agent for a specified task."""
 
-    def __init__(self, model_version="gpt-3.5-turbo-0613"):
+    def __init__(self, openai, model_version="gpt-3.5-turbo-0613"):
         self.model_version = model_version
+        self.openai = openai
 
-        #we only need one function description for this agent
-        function_description = {
-            "name": "choose_agent",
-            "description": """Chooses an appropriate agent for a given task.""",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "agent_name": {
-                        "type": "string",
-                        "description": "The name of the agent to choose. One of 'NavigationAgent', 'StyleAgent', 'MapInfoAgent', 'DatabaseAgent'.",
-                    },
-                },
-                "required": ["agent_name"],
-            },
-        }
-
-        self.function_descriptions = [function_description]
-        self.messages = [
-            {
-                "role": "system",
-                "content": """You are a helpful assistant that decides which agent to use for a specified task.
+        self.tools = [
+            {
+                "type": "function",
+                "function": {
+                    "name": "choose_agent",
+                    "description": """Chooses an appropriate agent for a given task.""",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "agent_name": {
+                                "type": "string",
+                                "description": "The name of the agent to choose. One of 'NavigationAgent', 'StyleAgent', 'MapInfoAgent', 'DatabaseAgent'.",
+                            },
+                        },
+                        "required": ["agent_name"],
+                    },
+                },
+            },
+        ]
+
+        self.system_message = """You are a helpful assistant that decides which agent to use for a specified task.
 
         For tasks related to adding layers and other geospatial data to the map, use the DatabaseAgent.
         Examples include 'add buildings to the map' and 'get landuse polygons within this extent'.
@@ -47,9 +45,13 @@ class MarshallAgent:
         'go to Paris', 'show me the Statue of Liberty', and 'where is Houston, Texas?'
 
         If you can't find the appropriate agent, say that you didn't understand and ask
-        for a more specific description of the task.""",
-            },
-        ]
+        for a more specific description of the task."""
+
+        #initialize the messages queue with the system message
+        self.messages = [{"role": "system", "content": self.system_message}]
+
+        self.available_functions = {
+            "choose_agent": self.choose_agent,
+        }
 
         self.logger = logging.getLogger(__name__)
 
     def choose_agent(self, agent_name):
@@ -59,9 +61,9 @@ class MarshallAgent:
         self.logger.info(f"In MarshallAgent.listen()...message is: {message}")
         """Listen to a message from the user."""
 
-        # Remove the last item in self.messages. Our agent has no memory
-        if len(self.messages) > 1:
-            self.messages.pop()
+        # # Remove the last item in self.messages. Our agent has no memory
+        # if len(self.messages) > 1:
+        #     self.messages.pop()
 
         self.messages.append({
             "role": "user",
@@ -73,34 +75,37 @@ class MarshallAgent:
         function_response = None
         try:
-            response = openai.ChatCompletion.create(
+            response = self.openai.chat.completions.create(
                 model=self.model_version,
                 messages=self.messages,
-                functions=self.function_descriptions,
-                function_call={"name": "choose_agent"},
-                temperature=0,
-                max_tokens=256,
-                top_p=1,
-                frequency_penalty=0,
-                presence_penalty=0
+                tools=self.tools,
+                tool_choice={"type": "function", "function": {"name": "choose_agent"}},
             )
-            response_message = response["choices"][0]["message"]
-            self.logger.info(f"Response from OpenAI in MarshallAgent: {response_message}")
-            if response_message.get("function_call"):
-                function_args = json.loads(response_message["function_call"]["arguments"])
-                self.logger.info(f"Function args: {function_args}")
-                # call choose agent
-                function_response = self.choose_agent(**function_args)
-                self.logger.info(f"Function response: {function_response}")
+            response_message = response.choices[0].message
+            tool_calls = response_message.tool_calls
+
+            if tool_calls:
+                available_functions = self.available_functions
+                self.messages.append(response_message)
+                for tool_call in tool_calls:
+                    function_name = tool_call.function.name
+                    function_to_call = available_functions[function_name]
+                    function_args = json.loads(tool_call.function.arguments)
+                    function_response = function_to_call(**function_args)
+                    self.messages.append(
+                        {
+                            "tool_call_id": tool_call.id,
+                            "role": "tool",
+                            "name": function_name,
+                            "content": json.dumps(function_response),
+                        }
+                    )
+                # second_response = self.openai.chat.completions.create(
+                #     model=self.model_version,
+                #     messages=self.messages,
+                # )
+                logger.info(f"Successful MarshallAgent task completion: {function_response}")
                 return {"response": function_response}
-            elif response_message.get("content"):
-                return {"response": response_message["content"]}
-            else:
-                return {"response": "I'm sorry, I don't understand."}
         except Exception as e:
             return {"error": "Failed to get response from OpenAI in MarshallAgent: " + str(e)}, 500

View file

@@ -1,6 +1,6 @@
 import logging
 from .function_descriptions.navigation_function_descriptions import navigation_function_descriptions
-import openai
+#import openai
 import json
 
 logger = logging.getLogger(__name__)
@@ -20,9 +20,9 @@ class NavigationAgent:
     def zoom_out(self, zoom_levels=1):
         return {"name": "zoom_out", "zoom_levels": zoom_levels}
 
-    def __init__(self, model_version="gpt-3.5-turbo-0613"):
+    def __init__(self, openai, model_version="gpt-3.5-turbo-0613"):
+        self.openai = openai
         self.model_version = model_version
-        self.function_descriptions = navigation_function_descriptions
         self.messages = [
             {
                 "role": "system",
@@ -41,13 +41,15 @@ class NavigationAgent:
             "zoom_in": self.zoom_in,
             "zoom_out": self.zoom_out,
         }
+        self.tools = navigation_function_descriptions
+        logger.info(f"self.tools in NavigationAgent is: {self.tools}")
 
     def listen(self, message):
-        logging.info(f"In NavigationAgent.listen()...message is: {message}")
+        logging.info(f"In NavigationAgent...message is: {message}")
         """Listen to a message from the user."""
-        #remove the last item in self.messages
-        if len(self.messages) > 1:
-            self.messages.pop()
+        # #remove the last item in self.messages
+        # if len(self.messages) > 1:
+        #     self.messages.pop()
         self.messages.append({
             "role": "user",
             "content": message,
@@ -58,38 +60,35 @@ class NavigationAgent:
         function_response = None
         try:
-            response = openai.ChatCompletion.create(
+            logger.info("Calling OpenAI API in NavigationAgent...")
+            response = self.openai.chat.completions.create(
                 model=self.model_version,
                 messages=self.messages,
-                functions=self.function_descriptions,
-                function_call="auto",
-                temperature=0.1,
-                max_tokens=256,
-                top_p=1,
-                frequency_penalty=0,
-                presence_penalty=0
+                tools=self.tools,
+                tool_choice="auto",
            )
+            logger.info(f"response in NavigationAgent is: {response}")
+            response_message = response.choices[0].message
+            logger.info(f"response_message in NavigationAgent is: {response_message}")
+            tool_calls = response_message.tool_calls
 
-            response_message = response["choices"][0]["message"]
-            logging.info(f"Response from OpenAI in NavigationAgent: {response_message}")
-            if response_message.get("function_call"):
-                function_name = response_message["function_call"]["name"]
-                logging.info(f"Function name: {function_name}")
-                function_to_call = self.available_functions[function_name]
-                logging.info(f"Function to call: {function_to_call}")
-                function_args = json.loads(response_message["function_call"]["arguments"])
-                logging.info(f"Function args: {function_args}")
-                # determine the function to call
-                function_response = function_to_call(**function_args)
-                logging.info(f"Function response: {function_response}")
+            if tool_calls:
+                self.messages.append(response_message)
+                for tool_call in tool_calls:
+                    function_name = tool_call.function.name
+                    function_to_call = self.available_functions[function_name]
+                    function_args = json.loads(tool_call.function.arguments)
+                    function_response = function_to_call(**function_args)
+                    self.messages.append(
+                        {
+                            "tool_call_id": tool_call.id,
+                            "role": "tool",
+                            "name": function_name,
+                            "content": json.dumps(function_response),
+                        }
+                    )
+                logger.info("Successful NavigationAgent task completion.")
                 return {"response": function_response}
-            elif response_message.get("content"):
-                return {"response": response_message["content"]}
-            else:
-                return {"response": "I'm sorry, I don't understand."}
         except Exception as e:
             return {"error": "Failed to get response from OpenAI in NavigationAgent: " + str(e)}, 500

View file

@@ -1,7 +1,7 @@
 import logging
 from .function_descriptions.style_function_descriptions import style_function_descriptions
 import json
-import openai
 
 logger = logging.getLogger(__name__)
@@ -19,10 +19,11 @@ class StyleAgent:
     def set_visibility(self, layer_name, visibility):
         return {"name": "set_visibility", "layer_name": layer_name, "visibility": visibility}
 
-    def __init__(self, model_version="gpt-3.5-turbo-0613"):
+    def __init__(self, openai, model_version="gpt-3.5-turbo-0613"):
+        self.openai = openai
         self.model_version = model_version
-        self.function_descriptions = style_function_descriptions
+        self.tools = style_function_descriptions
 
         self.messages = [
             {
@@ -42,12 +43,9 @@ class StyleAgent:
         }
 
     def listen(self, message):
-        logging.info(f"In StyleAgent.listen()...message is: {message}")
         """Listen to a message from the user."""
-        #remove the last item in self.messages
-        if len(self.messages) > 1:
-            self.messages.pop()
+        logging.info(f"In StyleAgent...message is: {message}")
         self.messages.append({
             "role": "user",
             "content": message,
@@ -58,38 +56,35 @@ class StyleAgent:
         function_response = None
         try:
-            response = openai.ChatCompletion.create(
+            logger.info("Calling OpenAI API in StyleAgent...")
+            response = self.openai.chat.completions.create(
                 model=self.model_version,
                 messages=self.messages,
-                functions=self.function_descriptions,
-                function_call="auto",
-                temperature=0.1,
-                max_tokens=256,
-                top_p=1,
-                frequency_penalty=0,
-                presence_penalty=0
+                tools=self.tools,
+                tool_choice="auto",
             )
+            logger.info(f"response in StyleAgent is: {response}")
+            response_message = response.choices[0].message
+            logger.info(f"response_message in StyleAgent is: {response_message}")
+            tool_calls = response_message.tool_calls
 
-            response_message = response["choices"][0]["message"]
-            logging.info(f"Response from OpenAI in StyleAgent: {response_message}")
-            if response_message.get("function_call"):
-                function_name = response_message["function_call"]["name"]
-                logging.info(f"Function name: {function_name}")
-                function_to_call = self.available_functions[function_name]
-                logging.info(f"Function to call: {function_to_call}")
-                function_args = json.loads(response_message["function_call"]["arguments"])
-                logging.info(f"Function args: {function_args}")
-                # determine the function to call
-                function_response = function_to_call(**function_args)
-                logging.info(f"Function response: {function_response}")
+            if tool_calls:
+                self.messages.append(response_message)
+                for tool_call in tool_calls:
+                    function_name = tool_call.function.name
+                    function_to_call = self.available_functions[function_name]
+                    function_args = json.loads(tool_call.function.arguments)
+                    function_response = function_to_call(**function_args)
+                    self.messages.append(
+                        {
+                            "tool_call_id": tool_call.id,
+                            "role": "tool",
+                            "name": function_name,
+                            "content": json.dumps(function_response),
+                        }
+                    )
+                logger.info("Successful StyleAgent task completion.")
                 return {"response": function_response}
-            elif response_message.get("content"):
-                return {"response": response_message["content"]}
-            else:
-                return {"response": "I'm sorry, I don't understand."}
         except Exception as e:
             return {"error": "Failed to get response from OpenAI in StyleAgent: " + str(e)}, 500

View file

@@ -1,7 +1,7 @@
 from flask import Flask, render_template, request, jsonify
 from flask_cors import CORS
 import os
-import openai
+from openai import OpenAI, NotFoundError
 from dotenv import load_dotenv
 import json
 import psycopg2
@@ -22,15 +22,14 @@ load_dotenv()
 app = Flask(__name__)
 CORS(app)
 
-#openai.organization = os.getenv("OPENAI_ORGANIZATION")
-openai.api_key = os.getenv("OPENAI_API_KEY")
+openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
 model_version = os.getenv("OPENAI_MODEL_VERSION")
 
 UPLOAD_FOLDER = 'uploads/audio'
 
-navigation_agent = NavigationAgent(model_version=model_version)
-marshall_agent = MarshallAgent(model_version=model_version)
-style_agent = StyleAgent(model_version=model_version)
-map_info_agent = MapInfoAgent(model_version=model_version)
+navigation_agent = NavigationAgent(openai, model_version=model_version)
+marshall_agent = MarshallAgent(openai, model_version=model_version)
+style_agent = StyleAgent(openai, model_version=model_version)
+map_info_agent = MapInfoAgent(openai, model_version=model_version)
 
 def get_database_schema():
     db = Database(
@@ -164,7 +163,7 @@ def upload_audio():
     audio_file = request.files['audio']
     audio_file.save(os.path.join(UPLOAD_FOLDER, "user_audio.webm"))
     audio_file=open(os.path.join(UPLOAD_FOLDER, "user_audio.webm"), 'rb')
-    transcript = openai.Audio.transcribe("whisper-1", audio_file)
+    transcript = openai.audio.transcribe("whisper-1", audio_file)
     logging.info(f"Received transcript: {transcript}")
    message = transcript['text']
     #delete the audio
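The commit message flags audio as broken, and this hunk is the likely reason: `openai.audio.transcribe(...)` is not a method on the v1 client. A hedged sketch of what the equivalent v1 call would look like (`transcribe_user_audio` is a hypothetical helper, not part of this commit):

```python
# Hypothetical fix for the transcription call in upload_audio(),
# assuming openai-python >= 1.0 (not part of this commit).
import os
from openai import OpenAI

openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

def transcribe_user_audio(upload_folder: str = "uploads/audio") -> str:
    with open(os.path.join(upload_folder, "user_audio.webm"), "rb") as audio_file:
        # v1 SDK: audio.transcriptions.create() replaces the removed openai.Audio.transcribe()
        transcript = openai.audio.transcriptions.create(model="whisper-1", file=audio_file)
    return transcript.text  # v1 returns a Transcription object, not a dict
```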

View file

@@ -0,0 +1,261 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 163,
"id": "aa531c59-0557-4348-bef6-0a614ee39d0b",
"metadata": {},
"outputs": [],
"source": [
"import html\n",
"import json\n",
"import requests"
]
},
{
"cell_type": "code",
"execution_count": 164,
"id": "90b627b6-fc63-4a26-b3c6-0594f847fe78",
"metadata": {},
"outputs": [],
"source": [
"#URL = 'http://localhost:5001/api/v1/generate'\n",
"URL = 'http://localhost:5001/api/v1/chat'"
]
},
{
"cell_type": "code",
"execution_count": 165,
"id": "ee38c219-22d8-4d84-b125-4a1b3ac8561f",
"metadata": {},
"outputs": [],
"source": [
"available_functions = \"\"\"\n",
"{\n",
" \"name\": \"go_to_location\",\n",
" \"description\": \"Go to a given location.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"latitude\": {\n",
" \"type\": \"number\",\n",
" \"description\": \"The latitude to go to.\",\n",
" },\n",
" \"longitude\": {\n",
" \"type\": \"number\",\n",
" \"description\": \"The longitude to go to.\",\n",
" },\n",
" },\n",
" \"required\": [\"latitude\", \"longitude\"],\n",
" },\n",
"},\n",
"{\n",
" \"name\": \"pan_in_direction\",\n",
" \"description\": \"Pan in a given direction and distance in kilometers.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"direction\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The direction to pan in. One of 'north', 'south', 'east', 'west', 'northwest', 'northeast', 'southwest', 'southeast'.\",\n",
" },\n",
" \"distance_in_kilometers\": {\n",
" \"type\": \"number\",\n",
" \"description\": \"The distance to pan in kilometers. If not provided, defaults to 1.\",\n",
" },\n",
" },\n",
" \"required\": [\"direction\"],\n",
" },\n",
"},\n",
"{\n",
" \"name\": \"zoom_in\",\n",
" \"description\": \"Zoom in by a given number of zoom levels.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"zoom_levels\": {\n",
" \"type\": \"number\",\n",
" \"description\": \"The number of zoom levels to zoom in. If not provided, defaults to 1.\",\n",
" },\n",
" },\n",
" },\n",
" \"required\": [\"zoom_levels\"],\n",
"},\n",
"{\n",
" \"name\": \"zoom_out\",\n",
" \"description\": \"Zoom out by a given number of zoom levels.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"zoom_levels\": {\n",
" \"type\": \"number\",\n",
" \"description\": \"The number of zoom levels to zoom out. If not provided, defaults to 1.\",\n",
" },\n",
" },\n",
" },\n",
" \"required\": [\"zoom_levels\"],\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 166,
"id": "d007f9f6-c76e-4237-bd85-5cd6a0e03521",
"metadata": {},
"outputs": [],
"source": [
"system_message = f\"\"\"As an AI assistant, please select the most suitable function and parameters \n",
"from the list of available functions below, based on the user's input. Provide your response in JSON format.\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 167,
"id": "8f7e36b3-ce1f-4c6b-ba81-9c5a884b4e93",
"metadata": {},
"outputs": [],
"source": [
"def send_message(message, system_message, function_descriptions, temperature=0.0):\n",
" payload = f\"\"\"{system_message}\\nInput: {message}\\nAvailable functions:\\n{function_descriptions}\"\"\"\n",
" history = {'internal': [], 'visible': []}\n",
" request = {\n",
" 'user_input': payload,\n",
" 'history': history,\n",
" 'temperature': temperature,\n",
" }\n",
" response = requests.post(URL, json=request)\n",
" if response.status_code == 200:\n",
" result = response.json()['results'][0]['history']\n",
" response_data = html.unescape(result['visible'][-1][1])\n",
" response_message = json.loads(response_data.strip('\\n'))\n",
" response = {\"response\": response_message}\n",
" return response"
]
},
{
"cell_type": "code",
"execution_count": 168,
"id": "82831633-bd48-4e22-9b09-80a686eadd3d",
"metadata": {},
"outputs": [],
"source": [
"message = \"go to paris\""
]
},
{
"cell_type": "code",
"execution_count": 169,
"id": "5ee7f345-2e3a-4ac6-8601-c0fdc6d49b62",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'response': {'function': 'go_to_location',\n",
" 'parameters': {'latitude': 48.8567, 'longitude': 2.3522}}}"
]
},
"execution_count": 169,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"send_message(message, system_message, available_functions)"
]
},
{
"cell_type": "code",
"execution_count": 170,
"id": "f2b4aee0-f2e2-4783-bb33-76f9906dbb66",
"metadata": {},
"outputs": [],
"source": [
"message = \"zoom in\""
]
},
{
"cell_type": "code",
"execution_count": 171,
"id": "bad94859-7014-4abe-bf1e-22303634eecf",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'response': {'function': 'zoom_in', 'parameters': {'zoom_levels': 2}}}"
]
},
"execution_count": 171,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"send_message(message, system_message, available_functions)"
]
},
{
"cell_type": "code",
"execution_count": 172,
"id": "8f755da8-0ce8-4ed5-bae5-21712bc604bf",
"metadata": {},
"outputs": [],
"source": [
"message = \"pan north by 30km\""
]
},
{
"cell_type": "code",
"execution_count": 173,
"id": "5efddf84-6bea-457d-91b7-ab70f5f4909f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'response': {'function': 'pan_in_direction',\n",
" 'parameters': {'direction': 'north', 'distance_in_kilometers': 30}}}"
]
},
"execution_count": 173,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"send_message(message, system_message, available_functions)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2ba2bbc0-7d27-48ed-bd0a-b11dfa099098",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.17"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
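The notebook's local model replies with `{'function': ..., 'parameters': ...}` JSON, so the result could be dispatched much like the agents dispatch tool calls. A small illustrative sketch (the `available_functions` mapping and `dispatch` helper are hypothetical, mirroring the agents' pattern; they are not in the notebook):

```python
# Sketch: dispatching the local model's JSON response the way the agents dispatch tool calls.
def go_to_location(latitude, longitude):
    return {"name": "go_to_location", "latitude": latitude, "longitude": longitude}

def zoom_in(zoom_levels=1):
    return {"name": "zoom_in", "zoom_levels": zoom_levels}

available_functions = {"go_to_location": go_to_location, "zoom_in": zoom_in}

def dispatch(response):
    # response looks like {'response': {'function': 'go_to_location', 'parameters': {...}}}
    call = response["response"]
    function_to_call = available_functions[call["function"]]
    return function_to_call(**call["parameters"])

# Example against the notebook's output for "go to paris":
# dispatch({'response': {'function': 'go_to_location',
#                        'parameters': {'latitude': 48.8567, 'longitude': 2.3522}}})
# -> {'name': 'go_to_location', 'latitude': 48.8567, 'longitude': 2.3522}
```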