import os
from autogen import ConversableAgent, LLMConfig
from mem0 import MemoryClient
# Set up environment variables.
# Use setdefault (instead of plain assignment) so that a real key the user has
# already exported in their shell is NOT overwritten by these placeholders —
# the placeholder only fills in when the variable is missing entirely.
os.environ.setdefault("OPENAI_API_KEY", "your_openai_api_key")
os.environ.setdefault("MEM0_API_KEY", "your_mem0_api_key")
# Initialize Agent and Memory.
# The LLM configuration is built once up front and handed to the agent;
# the agent runs fully autonomously (no human input, no code execution).
chatbot_llm_config = LLMConfig(
    api_type="openai",
    model="gpt-4",
    api_key=os.environ.get("OPENAI_API_KEY"),
)
agent = ConversableAgent(
    "chatbot",
    llm_config=chatbot_llm_config,
    code_execution_config=False,
    function_map=None,
    human_input_mode="NEVER",
)
# Hosted mem0 client used as the long-term memory store for this demo.
memory = MemoryClient(api_key=os.environ.get("MEM0_API_KEY"))
# Insert a conversation into memory.
# Seed the memory store with a prior support exchange so later searches
# can recall the customer's TV model.
bot_greeting = {
    "role": "assistant",
    "content": "Hi, I'm Best Buy's chatbot!\n\nThanks for being a My Best Buy TotalTM member.\n\nWhat can I help you with?",
}
customer_issue = {
    "role": "user",
    "content": "Seeing horizontal lines on our tv. TV model: Sony - 77\" Class BRAVIA XR A80K OLED 4K UHD Smart Google TV",
}
conversation = [bot_greeting, customer_issue]
memory.add(messages=conversation, user_id="customer_service_bot")
# Agent Inference: retrieve relevant memories and answer a user question.
data = "Which TV am I using?"
relevant_memories = memory.search(data, user_id="customer_service_bot")
# Collapse each memory hit's text into a single newline-joined context blob.
flatten_relevant_memories = "\n".join(hit["memory"] for hit in relevant_memories)
prompt = f"""Answer the user question considering the memories.
Memories:
{flatten_relevant_memories}
\n\n
Question: {data}
"""
user_message = {"content": prompt, "role": "user"}
reply = agent.generate_reply(messages=[user_message])
print("Reply :", reply)
# Multi Agent Conversation: a "manager" agent that resolves issues and a
# "customer_bot" agent that gathers issue details. Each agent gets its own
# deterministic (temperature=0) GPT-4 configuration.
manager = ConversableAgent(
    "manager",
    system_message="You are a manager who helps in resolving customer issues.",
    llm_config=LLMConfig(
        api_type="openai",
        model="gpt-4",
        temperature=0,
        api_key=os.environ.get("OPENAI_API_KEY"),
    ),
    human_input_mode="NEVER",
)
customer_bot = ConversableAgent(
    "customer_bot",
    system_message="You are a customer service bot who gathers information on issues customers are facing.",
    llm_config=LLMConfig(
        api_type="openai",
        model="gpt-4",
        temperature=0,
        api_key=os.environ.get("OPENAI_API_KEY"),
    ),
    human_input_mode="NEVER",
)
# Ask the manager a question grounded in the retrieved memory context and
# have it converse with the customer_bot for a reply.
data = "What appointment is booked?"
relevant_memories = memory.search(data, user_id="customer_service_bot")
flatten_relevant_memories = "\n".join(entry["memory"] for entry in relevant_memories)
prompt = f"""
Context:
{flatten_relevant_memories}
\n\n
Question: {data}
"""
result = manager.send(prompt, customer_bot, request_reply=True)