Implement and manage a multi-agent chat system with AG2, in which AI assistants retrieve information, generate code, and collaborate to solve complex tasks, especially ones not covered by their training data.
pip install ag2[openai,retrievechat]
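If the install worked, the package imports under the name autogen (AG2 kept the original import name); a quick sanity check:

import autogen
print(autogen.__version__)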
The LLMConfig.from_json method loads a list of model configurations from a JSON file or an environment variable; here it reads them from the OAI_CONFIG_LIST file.
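As a sketch, the OAI_CONFIG_LIST file might look like the following; the model name and API key are placeholders to replace with your own:

[
    {
        "model": "gpt-4o",
        "api_key": "<your OpenAI API key>"
    }
]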
from typing import Annotated
import autogen
from autogen import AssistantAgent
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
llm_config = autogen.LLMConfig.from_json(path="OAI_CONFIG_LIST", timeout=60, temperature=0.8, seed=1234)
print("LLM models: ", [x.model for x in llm_config.config_list])
def termination_msg(x):
    return isinstance(x, dict) and str(x.get("content", ""))[-9:].upper() == "TERMINATE"
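# A quick sanity check with hypothetical messages: only the last nine characters
# of the content are compared.
assert termination_msg({"content": "All done. TERMINATE"})
assert not termination_msg({"content": "still working"})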
boss = autogen.UserProxyAgent(
    name="Boss",
    is_termination_msg=termination_msg,
    human_input_mode="NEVER",
    code_execution_config=False,  # we don't want to execute code in this case.
    default_auto_reply="Reply `TERMINATE` if the task is done.",
    description="The boss who asks questions and gives tasks.",
)
boss_aid = RetrieveUserProxyAgent(
    name="Boss_Assistant",
    is_termination_msg=termination_msg,
    human_input_mode="NEVER",
    default_auto_reply="Reply `TERMINATE` if the task is done.",
    max_consecutive_auto_reply=3,
    retrieve_config={
        "task": "code",
        "docs_path": "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md",
        "chunk_token_size": 1000,
        "model": llm_config.config_list[0].model,
        "collection_name": "groupchat",
        "get_or_create": True,
    },
    code_execution_config=False,  # we don't want to execute code in this case.
    description="Assistant who has extra content retrieval power for solving difficult problems.",
)
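# Note: "docs_path" also accepts a local path or a list of paths, and
# "get_or_create": True reuses an existing "groupchat" collection on later
# runs instead of re-indexing the document.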
coder = AssistantAgent(
    name="Senior_Python_Engineer",
    is_termination_msg=termination_msg,
    system_message="You are a senior Python engineer; you provide Python code to answer questions. Reply `TERMINATE` at the end when everything is done.",
    llm_config=llm_config,
    description="Senior Python Engineer who can write code to solve problems and answer questions.",
)
pm = autogen.AssistantAgent(
    name="Product_Manager",
    is_termination_msg=termination_msg,
    system_message="You are a product manager. Reply `TERMINATE` at the end when everything is done.",
    llm_config=llm_config,
    description="Product Manager who can design and plan the project.",
)
reviewer = autogen.AssistantAgent(
    name="Code_Reviewer",
    is_termination_msg=termination_msg,
    system_message="You are a code reviewer. Reply `TERMINATE` at the end when everything is done.",
    llm_config=llm_config,
    description="Code Reviewer who can review the code.",
)
PROBLEM = "How to use spark for parallel training in FLAML? Give me sample code."
def _reset_agents():
    boss.reset()
    boss_aid.reset()
    coder.reset()
    pm.reset()
    reviewer.reset()
def rag_chat():
    _reset_agents()
    groupchat = autogen.GroupChat(
        agents=[boss_aid, pm, coder, reviewer], messages=[], max_round=12, speaker_selection_method="round_robin"
    )
    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

    # Start chatting with boss_aid as this is the user proxy agent.
    boss_aid.initiate_chat(
        manager,
        message=boss_aid.message_generator,
        problem=PROBLEM,
        n_results=3,
    )
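# Note: boss_aid.message_generator composes the opening message from the problem
# plus the chunks retrieved from the indexed document, so every agent in the group
# sees grounded context from the first round.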
def norag_chat():
    _reset_agents()
    groupchat = autogen.GroupChat(
        agents=[boss, pm, coder, reviewer],
        messages=[],
        max_round=12,
        speaker_selection_method="auto",
        allow_repeat_speaker=False,
    )
    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

    # Start chatting with the boss as this is the user proxy agent.
    boss.initiate_chat(
        manager,
        message=PROBLEM,
    )
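# Without retrieval, the agents can only rely on what their training data covers,
# so the answer about FLAML's Spark integration may be outdated or made up; this is
# the baseline the RAG variants are meant to beat.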
def call_rag_chat():
    _reset_agents()

    # In this case, we have multiple user proxy agents, and we don't initiate the chat
    # with the RAG user proxy agent. Instead, we wrap the retrieval step in a function
    # and register it so the other agents can call it.
    def retrieve_content(
        message: Annotated[
            str,
            "Refined message which keeps the original meaning and can be used to retrieve content for code generation and question answering.",
        ],
        n_results: Annotated[int, "number of results"] = 3,
    ) -> str:
        boss_aid.n_results = n_results  # Set the number of results to be retrieved.
        _context = {"problem": message, "n_results": n_results}
        ret_msg = boss_aid.message_generator(boss_aid, None, _context)
        return ret_msg or message

    boss_aid.human_input_mode = "NEVER"  # Disable human input for boss_aid since it only retrieves content.

    for caller in [pm, coder, reviewer]:
        d_retrieve_content = caller.register_for_llm(
            description="retrieve content for code generation and question answering.", api_style="function"
        )(retrieve_content)

    for executor in [boss, pm]:
        executor.register_for_execution()(d_retrieve_content)

    groupchat = autogen.GroupChat(
        agents=[boss, pm, coder, reviewer],
        messages=[],
        max_round=12,
        speaker_selection_method="round_robin",
        allow_repeat_speaker=False,
    )
    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

    # Start chatting with the boss as this is the user proxy agent.
    boss.initiate_chat(
        manager,
        message=PROBLEM,
    )
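# Note: api_style="function" registers the tool in the legacy OpenAI function-calling
# format; omitting it uses the default "tool" style. A hypothetical decorator-style
# sketch of the same registration, for a single caller/executor pair (not meant to be
# combined with the loop above):
#
#     @boss.register_for_execution()
#     @coder.register_for_llm(description="retrieve content for code generation and question answering.", api_style="function")
#     def retrieve_content(message: str, n_results: int = 3) -> str:
#         ...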
norag_chat()
rag_chat()
# All agents run with human_input_mode="NEVER", so each chat ends without manual input.
call_rag_chat()
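Each of the three calls runs its own group chat of up to 12 rounds against the configured model, so running them all in sequence issues three separate batches of API calls.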