def get_llm_config(model_name):
    """Build an autogen LLM config dict for a single model.

    Loads the shared "OAI_CONFIG_LIST" file and keeps only the entries
    whose model matches *model_name*; cache_seed is pinned so repeated
    runs reuse cached completions deterministically.
    """
    config_list = autogen.config_list_from_json(
        "OAI_CONFIG_LIST",
        filter_dict={"model": [model_name]},
    )
    return {"config_list": config_list, "cache_seed": 41}
# Shared debate prompts: agents are assigned alternately to the two sides below.
affirmative_system_message = "You are in the Affirmative team of a debate. When it is your turn, please give at least one reason why you are for the topic. Keep it short."
negative_system_message = "You are in the Negative team of a debate. The affirmative team has given their reason, please counter their argument. Keep it short."
# Five debaters backed by different model providers, alternating sides:
# affirmative (GPT-3.5, Mistral, Claude) vs. negative (Llama 3, Gemini).
gpt35_agent = autogen.AssistantAgent(
    name="GPT35",
    llm_config=get_llm_config("gpt-35-turbo-1106"),
    system_message=affirmative_system_message,
)
llama_agent = autogen.AssistantAgent(
    name="Llama3",
    llm_config=get_llm_config("meta-llama/Meta-Llama-3-70B-Instruct"),
    system_message=negative_system_message,
)
mistral_agent = autogen.AssistantAgent(
    name="Mistral",
    llm_config=get_llm_config("mistral-large-latest"),
    system_message=affirmative_system_message,
)
gemini_agent = autogen.AssistantAgent(
    name="Gemini",
    llm_config=get_llm_config("gemini-1.5-pro-latest"),
    system_message=negative_system_message,
)
claude_agent = autogen.AssistantAgent(
    name="Claude",
    llm_config=get_llm_config("claude-3-opus-20240229"),
    system_message=affirmative_system_message,
)
# Human stand-in that kicks off the debate; code execution disabled since
# the agents only exchange text.
user_proxy = autogen.UserProxyAgent(name="User", code_execution_config=False)

# Round-robin group chat: speakers take turns in the exact list order below
# (order matters for round_robin), for at most 8 rounds total.
groupchat = autogen.GroupChat(
    agents=[
        claude_agent,
        gemini_agent,
        mistral_agent,
        llama_agent,
        gpt35_agent,
        user_proxy,
    ],
    messages=[],
    max_round=8,
    speaker_selection_method="round_robin",
)

# Manager drives the chat; it needs no LLM of its own under round-robin selection.
manager = autogen.GroupChatManager(groupchat=groupchat)