# Two assistants with identical configuration: a plain baseline, and a second
# one that will later be equipped with message-transform context handling.
assistant_base = autogen.AssistantAgent(
    name="assistant",
    llm_config=llm_config,
)

assistant_with_context_handling = autogen.AssistantAgent(
    name="assistant",
    llm_config=llm_config,
)
# Build the context-handling capability: cap the visible history at the last
# 10 messages, then trim messages to a token budget (at most 50 tokens per
# message, 1000 tokens overall; skip trimming entirely under 500 tokens).
message_transforms = [
    transforms.MessageHistoryLimiter(max_messages=10),
    transforms.MessageTokenLimiter(max_tokens=1000, max_tokens_per_message=50, min_tokens=500),
]
context_handling = transform_messages.TransformMessages(transforms=message_transforms)

# Attach the capability to the second assistant only, so the two can be compared.
context_handling.add_to_agent(assistant_with_context_handling)
# Fully automated user proxy: never prompts a human, executes generated code
# locally, and stops after the assistant says TERMINATE or after 2 auto-replies.
user_proxy = autogen.UserProxyAgent(
    "user_proxy",
    human_input_mode="NEVER",
    # A message's "content" can be None (e.g. tool/function-call messages);
    # `x.get("content", "")` only covers a *missing* key, so coerce None to ""
    # before the substring test to avoid `TypeError: argument of type 'NoneType'`.
    is_termination_msg=lambda x: "TERMINATE" in (x.get("content") or ""),
    code_execution_config={
        "work_dir": "coding",
        "use_docker": False,  # run generated code directly on the host
    },
    max_consecutive_auto_reply=2,
)
# Flood both assistants' chat histories with oversized filler messages so the
# base assistant is guaranteed to overflow gpt-3.5's context window.
for _ in range(1000):
    long_reply = {"role": "assistant", "content": "test " * 1000}
    empty_prompt = {"role": "user", "content": ""}
    assistant_base.send(long_reply, user_proxy, request_reply=False, silent=True)
    assistant_with_context_handling.send(long_reply, user_proxy, request_reply=False, silent=True)
    user_proxy.send(empty_prompt, assistant_base, request_reply=False, silent=True)
    user_proxy.send(empty_prompt, assistant_with_context_handling, request_reply=False, silent=True)
# Compare the two agents on the same task over the bloated history: the base
# assistant is expected to fail (context overflow), while the transform-equipped
# assistant trims its history first and should succeed.
try:
    user_proxy.initiate_chat(assistant_base, message="plot and save a graph of x^2 from -10 to 10", clear_history=False)
except Exception as e:
    print("Encountered an error with the base assistant")
    print(e)
    print("\n\n")
try:
    user_proxy.initiate_chat(
        assistant_with_context_handling, message="plot and save a graph of x^2 from -10 to 10", clear_history=False
    )
except Exception as e:
    # Mirror the base-assistant branch so a failure here is attributable too,
    # instead of printing a bare, unlabeled exception.
    print("Encountered an error with the assistant with context handling")
    print(e)