# LLM config for the Gemma 2B model served by LM Studio's
# OpenAI-compatible local endpoint.
gemma = {
    "config_list": [
        dict(
            api_type="openai",
            model="lmstudio-ai/gemma-2b-it-GGUF/gemma-2b-it-q8_0.gguf:0",
            base_url="http://localhost:1234/v1",
            api_key="lm-studio",
        ),
    ],
    # Disable response caching so every run hits the server.
    "cache_seed": None,
}
# LLM config for the Phi-2 model served by the same LM Studio
# OpenAI-compatible local endpoint.
phi2 = {
    "config_list": [
        dict(
            api_type="openai",
            model="TheBloke/phi-2-GGUF/phi-2.Q4_K_S.gguf:0",
            base_url="http://localhost:1234/v1",
            api_key="lm-studio",
        ),
    ],
    # Disable response caching so every run hits the server.
    "cache_seed": None,
}
# Now we create two agents, one for each model.
from autogen import ConversableAgent
# Agent backed by the Phi-2 endpoint; plays one half of the comedy duo.
jack = ConversableAgent(
    "Jack (Phi-2)",
    system_message=(
        "Your name is Jack and you are a comedian "
        "in a two-person comedy show."
    ),
    llm_config=phi2,
)
# Agent backed by the Gemma endpoint; plays the other half of the duo.
# Fix: system message previously read "in two-person comedy show"
# (missing "a"), inconsistent with Jack's parallel prompt.
emma = ConversableAgent(
    "Emma (Gemma)",
    llm_config=gemma,
    system_message="Your name is Emma and you are a comedian in a two-person comedy show.",
)
# Kick off the dialogue: Jack opens with a prompt to Emma,
# capped at two turns.
chat_result = jack.initiate_chat(
    emma,
    max_turns=2,
    message="Emma, tell me a joke.",
)