Skip to content
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 95 additions & 0 deletions docs/integrations/llms/ag2.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
---
title: AG2 (Multi-Agent Framework)
description: Instrument AG2 multi-agent conversations with Logfire spans for conversation flow, agent turns, and tool execution.
integration: logfire
---

[AG2](https://docs.ag2.ai) supports multi-agent orchestration patterns such as two-agent chat and group chat.
Use [`logfire.instrument_ag2()`][logfire.Logfire.instrument_ag2] to trace conversation lifecycle events, agent turns, and tool execution.

## What gets traced

`logfire.instrument_ag2()` creates spans for:

- **Conversation lifecycle** (`AG2 conversation`)
- **Group chat orchestration** (`AG2 group chat run`, `AG2 group chat round`)
- **Agent turns** (`AG2 agent turn`)
- **Tool execution** (`AG2 tool execution`)

LLM provider request spans are still produced by provider-specific integrations (for example [`instrument_openai()`][logfire.Logfire.instrument_openai]), so AG2 spans stay focused on orchestration.

## Install

```bash
pip install logfire "ag2[openai]>=0.11.4,<1.0"
```

## Basic usage

```python skip-run="true" skip-reason="external-connection"
import os

from autogen import AssistantAgent, GroupChat, GroupChatManager, LLMConfig, UserProxyAgent

import logfire


llm_config = LLMConfig(
{
'model': 'gpt-4o-mini',
'api_key': os.getenv('OPENAI_API_KEY'),
'api_type': 'openai',
}
)


def is_termination(msg: dict[str, object]) -> bool:
content = msg.get('content', '') or ''
return isinstance(content, str) and 'TERMINATE' in content


proxy = UserProxyAgent(
name='user_proxy',
human_input_mode='NEVER',
max_consecutive_auto_reply=10,
code_execution_config=False,
is_termination_msg=is_termination,
)
research = AssistantAgent(name='research_agent', system_message='Find relevant facts.', llm_config=llm_config)
analyst = AssistantAgent(
name='analyst_agent',
system_message='Summarize the findings and say TERMINATE when done.',
llm_config=llm_config,
)


@proxy.register_for_execution()
@research.register_for_llm(description='Search for information')
def search_knowledge(query: str) -> str:
return f'Results for {query}'


group_chat = GroupChat(agents=[proxy, research, analyst], messages=[], max_round=10)
manager = GroupChatManager(groupchat=group_chat, llm_config=llm_config, is_termination_msg=is_termination)

logfire.configure(send_to_logfire=False)
logfire.instrument_ag2(record_content=False)

# Optional: also instrument OpenAI request spans
logfire.instrument_openai()

proxy.run(manager, message='What is AG2?').process()
```

## Configuration

`logfire.instrument_ag2()` supports:

- `agent`: instrument a specific AG2 agent instance (or iterable of instances); if omitted, instrument globally.
- `record_content`: include message/tool payload content in span attributes. Defaults to `False`.
- `suppress_other_instrumentation`: suppress other OTEL instrumentation while AG2 conversation processing runs.

## Related docs

- [Logfire concepts](https://logfire.pydantic.dev/docs/concepts/)
- [AG2 documentation](https://docs.ag2.ai)
32 changes: 32 additions & 0 deletions examples/python/ag2-openai-quickstart/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# AG2 + Logfire quickstart (real OpenAI calls)

This example runs a real AG2 multi-agent conversation and emits Logfire spans.

## Requirements

- Python environment with this repository installed
- `ag2[openai]` package
- `OPENAI_API_KEY`
- Optional: `LOGFIRE_TOKEN` (if you want to send traces to the cloud)

## Run

```bash
python examples/python/ag2-openai-quickstart/main.py
```

Optional arguments:

```bash
python examples/python/ag2-openai-quickstart/main.py --model gpt-4o-mini --question "Plan a 2-day trip to Samarkand" --max-round 6
```

## Environment variables

```bash
export OPENAI_API_KEY="<your-openai-key>"
# Optional cloud export:
export LOGFIRE_TOKEN="<your-logfire-token>"
```

If `LOGFIRE_TOKEN` is not set, spans are still created locally with `send_to_logfire=False`.
114 changes: 114 additions & 0 deletions examples/python/ag2-openai-quickstart/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
from __future__ import annotations

import argparse
import os

from autogen import AssistantAgent, GroupChat, GroupChatManager, LLMConfig, UserProxyAgent

import logfire


def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for the AG2 quickstart runner.

    Returns:
        Namespace with `model`, `question`, and `max_round` attributes.
    """
    parser = argparse.ArgumentParser(description='AG2 + Logfire quickstart with real OpenAI calls')
    # Register every option from a single spec so flag, defaults, and help stay side by side.
    option_specs = (
        ('--model', {'default': 'gpt-4o-mini', 'help': 'OpenAI model name (default: gpt-4o-mini)'}),
        (
            '--question',
            {
                'default': 'Briefly explain what AG2 is and why multi-agent orchestration helps in practice.',
                'help': 'Task to run through AG2 group chat',
            },
        ),
        ('--max-round', {'type': int, 'default': 6, 'help': 'Maximum group-chat rounds (default: 6)'}),
    )
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()


def is_termination(msg: dict[str, object]) -> bool:
    """Return True when a message indicates the AG2 chat should terminate.

    A message terminates the chat when its content is a string containing
    the literal marker 'TERMINATE'. Missing, None, or non-string content
    never terminates.
    """
    text = msg.get('content') or ''
    if not isinstance(text, str):
        # Non-string payloads (e.g. structured tool output) cannot carry the marker.
        return False
    return 'TERMINATE' in text


def _extract_final_message(messages: list[dict[str, object]]) -> str | None:
    """Return the most recent meaningful message content, or None.

    Scans the transcript from the end, skipping entries whose content is not
    a non-blank string, and returns the first one that either came from an
    assistant or contains the 'TERMINATE' marker.
    """
    for entry in messages[::-1]:
        text = entry.get('content')
        if not (isinstance(text, str) and text.strip()):
            continue
        if entry.get('role') == 'assistant' or 'TERMINATE' in text:
            return text
    return None


def main() -> None:
    """Run a real AG2 group chat with OpenAI and emit Logfire spans."""
    opts = parse_args()

    api_key = os.getenv('OPENAI_API_KEY')
    if not api_key:
        raise SystemExit('OPENAI_API_KEY is required to run real OpenAI calls.')

    # Export to Logfire cloud only when a token is present; spans are still
    # created locally otherwise.
    logfire.configure(send_to_logfire=bool(os.getenv('LOGFIRE_TOKEN')))

    llm_config = LLMConfig(
        {
            'model': opts.model,
            'api_key': api_key,
            'api_type': 'openai',
        }
    )

    user_proxy = UserProxyAgent(
        name='user_proxy',
        human_input_mode='NEVER',
        max_consecutive_auto_reply=10,
        code_execution_config=False,
        is_termination_msg=is_termination,
    )
    fact_finder = AssistantAgent(
        name='researcher',
        system_message='You gather short factual context. Keep it compact.',
        llm_config=llm_config,
    )
    wrap_up = AssistantAgent(
        name='summarizer',
        system_message='You summarize the answer clearly and finish with TERMINATE.',
        llm_config=llm_config,
    )

    # Tool registered for LLM use on the researcher and for execution on the proxy.
    @user_proxy.register_for_execution()
    @fact_finder.register_for_llm(description='Lookup short synthetic facts')
    def quick_lookup(topic: str) -> str:
        return f'Quick lookup result about: {topic}'

    chat = GroupChat(agents=[user_proxy, fact_finder, wrap_up], messages=[], max_round=opts.max_round)
    manager = GroupChatManager(groupchat=chat, llm_config=llm_config, is_termination_msg=is_termination)

    with logfire.instrument_ag2(record_content=False):
        # Optional but recommended for provider-level spans.
        logfire.instrument_openai()
        print(f'Running AG2 chat with model={opts.model!r} ...')
        result = user_proxy.run(manager, message=opts.question).process()

    if result is None:
        # Fall back to scanning the raw transcript when process() returns nothing.
        result = _extract_final_message(list(manager.groupchat.messages))

    print('\n=== Final output ===\n')
    print(result if result is not None else '(No final content returned by AG2 response.process())')
    print('\nDone. Open Logfire to inspect AG2 conversation, rounds, turns, and tool spans.')


# Allow running this quickstart directly as a script.
if __name__ == '__main__':
    main()
4 changes: 4 additions & 0 deletions logfire-api/logfire_api/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,9 @@ def instrument_pydantic(self, *args, **kwargs) -> None: ...

def instrument_pydantic_ai(self, *args, **kwargs) -> None: ...

def instrument_ag2(self, *args, **kwargs) -> ContextManager[None]:
    # No-op shim for environments without the real `logfire` package installed;
    # returns an inert context manager so `with logfire.instrument_ag2(...)` still works.
    return nullcontext()

def instrument_pymongo(self, *args, **kwargs) -> None: ...

def instrument_sqlalchemy(self, *args, **kwargs) -> None: ...
Expand Down Expand Up @@ -231,6 +234,7 @@ def shutdown(self, *args, **kwargs) -> None: ...
instrument_wsgi = DEFAULT_LOGFIRE_INSTANCE.instrument_wsgi
instrument_pydantic = DEFAULT_LOGFIRE_INSTANCE.instrument_pydantic
instrument_pydantic_ai = DEFAULT_LOGFIRE_INSTANCE.instrument_pydantic_ai
instrument_ag2 = DEFAULT_LOGFIRE_INSTANCE.instrument_ag2
instrument_fastapi = DEFAULT_LOGFIRE_INSTANCE.instrument_fastapi
instrument_openai = DEFAULT_LOGFIRE_INSTANCE.instrument_openai
instrument_openai_agents = DEFAULT_LOGFIRE_INSTANCE.instrument_openai_agents
Expand Down
3 changes: 2 additions & 1 deletion logfire-api/logfire_api/__init__.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ from logfire.propagate import attach_context as attach_context, get_context as g
from logfire.sampling import SamplingOptions as SamplingOptions
from typing import Any

__all__ = ['Logfire', 'LogfireSpan', 'LevelName', 'AdvancedOptions', 'ConsoleOptions', 'CodeSource', 'PydanticPlugin', 'configure', 'span', 'instrument', 'log', 'trace', 'debug', 'notice', 'info', 'warn', 'warning', 'error', 'exception', 'fatal', 'force_flush', 'log_slow_async_callbacks', 'install_auto_tracing', 'instrument_asgi', 'instrument_wsgi', 'instrument_pydantic', 'instrument_pydantic_ai', 'instrument_fastapi', 'instrument_openai', 'instrument_openai_agents', 'instrument_anthropic', 'instrument_google_genai', 'instrument_litellm', 'instrument_dspy', 'instrument_print', 'instrument_asyncpg', 'instrument_httpx', 'instrument_celery', 'instrument_requests', 'instrument_psycopg', 'instrument_django', 'instrument_flask', 'instrument_starlette', 'instrument_aiohttp_client', 'instrument_aiohttp_server', 'instrument_sqlalchemy', 'instrument_sqlite3', 'instrument_aws_lambda', 'instrument_redis', 'instrument_pymongo', 'instrument_mysql', 'instrument_surrealdb', 'instrument_system_metrics', 'instrument_mcp', 'instrument_claude_agent_sdk', 'AutoTraceModule', 'with_tags', 'with_settings', 'suppress_scopes', 'shutdown', 'no_auto_trace', 'ScrubMatch', 'ScrubbingOptions', 'VERSION', 'add_non_user_code_prefix', 'suppress_instrumentation', 'StructlogProcessor', 'LogfireLoggingHandler', 'loguru_handler', 'SamplingOptions', 'MetricsOptions', 'VariablesOptions', 'LocalVariablesOptions', 'variables', 'var', 'variables_clear', 'variables_get', 'variables_push', 'variables_push_types', 'variables_validate', 'variables_push_config', 'variables_pull_config', 'variables_build_config', 'logfire_info', 'get_baggage', 'set_baggage', 'get_context', 'attach_context', 'url_from_eval']
__all__ = ['Logfire', 'LogfireSpan', 'LevelName', 'AdvancedOptions', 'ConsoleOptions', 'CodeSource', 'PydanticPlugin', 'configure', 'span', 'instrument', 'log', 'trace', 'debug', 'notice', 'info', 'warn', 'warning', 'error', 'exception', 'fatal', 'force_flush', 'log_slow_async_callbacks', 'install_auto_tracing', 'instrument_asgi', 'instrument_wsgi', 'instrument_pydantic', 'instrument_pydantic_ai', 'instrument_ag2', 'instrument_fastapi', 'instrument_openai', 'instrument_openai_agents', 'instrument_anthropic', 'instrument_google_genai', 'instrument_litellm', 'instrument_dspy', 'instrument_print', 'instrument_asyncpg', 'instrument_httpx', 'instrument_celery', 'instrument_requests', 'instrument_psycopg', 'instrument_django', 'instrument_flask', 'instrument_starlette', 'instrument_aiohttp_client', 'instrument_aiohttp_server', 'instrument_sqlalchemy', 'instrument_sqlite3', 'instrument_aws_lambda', 'instrument_redis', 'instrument_pymongo', 'instrument_mysql', 'instrument_surrealdb', 'instrument_system_metrics', 'instrument_mcp', 'instrument_claude_agent_sdk', 'AutoTraceModule', 'with_tags', 'with_settings', 'suppress_scopes', 'shutdown', 'no_auto_trace', 'ScrubMatch', 'ScrubbingOptions', 'VERSION', 'add_non_user_code_prefix', 'suppress_instrumentation', 'StructlogProcessor', 'LogfireLoggingHandler', 'loguru_handler', 'SamplingOptions', 'MetricsOptions', 'VariablesOptions', 'LocalVariablesOptions', 'variables', 'var', 'variables_clear', 'variables_get', 'variables_push', 'variables_push_types', 'variables_validate', 'variables_push_config', 'variables_pull_config', 'variables_build_config', 'logfire_info', 'get_baggage', 'set_baggage', 'get_context', 'attach_context', 'url_from_eval']

DEFAULT_LOGFIRE_INSTANCE = Logfire()
span = DEFAULT_LOGFIRE_INSTANCE.span
Expand All @@ -26,6 +26,7 @@ log_slow_async_callbacks = DEFAULT_LOGFIRE_INSTANCE.log_slow_async_callbacks
install_auto_tracing = DEFAULT_LOGFIRE_INSTANCE.install_auto_tracing
instrument_pydantic = DEFAULT_LOGFIRE_INSTANCE.instrument_pydantic
instrument_pydantic_ai = DEFAULT_LOGFIRE_INSTANCE.instrument_pydantic_ai
instrument_ag2 = DEFAULT_LOGFIRE_INSTANCE.instrument_ag2
instrument_asgi = DEFAULT_LOGFIRE_INSTANCE.instrument_asgi
instrument_wsgi = DEFAULT_LOGFIRE_INSTANCE.instrument_wsgi
instrument_fastapi = DEFAULT_LOGFIRE_INSTANCE.instrument_fastapi
Expand Down
2 changes: 2 additions & 0 deletions logfire-api/logfire_api/_internal/main.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -478,6 +478,8 @@ class Logfire:
def instrument_pydantic_ai(self, obj: pydantic_ai.Agent | None = None, /, *, include_binary_content: bool | None = None, include_content: bool | None = None, version: Literal[1, 2, 3] | None = None, event_mode: Literal['attributes', 'logs'] | None = None, **kwargs: Any) -> None: ...
@overload
def instrument_pydantic_ai(self, obj: pydantic_ai.models.Model, /, *, include_binary_content: bool | None = None, include_content: bool | None = None, version: Literal[1, 2, 3] | None = None, event_mode: Literal['attributes', 'logs'] | None = None, **kwargs: Any) -> pydantic_ai.models.Model: ...
def instrument_ag2(self, agent: Any | Iterable[Any] | None = None, *, record_content: bool = False, suppress_other_instrumentation: bool = False) -> AbstractContextManager[None]:
"""Instrument AG2 conversations, turns, and tool executions."""
def instrument_fastapi(self, app: FastAPI, *, capture_headers: bool = False, request_attributes_mapper: Callable[[Request | WebSocket, dict[str, Any]], dict[str, Any] | None] | None = None, excluded_urls: str | Iterable[str] | None = None, record_send_receive: bool = False, extra_spans: bool = False, **opentelemetry_kwargs: Any) -> AbstractContextManager[None]:
"""Instrument a FastAPI app so that spans and logs are automatically created for each request.

Expand Down
2 changes: 2 additions & 0 deletions logfire/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@
install_auto_tracing = DEFAULT_LOGFIRE_INSTANCE.install_auto_tracing
instrument_pydantic = DEFAULT_LOGFIRE_INSTANCE.instrument_pydantic
instrument_pydantic_ai = DEFAULT_LOGFIRE_INSTANCE.instrument_pydantic_ai
instrument_ag2 = DEFAULT_LOGFIRE_INSTANCE.instrument_ag2
instrument_asgi = DEFAULT_LOGFIRE_INSTANCE.instrument_asgi
instrument_wsgi = DEFAULT_LOGFIRE_INSTANCE.instrument_wsgi
instrument_fastapi = DEFAULT_LOGFIRE_INSTANCE.instrument_fastapi
Expand Down Expand Up @@ -150,6 +151,7 @@ def loguru_handler() -> Any:
'instrument_wsgi',
'instrument_pydantic',
'instrument_pydantic_ai',
'instrument_ag2',
'instrument_fastapi',
'instrument_openai',
'instrument_openai_agents',
Expand Down
Loading
Loading