[Docs] Agent Gateway (#17454)

* init litellm A2A client

* simpler a2a client interface

* test a2a

* move a2a invoking tests

* test fix

* ensure a2a send message is tracked in logs

* rename tags

* add streaming handling

* add a2a invocation

* add a2a invocation in cost calc

* test_a2a_logging_payload

* update invoke_agent_a2a

* test_invoke_agent_a2a_adds_litellm_data

* add A2A agent

* fix endpoints on A2A

* UI allow testing a2a endpoints

* add agent imgs

* add a2a as an endpoint

* add a2a

* docs a2a invoke

* docs a2a

* docs A2A invoke
Ishaan Jaff
2025-12-03 18:57:41 -08:00
committed by GitHub
parent 585aee2ae4
commit e4f954b354
14 changed files with 1330 additions and 196 deletions

docs/my-website/docs/a2a.md (new file)

@@ -0,0 +1,198 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Image from '@theme/IdealImage';
# /a2a - Agent Gateway (A2A Protocol)
| Feature | Supported |
|---------|-----------|
| Logging | ✅ |
| Load Balancing | ✅ |
| Streaming | ✅ |
:::tip
LiteLLM follows the [A2A (Agent-to-Agent) Protocol](https://github.com/google/A2A) for invoking agents.
:::
## Adding your Agent
You can add A2A-compatible agents through the LiteLLM Admin UI.
1. Navigate to the **Agents** tab
2. Click **Add Agent**
3. Enter the agent name (e.g., `ij-local`) and the URL of your A2A agent
<Image
img={require('../img/add_agent_1.png')}
style={{width: '80%', display: 'block', margin: '0'}}
/>
The URL should be the invocation URL for your A2A agent (e.g., `http://localhost:10001`).
## Invoking your Agents
Use the [A2A Python SDK](https://pypi.org/project/a2a/) to invoke agents through LiteLLM:
- `base_url`: Your LiteLLM proxy URL + `/a2a/{agent_name}`
- `headers`: Include your LiteLLM Virtual Key for authentication
```python showLineNumbers title="invoke_a2a_agent.py"
from uuid import uuid4
import httpx
import asyncio
from a2a.client import A2ACardResolver, A2AClient
from a2a.types import MessageSendParams, SendMessageRequest

# === CONFIGURE THESE ===
LITELLM_BASE_URL = "http://localhost:4000"  # Your LiteLLM proxy URL
LITELLM_VIRTUAL_KEY = "sk-1234"  # Your LiteLLM Virtual Key
LITELLM_AGENT_NAME = "ij-local"  # Agent name registered in LiteLLM
# =======================

async def main():
    base_url = f"{LITELLM_BASE_URL}/a2a/{LITELLM_AGENT_NAME}"
    headers = {"Authorization": f"Bearer {LITELLM_VIRTUAL_KEY}"}

    async with httpx.AsyncClient(headers=headers) as httpx_client:
        # Resolve agent card and create client
        resolver = A2ACardResolver(httpx_client=httpx_client, base_url=base_url)
        agent_card = await resolver.get_agent_card()
        client = A2AClient(httpx_client=httpx_client, agent_card=agent_card)

        # Send a message
        request = SendMessageRequest(
            id=str(uuid4()),
            params=MessageSendParams(
                message={
                    "role": "user",
                    "parts": [{"kind": "text", "text": "Hello, what can you do?"}],
                    "messageId": uuid4().hex,
                }
            ),
        )
        response = await client.send_message(request)
        print(response.model_dump(mode="json", exclude_none=True))

if __name__ == "__main__":
    asyncio.run(main())
```
### Streaming Responses
For streaming responses, use `send_message_streaming`:
```python showLineNumbers title="invoke_a2a_agent_streaming.py"
from uuid import uuid4
import httpx
import asyncio
from a2a.client import A2ACardResolver, A2AClient
from a2a.types import MessageSendParams, SendStreamingMessageRequest

# === CONFIGURE THESE ===
LITELLM_BASE_URL = "http://localhost:4000"  # Your LiteLLM proxy URL
LITELLM_VIRTUAL_KEY = "sk-1234"  # Your LiteLLM Virtual Key
LITELLM_AGENT_NAME = "ij-local"  # Agent name registered in LiteLLM
# =======================

async def main():
    base_url = f"{LITELLM_BASE_URL}/a2a/{LITELLM_AGENT_NAME}"
    headers = {"Authorization": f"Bearer {LITELLM_VIRTUAL_KEY}"}

    async with httpx.AsyncClient(headers=headers) as httpx_client:
        # Resolve agent card and create client
        resolver = A2ACardResolver(httpx_client=httpx_client, base_url=base_url)
        agent_card = await resolver.get_agent_card()
        client = A2AClient(httpx_client=httpx_client, agent_card=agent_card)

        # Send a streaming message
        request = SendStreamingMessageRequest(
            id=str(uuid4()),
            params=MessageSendParams(
                message={
                    "role": "user",
                    "parts": [{"kind": "text", "text": "Hello, what can you do?"}],
                    "messageId": uuid4().hex,
                }
            ),
        )

        # Stream the response
        async for chunk in client.send_message_streaming(request):
            print(chunk.model_dump(mode="json", exclude_none=True))

if __name__ == "__main__":
    asyncio.run(main())
```
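Streaming responses arrive as newline-delimited JSON-RPC events. Chunks can be distinguished by `result.kind`: `status-update` events carry progress messages, while `artifact-update` events carry the actual response content. An illustrative `artifact-update` event (values are placeholders; the exact fields depend on the agent):
```json title="Streaming chunk (illustrative)"
{
  "jsonrpc": "2.0",
  "id": "unique-request-id",
  "result": {
    "kind": "artifact-update",
    "taskId": "task-id",
    "contextId": "context-id",
    "artifact": {
      "artifactId": "artifact-id",
      "parts": [{"kind": "text", "text": "partial agent output"}]
    }
  }
}
```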
## Tracking Agent Logs
After invoking an agent, you can view the request logs in the LiteLLM **Logs** tab.
The logs show:
- **Request/Response content** sent to and received from the agent
- **User, Key, Team** information for tracking who made the request
- **Latency and cost** metrics
<Image
img={require('../img/agent2.png')}
style={{width: '100%', display: 'block', margin: '2rem auto'}}
/>
## API Reference
### Endpoint
```
POST /a2a/{agent_name}/message/send
```
### Authentication
Include your LiteLLM Virtual Key in the `Authorization` header:
```
Authorization: Bearer sk-your-litellm-key
```
### Request Format
LiteLLM follows the [A2A JSON-RPC 2.0 specification](https://github.com/google/A2A):
```json title="Request Body"
{
  "jsonrpc": "2.0",
  "id": "unique-request-id",
  "method": "message/send",
  "params": {
    "message": {
      "role": "user",
      "parts": [{"kind": "text", "text": "Your message here"}],
      "messageId": "unique-message-id"
    }
  }
}
```
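You can also POST this body directly, without the SDK. A minimal sketch using `httpx` (the URL, agent name, and key are the placeholder values from the examples above):
```python showLineNumbers title="send_message_raw.py"
from uuid import uuid4
import httpx

response = httpx.post(
    "http://localhost:4000/a2a/ij-local/message/send",
    headers={"Authorization": "Bearer sk-1234"},
    json={
        "jsonrpc": "2.0",
        "id": str(uuid4()),
        "method": "message/send",
        "params": {
            "message": {
                "role": "user",
                "parts": [{"kind": "text", "text": "Hello, what can you do?"}],
                "messageId": uuid4().hex,
            }
        },
    },
    timeout=60.0,
)
print(response.json())
```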
### Response Format
```json title="Response"
{
  "jsonrpc": "2.0",
  "id": "unique-request-id",
  "result": {
    "kind": "task",
    "id": "task-id",
    "contextId": "context-id",
    "status": {"state": "completed", "timestamp": "2025-01-01T00:00:00Z"},
    "artifacts": [
      {
        "artifactId": "artifact-id",
        "name": "response",
        "parts": [{"kind": "text", "text": "Agent response here"}]
      }
    ]
  }
}
```
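To get plain text out of a task response, collect the `text` parts from `result.artifacts`. A small helper, assuming the response shape above:
```python showLineNumbers title="extract_text.py"
def extract_text(task_result: dict) -> str:
    """Concatenate the text parts across a task's artifacts."""
    chunks = []
    for artifact in task_result.get("artifacts", []):
        for part in artifact.get("parts", []):
            if part.get("kind") == "text" and part.get("text"):
                chunks.append(part["text"])
    return "".join(chunks)

# extract_text(response.json()["result"]) -> "Agent response here"
```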

Binary image file added (not shown): 288 KiB
Binary image file added (not shown): 806 KiB


@@ -316,6 +316,7 @@ const sidebars = {
slug: "/supported_endpoints",
},
items: [
"a2a",
"assistants",
{
type: "category",


@@ -115,6 +115,16 @@ async def get_agent_card(
    tags=["[beta] A2A Agents"],
    dependencies=[Depends(user_api_key_auth)],
)
@router.post(
    "/a2a/{agent_id}/message/send",
    tags=["[beta] A2A Agents"],
    dependencies=[Depends(user_api_key_auth)],
)
@router.post(
    "/v1/a2a/{agent_id}/message/send",
    tags=["[beta] A2A Agents"],
    dependencies=[Depends(user_api_key_auth)],
)
async def invoke_agent_a2a(
    agent_id: str,
    request: Request,


@@ -17,7 +17,8 @@
"rerank": "Supports /rerank endpoint",
"ocr": "Supports /ocr endpoint",
"search": "Supports /search endpoint",
"skills": "Supports /skills endpoint"
"skills": "Supports /skills endpoint",
"a2a_(Agent Gateway)": "Supports /a2a/{agent}/message/send endpoint (A2A Protocol)"
}
}
},
@@ -35,7 +36,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"ai21": {
@@ -51,7 +53,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"ai21_chat": {
@@ -67,7 +70,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"anthropic": {
@@ -84,7 +88,8 @@
"moderations": false,
"batches": true,
"rerank": false,
"skills": true
"skills": true,
"a2a": true
}
},
"anthropic_text": {
@@ -101,7 +106,8 @@
"moderations": false,
"batches": true,
"rerank": false,
"skills": true
"skills": true,
"a2a": true
}
},
"assemblyai": {
@@ -117,7 +123,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"auto_router": {
@@ -133,7 +140,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"bedrock": {
@@ -149,7 +157,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": true
"rerank": true,
"a2a": true
}
},
"sagemaker": {
@@ -165,7 +174,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"azure": {
@@ -181,7 +191,8 @@
"audio_speech": true,
"moderations": true,
"batches": true,
"rerank": false
"rerank": false,
"a2a": true
}
},
"azure_ai": {
@@ -198,7 +209,8 @@
"moderations": true,
"batches": true,
"rerank": false,
"ocr": true
"ocr": true,
"a2a": true
}
},
"azure_ai/doc-intelligence": {
@@ -231,7 +243,8 @@
"audio_speech": true,
"moderations": true,
"batches": true,
"rerank": false
"rerank": false,
"a2a": true
}
},
"baseten": {
@@ -247,7 +260,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"bytez": {
@@ -263,7 +277,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"cerebras": {
@@ -279,7 +294,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"clarifai": {
@@ -295,7 +311,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"cloudflare": {
@@ -311,7 +328,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"codestral": {
@@ -327,7 +345,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"cohere": {
@@ -343,7 +362,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": true
"rerank": true,
"a2a": true
}
},
"cohere_chat": {
@@ -359,7 +379,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"cometapi": {
@@ -375,7 +396,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"compactifai": {
@@ -391,7 +413,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"custom": {
@@ -407,7 +430,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"custom_openai": {
@@ -423,7 +447,8 @@
"audio_speech": true,
"moderations": true,
"batches": true,
"rerank": false
"rerank": false,
"a2a": true
}
},
"dashscope": {
@@ -439,7 +464,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"databricks": {
@@ -455,7 +481,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"dataforseo": {
@@ -488,7 +515,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"deepgram": {
@@ -504,7 +532,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"deepinfra": {
@@ -520,7 +549,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"deepseek": {
@@ -536,7 +566,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"elevenlabs": {
@@ -552,7 +583,8 @@
"audio_speech": true,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"exa_ai": {
@@ -585,7 +617,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"fal_ai": {
@@ -601,7 +634,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"featherless_ai": {
@@ -617,7 +651,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"fireworks_ai": {
@@ -633,7 +668,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"firecrawl": {
@@ -666,7 +702,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"galadriel": {
@@ -682,7 +719,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"github_copilot": {
@@ -698,7 +736,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"github": {
@@ -714,7 +753,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"vertex_ai": {
@@ -731,7 +771,8 @@
"moderations": false,
"batches": false,
"rerank": false,
"ocr": true
"ocr": true,
"a2a": true
}
},
"vertex_ai/chirp": {
@@ -763,7 +804,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"gradient_ai": {
@@ -779,7 +821,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"groq": {
@@ -795,7 +838,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"heroku": {
@@ -811,7 +855,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"hosted_vllm": {
@@ -827,7 +872,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"huggingface": {
@@ -843,7 +889,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": true
"rerank": true,
"a2a": true
}
},
"hyperbolic": {
@@ -859,7 +906,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"watsonx": {
@@ -875,7 +923,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"infinity": {
@@ -923,7 +972,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"lemonade": {
@@ -939,7 +989,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"litellm_proxy": {
@@ -955,7 +1006,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"llamafile": {
@@ -971,7 +1023,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"lm_studio": {
@@ -987,7 +1040,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"maritalk": {
@@ -1003,7 +1057,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"meta_llama": {
@@ -1019,7 +1074,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"mistral": {
@@ -1036,7 +1092,8 @@
"moderations": false,
"batches": false,
"rerank": false,
"ocr": true
"ocr": true,
"a2a": true
}
},
"moonshot": {
@@ -1052,7 +1109,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"docker_model_runner": {
@@ -1068,7 +1126,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"morph": {
@@ -1084,7 +1143,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"nebius": {
@@ -1100,7 +1160,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"nlp_cloud": {
@@ -1116,7 +1177,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"novita": {
@@ -1132,7 +1194,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"nscale": {
@@ -1148,7 +1211,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"nvidia_nim": {
@@ -1164,7 +1228,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"oci": {
@@ -1180,7 +1245,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"ollama": {
@@ -1196,7 +1262,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"ollama_chat": {
@@ -1212,7 +1279,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"oobabooga": {
@@ -1228,7 +1296,8 @@
"audio_speech": true,
"moderations": true,
"batches": true,
"rerank": false
"rerank": false,
"a2a": true
}
},
"openai": {
@@ -1244,7 +1313,8 @@
"audio_speech": true,
"moderations": true,
"batches": true,
"rerank": false
"rerank": false,
"a2a": true
}
},
"openai_like": {
@@ -1276,7 +1346,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"ovhcloud": {
@@ -1292,7 +1363,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"parallel_ai": {
@@ -1326,7 +1398,8 @@
"moderations": false,
"batches": false,
"rerank": false,
"search": true
"search": true,
"a2a": true
}
},
"petals": {
@@ -1342,7 +1415,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"publicai": {
@@ -1358,7 +1432,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"predibase": {
@@ -1374,7 +1449,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"recraft": {
@@ -1406,7 +1482,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"runwayml": {
@@ -1439,7 +1516,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"searxng": {
@@ -1472,7 +1550,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"snowflake": {
@@ -1488,7 +1567,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"text-completion-codestral": {
@@ -1504,7 +1584,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"text-completion-openai": {
@@ -1520,7 +1601,8 @@
"audio_speech": true,
"moderations": true,
"batches": true,
"rerank": false
"rerank": false,
"a2a": true
}
},
"together_ai": {
@@ -1536,7 +1618,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"topaz": {
@@ -1552,7 +1635,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"tavily": {
@@ -1585,7 +1669,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"v0": {
@@ -1601,7 +1686,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"vercel_ai_gateway": {
@@ -1617,7 +1703,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"vllm": {
@@ -1633,7 +1720,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"volcengine": {
@@ -1649,7 +1737,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"voyage": {
@@ -1681,7 +1770,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"watsonx_text": {
@@ -1697,7 +1787,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"xai": {
@@ -1713,7 +1804,8 @@
"audio_speech": false,
"moderations": false,
"batches": false,
"rerank": false
"rerank": false,
"a2a": true
}
},
"xinference": {

A2AMetrics.tsx (new file)

@@ -0,0 +1,231 @@
import React, { useState } from "react";
import { Tooltip, Button } from "antd";
import {
CheckCircleOutlined,
ClockCircleOutlined,
LoadingOutlined,
ExclamationCircleOutlined,
CopyOutlined,
DownOutlined,
RightOutlined,
LinkOutlined,
FileTextOutlined,
RobotOutlined,
} from "@ant-design/icons";
export interface A2ATaskMetadata {
taskId?: string;
contextId?: string;
status?: {
state?: string;
timestamp?: string;
message?: string;
};
metadata?: Record<string, any>;
}
interface A2AMetricsProps {
a2aMetadata?: A2ATaskMetadata;
timeToFirstToken?: number;
totalLatency?: number;
}
const getStatusIcon = (state?: string) => {
switch (state) {
case "completed":
return <CheckCircleOutlined className="text-green-500" />;
case "working":
case "submitted":
return <LoadingOutlined className="text-blue-500" />;
case "failed":
case "canceled":
return <ExclamationCircleOutlined className="text-red-500" />;
default:
return <ClockCircleOutlined className="text-gray-500" />;
}
};
const getStatusColor = (state?: string) => {
switch (state) {
case "completed":
return "bg-green-100 text-green-700";
case "working":
case "submitted":
return "bg-blue-100 text-blue-700";
case "failed":
case "canceled":
return "bg-red-100 text-red-700";
default:
return "bg-gray-100 text-gray-700";
}
};
const formatTimestamp = (timestamp?: string) => {
if (!timestamp) return null;
try {
const date = new Date(timestamp);
return date.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" });
} catch {
return timestamp;
}
};
const truncateId = (id?: string, length = 8) => {
if (!id) return null;
return id.length > length ? `${id.substring(0, length)}` : id;
};
const copyToClipboard = (text: string) => {
navigator.clipboard.writeText(text);
};
const A2AMetrics: React.FC<A2AMetricsProps> = ({ a2aMetadata, timeToFirstToken, totalLatency }) => {
const [showDetails, setShowDetails] = useState(false);
if (!a2aMetadata && !timeToFirstToken && !totalLatency) return null;
const { taskId, contextId, status, metadata } = a2aMetadata || {};
const formattedTime = formatTimestamp(status?.timestamp);
return (
<div className="a2a-metrics mt-3 pt-2 border-t border-gray-200 text-xs">
{/* A2A Metadata Header */}
<div className="flex items-center mb-2 text-gray-600">
<RobotOutlined className="mr-1.5 text-blue-500" />
<span className="font-medium text-gray-700">A2A Metadata</span>
</div>
{/* Main metrics row */}
<div className="flex flex-wrap items-center gap-2 text-gray-500 ml-4">
{/* Status badge */}
{status?.state && (
<span className={`inline-flex items-center px-2 py-0.5 rounded-full text-xs font-medium ${getStatusColor(status.state)}`}>
{getStatusIcon(status.state)}
<span className="ml-1 capitalize">{status.state}</span>
</span>
)}
{/* Timestamp */}
{formattedTime && (
<Tooltip title={status?.timestamp}>
<span className="flex items-center">
<ClockCircleOutlined className="mr-1" />
{formattedTime}
</span>
</Tooltip>
)}
{/* Latency */}
{totalLatency !== undefined && (
<Tooltip title="Total latency">
<span className="flex items-center text-blue-600">
<ClockCircleOutlined className="mr-1" />
{(totalLatency / 1000).toFixed(2)}s
</span>
</Tooltip>
)}
{/* Time to first token */}
{timeToFirstToken !== undefined && (
<Tooltip title="Time to first token">
<span className="flex items-center text-green-600">
TTFT: {(timeToFirstToken / 1000).toFixed(2)}s
</span>
</Tooltip>
)}
</div>
{/* IDs row */}
<div className="flex flex-wrap items-center gap-3 text-gray-500 ml-4 mt-1.5">
{/* Task ID */}
{taskId && (
<Tooltip title={`Click to copy: ${taskId}`}>
<span
className="flex items-center cursor-pointer hover:text-gray-700"
onClick={() => copyToClipboard(taskId)}
>
<FileTextOutlined className="mr-1" />
Task: {truncateId(taskId)}
<CopyOutlined className="ml-1 text-gray-400 hover:text-gray-600" />
</span>
</Tooltip>
)}
{/* Context/Session ID */}
{contextId && (
<Tooltip title={`Click to copy: ${contextId}`}>
<span
className="flex items-center cursor-pointer hover:text-gray-700"
onClick={() => copyToClipboard(contextId)}
>
<LinkOutlined className="mr-1" />
Session: {truncateId(contextId)}
<CopyOutlined className="ml-1 text-gray-400 hover:text-gray-600" />
</span>
</Tooltip>
)}
{/* Details toggle */}
{(metadata || status?.message) && (
<Button
type="text"
size="small"
className="text-xs text-blue-500 hover:text-blue-700 p-0 h-auto"
onClick={() => setShowDetails(!showDetails)}
>
{showDetails ? <DownOutlined /> : <RightOutlined />}
<span className="ml-1">Details</span>
</Button>
)}
</div>
{/* Expandable details panel */}
{showDetails && (
<div className="mt-2 ml-4 p-3 bg-gray-50 rounded-md text-gray-600 border border-gray-200">
{/* Status message */}
{status?.message && (
<div className="mb-2">
<span className="font-medium text-gray-700">Status Message:</span>
<span className="ml-2">{status.message}</span>
</div>
)}
{/* Full IDs */}
{taskId && (
<div className="mb-1.5 flex items-center">
<span className="font-medium text-gray-700 w-24">Task ID:</span>
<code className="ml-2 px-2 py-1 bg-white border border-gray-200 rounded text-xs font-mono">{taskId}</code>
<CopyOutlined
className="ml-2 cursor-pointer text-gray-400 hover:text-blue-500"
onClick={() => copyToClipboard(taskId)}
/>
</div>
)}
{contextId && (
<div className="mb-1.5 flex items-center">
<span className="font-medium text-gray-700 w-24">Session ID:</span>
<code className="ml-2 px-2 py-1 bg-white border border-gray-200 rounded text-xs font-mono">{contextId}</code>
<CopyOutlined
className="ml-2 cursor-pointer text-gray-400 hover:text-blue-500"
onClick={() => copyToClipboard(contextId)}
/>
</div>
)}
{/* Metadata fields */}
{metadata && Object.keys(metadata).length > 0 && (
<div className="mt-3">
<span className="font-medium text-gray-700">Custom Metadata:</span>
<pre className="mt-1.5 p-2 bg-white border border-gray-200 rounded text-xs font-mono overflow-x-auto whitespace-pre-wrap">
{JSON.stringify(metadata, null, 2)}
</pre>
</div>
)}
</div>
)}
</div>
);
};
export default A2AMetrics;


@@ -51,6 +51,10 @@ import { fetchAvailableModels, ModelGroup } from "../llm_calls/fetch_models";
import { makeOpenAIImageEditsRequest } from "../llm_calls/image_edits";
import { makeOpenAIImageGenerationRequest } from "../llm_calls/image_generation";
import { makeOpenAIResponsesRequest } from "../llm_calls/responses_api";
import { Agent, fetchAvailableAgents } from "../llm_calls/fetch_agents";
import { makeA2AStreamMessageRequest } from "../llm_calls/a2a_send_message";
import A2AMetrics from "./A2AMetrics";
import { A2ATaskMetadata } from "./types";
import MCPEventsDisplay, { MCPEvent } from "./MCPEventsDisplay";
import { EndpointType, getEndpointType } from "./mode_endpoint_mapping";
import ReasoningContent from "./ReasoningContent";
@@ -124,6 +128,8 @@ const ChatUI: React.FC<ChatUIProps> = ({
const [selectedModel, setSelectedModel] = useState<string | undefined>(undefined);
const [showCustomModelInput, setShowCustomModelInput] = useState<boolean>(false);
const [modelInfo, setModelInfo] = useState<ModelGroup[]>([]);
const [agentInfo, setAgentInfo] = useState<Agent[]>([]);
const [selectedAgent, setSelectedAgent] = useState<string | undefined>(undefined);
const customModelTimeout = useRef<NodeJS.Timeout | null>(null);
const [endpointType, setEndpointType] = useState<string>(
() => sessionStorage.getItem("endpointType") || EndpointType.CHAT,
@@ -340,6 +346,29 @@ const ChatUI: React.FC<ChatUIProps> = ({
loadMCPTools();
}, [accessToken, userID, userRole, apiKeySource, apiKey, token]);
// Fetch agents when A2A endpoint is selected
useEffect(() => {
const userApiKey = apiKeySource === "session" ? accessToken : apiKey;
if (!userApiKey || endpointType !== EndpointType.A2A_AGENTS) {
return;
}
const loadAgents = async () => {
try {
const agents = await fetchAvailableAgents(userApiKey);
setAgentInfo(agents);
// Clear selection if current agent not in list
if (selectedAgent && !agents.some((a) => a.agent_name === selectedAgent)) {
setSelectedAgent(undefined);
}
} catch (error) {
console.error("Error fetching agents:", error);
}
};
loadAgents();
}, [accessToken, apiKeySource, apiKey, endpointType]);
useEffect(() => {
// Scroll to the bottom of the chat whenever chatHistory updates
if (chatEndRef.current) {
@@ -469,6 +498,23 @@ const ChatUI: React.FC<ChatUIProps> = ({
});
};
const updateA2AMetadata = (a2aMetadata: A2ATaskMetadata) => {
console.log("Received A2A metadata:", a2aMetadata);
setChatHistory((prevHistory) => {
const lastMessage = prevHistory[prevHistory.length - 1];
if (lastMessage && lastMessage.role === "assistant") {
const updatedMessage = {
...lastMessage,
a2aMetadata,
};
return [...prevHistory.slice(0, prevHistory.length - 1), updatedMessage];
}
return prevHistory;
});
};
const updateTotalLatency = (totalLatency: number) => {
setChatHistory((prevHistory) => {
const lastMessage = prevHistory[prevHistory.length - 1];
@@ -684,6 +730,12 @@ const ChatUI: React.FC<ChatUIProps> = ({
return;
}
// For A2A agents, require agent selection
if (endpointType === EndpointType.A2A_AGENTS && !selectedAgent) {
NotificationsManager.fromBackend("Please select an agent to send a message");
return;
}
if (!token || !userRole || !userID) {
return;
}
@@ -908,6 +960,20 @@ const ChatUI: React.FC<ChatUIProps> = ({
}
}
}
// Handle A2A agent calls (separate from model-based calls) - use streaming
if (endpointType === EndpointType.A2A_AGENTS && selectedAgent) {
await makeA2AStreamMessageRequest(
selectedAgent,
inputMessage,
(chunk, model) => updateTextUI("assistant", chunk, model),
effectiveApiKey,
signal,
updateTimingData,
updateTotalLatency,
updateA2AMetadata,
);
}
} catch (error) {
if (signal.aborted) {
console.log("Request was cancelled");
@@ -1038,11 +1104,13 @@ const ChatUI: React.FC<ChatUIProps> = ({
endpointType={endpointType}
onEndpointChange={(value) => {
setEndpointType(value);
// Clear model selection when switching endpoint type
// Clear model/agent selection when switching endpoint type
setSelectedModel(undefined);
setSelectedAgent(undefined);
setShowCustomModelInput(false);
try {
sessionStorage.removeItem("selectedModel");
sessionStorage.removeItem("selectedAgent");
} catch {}
}}
className="mb-4"
@@ -1077,6 +1145,8 @@ const ChatUI: React.FC<ChatUIProps> = ({
/>
</div>
{/* Model Selector - shown when NOT using A2A Agents */}
{endpointType !== EndpointType.A2A_AGENTS && (
<div>
<Text className="font-medium block mb-2 text-gray-700 flex items-center justify-between">
<span className="flex items-center">
@@ -1174,6 +1244,46 @@ const ChatUI: React.FC<ChatUIProps> = ({
/>
)}
</div>
)}
{/* Agent Selector - shown ONLY for A2A Agents endpoint */}
{endpointType === EndpointType.A2A_AGENTS && (
<div>
<Text className="font-medium block mb-2 text-gray-700 flex items-center">
<RobotOutlined className="mr-2" /> Select Agent
</Text>
<Select
value={selectedAgent}
placeholder="Select an Agent"
onChange={(value) => setSelectedAgent(value)}
options={agentInfo.map((agent) => ({
value: agent.agent_name,
label: agent.agent_name || agent.agent_id,
key: agent.agent_id,
}))}
style={{ width: "100%" }}
showSearch={true}
className="rounded-md"
optionLabelProp="label"
>
{agentInfo.map((agent) => (
<Select.Option key={agent.agent_id} value={agent.agent_name} label={agent.agent_name || agent.agent_id}>
<div className="flex flex-col py-1">
<span className="font-medium">{agent.agent_name || agent.agent_id}</span>
{agent.agent_card_params?.description && (
<span className="text-xs text-gray-500 mt-1">{agent.agent_card_params.description}</span>
)}
</div>
</Select.Option>
))}
</Select>
{agentInfo.length === 0 && (
<Text className="text-xs text-gray-500 mt-2 block">
No agents found. Create agents via the /v1/agents endpoint.
</Text>
)}
</div>
)}
<div>
<Text className="font-medium block mb-2 text-gray-700 flex items-center">
@@ -1440,7 +1550,8 @@ const ChatUI: React.FC<ChatUIProps> = ({
)}
{message.role === "assistant" &&
(message.timeToFirstToken || message.totalLatency || message.usage) && (
(message.timeToFirstToken || message.totalLatency || message.usage) &&
!message.a2aMetadata && (
<ResponseMetrics
timeToFirstToken={message.timeToFirstToken}
totalLatency={message.totalLatency}
@@ -1448,6 +1559,15 @@ const ChatUI: React.FC<ChatUIProps> = ({
toolName={message.toolName}
/>
)}
{/* A2A Metrics - show for A2A agent responses */}
{message.role === "assistant" && message.a2aMetadata && (
<A2AMetrics
a2aMetadata={message.a2aMetadata}
timeToFirstToken={message.timeToFirstToken}
totalLatency={message.totalLatency}
/>
)}
</div>
</div>
</div>
@@ -1685,6 +1805,8 @@ const ChatUI: React.FC<ChatUIProps> = ({
endpointType === EndpointType.RESPONSES ||
endpointType === EndpointType.ANTHROPIC_MESSAGES
? "Type your message... (Shift+Enter for new line)"
: endpointType === EndpointType.A2A_AGENTS
? "Send a message to the A2A agent..."
: endpointType === EndpointType.IMAGE_EDITS
? "Describe how you want to edit the image..."
: endpointType === EndpointType.SPEECH


@@ -42,4 +42,5 @@ export const ENDPOINT_OPTIONS = [
{ value: EndpointType.EMBEDDINGS, label: "/v1/embeddings" },
{ value: EndpointType.SPEECH, label: "/v1/audio/speech" },
{ value: EndpointType.TRANSCRIPTION, label: "/v1/audio/transcriptions" },
{ value: EndpointType.A2A_AGENTS, label: "/v1/a2a/message/send" },
];


@@ -25,6 +25,7 @@ export enum EndpointType {
EMBEDDINGS = "embeddings",
SPEECH = "speech",
TRANSCRIPTION = "transcription",
A2A_AGENTS = "a2a_agents",
// add additional endpoint types if required
}


@@ -70,12 +70,24 @@ export interface VectorStoreSearchResponse {
data: VectorStoreSearchResult[];
}
export interface A2ATaskMetadata {
taskId?: string;
contextId?: string;
status?: {
state?: string;
timestamp?: string;
message?: string;
};
metadata?: Record<string, any>;
}
export interface MessageType {
role: string;
content: string | MultimodalContent[];
model?: string;
isImage?: boolean;
isAudio?: boolean;
isEmbeddings?: boolean;
reasoningContent?: string;
timeToFirstToken?: number;
totalLatency?: number;
@@ -93,6 +105,7 @@ export interface MessageType {
detail: string;
};
searchResults?: VectorStoreSearchResponse[];
a2aMetadata?: A2ATaskMetadata;
}
export interface MultimodalContent {


@@ -0,0 +1,413 @@
// a2a_send_message.tsx
// A2A Protocol (JSON-RPC 2.0) implementation for sending messages to agents
import { v4 as uuidv4 } from "uuid";
import { getProxyBaseUrl } from "../../networking";
import { A2ATaskMetadata } from "../chat_ui/types";
interface A2AMessagePart {
kind: "text";
text: string;
}
interface A2AMessage {
kind: "message";
messageId: string;
role: "user" | "agent";
parts: A2AMessagePart[];
}
interface A2AJsonRpcRequest {
jsonrpc: "2.0";
id: string;
method: string;
params: {
message: A2AMessage;
};
}
interface A2AJsonRpcResponse {
jsonrpc: "2.0";
id: string;
result?: {
kind?: string;
parts?: A2AMessagePart[];
id?: string;
contextId?: string;
status?: {
state?: string;
timestamp?: string;
message?: {
parts?: A2AMessagePart[];
};
};
metadata?: Record<string, any>;
artifacts?: Array<{
artifactId?: string;
name?: string;
parts?: A2AMessagePart[];
}>;
[key: string]: any;
};
error?: {
code: number;
message: string;
};
}
/**
* Extracts A2A task metadata from the response result.
*/
const extractA2AMetadata = (result: A2AJsonRpcResponse["result"]): A2ATaskMetadata | undefined => {
if (!result) return undefined;
const metadata: A2ATaskMetadata = {};
// Extract task ID
if (result.id) {
metadata.taskId = result.id;
}
// Extract context/session ID
if (result.contextId) {
metadata.contextId = result.contextId;
}
// Extract status
if (result.status) {
metadata.status = {
state: result.status.state,
timestamp: result.status.timestamp,
};
// Extract status message text if present
if (result.status.message?.parts) {
const statusText = result.status.message.parts
.filter((p: any) => p.kind === "text" && p.text)
.map((p: any) => p.text)
.join(" ");
if (statusText) {
metadata.status.message = statusText;
}
}
}
// Extract custom metadata
if (result.metadata && typeof result.metadata === "object") {
metadata.metadata = result.metadata;
}
return Object.keys(metadata).length > 0 ? metadata : undefined;
};
/**
* Sends a message to an A2A agent using the JSON-RPC 2.0 protocol.
* Uses the non-streaming message/send method.
*/
export const makeA2ASendMessageRequest = async (
agentId: string,
message: string,
onTextUpdate: (chunk: string, model?: string) => void,
accessToken: string,
signal?: AbortSignal,
onTimingData?: (timeToFirstToken: number) => void,
onTotalLatency?: (totalLatency: number) => void,
onA2AMetadata?: (metadata: A2ATaskMetadata) => void,
): Promise<void> => {
const proxyBaseUrl = getProxyBaseUrl();
const url = proxyBaseUrl
? `${proxyBaseUrl}/a2a/${agentId}/message/send`
: `/a2a/${agentId}/message/send`;
const requestId = uuidv4();
const messageId = uuidv4().replace(/-/g, "");
const jsonRpcRequest: A2AJsonRpcRequest = {
jsonrpc: "2.0",
id: requestId,
method: "message/send",
params: {
message: {
kind: "message",
messageId: messageId,
role: "user",
parts: [{ kind: "text", text: message }],
},
},
};
const startTime = performance.now();
try {
const response = await fetch(url, {
method: "POST",
headers: {
Authorization: `Bearer ${accessToken}`,
"Content-Type": "application/json",
},
body: JSON.stringify(jsonRpcRequest),
signal,
});
const timeToFirstToken = performance.now() - startTime;
if (onTimingData) {
onTimingData(timeToFirstToken);
}
if (!response.ok) {
const errorData = await response.json();
throw new Error(errorData.error?.message || errorData.detail || `HTTP ${response.status}`);
}
const jsonRpcResponse: A2AJsonRpcResponse = await response.json();
const totalLatency = performance.now() - startTime;
if (onTotalLatency) {
onTotalLatency(totalLatency);
}
if (jsonRpcResponse.error) {
throw new Error(jsonRpcResponse.error.message);
}
// Extract text and metadata from response
const result = jsonRpcResponse.result;
if (result) {
let responseText = "";
// Extract and send A2A metadata
const a2aMetadata = extractA2AMetadata(result);
if (a2aMetadata && onA2AMetadata) {
onA2AMetadata(a2aMetadata);
}
// A2A Task response format with artifacts array
// Extract text from artifacts[*].parts[*] where kind === "text"
if (result.artifacts && Array.isArray(result.artifacts)) {
for (const artifact of result.artifacts) {
if (artifact.parts && Array.isArray(artifact.parts)) {
for (const part of artifact.parts) {
if (part.kind === "text" && part.text) {
responseText += part.text;
}
}
}
}
}
// Fallback: direct parts array (simpler response format)
else if (result.parts && Array.isArray(result.parts)) {
for (const part of result.parts) {
if (part.kind === "text" && part.text) {
responseText += part.text;
}
}
}
// Fallback: status.message.parts format
else if (result.status?.message?.parts) {
for (const part of result.status.message.parts) {
if (part.kind === "text" && part.text) {
responseText += part.text;
}
}
}
if (responseText) {
onTextUpdate(responseText, `a2a_agent/${agentId}`);
} else {
// Fallback: show raw result if we couldn't parse it
console.warn("Could not extract text from A2A response, showing raw JSON:", result);
onTextUpdate(JSON.stringify(result, null, 2), `a2a_agent/${agentId}`);
}
}
} catch (error) {
if (signal?.aborted) {
console.log("A2A request was cancelled");
return;
}
console.error("A2A send message error:", error);
throw error;
}
};
/**
* Sends a streaming message to an A2A agent using the JSON-RPC 2.0 protocol.
* Uses the message/stream method with NDJSON responses.
*/
export const makeA2AStreamMessageRequest = async (
agentId: string,
message: string,
onTextUpdate: (chunk: string, model?: string) => void,
accessToken: string,
signal?: AbortSignal,
onTimingData?: (timeToFirstToken: number) => void,
onTotalLatency?: (totalLatency: number) => void,
onA2AMetadata?: (metadata: A2ATaskMetadata) => void,
): Promise<void> => {
const proxyBaseUrl = getProxyBaseUrl();
const url = proxyBaseUrl
? `${proxyBaseUrl}/a2a/${agentId}`
: `/a2a/${agentId}`;
const requestId = uuidv4();
const messageId = uuidv4().replace(/-/g, "");
const jsonRpcRequest: A2AJsonRpcRequest = {
jsonrpc: "2.0",
id: requestId,
method: "message/stream",
params: {
message: {
kind: "message",
messageId: messageId,
role: "user",
parts: [{ kind: "text", text: message }],
},
},
};
const startTime = performance.now();
let firstChunkReceived = false;
let latestMetadata: A2ATaskMetadata | undefined;
let accumulatedText = "";
try {
const response = await fetch(url, {
method: "POST",
headers: {
Authorization: `Bearer ${accessToken}`,
"Content-Type": "application/json",
},
body: JSON.stringify(jsonRpcRequest),
signal,
});
if (!response.ok) {
const errorData = await response.json();
throw new Error(errorData.error?.message || errorData.detail || `HTTP ${response.status}`);
}
const reader = response.body?.getReader();
if (!reader) {
throw new Error("No response body");
}
const decoder = new TextDecoder();
let buffer = "";
let done = false;
while (!done) {
const readResult = await reader.read();
done = readResult.done;
const value = readResult.value;
if (done) break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split("\n");
buffer = lines.pop() || "";
for (const line of lines) {
if (!line.trim()) continue;
let chunk: any;
try {
chunk = JSON.parse(line);
} catch (parseError) {
// Skip lines that fail to parse (e.g. partial frames split across reads)
console.warn("Failed to parse A2A streaming chunk:", line, parseError);
continue;
}
if (!firstChunkReceived) {
firstChunkReceived = true;
const timeToFirstToken = performance.now() - startTime;
if (onTimingData) {
onTimingData(timeToFirstToken);
}
}
// Handle streaming chunks - extract text from various A2A formats
const result = chunk.result;
if (result) {
// Extract metadata from each chunk (keep latest)
const chunkMetadata = extractA2AMetadata(result);
if (chunkMetadata) {
latestMetadata = { ...latestMetadata, ...chunkMetadata };
}
const chunkKind = result.kind;
// Handle artifact-update chunks (streaming response content)
// Note: streaming uses "artifact" (singular), not "artifacts" (plural)
if (chunkKind === "artifact-update" && result.artifact) {
const artifact = result.artifact;
if (artifact.parts && Array.isArray(artifact.parts)) {
for (const part of artifact.parts) {
if (part.kind === "text" && part.text) {
// Accumulate actual response content
accumulatedText += part.text;
onTextUpdate(accumulatedText, `a2a_agent/${agentId}`);
}
}
}
}
// Handle non-streaming Task response format with artifacts array (plural)
else if (result.artifacts && Array.isArray(result.artifacts)) {
for (const artifact of result.artifacts) {
if (artifact.parts && Array.isArray(artifact.parts)) {
for (const part of artifact.parts) {
if (part.kind === "text" && part.text) {
accumulatedText += part.text;
onTextUpdate(accumulatedText, `a2a_agent/${agentId}`);
}
}
}
}
}
// Handle status-update chunks (progress messages like "Processing request...")
// Only show these temporarily if we haven't received actual content yet
else if (chunkKind === "status-update" && result.status?.message?.parts) {
// Skip status messages once we have real content
if (!accumulatedText) {
for (const part of result.status.message.parts) {
if (part.kind === "text" && part.text) {
// Show as temporary status - will be replaced when real content arrives
onTextUpdate(part.text, `a2a_agent/${agentId}`);
}
}
}
}
// Direct parts array (fallback)
else if (result.parts && Array.isArray(result.parts)) {
for (const part of result.parts) {
if (part.kind === "text" && part.text) {
accumulatedText += part.text;
onTextUpdate(accumulatedText, `a2a_agent/${agentId}`);
}
}
}
}
// Propagate JSON-RPC errors from the stream instead of swallowing them as parse warnings
if (chunk.error) {
throw new Error(chunk.error.message);
}
}
}
const totalLatency = performance.now() - startTime;
if (onTotalLatency) {
onTotalLatency(totalLatency);
}
// Send final metadata after streaming completes
if (latestMetadata && onA2AMetadata) {
onA2AMetadata(latestMetadata);
}
} catch (error) {
if (signal?.aborted) {
console.log("A2A streaming request was cancelled");
return;
}
console.error("A2A stream message error:", error);
throw error;
}
};


@@ -0,0 +1,52 @@
// fetch_agents.tsx
import { getProxyBaseUrl } from "../../networking";
export interface Agent {
agent_id: string;
agent_name: string;
description?: string;
agent_card_params?: {
name?: string;
description?: string;
url?: string;
};
}
/**
* Fetches available A2A agents from /v1/agents endpoint.
*/
export const fetchAvailableAgents = async (accessToken: string): Promise<Agent[]> => {
try {
const proxyBaseUrl = getProxyBaseUrl();
const url = proxyBaseUrl ? `${proxyBaseUrl}/v1/agents` : `/v1/agents`;
const response = await fetch(url, {
method: "GET",
headers: {
Authorization: `Bearer ${accessToken}`,
"Content-Type": "application/json",
},
});
if (!response.ok) {
const errorData = await response.json();
throw new Error(errorData.detail || "Failed to fetch agents");
}
const agents: Agent[] = await response.json();
console.log("Fetched agents:", agents);
// Sort agents alphabetically by name
agents.sort((a, b) => {
const nameA = a.agent_name || a.agent_id;
const nameB = b.agent_name || b.agent_id;
return nameA.localeCompare(nameB);
});
return agents;
} catch (error) {
console.error("Error fetching agents:", error);
throw error;
}
};