WARNING: THIS SITE IS A MIRROR OF GITHUB.COM / IT CANNOT LOG IN OR REGISTER ACCOUNTS / THE CONTENTS ARE PROVIDED AS-IS / THIS SITE ASSUMES NO RESPONSIBILITY FOR ANY DISPLAYED CONTENT OR LINKS / IF YOU FIND SOMETHING THAT MAY NOT BE GOOD FOR EVERYONE, CONTACT THE ADMIN AT ilovescratch@foxmail.com
Skip to content

Commit 4fa016e

Browse files
committed
integrate connectors api within responses api impl in agentic provider
Signed-off-by: Jaideep Rao <[email protected]>
1 parent def56c4 commit 4fa016e

File tree

13 files changed

+89
-24
lines changed

13 files changed

+89
-24
lines changed

client-sdks/stainless/openapi.yml

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7246,8 +7246,13 @@ components:
72467246
type: string
72477247
title: Server Label
72487248
server_url:
7249-
type: string
7250-
title: Server Url
7249+
anyOf:
7250+
- type: string
7251+
- type: 'null'
7252+
connector_id:
7253+
anyOf:
7254+
- type: string
7255+
- type: 'null'
72517256
headers:
72527257
anyOf:
72537258
- additionalProperties: true
@@ -7280,7 +7285,6 @@ components:
72807285
type: object
72817286
required:
72827287
- server_label
7283-
- server_url
72847288
title: OpenAIResponseInputToolMCP
72857289
description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
72867290
CreateOpenaiResponseRequest:

docs/static/deprecated-llama-stack-spec.yaml

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3876,8 +3876,13 @@ components:
38763876
type: string
38773877
title: Server Label
38783878
server_url:
3879-
type: string
3880-
title: Server Url
3879+
anyOf:
3880+
- type: string
3881+
- type: 'null'
3882+
connector_id:
3883+
anyOf:
3884+
- type: string
3885+
- type: 'null'
38813886
headers:
38823887
anyOf:
38833888
- additionalProperties: true
@@ -3910,7 +3915,6 @@ components:
39103915
type: object
39113916
required:
39123917
- server_label
3913-
- server_url
39143918
title: OpenAIResponseInputToolMCP
39153919
description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
39163920
CreateOpenaiResponseRequest:

docs/static/experimental-llama-stack-spec.yaml

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3786,8 +3786,13 @@ components:
37863786
type: string
37873787
title: Server Label
37883788
server_url:
3789-
type: string
3790-
title: Server Url
3789+
anyOf:
3790+
- type: string
3791+
- type: 'null'
3792+
connector_id:
3793+
anyOf:
3794+
- type: string
3795+
- type: 'null'
37913796
headers:
37923797
anyOf:
37933798
- additionalProperties: true
@@ -3820,7 +3825,6 @@ components:
38203825
type: object
38213826
required:
38223827
- server_label
3823-
- server_url
38243828
title: OpenAIResponseInputToolMCP
38253829
description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
38263830
OpenAIResponseObject:

docs/static/llama-stack-spec.yaml

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6054,8 +6054,13 @@ components:
60546054
type: string
60556055
title: Server Label
60566056
server_url:
6057-
type: string
6058-
title: Server Url
6057+
anyOf:
6058+
- type: string
6059+
- type: 'null'
6060+
connector_id:
6061+
anyOf:
6062+
- type: string
6063+
- type: 'null'
60596064
headers:
60606065
anyOf:
60616066
- additionalProperties: true
@@ -6088,7 +6093,6 @@ components:
60886093
type: object
60896094
required:
60906095
- server_label
6091-
- server_url
60926096
title: OpenAIResponseInputToolMCP
60936097
description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
60946098
CreateOpenaiResponseRequest:

docs/static/stainless-llama-stack-spec.yaml

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7246,8 +7246,13 @@ components:
72467246
type: string
72477247
title: Server Label
72487248
server_url:
7249-
type: string
7250-
title: Server Url
7249+
anyOf:
7250+
- type: string
7251+
- type: 'null'
7252+
connector_id:
7253+
anyOf:
7254+
- type: string
7255+
- type: 'null'
72517256
headers:
72527257
anyOf:
72537258
- additionalProperties: true
@@ -7280,7 +7285,6 @@ components:
72807285
type: object
72817286
required:
72827287
- server_label
7283-
- server_url
72847288
title: OpenAIResponseInputToolMCP
72857289
description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
72867290
CreateOpenaiResponseRequest:

src/llama_stack/core/connectors/connectors.py

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,18 +14,16 @@
1414
from llama_stack.providers.utils.tools.mcp import get_mcp_server_info, list_mcp_tools
1515
from llama_stack_api import (
1616
Connector,
17+
ConnectorNotFoundError,
1718
Connectors,
19+
ConnectorToolNotFoundError,
1820
ConnectorType,
1921
ListConnectorsResponse,
2022
ListRegistriesResponse,
2123
ListToolsResponse,
2224
Registry,
23-
ToolDef,
24-
)
25-
from llama_stack_api.common.errors import (
26-
ConnectorNotFoundError,
27-
ConnectorToolNotFoundError,
2825
RegistryNotFoundError,
26+
ToolDef,
2927
)
3028

3129
logger = get_logger(name=__name__, category="connectors")
@@ -55,6 +53,17 @@ def __init__(self, config: ConnectorServiceConfig):
5553
self.connectors_map: dict[str, Connector] = {}
5654
self.registries_map: dict[str, Registry] = {}
5755

56+
def get_connector_url(self, connector_id: str) -> str | None:
57+
"""Get the URL of a connector by its ID.
58+
59+
:param connector_id: The ID of the connector to get the URL for.
60+
:returns: The URL of the connector.
61+
"""
62+
connector = self.connectors_map.get(connector_id)
63+
if connector is None:
64+
return None
65+
return connector.url
66+
5867
async def register_connector(
5968
self,
6069
url: str,

src/llama_stack/providers/inline/agents/meta_reference/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,9 @@ async def get_provider_impl(
2929
deps[Api.conversations],
3030
deps[Api.prompts],
3131
deps[Api.files],
32-
telemetry_enabled,
32+
deps[Api.connectors],
3333
policy,
34+
telemetry_enabled,
3435
)
3536
await impl.initialize()
3637
return impl

src/llama_stack/providers/inline/agents/meta_reference/agents.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
1212
from llama_stack_api import (
1313
Agents,
14+
Connectors,
1415
Conversations,
1516
Files,
1617
Inference,
@@ -49,6 +50,7 @@ def __init__(
4950
conversations_api: Conversations,
5051
prompts_api: Prompts,
5152
files_api: Files,
53+
connectors_api: Connectors,
5254
policy: list[AccessRule],
5355
telemetry_enabled: bool = False,
5456
):
@@ -62,6 +64,7 @@ def __init__(
6264
self.telemetry_enabled = telemetry_enabled
6365
self.prompts_api = prompts_api
6466
self.files_api = files_api
67+
self.connectors_api = connectors_api
6568
self.in_memory_store = InmemoryKVStoreImpl()
6669
self.openai_responses_impl: OpenAIResponsesImpl | None = None
6770
self.policy = policy
@@ -80,6 +83,7 @@ async def initialize(self) -> None:
8083
conversations_api=self.conversations_api,
8184
prompts_api=self.prompts_api,
8285
files_api=self.files_api,
86+
connectors_api=self.connectors_api,
8387
)
8488

8589
async def shutdown(self) -> None:

src/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
_OpenAIResponseObjectWithInputAndMessages,
1818
)
1919
from llama_stack_api import (
20+
Connectors,
2021
ConversationItem,
2122
Conversations,
2223
Files,
@@ -79,6 +80,7 @@ def __init__(
7980
conversations_api: Conversations,
8081
prompts_api: Prompts,
8182
files_api: Files,
83+
connectors_api: Connectors,
8284
):
8385
self.inference_api = inference_api
8486
self.tool_groups_api = tool_groups_api
@@ -94,6 +96,7 @@ def __init__(
9496
)
9597
self.prompts_api = prompts_api
9698
self.files_api = files_api
99+
self.connectors_api = connectors_api
97100

98101
async def _prepend_previous_response(
99102
self,
@@ -490,6 +493,7 @@ async def _create_streaming_response(
490493
guardrail_ids=guardrail_ids,
491494
instructions=instructions,
492495
max_tool_calls=max_tool_calls,
496+
connectors_api=self.connectors_api,
493497
)
494498

495499
# Stream the response

src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
from llama_stack_api import (
1515
AllowedToolsFilter,
1616
ApprovalFilter,
17+
Connectors,
1718
Inference,
1819
MCPListToolsTool,
1920
ModelNotFoundError,
@@ -118,8 +119,10 @@ def __init__(
118119
prompt: OpenAIResponsePrompt | None = None,
119120
parallel_tool_calls: bool | None = None,
120121
max_tool_calls: int | None = None,
122+
connectors_api: Connectors | None = None,
121123
):
122124
self.inference_api = inference_api
125+
self.connectors_api = connectors_api
123126
self.ctx = ctx
124127
self.response_id = response_id
125128
self.created_at = created_at
@@ -1082,6 +1085,15 @@ async def _process_mcp_tool(
10821085
"""Process an MCP tool configuration and emit appropriate streaming events."""
10831086
from llama_stack.providers.utils.tools.mcp import list_mcp_tools
10841087

1088+
# Resolve connector_id to server_url if provided
1089+
if mcp_tool.connector_id and not mcp_tool.server_url:
1090+
if self.connectors_api is None:
1091+
raise ValueError("Connectors API not available to resolve connector_id")
1092+
server_url = self.connectors_api.get_connector_url(mcp_tool.connector_id)
1093+
if not server_url:
1094+
raise ValueError(f"Connector {mcp_tool.connector_id} not found")
1095+
mcp_tool = mcp_tool.model_copy(update={"server_url": server_url})
1096+
10851097
# Emit mcp_list_tools.in_progress
10861098
self.sequence_number += 1
10871099
yield OpenAIResponseObjectStreamResponseMcpListToolsInProgress(

0 commit comments

Comments (0)