Agents used as tools are generally not expected to continue the conversation with the user.
Their role is to perform a task and return results to the controller agent.
If you need subagents to be able to converse with the user, use handoffs instead.
```python
from langchain.agents import AgentState
from langchain.tools import tool, ToolRuntime


class CustomState(AgentState):
    example_state_key: str


@tool(
    "subagent1_name",
    description="subagent1_description"
)
def call_subagent1(query: str, runtime: ToolRuntime[None, CustomState]):
    # Apply any logic needed to transform the messages into a suitable input
    subagent_input = some_logic(query, runtime.state["messages"])
    result = subagent1.invoke({
        "messages": subagent_input,
        # You could also pass other state keys here as needed.
        # Make sure to define these in both the main and subagent's
        # state schemas.
        "example_state_key": runtime.state["example_state_key"]
    })
    return result["messages"][-1].content
```
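For context, here is a minimal sketch of how such a tool might be wired into a controller agent. The `create_agent` constructor, the model string, and the assumption that it accepts a `state_schema` parameter are illustrative rather than taken from the snippet above; `subagent1` stands in for whatever agent you have already built.

```python
# Hypothetical wiring; `create_agent`, the model string, and `state_schema`
# are assumptions, not requirements taken from the snippet above.
from langchain.agents import create_agent

# The subagent shares the custom state schema so `example_state_key`
# can be passed through to it.
subagent1 = create_agent(
    model="openai:gpt-4o",
    tools=[],  # the subagent's own tools go here
    state_schema=CustomState,
)

# The controller only reaches the subagent through `call_subagent1`,
# so the subagent performs its task and returns a result instead of
# talking to the user directly.
controller = create_agent(
    model="openai:gpt-4o",
    tools=[call_subagent1],
    state_schema=CustomState,
)

result = controller.invoke({
    "messages": [{"role": "user", "content": "Handle this task"}],
    "example_state_key": "some value",
})
print(result["messages"][-1].content)
```

If the tool also needs to write state keys other than `messages` back to the parent, it can return a `Command` instead of a plain string, as in the following variant.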
```python
from typing import Annotated

from langchain.messages import ToolMessage
from langchain.tools import tool, InjectedToolCallId
from langgraph.types import Command


@tool(
    "subagent1_name",
    description="subagent1_description"
)
def call_subagent1(
    query: str,
    # The injected `tool_call_id` lets the tool attach its result to the
    # tool call that triggered it
    tool_call_id: Annotated[str, InjectedToolCallId],
    # Returning a `Command` lets the tool update other state keys in
    # addition to appending the tool message
) -> Command:
    result = subagent1.invoke({
        "messages": [{"role": "user", "content": query}]
    })
    return Command(update={
        # This is the example state key we are passing back
        "example_state_key": result["example_state_key"],
        "messages": [
            ToolMessage(
                content=result["messages"][-1].content,
                # Include the tool call id so the message matches up with
                # the right tool call
                tool_call_id=tool_call_id
            )
        ]
    })
```
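As a rough usage sketch (again assuming `create_agent` and the `CustomState` schema from the earlier example, neither of which appears in the snippet above), the parent agent's state schema must declare `example_state_key` so that the `Command` update has somewhere to land:

```python
# Hypothetical usage; `create_agent` and `CustomState` are assumptions carried
# over from the earlier example, not part of the snippet above.
from langchain.agents import create_agent

controller = create_agent(
    model="openai:gpt-4o",
    tools=[call_subagent1],
    # The parent state schema must define `example_state_key`, otherwise the
    # tool's Command(update=...) has no matching key to write to.
    state_schema=CustomState,
)

result = controller.invoke({
    "messages": [{"role": "user", "content": "Run the subtask"}]
})

# The ToolMessage was appended to `messages`, and the state key written by
# the tool's Command is visible on the controller's final state.
print(result["example_state_key"])
print(result["messages"][-1].content)
```

Returning a `Command` rather than a plain value is what makes the extra state write possible; a plain return value can only become the content of the `ToolMessage`.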