You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I searched the LangChain documentation with the integrated search.
I used the GitHub search to find a similar question and didn't find it.
I am sure that this is a bug in LangChain rather than my code.
The bug is not resolved by updating to the latest stable version of LangChain (or the specific integration package).
Example Code
# Pull a ready-made OpenAI-functions agent prompt from the LangChain Hub.
prompt = hub.pull("hwchase17/openai-functions-agent")
# Choose the LLM that will drive the agent
llm = ChatOpenAI(model="gpt-4-turbo-preview", openai_api_key=openai_api_key, openai_api_base=openai_api_base)
# NOTE(review): search_tools is defined elsewhere — presumably web-search tools; verify.
tools = search_tools
# Tool that can execute Python code
python_repl_tool = PythonREPLTool()
def create_agent(llm, tools, system_prompt):
    """Build an AgentExecutor for one worker node.

    Each worker node is identified by its system prompt and equipped
    with a set of tools it may call.
    """
    message_templates = [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
    worker_prompt = ChatPromptTemplate.from_messages(message_templates)
    worker_agent = create_openai_tools_agent(llm, tools, worker_prompt)
    return AgentExecutor(agent=worker_agent, tools=tools)
def agent_node(state, agent, name):
    """Run *agent* on the graph state; wrap its output as a message tagged *name*."""
    output_text = agent.invoke(state)["output"]
    named_message = HumanMessage(content=output_text, name=name)
    return {"messages": [named_message]}
# Define the agent supervisor
members = ["Researcher", "Coder"]
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the"
    " following workers: {members}. Given the following user request,"
    " respond with the worker to act next. Each worker will perform a"
    " task and respond with their results and status. When finished,"
    " respond with FINISH."
)
# Routing choices: any worker, or FINISH to end the run.
options = ["FINISH"] + members
# Use OpenAI function calling: a JSON-schema function whose single "next"
# argument is constrained to one of the routing options.
function_def = {
    "name": "route",
    "description": "Select the next role.",
    "parameters": {
        "title": "routeSchema",
        "type": "object",
        "properties": {
            "next": {
                "title": "Next",
                "anyOf": [
                    {"enum": options},
                ],
            }
        },
        "required": ["next"],
    },
}
# Supervisor prompt: replay the conversation, then ask which worker acts next.
supervisor_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        (
            "system",
            "Given the conversation above, who should act next?"
            " Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options), members=",".join(members))

# Force the model to call the "route" function, then parse the call's
# JSON arguments (e.g. {"next": "Coder"}) out of the response.
routing_llm = llm.bind_functions(functions=[function_def], function_call="route")
supervisor_chain = supervisor_prompt | routing_llm | JsonOutputFunctionsParser()
# Build the graph
class AgentState(TypedDict):
    # The annotation tells the graph that new messages will always
    # be added to the current states
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # The 'next' field indicates where to route to next
    next: str
# Worker that searches the web. (Fixed misspelling: researche_agent -> research_agent;
# the name was only referenced in the partial() call below.)
research_agent = create_agent(llm, search_tools, "You are a web researcher.")
# functools.partial pre-binds agent and name, leaving a node function that
# takes only the graph state — the shape StateGraph.add_node expects.
research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")

# Worker that writes and runs Python code.
code_agent = create_agent(
    llm,
    [python_repl_tool],
    "You may generate safe python code to analyze data and generate charts using matplotlib.",
)
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")
# Assemble the graph: the supervisor routes work to the workers until FINISH.
workflow = StateGraph(AgentState)
workflow.add_node("Researcher", research_node)
workflow.add_node("Coder", code_node)
workflow.add_node("supervisor", supervisor_chain)

# Every worker reports back to the supervisor once it finishes.
for member in members:
    workflow.add_edge(member, "supervisor")

# The supervisor's "next" field picks the following node; FINISH terminates.
routing_table = {member: member for member in members}
routing_table["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda state: state["next"], routing_table)
workflow.set_entry_point("supervisor")

graph = workflow.compile()

# Stream intermediate states, printing everything except the terminal marker.
initial_state = {
    "messages": [
        HumanMessage(content="Code hello world and print it to the terminal")
    ]
}
for step in graph.stream(initial_state):
    if "__end__" not in step:
        print(step)
        print("----")
Error Message and Stack Trace (if applicable)
Error Message and Stack Trace (if applicable)
raise OutputParserException(f"Could not parse function call: {exc}")
langchain_core.exceptions.OutputParserException: Could not parse function call: 'function_call'
Description
This is a problem I encountered while following LangGraph's multi-agent blog post. It occurs not only there, but also in LangGraph's Planning Agent examples module.
System Info
Latest version
The text was updated successfully, but these errors were encountered:
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
Is the problem with the parser itself, or are the examples in these notebooks using the wrong type of parser? I am also having issues running the example notebooks under langgraph/examples/multi_agent, and I consistently get this same error.
Checked other resources
Example Code
Error Message and Stack Trace (if applicable)
Error Message and Stack Trace (if applicable)
raise OutputParserException(f"Could not parse function call: {exc}")
langchain_core.exceptions.OutputParserException: Could not parse function call: 'function_call'
Description
This is a problem I encountered while following LangGraph's multi-agent blog post. It occurs not only there, but also in LangGraph's Planning Agent examples module.
System Info
Latest version
The text was updated successfully, but these errors were encountered: