
Fixed core invoke loop logic and relevant tests (crewAIInc#1865)
* Fixed core invoke loop logic and relevant tests

* Fix failing tests

* Clean up final print statements

* Additional clean up for PR review
bhancockio authored Jan 9, 2025
1 parent b3504e7 commit 2131b94
Showing 11 changed files with 1,437 additions and 31,684 deletions.
5 changes: 0 additions & 5 deletions src/crewai/agents/agent_builder/base_agent_executor_mixin.py
@@ -19,15 +19,10 @@ class CrewAgentExecutorMixin:
     agent: Optional["BaseAgent"]
     task: Optional["Task"]
     iterations: int
-    have_forced_answer: bool
     max_iter: int
     _i18n: I18N
     _printer: Printer = Printer()
 
-    def _should_force_answer(self) -> bool:
-        """Determine if a forced answer is required based on iteration count."""
-        return (self.iterations >= self.max_iter) and not self.have_forced_answer
-
     def _create_short_term_memory(self, output) -> None:
         """Create and save a short-term memory item if conditions are met."""
         if (
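Reviewer note: the forced-answer bookkeeping deleted here is not lost — it reappears in crew_agent_executor.py below as `_has_reached_max_iterations`, minus the `have_forced_answer` flag. A minimal before/after sketch of the predicate (paraphrasing this diff, not additional changed code):

    # Before: two pieces of state, and the answer could only be forced once.
    def _should_force_answer(self) -> bool:
        return (self.iterations >= self.max_iter) and not self.have_forced_answer

    # After: a plain iteration check; _handle_max_iterations_exceeded makes one
    # final LLM call and the loop breaks, so no flag is needed.
    def _has_reached_max_iterations(self) -> bool:
        return self.iterations >= self.max_iter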
272 changes: 179 additions & 93 deletions src/crewai/agents/crew_agent_executor.py
@@ -1,7 +1,7 @@
 import json
 import re
 from dataclasses import dataclass
-from typing import Any, Dict, List, Union
+from typing import Any, Callable, Dict, List, Optional, Union
 
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
@@ -50,7 +50,7 @@ def __init__(
         original_tools: List[Any] = [],
         function_calling_llm: Any = None,
         respect_context_window: bool = False,
-        request_within_rpm_limit: Any = None,
+        request_within_rpm_limit: Optional[Callable[[], bool]] = None,
         callbacks: List[Any] = [],
     ):
         self._i18n: I18N = I18N()
@@ -77,7 +77,6 @@ def __init__(
         self.messages: List[Dict[str, str]] = []
         self.iterations = 0
         self.log_error_after = 3
-        self.have_forced_answer = False
         self.tool_name_to_tool_map: Dict[str, BaseTool] = {
             tool.name: tool for tool in self.tools
         }
@@ -108,106 +107,151 @@ def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
         self._create_long_term_memory(formatted_answer)
         return {"output": formatted_answer.output}
 
-    def _invoke_loop(self, formatted_answer=None):
-        try:
-            while not isinstance(formatted_answer, AgentFinish):
-                if not self.request_within_rpm_limit or self.request_within_rpm_limit():
-                    answer = self.llm.call(
-                        self.messages,
-                        callbacks=self.callbacks,
+    def _invoke_loop(self):
+        """
+        Main loop to invoke the agent's thought process until it reaches a conclusion
+        or the maximum number of iterations is reached.
+        """
+        formatted_answer = None
+        while not isinstance(formatted_answer, AgentFinish):
+            try:
+                if self._has_reached_max_iterations():
+                    formatted_answer = self._handle_max_iterations_exceeded(
+                        formatted_answer
                     )
+                    break
 
-                    if answer is None or answer == "":
-                        self._printer.print(
-                            content="Received None or empty response from LLM call.",
-                            color="red",
-                        )
-                        raise ValueError(
-                            "Invalid response from LLM call - None or empty."
-                        )
+                self._enforce_rpm_limit()
 
-                    if not self.use_stop_words:
-                        try:
-                            self._format_answer(answer)
-                        except OutputParserException as e:
-                            if (
-                                FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE
-                                in e.error
-                            ):
-                                answer = answer.split("Observation:")[0].strip()
-
-                    self.iterations += 1
-                    formatted_answer = self._format_answer(answer)
-
-                    if isinstance(formatted_answer, AgentAction):
-                        tool_result = self._execute_tool_and_check_finality(
-                            formatted_answer
-                        )
+                answer = self._get_llm_response()
+
+                formatted_answer = self._process_llm_response(answer)
 
-                        # Directly append the result to the messages if the
-                        # tool is "Add image to content" in case of multimodal
-                        # agents
-                        if formatted_answer.tool == self._i18n.tools("add_image")["name"]:
-                            self.messages.append(tool_result.result)
-                            continue
-
-                        else:
-                            if self.step_callback:
-                                self.step_callback(tool_result)
-
-                            formatted_answer.text += f"\nObservation: {tool_result.result}"
-
-                            formatted_answer.result = tool_result.result
-                            if tool_result.result_as_answer:
-                                return AgentFinish(
-                                    thought="",
-                                    output=tool_result.result,
-                                    text=formatted_answer.text,
-                                )
-                        self._show_logs(formatted_answer)
-
-                    if self.step_callback:
-                        self.step_callback(formatted_answer)
-
-                    if self._should_force_answer():
-                        if self.have_forced_answer:
-                            return AgentFinish(
-                                thought="",
-                                output=self._i18n.errors(
-                                    "force_final_answer_error"
-                                ).format(formatted_answer.text),
-                                text=formatted_answer.text,
-                            )
-                        else:
-                            formatted_answer.text += (
-                                f'\n{self._i18n.errors("force_final_answer")}'
-                            )
-                            self.have_forced_answer = True
-                    self.messages.append(
-                        self._format_msg(formatted_answer.text, role="assistant")
+                if isinstance(formatted_answer, AgentAction):
+                    tool_result = self._execute_tool_and_check_finality(
+                        formatted_answer
                     )
+                    formatted_answer = self._handle_agent_action(
+                        formatted_answer, tool_result
+                    )
 
-        except OutputParserException as e:
-            self.messages.append({"role": "user", "content": e.error})
-            if self.iterations > self.log_error_after:
-                self._printer.print(
-                    content=f"Error parsing LLM output, agent will retry: {e.error}",
-                    color="red",
-                )
-            return self._invoke_loop(formatted_answer)
-
-        except Exception as e:
-            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
-                str(e)
-            ):
-                self._handle_context_length()
-                return self._invoke_loop(formatted_answer)
-            else:
-                raise e
+                self._invoke_step_callback(formatted_answer)
+                self._append_message(formatted_answer.text, role="assistant")
+
+            except OutputParserException as e:
+                formatted_answer = self._handle_output_parser_exception(e)
+
+            except Exception as e:
+                if self._is_context_length_exceeded(e):
+                    self._handle_context_length()
+                    continue
+                else:
+                    raise e
 
         self._show_logs(formatted_answer)
         return formatted_answer
 
+    def _has_reached_max_iterations(self) -> bool:
+        """Check if the maximum number of iterations has been reached."""
+        return self.iterations >= self.max_iter
+
+    def _enforce_rpm_limit(self) -> None:
+        """Enforce the requests per minute (RPM) limit if applicable."""
+        if self.request_within_rpm_limit:
+            self.request_within_rpm_limit()
+
+    def _get_llm_response(self) -> str:
+        """Call the LLM and return the response, handling any invalid responses."""
+        answer = self.llm.call(
+            self.messages,
+            callbacks=self.callbacks,
+        )
+
+        if not answer:
+            self._printer.print(
+                content="Received None or empty response from LLM call.",
+                color="red",
+            )
+            raise ValueError("Invalid response from LLM call - None or empty.")
+
+        return answer
+
+    def _process_llm_response(self, answer: str) -> Union[AgentAction, AgentFinish]:
+        """Process the LLM response and format it into an AgentAction or AgentFinish."""
+        if not self.use_stop_words:
+            try:
+                # Preliminary parsing to check for errors.
+                self._format_answer(answer)
+            except OutputParserException as e:
+                if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
+                    answer = answer.split("Observation:")[0].strip()
+
+        self.iterations += 1
+        return self._format_answer(answer)
+
+    def _handle_agent_action(
+        self, formatted_answer: AgentAction, tool_result: ToolResult
+    ) -> Union[AgentAction, AgentFinish]:
+        """Handle the AgentAction, execute tools, and process the results."""
+        add_image_tool = self._i18n.tools("add_image")
+        if (
+            isinstance(add_image_tool, dict)
+            and formatted_answer.tool.casefold().strip()
+            == add_image_tool.get("name", "").casefold().strip()
+        ):
+            self.messages.append(tool_result.result)
+            return formatted_answer  # Continue the loop
+
+        if self.step_callback:
+            self.step_callback(tool_result)
+
+        formatted_answer.text += f"\nObservation: {tool_result.result}"
+        formatted_answer.result = tool_result.result
+
+        if tool_result.result_as_answer:
+            return AgentFinish(
+                thought="",
+                output=tool_result.result,
+                text=formatted_answer.text,
+            )
+
+        self._show_logs(formatted_answer)
+        return formatted_answer
+
+    def _invoke_step_callback(self, formatted_answer) -> None:
+        """Invoke the step callback if it exists."""
+        if self.step_callback:
+            self.step_callback(formatted_answer)
+
+    def _append_message(self, text: str, role: str = "assistant") -> None:
+        """Append a message to the message list with the given role."""
+        self.messages.append(self._format_msg(text, role=role))
+
+    def _handle_output_parser_exception(self, e: OutputParserException) -> AgentAction:
+        """Handle OutputParserException by updating messages and formatted_answer."""
+        self.messages.append({"role": "user", "content": e.error})
+
+        formatted_answer = AgentAction(
+            text=e.error,
+            tool="",
+            tool_input="",
+            thought="",
+        )
+
+        if self.iterations > self.log_error_after:
+            self._printer.print(
+                content=f"Error parsing LLM output, agent will retry: {e.error}",
+                color="red",
+            )
+
+        return formatted_answer
+
+    def _is_context_length_exceeded(self, exception: Exception) -> bool:
+        """Check if the exception is due to context length exceeding."""
+        return LLMContextLengthExceededException(
+            str(exception)
+        )._is_context_limit_error(str(exception))
+
     def _show_start_logs(self):
         if self.agent is None:
             raise ValueError("Agent cannot be None")
@@ -487,3 +531,45 @@ def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
             self.ask_for_human_input = False
 
         return formatted_answer
+
+    def _handle_max_iterations_exceeded(self, formatted_answer):
+        """
+        Handles the case when the maximum number of iterations is exceeded.
+        Performs one more LLM call to get the final answer.
+
+        Parameters:
+            formatted_answer: The last formatted answer from the agent.
+
+        Returns:
+            The final formatted answer after exceeding max iterations.
+        """
+        self._printer.print(
+            content="Maximum iterations reached. Requesting final answer.",
+            color="yellow",
+        )
+
+        if formatted_answer and hasattr(formatted_answer, "text"):
+            assistant_message = (
+                formatted_answer.text + f'\n{self._i18n.errors("force_final_answer")}'
+            )
+        else:
+            assistant_message = self._i18n.errors("force_final_answer")
+
+        self.messages.append(self._format_msg(assistant_message, role="assistant"))
+
+        # Perform one more LLM call to get the final answer
+        answer = self.llm.call(
+            self.messages,
+            callbacks=self.callbacks,
+        )
+
+        if answer is None or answer == "":
+            self._printer.print(
+                content="Received None or empty response from LLM call.",
+                color="red",
+            )
+            raise ValueError("Invalid response from LLM call - None or empty.")
+
+        formatted_answer = self._format_answer(answer)
+        # Return the formatted answer, regardless of its type
+        return formatted_answer
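Reviewer note: the substantive behavior change in this file is that error recovery no longer recurses. Previously, both recovery paths re-entered the loop with `return self._invoke_loop(formatted_answer)`, so every parser or context-length error added a stack frame. The refactored loop stays inside its `while` statement and recovers in place. A minimal contrast, paraphrasing the diff above:

    # Before: recursive re-entry on every recoverable error
    except OutputParserException as e:
        self.messages.append({"role": "user", "content": e.error})
        return self._invoke_loop(formatted_answer)

    # After: the error becomes an AgentAction and the while loop simply continues
    except OutputParserException as e:
        formatted_answer = self._handle_output_parser_exception(e)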
2 changes: 1 addition & 1 deletion src/crewai/llm.py
@@ -76,7 +76,7 @@ def flush(self):
     "mixtral-8x7b-32768": 32768,
     "llama-3.3-70b-versatile": 128000,
     "llama-3.3-70b-instruct": 128000,
-    #sambanova
+    # sambanova
     "Meta-Llama-3.3-70B-Instruct": 131072,
     "QwQ-32B-Preview": 8192,
     "Qwen2.5-72B-Instruct": 8192,
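Context for this hunk: the surrounding block is a model-name → context-window-size table (values are token counts). A hypothetical lookup helper — not code from this PR, names and the default are assumptions — showing how such a table is typically consumed with a conservative fallback:

    # Hypothetical sketch; not crewai's actual lookup code.
    from typing import Dict

    CONTEXT_WINDOWS: Dict[str, int] = {
        "mixtral-8x7b-32768": 32768,
        "llama-3.3-70b-versatile": 128000,
        # sambanova
        "Meta-Llama-3.3-70B-Instruct": 131072,
        "QwQ-32B-Preview": 8192,
    }

    def get_context_window(model: str, default: int = 8192) -> int:
        """Return the known window for a model, else a conservative default."""
        return CONTEXT_WINDOWS.get(model, default)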
2 changes: 1 addition & 1 deletion src/crewai/translations/en.json
@@ -27,7 +27,7 @@
     "conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals."
   },
   "errors": {
-    "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
+    "force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
     "force_final_answer": "Now it's time you MUST give your absolute best final answer. You'll ignore all previous instructions, stop using any tools, and just return your absolute BEST Final answer.",
     "agent_tool_unexisting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
     "task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
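Worth noting: the old template could never be filled — `{formatted_answer.text}` is a named replacement field, but the (now deleted) caller passed a positional argument, `.format(formatted_answer.text)`, which raises `KeyError: 'formatted_answer'`. The new wording keeps a single named field; a quick illustration of the working call shape (illustrative values, not from the PR):

    template = (
        "You can't keep going, here is the best final answer you generated:"
        "\n\n {formatted_answer}"
    )
    # Named fields must be supplied by keyword:
    print(template.format(formatted_answer="The report is complete ..."))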
1 change: 0 additions & 1 deletion src/crewai/utilities/llm_utils.py
@@ -67,7 +67,6 @@ def create_llm(
             api_key=api_key,
             base_url=base_url,
         )
-        print("LLM created with extracted parameters; " f"model='{model}'")
         return created_llm
     except Exception as e:
         print(f"Error instantiating LLM from unknown object type: {e}")
2 changes: 2 additions & 0 deletions src/crewai/utilities/rpm_controller.py
@@ -8,8 +8,10 @@
 
+"""Controls request rate limiting for API calls."""
 
 
 class RPMController(BaseModel):
+    """Manages requests per minute limiting."""
 
     max_rpm: Optional[int] = Field(default=None)
     logger: Logger = Field(default_factory=lambda: Logger(verbose=False))
     _current_rpm: int = PrivateAttr(default=0)
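Related contract: earlier in this commit, the executor's `request_within_rpm_limit` parameter was tightened from `Any` to `Optional[Callable[[], bool]]` — the interface an RPM controller must provide, namely a zero-argument callable that gates each request. A minimal stand-in satisfying that contract (an illustrative sketch, not crewai's `RPMController`):

    import time
    from typing import Callable, Optional

    def make_rpm_gate(max_rpm: Optional[int]) -> Callable[[], bool]:
        """Build a zero-arg gate: once max_rpm calls occur within a minute,
        sleep out the remainder of the window before allowing the next call."""
        window_start = time.monotonic()
        count = 0

        def gate() -> bool:
            nonlocal window_start, count
            if max_rpm is None:
                return True  # no limit configured
            now = time.monotonic()
            if now - window_start >= 60:
                window_start, count = now, 0  # start a new minute window
            if count >= max_rpm:
                time.sleep(60 - (now - window_start))  # wait out the window
                window_start, count = time.monotonic(), 0
            count += 1
            return True

        return gate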
