When I hit an LLM rate limit, spellcaster crashes with the following traceback:
🖇 AgentOps: Session Stats - Duration: 18.8s | Cost: $0.132369 | LLMs: 14 | Tools: 0 | Actions: 17 | Errors: 3
🖇 AgentOps: Session Replay: https://app.agentops.ai/drilldown?session_id=4a64d7e5-0317-46f4-9909-608611420e56
Traceback (most recent call last):
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/llms/anthropic/chat/handler.py", line 565, in completion
response = client.post(
api_base,
...<2 lines>...
timeout=timeout,
)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/llms/custom_httpx/http_handler.py", line 386, in post
raise e
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/llms/custom_httpx/http_handler.py", line 372, in post
response.raise_for_status()
~~~~~~~~~~~~~~~~~~~~~~~~~^^
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/httpx/_models.py", line 763, in raise_for_status
raise HTTPStatusError(message, request=request, response=self)
httpx.HTTPStatusError: Client error '429 Too Many Requests' for url 'https://api.anthropic.com/v1/messages'
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/429
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/main.py", line 1754, in completion
response = anthropic_chat_completions.completion(
model=model,
...<14 lines>...
client=client,
)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/llms/anthropic/chat/handler.py", line 580, in completion
raise AnthropicError(
...<3 lines>...
)
litellm.llms.anthropic.common_utils.AnthropicError: {"type":"error","error":{"type":"rate_limit_error","message":"Number of request tokens has exceeded your per-minute rate limit (https://docs.anthropic.com/en/api/rate-limits); see the response headers for current usage. Please reduce the prompt length or the maximum tokens requested, or try again later. You may also contact sales at https://www.anthropic.com/contact-sales to discuss your options for a rate limit increase."}}
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/utils.py", line 903, in wrapper
result = original_function(*args, **kwargs)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/main.py", line 2998, in completion
raise exception_type(
~~~~~~~~~~~~~~^
model=model,
^^^^^^^^^^^^
...<3 lines>...
extra_kwargs=kwargs,
^^^^^^^^^^^^^^^^^^^^
)
^
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2116, in exception_type
raise e
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 490, in exception_type
raise RateLimitError(
...<3 lines>...
)
litellm.exceptions.RateLimitError: litellm.RateLimitError: AnthropicException - {"type":"error","error":{"type":"rate_limit_error","message":"Number of request tokens has exceeded your per-minute rate limit (https://docs.anthropic.com/en/api/rate-limits); see the response headers for current usage. Please reduce the prompt length or the maximum tokens requested, or try again later. You may also contact sales at https://www.anthropic.com/contact-sales to discuss your options for a rate limit increase."}}
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/neilconway/.local/bin/spellcaster", line 8, in <module>
sys.exit(main())
~~~~^^
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/spellcaster/cli.py", line 84, in main
result = future.result()
File "/usr/local/Cellar/[email protected]/3.13.0_1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/concurrent/futures/_base.py", line 449, in result
return self.__get_result()
~~~~~~~~~~~~~~~~~^^
File "/usr/local/Cellar/[email protected]/3.13.0_1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/concurrent/futures/_base.py", line 401, in __get_result
raise self._exception
File "/usr/local/Cellar/[email protected]/3.13.0_1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/agentops/decorators.py", line 135, in sync_wrapper
returns = func(*args, **kwargs)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/spellcaster/grammar.py", line 73, in check_grammar
resp = litellm.completion(
model=model,
...<13 lines>...
]
)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/agentops/llms/litellm.py", line 203, in patched_function
result = self.original_create(*args, **kwargs)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/utils.py", line 993, in wrapper
return litellm.completion_with_retries(*args, **kwargs)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/main.py", line 3031, in completion_with_retries
return retryer(original_function, *args, **kwargs)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/tenacity/__init__.py", line 475, in __call__
do = self.iter(retry_state=retry_state)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/tenacity/__init__.py", line 376, in iter
result = action(retry_state)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/tenacity/__init__.py", line 418, in exc_check
raise retry_exc.reraise()
~~~~~~~~~~~~~~~~~^^
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/tenacity/__init__.py", line 185, in reraise
raise self.last_attempt.result()
~~~~~~~~~~~~~~~~~~~~~~~~^^
File "/usr/local/Cellar/[email protected]/3.13.0_1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/concurrent/futures/_base.py", line 449, in result
return self.__get_result()
~~~~~~~~~~~~~~~~~^^
File "/usr/local/Cellar/[email protected]/3.13.0_1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/concurrent/futures/_base.py", line 401, in __get_result
raise self._exception
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/tenacity/__init__.py", line 478, in __call__
result = fn(*args, **kwargs)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/utils.py", line 1013, in wrapper
raise e
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/utils.py", line 903, in wrapper
result = original_function(*args, **kwargs)
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/main.py", line 2998, in completion
raise exception_type(
~~~~~~~~~~~~~~^
model=model,
^^^^^^^^^^^^
...<3 lines>...
extra_kwargs=kwargs,
^^^^^^^^^^^^^^^^^^^^
)
^
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2116, in exception_type
raise e
File "/Users/neilconway/.local/pipx/venvs/spellcaster/lib/python3.13/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 490, in exception_type
raise RateLimitError(
...<3 lines>...
)
litellm.exceptions.RateLimitError: litellm.RateLimitError: AnthropicException - {"type":"error","error":{"type":"rate_limit_error","message":"Number of request tokens has exceeded your per-minute rate limit (https://docs.anthropic.com/en/api/rate-limits); see the response headers for current usage. Please reduce the prompt length or the maximum tokens requested, or try again later. You may also contact sales at https://www.anthropic.com/contact-sales to discuss your options for a rate limit increase."}}
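For context, a possible caller-side workaround (a hypothetical sketch, not spellcaster's actual code) is to catch litellm's RateLimitError and back off exponentially before retrying, optionally capping max_tokens as the error message suggests. The built-in completion_with_retries shown in the trace appears to exhaust its attempts before the per-minute token window resets. The model name, messages, and tuning values below are placeholders.

```python
# Hypothetical sketch only: retry litellm.completion with exponential backoff on rate limits.
import random
import time

import litellm
from litellm.exceptions import RateLimitError


def completion_with_backoff(model, messages, max_attempts=5, base_delay=2.0, max_tokens=1024):
    """Call litellm.completion, sleeping with exponential backoff + jitter on RateLimitError."""
    for attempt in range(max_attempts):
        try:
            # Capping max_tokens reduces each request's contribution to the per-minute token limit.
            return litellm.completion(model=model, messages=messages, max_tokens=max_tokens)
        except RateLimitError:
            if attempt == max_attempts - 1:
                raise
            # Wait 2s, 4s, 8s, ... plus jitter so parallel workers don't retry in lockstep.
            time.sleep(base_delay * (2 ** attempt) + random.uniform(0, 1))


# Placeholder usage:
# resp = completion_with_backoff(
#     "claude-3-5-sonnet-20240620",
#     [{"role": "user", "content": "Check this text for grammar errors."}],
# )
```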