Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 5 additions & 8 deletions sentry_sdk/integrations/anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
GEN_AI_ALLOWED_MESSAGE_ROLES,
set_data_normalized,
normalize_message_roles,
truncate_and_annotate_messages,
get_start_span_function,
)
from sentry_sdk.consts import OP, SPANDATA
Expand Down Expand Up @@ -394,14 +393,12 @@ def _set_common_input_data(
normalized_messages.append(message)

role_normalized_messages = normalize_message_roles(normalized_messages)
scope = sentry_sdk.get_current_scope()
messages_data = truncate_and_annotate_messages(
role_normalized_messages, span, scope
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_MESSAGES,
role_normalized_messages,
unpack=False,
)
if messages_data is not None:
set_data_normalized(
span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages_data, unpack=False
)

if max_tokens is not None and _is_given(max_tokens):
span.set_data(SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, max_tokens)
Expand Down
99 changes: 0 additions & 99 deletions tests/integrations/anthropic/test_anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -2478,105 +2478,6 @@ def mock_messages_create(*args, **kwargs):
assert stored_messages[0]["role"] == expected_role


def test_anthropic_message_truncation(sentry_init, capture_items):
    """Verify the Anthropic integration truncates oversized request messages.

    Sends a conversation where two entries blow past the size limit and
    asserts that only the most recent small message survives on the span,
    while the ``_meta`` annotation still records the original count of 5.
    """
    sentry_init(
        integrations=[AnthropicIntegration(include_prompts=True)],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    captured = capture_items("transaction", "span")

    client = Anthropic(api_key="z")
    client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE)

    oversized = (
        "This is a very long message that will exceed our size limits. " * 1000
    )
    conversation = [
        {"role": "user", "content": "small message 1"},
        {"role": "assistant", "content": oversized},
        {"role": "user", "content": oversized},
        {"role": "assistant", "content": "small message 4"},
        {"role": "user", "content": "small message 5"},
    ]

    with start_transaction():
        client.messages.create(max_tokens=1024, messages=conversation, model="model")

    # Collect only the gen_ai chat spans out of everything that was captured.
    chat_spans = []
    for item in captured:
        if item.type != "span":
            continue
        if item.payload["attributes"].get("sentry.op") == OP.GEN_AI_CHAT:
            chat_spans.append(item.payload)
    assert len(chat_spans) > 0

    attrs = chat_spans[0]["attributes"]
    assert attrs[SPANDATA.GEN_AI_SYSTEM] == "anthropic"
    assert attrs[SPANDATA.GEN_AI_OPERATION_NAME] == "chat"
    assert SPANDATA.GEN_AI_REQUEST_MESSAGES in attrs

    serialized = attrs[SPANDATA.GEN_AI_REQUEST_MESSAGES]
    assert isinstance(serialized, str)

    # Truncation keeps only the trailing message that fits the budget.
    decoded = json.loads(serialized)
    assert isinstance(decoded, list)
    assert len(decoded) == 1
    assert "small message 5" in str(decoded[0])

    # The _meta entry preserves the pre-truncation message count.
    tx = next(item.payload for item in captured if item.type == "transaction")
    assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5


@pytest.mark.asyncio
async def test_anthropic_message_truncation_async(sentry_init, capture_items):
    """Async twin of the truncation test, exercised via ``AsyncAnthropic``.

    Large messages must be truncated down to the last small entry on the
    span, with the original length of 5 recorded in the ``_meta`` payload.
    """
    sentry_init(
        integrations=[AnthropicIntegration(include_prompts=True)],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    captured = capture_items("transaction", "span")

    client = AsyncAnthropic(api_key="z")
    client.messages._post = mock.AsyncMock(return_value=EXAMPLE_MESSAGE)

    oversized = (
        "This is a very long message that will exceed our size limits. " * 1000
    )
    conversation = [
        {"role": "user", "content": "small message 1"},
        {"role": "assistant", "content": oversized},
        {"role": "user", "content": oversized},
        {"role": "assistant", "content": "small message 4"},
        {"role": "user", "content": "small message 5"},
    ]

    with start_transaction():
        await client.messages.create(max_tokens=1024, messages=conversation, model="model")

    # Collect only the gen_ai chat spans out of everything that was captured.
    chat_spans = []
    for item in captured:
        if item.type != "span":
            continue
        if item.payload["attributes"].get("sentry.op") == OP.GEN_AI_CHAT:
            chat_spans.append(item.payload)
    assert len(chat_spans) > 0

    attrs = chat_spans[0]["attributes"]
    assert attrs[SPANDATA.GEN_AI_SYSTEM] == "anthropic"
    assert attrs[SPANDATA.GEN_AI_OPERATION_NAME] == "chat"
    assert SPANDATA.GEN_AI_REQUEST_MESSAGES in attrs

    serialized = attrs[SPANDATA.GEN_AI_REQUEST_MESSAGES]
    assert isinstance(serialized, str)

    # Truncation keeps only the trailing message that fits the budget.
    decoded = json.loads(serialized)
    assert isinstance(decoded, list)
    assert len(decoded) == 1
    assert "small message 5" in str(decoded[0])

    # The _meta entry preserves the pre-truncation message count.
    tx = next(item.payload for item in captured if item.type == "transaction")
    assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5


@pytest.mark.parametrize(
"send_default_pii, include_prompts",
[
Expand Down
Loading