Skip to content
48 changes: 48 additions & 0 deletions sentry_sdk/ai/_openai_completions_api.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
from collections.abc import Iterable

from typing import TYPE_CHECKING

if TYPE_CHECKING:
from sentry_sdk._types import TextPart

from openai.types.chat import (
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
)


def _is_system_instruction(message: "ChatCompletionMessageParam") -> bool:
return isinstance(message, dict) and message.get("role") == "system"


def _get_system_instructions(
    messages: "Iterable[ChatCompletionMessageParam]",
) -> "list[ChatCompletionMessageParam]":
    """Collect the system messages from *messages*, preserving their order.

    Non-iterable input is treated as "no messages" and yields an empty
    list instead of raising.
    """
    if not isinstance(messages, Iterable):
        return []

    system_messages = []
    for message in messages:
        if _is_system_instruction(message):
            system_messages.append(message)
    return system_messages


def _transform_system_instructions(
system_instructions: "list[ChatCompletionSystemMessageParam]",
) -> "list[TextPart]":
instruction_text_parts: "list[TextPart]" = []

for instruction in system_instructions:
if not isinstance(instruction, dict):
continue

content = instruction.get("content")

if isinstance(content, str):
instruction_text_parts.append({"type": "text", "content": content})

elif isinstance(content, list):
for part in content:
if isinstance(part, dict) and part.get("type") == "text":
text = part.get("text", "")
if text:
instruction_text_parts.append({"type": "text", "content": text})

return instruction_text_parts
22 changes: 22 additions & 0 deletions sentry_sdk/ai/_openai_responses_api.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from typing import Union

from openai.types.responses import ResponseInputParam, ResponseInputItemParam


def _is_system_instruction(message: "ResponseInputItemParam") -> bool:
if not isinstance(message, dict) or not message.get("role") == "system":
return False

return "type" not in message or message["type"] == "message"


def _get_system_instructions(
    messages: "Union[str, ResponseInputParam]",
) -> "list[ResponseInputItemParam]":
    """Filter *messages* down to its system-instruction items.

    A plain-string prompt (or any non-list input) carries no item-level
    system messages, so it yields an empty list.
    """
    if not isinstance(messages, list):
        return []

    return list(filter(_is_system_instruction, messages))
70 changes: 11 additions & 59 deletions sentry_sdk/integrations/openai.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import sys
from functools import wraps
from collections.abc import Iterable

import sentry_sdk
from sentry_sdk import consts
Expand All @@ -10,6 +9,15 @@
normalize_message_roles,
truncate_and_annotate_messages,
)
from sentry_sdk.ai._openai_completions_api import (
_is_system_instruction as _is_system_instruction_completions,
_get_system_instructions as _get_system_instructions_completions,
_transform_system_instructions,
)
from sentry_sdk.ai._openai_responses_api import (
_is_system_instruction as _is_system_instruction_responses,
_get_system_instructions as _get_system_instructions_responses,
)
from sentry_sdk.consts import SPANDATA
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.scope import should_send_default_pii
Expand All @@ -32,11 +40,12 @@
AsyncIterator,
Iterator,
Union,
Iterable,
)
from sentry_sdk.tracing import Span
from sentry_sdk._types import TextPart

from openai.types.responses import ResponseInputParam, ResponseInputItemParam
from openai.types.responses import ResponseInputParam
from openai import Omit

try:
Expand Down Expand Up @@ -199,63 +208,6 @@ def _calculate_token_usage(
)


def _is_system_instruction_completions(message: "ChatCompletionMessageParam") -> bool:
    """Return True if *message* is a dict-shaped Chat Completions system message."""
    return isinstance(message, dict) and message.get("role") == "system"


def _get_system_instructions_completions(
    messages: "Iterable[ChatCompletionMessageParam]",
) -> "list[ChatCompletionMessageParam]":
    """Collect Chat Completions system messages from *messages*, in order.

    Non-iterable input yields an empty list instead of raising.
    """
    if not isinstance(messages, Iterable):
        return []

    return [
        message for message in messages if _is_system_instruction_completions(message)
    ]


def _is_system_instruction_responses(message: "ResponseInputItemParam") -> bool:
    """Return True if *message* is a Responses-API system message.

    Requires role "system"; a "type" key, when present, must be "message".
    """
    if not isinstance(message, dict) or not message.get("role") == "system":
        return False

    return "type" not in message or message["type"] == "message"


def _get_system_instructions_responses(
    messages: "Union[str, ResponseInputParam]",
) -> "list[ResponseInputItemParam]":
    """Filter Responses-API *messages* down to system-instruction items.

    A plain-string prompt (or any non-list input) yields an empty list.
    """
    if not isinstance(messages, list):
        return []

    return [
        message for message in messages if _is_system_instruction_responses(message)
    ]


def _transform_system_instructions(
    system_instructions: "list[ChatCompletionSystemMessageParam]",
) -> "list[TextPart]":
    """Flatten system messages into Sentry text parts.

    String content is taken verbatim (even when empty); list content
    contributes only non-empty "text"-typed parts. Non-dict instructions
    and other content shapes are ignored.
    """
    instruction_text_parts: "list[TextPart]" = []

    for instruction in system_instructions:
        if not isinstance(instruction, dict):
            continue

        content = instruction.get("content")

        if isinstance(content, str):
            instruction_text_parts.append({"type": "text", "content": content})

        elif isinstance(content, list):
            # Multi-part content: keep only well-formed, non-empty text parts.
            for part in content:
                if isinstance(part, dict) and part.get("type") == "text":
                    text = part.get("text", "")
                    if text:
                        instruction_text_parts.append({"type": "text", "content": text})

    return instruction_text_parts


def _get_input_messages(
kwargs: "dict[str, Any]",
) -> "Optional[Union[Iterable[Any], list[str]]]":
Expand Down
23 changes: 6 additions & 17 deletions sentry_sdk/integrations/openai_agents/spans/invoke_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,17 +18,6 @@
import agents
from typing import Any, Optional

from sentry_sdk._types import TextPart


def _transform_system_instruction(system_instructions: "str") -> "list[TextPart]":
return [
{
"type": "text",
"content": system_instructions,
}
]


def invoke_agent_span(
context: "agents.RunContextWrapper", agent: "agents.Agent", kwargs: "dict[str, Any]"
Expand All @@ -46,16 +35,16 @@ def invoke_agent_span(
if should_send_default_pii():
messages = []
if agent.instructions:
system_instruction = (
message = (
agent.instructions
if isinstance(agent.instructions, str)
else safe_serialize(agent.instructions)
)
set_data_normalized(
span,
SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS,
_transform_system_instruction(system_instruction),
unpack=False,
messages.append(
{
"content": [{"text": message, "type": "text"}],
"role": "system",
}
)

original_input = kwargs.get("original_input")
Expand Down
48 changes: 37 additions & 11 deletions sentry_sdk/integrations/openai_agents/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,14 +11,20 @@
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.tracing_utils import set_span_errored
from sentry_sdk.utils import event_from_exception, safe_serialize
from sentry_sdk.ai._openai_completions_api import _transform_system_instructions
from sentry_sdk.ai._openai_responses_api import (
_is_system_instruction,
_get_system_instructions,
)

from typing import TYPE_CHECKING

if TYPE_CHECKING:
from typing import Any
from agents import Usage
from agents import Usage, TResponseInputItem

from sentry_sdk.tracing import Span
from sentry_sdk._types import TextPart

try:
import agents
Expand Down Expand Up @@ -115,19 +121,39 @@ def _set_input_data(
return
request_messages = []

system_instructions = get_response_kwargs.get("system_instructions")
if system_instructions:
request_messages.append(
messages: "str | list[TResponseInputItem]" = get_response_kwargs.get("input", [])

instructions_text_parts: "list[TextPart]" = []
explicit_instructions = get_response_kwargs.get("system_instructions")
if explicit_instructions is not None:
instructions_text_parts.append(
{
"role": GEN_AI_ALLOWED_MESSAGE_ROLES.SYSTEM,
"content": [{"type": "text", "text": system_instructions}],
"type": "text",
"content": explicit_instructions,
}
)

for message in get_response_kwargs.get("input", []):
system_instructions = _get_system_instructions(messages)

# Deliberate use of function accepting completions API type because
# of shared structure FOR THIS PURPOSE ONLY.
instructions_text_parts += _transform_system_instructions(system_instructions)

if len(instructions_text_parts) > 0:
set_data_normalized(
span,
SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS,
instructions_text_parts,
unpack=False,
)

non_system_messages = [
message for message in messages if not _is_system_instruction(message)
]
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Missing string input handling causes AttributeError crash

Medium Severity

The _set_input_data function declares messages can be str | list[TResponseInputItem] but doesn't handle strings. When messages is a string, the list comprehension iterates over individual characters, and line 168 then calls message.get("type") on each character, raising AttributeError: 'str' object has no attribute 'get'. The OpenAI integration's _set_responses_api_input_data handles this correctly with an if isinstance(messages, str): check that wraps the string and returns early.

Additional Locations (1)

Fix in Cursor Fix in Web

Copy link
Contributor Author

@alexander-alderman-webb alexander-alderman-webb Jan 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The code already threw an exception when passed a string before this change, so I consider fixing that out of scope here.
The type annotation is correct and we should address string handling in a follow-up, but in practice the patched function is only called with a list in the code paths reached from Agent.run().

for message in non_system_messages:
if "role" in message:
normalized_role = normalize_message_role(message.get("role"))
content = message.get("content")
normalized_role = normalize_message_role(message.get("role")) # type: ignore
content = message.get("content") # type: ignore
request_messages.append(
{
"role": normalized_role,
Expand All @@ -139,14 +165,14 @@ def _set_input_data(
}
)
else:
if message.get("type") == "function_call":
if message.get("type") == "function_call": # type: ignore
request_messages.append(
{
"role": GEN_AI_ALLOWED_MESSAGE_ROLES.ASSISTANT,
"content": [message],
}
)
elif message.get("type") == "function_call_output":
elif message.get("type") == "function_call_output": # type: ignore
request_messages.append(
{
"role": GEN_AI_ALLOWED_MESSAGE_ROLES.TOOL,
Expand Down
Loading
Loading