def select_compaction_candidate_items(
    items: list[TResponseInputItem],
) -> list[TResponseInputItem]:
-    """Select items that are candidates for compaction.
+    """Select compaction candidate items.

-    Excludes:
-    - User messages (type=message, role=user)
-    - Compaction items (type=compaction)
+    Excludes user messages and compaction items.
    """
    return [
        item
@@ -52,7 +50,7 @@ def is_openai_model_name(model: str) -> bool:
    if not trimmed:
        return False

-    # Handle fine-tuned models: ft:gpt-4o-mini:org:proj:suffix
+    # Handle fine-tuned models: ft:gpt-4.1:org:proj:suffix
    without_ft_prefix = trimmed[3:] if trimmed.startswith("ft:") else trimmed
    root = without_ft_prefix.split(":", 1)[0]

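Not part of the diff, but a quick illustration of what the fine-tune handling above implies. The expected results are inferred from the `ft:` stripping logic here and the accepted prefixes the new `__init__` docstring lists (gpt-*, o*, ft:gpt-*); the exact accept/reject behavior isn't visible in this hunk, so treat these as assumptions:

```python
# Assumed behavior of is_openai_model_name, based on the ft: stripping above and the
# "gpt-*, o*, or ft:gpt-*" rule described later in the __init__ docstring.
assert is_openai_model_name("gpt-4.1")
assert is_openai_model_name("ft:gpt-4.1:org:proj:suffix")  # ft: prefix stripped, root is "gpt-4.1"
assert not is_openai_model_name("claude-sonnet-4")  # not an OpenAI model name
```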
@@ -68,8 +66,9 @@ def is_openai_model_name(model: str) -> bool:
class OpenAIResponsesCompactionSession(SessionABC, OpenAIResponsesCompactionAwareSession):
    """Session decorator that triggers responses.compact when stored history grows.

-    Wraps any Session (except OpenAIConversationsSession) and automatically calls
-    the OpenAI responses.compact API after each turn when the decision hook returns True.
+    Works with OpenAI Responses API models only. Wraps any Session (except
+    OpenAIConversationsSession) and automatically calls the OpenAI responses.compact
+    API after each turn when the decision hook returns True.
    """

    def __init__(
@@ -78,9 +77,22 @@ def __init__(
        underlying_session: Session,
        *,
        client: AsyncOpenAI | None = None,
-        model: str = "gpt-4o",
+        model: str = "gpt-4.1",
        should_trigger_compaction: Callable[[dict[str, Any]], bool] | None = None,
    ):
+        """Initialize the compaction session.
+
+        Args:
+            session_id: Identifier for this session.
+            underlying_session: Session store that holds the compacted history. Cannot be
+                an OpenAIConversationsSession.
+            client: OpenAI client for responses.compact API calls. Defaults to
+                get_default_openai_client() or a new AsyncOpenAI().
+            model: Model to use for responses.compact. Defaults to "gpt-4.1". Must be an
+                OpenAI model name (gpt-*, o*, or ft:gpt-*).
+            should_trigger_compaction: Custom decision hook. Defaults to triggering when
+                10+ compaction candidates exist.
+        """
        if isinstance(underlying_session, OpenAIConversationsSession):
            raise ValueError(
                "OpenAIResponsesCompactionSession cannot wrap OpenAIConversationsSession "
@@ -119,10 +131,8 @@ async def run_compaction(self, args: OpenAIResponsesCompactionArgs | None = None
                "OpenAIResponsesCompactionSession.run_compaction requires a response_id"
            )

-        # Get compaction candidates
        compaction_candidate_items, session_items = await self._ensure_compaction_candidates()

-        # Check if should compact
        force = args.get("force", False) if args else False
        should_compact = force or self.should_trigger_compaction(
            {
@@ -138,18 +148,14 @@ async def run_compaction(self, args: OpenAIResponsesCompactionArgs | None = None

        logger.debug(f"compact: start for {self._response_id} using {self.model}")

-        # Call OpenAI responses.compact API
        compacted = await self.client.responses.compact(
            previous_response_id=self._response_id,
            model=self.model,
        )

-        # Replace entire session with compacted output
        await self.underlying_session.clear_session()
        output_items: list[TResponseInputItem] = []
        if compacted.output:
-            # We assume output items from API are compatible with input items (dicts)
-            # or we cast them accordingly. The SDK types usually allow this.
            for item in compacted.output:
                if isinstance(item, dict):
                    output_items.append(item)
@@ -159,7 +165,6 @@ async def run_compaction(self, args: OpenAIResponsesCompactionArgs | None = None
        if output_items:
            await self.underlying_session.add_items(output_items)

-        # Update caches
        self._compaction_candidate_items = select_compaction_candidate_items(output_items)
        self._session_items = output_items

@@ -168,13 +173,11 @@ async def run_compaction(self, args: OpenAIResponsesCompactionArgs | None = None
            f"(output={len(output_items)}, candidates={len(self._compaction_candidate_items)})"
        )

-    # Delegate all Session methods to underlying_session
    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
        return await self.underlying_session.get_items(limit)

    async def add_items(self, items: list[TResponseInputItem]) -> None:
        await self.underlying_session.add_items(items)
-        # Update caches incrementally
        if self._compaction_candidate_items is not None:
            new_candidates = select_compaction_candidate_items(items)
            if new_candidates:
@@ -184,7 +187,6 @@ async def add_items(self, items: list[TResponseInputItem]) -> None:

    async def pop_item(self) -> TResponseInputItem | None:
        popped = await self.underlying_session.pop_item()
-        # Invalidate caches on pop (simple approach)
        if popped:
            self._compaction_candidate_items = None
            self._session_items = None
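Not part of the diff — a minimal usage sketch. Assumptions beyond what the hunks show: `SQLiteSession` from the Agents SDK as the underlying store, `session_id` as the first positional argument (following the Args order in the new docstring), and that `run_compaction` accepts a mapping with the `"force"` key it reads; how the session records a `response_id` between turns is outside the shown hunks.

```python
import asyncio

from agents import SQLiteSession  # assumed: the SDK's SQLite-backed Session implementation

# OpenAIResponsesCompactionSession is the class added in this diff.


async def main() -> None:
    session = OpenAIResponsesCompactionSession(
        "user-123",  # session_id (assumed to be the first positional argument)
        underlying_session=SQLiteSession("user-123"),
        model="gpt-4.1",
        # Custom decision hook: compact after every turn instead of the default
        # "10+ compaction candidates" heuristic.
        should_trigger_compaction=lambda _context: True,
    )

    # ... run agent turns that add items to the session ...

    # Force a compaction pass regardless of the hook. Per the diff, this raises if the
    # session has no response_id recorded yet.
    await session.run_compaction({"force": True})


asyncio.run(main())
```

The forced path mirrors the `force = args.get("force", False)` handling in `run_compaction`; the automatic path (the hook being consulted after each turn) needs no explicit call.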