diff --git a/deepgram/__init__.py b/deepgram/__init__.py
index c78d66dc..ca515619 100644
--- a/deepgram/__init__.py
+++ b/deepgram/__init__.py
@@ -340,6 +340,7 @@
     InjectUserMessageOptions,
     FunctionCallResponse,
     AgentKeepAlive,
+    Flags,
     # sub level
     Listen,
     Speak,
@@ -355,6 +356,10 @@
     Output,
     Audio,
     Endpoint,
+    Context,
+    HistoryConversationMessage,
+    HistoryFunctionCallsMessage,
+    FunctionCallHistory,
 )

 # utilities
diff --git a/deepgram/client.py b/deepgram/client.py
index 2cdd126f..10709903 100644
--- a/deepgram/client.py
+++ b/deepgram/client.py
@@ -354,6 +354,7 @@
     InjectUserMessageOptions,
     FunctionCallResponse,
     AgentKeepAlive,
+    Flags,
     # sub level
     Listen,
     Speak,
@@ -369,6 +370,10 @@
     Output,
     Audio,
     Endpoint,
+    Context,
+    HistoryConversationMessage,
+    HistoryFunctionCallsMessage,
+    FunctionCallHistory,
 )
diff --git a/deepgram/clients/__init__.py b/deepgram/clients/__init__.py
index ae098314..45bb1dd1 100644
--- a/deepgram/clients/__init__.py
+++ b/deepgram/clients/__init__.py
@@ -363,6 +363,7 @@
     InjectUserMessageOptions,
     FunctionCallResponse,
     AgentKeepAlive,
+    Flags,
     # sub level
     Listen,
     Speak,
@@ -378,4 +379,8 @@
     Output,
     Audio,
     Endpoint,
+    Context,
+    HistoryConversationMessage,
+    HistoryFunctionCallsMessage,
+    FunctionCallHistory,
 )
diff --git a/deepgram/clients/agent/__init__.py b/deepgram/clients/agent/__init__.py
index 030c0ee7..4d6b8055 100644
--- a/deepgram/clients/agent/__init__.py
+++ b/deepgram/clients/agent/__init__.py
@@ -38,6 +38,7 @@
     InjectUserMessageOptions,
     FunctionCallResponse,
     AgentKeepAlive,
+    Flags,
     # sub level
     Listen,
     Speak,
@@ -53,4 +54,8 @@
     Output,
     Audio,
     Endpoint,
+    Context,
+    HistoryConversationMessage,
+    HistoryFunctionCallsMessage,
+    FunctionCallHistory,
 )
diff --git a/deepgram/clients/agent/client.py b/deepgram/clients/agent/client.py
index 52655ded..ef2bd453 100644
--- a/deepgram/clients/agent/client.py
+++ b/deepgram/clients/agent/client.py
@@ -37,6 +37,7 @@
     InjectUserMessageOptions as LatestInjectUserMessageOptions,
     FunctionCallResponse as LatestFunctionCallResponse,
     AgentKeepAlive as LatestAgentKeepAlive,
+    Flags as LatestFlags,
     # sub level
     Listen as LatestListen,
     Speak as LatestSpeak,
@@ -52,6 +53,10 @@
     Output as LatestOutput,
     Audio as LatestAudio,
     Endpoint as LatestEndpoint,
+    Context as LatestContext,
+    HistoryConversationMessage as LatestHistoryConversationMessage,
+    HistoryFunctionCallsMessage as LatestHistoryFunctionCallsMessage,
+    FunctionCallHistory as LatestFunctionCallHistory,
 )

@@ -85,6 +90,7 @@
 InjectUserMessageOptions = LatestInjectUserMessageOptions
 FunctionCallResponse = LatestFunctionCallResponse
 AgentKeepAlive = LatestAgentKeepAlive
+Flags = LatestFlags

 Listen = LatestListen
 Speak = LatestSpeak
@@ -100,3 +106,7 @@
 Output = LatestOutput
 Audio = LatestAudio
 Endpoint = LatestEndpoint
+Context = LatestContext
+HistoryConversationMessage = LatestHistoryConversationMessage
+HistoryFunctionCallsMessage = LatestHistoryFunctionCallsMessage
+FunctionCallHistory = LatestFunctionCallHistory
diff --git a/deepgram/clients/agent/enums.py b/deepgram/clients/agent/enums.py
index 81c242ee..910ce916 100644
--- a/deepgram/clients/agent/enums.py
+++ b/deepgram/clients/agent/enums.py
@@ -24,6 +24,7 @@ class AgentWebSocketEvents(StrEnum):
     FunctionCallRequest: str = "FunctionCallRequest"
     AgentStartedSpeaking: str = "AgentStartedSpeaking"
     AgentAudioDone: str = "AgentAudioDone"
+    History: str = "History"
     Error: str = "Error"
     Unhandled: str = "Unhandled"
diff --git a/deepgram/clients/agent/v1/__init__.py b/deepgram/clients/agent/v1/__init__.py
index ba39a269..5ec2ae91 100644
--- a/deepgram/clients/agent/v1/__init__.py
+++ b/deepgram/clients/agent/v1/__init__.py
@@ -42,6 +42,7 @@
     InjectUserMessageOptions,
     FunctionCallResponse,
     AgentKeepAlive,
+    Flags,
     # sub level
     Listen,
     Speak,
@@ -57,4 +58,8 @@
     Output,
     Audio,
     Endpoint,
+    Context,
+    HistoryConversationMessage,
+    HistoryFunctionCallsMessage,
+    FunctionCallHistory,
 )
diff --git a/deepgram/clients/agent/v1/websocket/__init__.py b/deepgram/clients/agent/v1/websocket/__init__.py
index fcd1b588..5e3e7cb7 100644
--- a/deepgram/clients/agent/v1/websocket/__init__.py
+++ b/deepgram/clients/agent/v1/websocket/__init__.py
@@ -33,6 +33,7 @@
     InjectUserMessageOptions,
     FunctionCallResponse,
     AgentKeepAlive,
+    Flags,
     # sub level
     Listen,
     Speak,
@@ -48,4 +49,8 @@
     Output,
     Audio,
     Endpoint,
+    Context,
+    HistoryConversationMessage,
+    HistoryFunctionCallsMessage,
+    FunctionCallHistory,
 )
diff --git a/deepgram/clients/agent/v1/websocket/async_client.py b/deepgram/clients/agent/v1/websocket/async_client.py
index c87ab2bb..26da7765 100644
--- a/deepgram/clients/agent/v1/websocket/async_client.py
+++ b/deepgram/clients/agent/v1/websocket/async_client.py
@@ -29,6 +29,10 @@
     ErrorResponse,
     UnhandledResponse,
 )
+from .options import (
+    HistoryConversationMessage,
+    HistoryFunctionCallsMessage,
+)
 from .options import (
     SettingsOptions,
     UpdatePromptOptions,
@@ -478,6 +482,28 @@ async def _process_text(self, message: str) -> None:
                         injection_refused=injection_refused_result,
                         **dict(cast(Dict[Any, Any], self._kwargs)),
                     )
+                case AgentWebSocketEvents.History:
+                    # Determine if this is conversation history or function call history
+                    history_result: Union[HistoryConversationMessage, HistoryFunctionCallsMessage, Dict[Any, Any]]
+
+                    if "role" in data and "content" in data:
+                        # This is conversation history
+                        history_result = HistoryConversationMessage.from_json(message)
+                        self._logger.verbose("HistoryConversationMessage: %s", history_result)
+                    elif "function_calls" in data:
+                        # This is function call history
+                        history_result = HistoryFunctionCallsMessage.from_json(message)
+                        self._logger.verbose("HistoryFunctionCallsMessage: %s", history_result)
+                    else:
+                        # Fallback for unknown History format
+                        history_result = data
+                        self._logger.verbose("History (unknown format): %s", history_result)
+
+                    await self._emit(
+                        AgentWebSocketEvents(AgentWebSocketEvents.History),
+                        history=history_result,
+                        **dict(cast(Dict[Any, Any], self._kwargs)),
+                    )
                 case AgentWebSocketEvents.Close:
                     close_result: CloseResponse = CloseResponse.from_json(message)
                     self._logger.verbose("CloseResponse: %s", close_result)
diff --git a/deepgram/clients/agent/v1/websocket/client.py b/deepgram/clients/agent/v1/websocket/client.py
index fa00add5..3de6cf60 100644
--- a/deepgram/clients/agent/v1/websocket/client.py
+++ b/deepgram/clients/agent/v1/websocket/client.py
@@ -29,6 +29,10 @@
     ErrorResponse,
     UnhandledResponse,
 )
+from .options import (
+    HistoryConversationMessage,
+    HistoryFunctionCallsMessage,
+)
 from .options import (
     SettingsOptions,
     UpdatePromptOptions,
@@ -473,6 +477,28 @@ def _process_text(self, message: str) -> None:
                         injection_refused=injection_refused_result,
                         **dict(cast(Dict[Any, Any], self._kwargs)),
                     )
+                case AgentWebSocketEvents.History:
+                    # Determine if this is conversation history or function call history
+                    history_result: Union[HistoryConversationMessage, HistoryFunctionCallsMessage, Dict[Any, Any]]
+
+                    if "role" in data and "content" in data:
+                        # This is conversation history
+                        history_result = HistoryConversationMessage.from_json(message)
+                        self._logger.verbose("HistoryConversationMessage: %s", history_result)
+                    elif "function_calls" in data:
+                        # This is function call history
+                        history_result = HistoryFunctionCallsMessage.from_json(message)
+                        self._logger.verbose("HistoryFunctionCallsMessage: %s", history_result)
+                    else:
+                        # Fallback for unknown History format
+                        history_result = data
+                        self._logger.verbose("History (unknown format): %s", history_result)
+
+                    self._emit(
+                        AgentWebSocketEvents(AgentWebSocketEvents.History),
+                        history=history_result,
+                        **dict(cast(Dict[Any, Any], self._kwargs)),
+                    )
                 case AgentWebSocketEvents.Close:
                     close_result: CloseResponse = CloseResponse.from_json(message)
                     self._logger.verbose("CloseResponse: %s", close_result)
diff --git a/deepgram/clients/agent/v1/websocket/options.py b/deepgram/clients/agent/v1/websocket/options.py
index f5c0e3f8..b08867bf 100644
--- a/deepgram/clients/agent/v1/websocket/options.py
+++ b/deepgram/clients/agent/v1/websocket/options.py
@@ -244,6 +244,94 @@ def __getitem__(self, key):
         return _dict[key]


+# History and Context classes for Function Call Context / History feature
+
+@dataclass
+class Flags(BaseResponse):
+    """
+    This class defines configuration flags for the agent settings.
+    """
+
+    history: bool = field(default=True)
+
+
+@dataclass
+class HistoryConversationMessage(BaseResponse):
+    """
+    This class defines a conversation text message as part of the conversation history.
+    """
+
+    type: str = field(default="History")
+    role: str = field(default="")  # "user" or "assistant"
+    content: str = field(default="")
+
+
+@dataclass
+class FunctionCallHistory(BaseResponse):
+    """
+    This class defines a single function call in the history.
+    """
+
+    id: str = field(default="")
+    name: str = field(default="")
+    client_side: bool = field(default=False)
+    arguments: str = field(default="")
+    response: str = field(default="")
+
+
+@dataclass
+class HistoryFunctionCallsMessage(BaseResponse):
+    """
+    This class defines function call messages as part of the conversation history.
+    """
+
+    type: str = field(default="History")
+    function_calls: List[FunctionCallHistory] = field(default_factory=list)
+
+    def __post_init__(self):
+        """Convert dict function_calls to FunctionCallHistory objects; normalize None."""
+        if not self.function_calls:
+            self.function_calls = []
+            return
+
+        # Convert dicts and filter out any None values for safety
+        converted_calls = [
+            FunctionCallHistory.from_dict(call) if isinstance(call, dict) else call
+            for call in self.function_calls
+            if call is not None
+        ]
+        self.function_calls = [call for call in converted_calls if call is not None]
+
+
+@dataclass
+class Context(BaseResponse):
+    """
+    This class defines the conversation context including the history of messages and function calls.
+    """
+
+    messages: List[Union[HistoryConversationMessage, HistoryFunctionCallsMessage]] = field(default_factory=list)
+
+    def __post_init__(self):
+        """Convert dict messages to appropriate message objects; normalize None."""
+        if not self.messages:
+            self.messages = []
+            return
+
+        # Convert dicts to appropriate message objects and filter out None values
+        converted_messages = []
+        for message in self.messages:
+            if message is None:
+                continue
+            if isinstance(message, dict):
+                if "function_calls" in message:
+                    converted_messages.append(HistoryFunctionCallsMessage.from_dict(message))
+                else:
+                    converted_messages.append(HistoryConversationMessage.from_dict(message))
+            else:
+                converted_messages.append(message)
+        self.messages = converted_messages
+
+
 @dataclass
 class Agent(BaseResponse):
     """
@@ -271,10 +359,12 @@ class Agent(BaseResponse):
     greeting: Optional[str] = field(
         default=None, metadata=dataclass_config(exclude=lambda f: f is None)
     )
-
+    context: Optional[Context] = field(
+        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
+    )
     def __post_init__(self):
-        """Handle conversion of dict/list data to proper Speak objects"""
+        """Handle conversion of dict/list data to proper Speak objects and Context objects"""

         # Handle speak conversion (OneOf pattern)
         if isinstance(self.speak, list):
             self.speak = [
@@ -284,6 +374,10 @@ def __post_init__(self):
         elif isinstance(self.speak, dict):
             self.speak = Speak.from_dict(self.speak)

+        # Handle context conversion
+        if isinstance(self.context, dict):
+            self.context = Context.from_dict(self.context)
+
     def __getitem__(self, key):
         _dict = self.to_dict()
         if "listen" in _dict and isinstance(_dict["listen"], dict):
@@ -295,6 +389,8 @@ def __getitem__(self, key):
             _dict["speak"] = [Speak.from_dict(item) for item in _dict["speak"]]
         elif isinstance(_dict["speak"], dict):
             _dict["speak"] = Speak.from_dict(_dict["speak"])
+        if "context" in _dict and isinstance(_dict["context"], dict):
+            _dict["context"] = Context.from_dict(_dict["context"])
         return _dict[key]

@@ -356,6 +452,9 @@ class SettingsOptions(BaseResponse):
     mip_opt_out: Optional[bool] = field(
         default=False, metadata=dataclass_config(exclude=lambda f: f is None)
     )
+    flags: Optional[Flags] = field(
+        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
+    )

     def __getitem__(self, key):
         _dict = self.to_dict()
@@ -363,6 +462,9 @@ def __getitem__(self, key):
             _dict["audio"] = Audio.from_dict(_dict["audio"])
         if "agent" in _dict and isinstance(_dict["agent"], dict):
             _dict["agent"] = Agent.from_dict(_dict["agent"])
+        if "flags" in _dict and isinstance(_dict["flags"], dict):
+            _dict["flags"] = Flags.from_dict(_dict["flags"])
+
         return _dict[key]

     def check(self):
         """
diff --git a/examples/agent/context-history/main.py b/examples/agent/context-history/main.py
new file mode 100644
index 00000000..5cf5ecfd
--- /dev/null
+++ b/examples/agent/context-history/main.py
@@ -0,0 +1,353 @@
+# Copyright 2024 Deepgram SDK contributors. All Rights Reserved.
+# Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+# SPDX-License-Identifier: MIT + +import json +import random +from deepgram.utils import verboselogs + +from deepgram import ( + DeepgramClient, + DeepgramClientOptions, + AgentWebSocketEvents, + SettingsOptions, + FunctionCallRequest, + FunctionCallResponse, + HistoryConversationMessage, + HistoryFunctionCallsMessage, + Context, + Flags, +) + +# Mock weather data for demo purposes +WEATHER_DATA = { + "new york": {"temperature": 72, "condition": "sunny", "humidity": 45}, + "london": {"temperature": 18, "condition": "cloudy", "humidity": 80}, + "tokyo": {"temperature": 25, "condition": "rainy", "humidity": 90}, + "paris": {"temperature": 20, "condition": "partly cloudy", "humidity": 60}, + "sydney": {"temperature": 28, "condition": "sunny", "humidity": 50}, +} + +def get_weather(location, unit="fahrenheit"): + """ + Mock weather function that returns simulated weather data. + In a real application, this would call an actual weather API. + """ + location_key = location.lower() + + if location_key not in WEATHER_DATA: + # Return random weather for unknown locations + temp_c = random.randint(10, 35) + conditions = ["sunny", "cloudy", "rainy", "partly cloudy", "windy"] + weather = { + "temperature": temp_c, + "condition": random.choice(conditions), + "humidity": random.randint(30, 90) + } + else: + weather = WEATHER_DATA[location_key].copy() + + # Convert temperature if needed + if unit.lower() == "fahrenheit": + if location_key not in WEATHER_DATA: # Convert from Celsius + weather["temperature"] = int(weather["temperature"] * 9/5 + 32) + # WEATHER_DATA is already in Fahrenheit for known locations + else: # Celsius + if location_key in WEATHER_DATA: # Convert from Fahrenheit + weather["temperature"] = int((weather["temperature"] - 32) * 5/9) + + return { + "location": location, + "temperature": weather["temperature"], + "unit": unit, + "condition": weather["condition"], + "humidity": weather["humidity"], + "description": f"The weather in {location} is {weather['condition']} with a temperature of {weather['temperature']}°{'F' if unit.lower() == 'fahrenheit' else 'C'} and {weather['humidity']}% humidity." + } + + +def main(): + try: + print("🌤️ Starting Agent History & Weather Function Calling Demo") + print("=" * 60) + + # Initialize Deepgram client with enhanced options + config: DeepgramClientOptions = DeepgramClientOptions( + options={ + "keepalive": "true", + "microphone_record": "true", + "speaker_playback": "true", + }, + verbose=verboselogs.INFO, + ) + + deepgram: DeepgramClient = DeepgramClient("", config) + dg_connection = deepgram.agent.websocket.v("1") + + # Create initial conversation history for context + conversation_history = [ + HistoryConversationMessage( + role="user", + content="Hello, I'm looking for weather information." + ), + HistoryConversationMessage( + role="assistant", + content="Hello! I'm your weather assistant with access to current weather data. I can help you get weather information for any location worldwide. What city would you like to know about?" 
+ ) + ] + + def on_open(self, open, **kwargs): + print(f"🔌 Connection opened: {open}") + + def on_binary_data(self, data, **kwargs): + # Handle audio data if needed + pass + + def on_welcome(self, welcome, **kwargs): + print(f"👋 Welcome received: {welcome}") + + def on_settings_applied(self, settings_applied, **kwargs): + print(f"⚙️ Settings applied: {settings_applied}") + + def on_conversation_text(self, conversation_text, **kwargs): + print(f"💬 Conversation: {conversation_text}") + + def on_user_started_speaking(self, user_started_speaking, **kwargs): + print(f"🎤 User started speaking: {user_started_speaking}") + + def on_agent_thinking(self, agent_thinking, **kwargs): + print(f"🤔 Agent thinking: {agent_thinking}") + + def on_history(self, history, **kwargs): + """ + Handle History events for both conversation context and function call history. + This is a first-class event, NOT an unhandled event. + """ + print(f"📚 History event received: {type(history)}") + + # Check if this is conversation history or function call history + if hasattr(history, 'role') and hasattr(history, 'content'): + # This is conversation history + print(f"📚 Conversation History payload:") + print(f" Type: {getattr(history, 'type', 'History')}") + print(f" Role: {history.role}") + print(f" Content: {history.content}") + print() + elif hasattr(history, 'function_calls'): + # This is function call history + print(f"📚 Function Call History payload:") + print(f" Type: {getattr(history, 'type', 'History')}") + print(f" Function calls: {len(history.function_calls) if history.function_calls else 0}") + if history.function_calls: + for i, call in enumerate(history.function_calls): + print(f" Call {i+1}:") + print(f" ID: {call.id}") + print(f" Name: {call.name}") + print(f" Client Side: {call.client_side}") + print(f" Arguments: {call.arguments}") + print(f" Response: {call.response[:100]}..." if len(call.response) > 100 else f" Response: {call.response}") + print() + else: + print(f"📚 Unknown History payload format: {history}") + + def on_function_call_request(self, function_call_request: FunctionCallRequest, **kwargs): + """ + Handle function call requests from the agent. + This will generate new History events automatically. 
+ """ + # FunctionCallRequest contains a list of functions - usually just one + if not function_call_request.functions or len(function_call_request.functions) == 0: + print("❌ No functions in FunctionCallRequest") + return + + # Get the first (and usually only) function call + function_call = function_call_request.functions[0] + + print(f"🔧 Function Call Request: {function_call.name}") + print(f" ID: {function_call.id}") + print(f" Arguments: {function_call.arguments}") + print(f" Client Side: {function_call.client_side}") + + try: + # Parse the function arguments + args = json.loads(function_call.arguments) + + if function_call.name == "get_weather": + # Call our weather function + location = args.get("location", "") + unit = args.get("unit", "fahrenheit") + + weather_data = get_weather(location, unit) + + # Send the response back + response = FunctionCallResponse( + id=function_call.id, + name=function_call.name, + content=json.dumps(weather_data) + ) + + print(f"📞 Sending weather response for {location}: {weather_data['description']}") + dg_connection.send(response.to_json()) + else: + # Unknown function + response = FunctionCallResponse( + id=function_call.id, + name=function_call.name, + content=json.dumps({"error": f"Unknown function: {function_call.name}"}) + ) + dg_connection.send(response.to_json()) + + except json.JSONDecodeError as e: + print(f"❌ Error parsing function arguments: {e}") + response = FunctionCallResponse( + id=function_call.id, + name=function_call.name, + content=json.dumps({"error": "Invalid JSON in function arguments"}) + ) + dg_connection.send(response.to_json()) + except Exception as e: + print(f"❌ Error in function call: {e}") + response = FunctionCallResponse( + id=function_call.id, + name=function_call.name, + content=json.dumps({"error": str(e)}) + ) + dg_connection.send(response.to_json()) + + def on_agent_started_speaking(self, agent_started_speaking, **kwargs): + print(f"🗣️ Agent started speaking: {agent_started_speaking}") + + def on_agent_audio_done(self, agent_audio_done, **kwargs): + print(f"🔇 Agent audio done: {agent_audio_done}") + + def on_close(self, close, **kwargs): + print(f"🔌 Connection closed: {close}") + + def on_error(self, error, **kwargs): + print(f"❌ Error: {error}") + + def on_unhandled(self, unhandled, **kwargs): + print(f"❓ Unhandled event: {unhandled}") + + # Register event handlers + dg_connection.on(AgentWebSocketEvents.Open, on_open) + dg_connection.on(AgentWebSocketEvents.AudioData, on_binary_data) + dg_connection.on(AgentWebSocketEvents.Welcome, on_welcome) + dg_connection.on(AgentWebSocketEvents.SettingsApplied, on_settings_applied) + dg_connection.on(AgentWebSocketEvents.ConversationText, on_conversation_text) + dg_connection.on(AgentWebSocketEvents.UserStartedSpeaking, on_user_started_speaking) + dg_connection.on(AgentWebSocketEvents.AgentThinking, on_agent_thinking) + dg_connection.on(AgentWebSocketEvents.History, on_history) # First-class History event handler + dg_connection.on(AgentWebSocketEvents.FunctionCallRequest, on_function_call_request) + dg_connection.on(AgentWebSocketEvents.AgentStartedSpeaking, on_agent_started_speaking) + dg_connection.on(AgentWebSocketEvents.AgentAudioDone, on_agent_audio_done) + dg_connection.on(AgentWebSocketEvents.Close, on_close) + dg_connection.on(AgentWebSocketEvents.Error, on_error) + dg_connection.on(AgentWebSocketEvents.Unhandled, on_unhandled) + + # Configure agent settings with history and function calling + options: SettingsOptions = SettingsOptions() + + # Enable history 
feature for conversation context + options.flags = Flags(history=True) + + # Agent tags for analytics + options.tags = ["history-example", "function-calling", "weather-demo"] + + # Audio configuration + options.audio.input.encoding = "linear16" + options.audio.input.sample_rate = 16000 + + # Agent language and context + options.agent.language = "en" + + # Provide conversation context/history + options.agent.context = Context(messages=conversation_history) + + # Configure listen provider + options.agent.listen.provider.type = "deepgram" + options.agent.listen.provider.model = "nova-2" + + # Configure speak provider + options.agent.speak.provider.type = "deepgram" + options.agent.speak.provider.model = "aura-asteria-en" + + # Configure the thinking/LLM provider with function calling + options.agent.think.provider.type = "open_ai" + options.agent.think.provider.model = "gpt-4o-mini" + + # Define available functions using OpenAPI-like schema + options.agent.think.functions = [ + { + "name": "get_weather", + "description": "Get the current weather conditions for a specific location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city or location to get weather for (e.g., 'New York', 'London', 'Tokyo')" + }, + "unit": { + "type": "string", + "enum": ["fahrenheit", "celsius"], + "description": "Temperature unit preference", + "default": "fahrenheit" + } + }, + "required": ["location"] + } + } + ] + + options.agent.think.prompt = ( + "You are a helpful weather assistant with access to current weather data. " + "Use the get_weather function to provide accurate, up-to-date weather information when users ask about weather conditions. " + "Always be conversational and provide context about the weather conditions." + ) + + options.greeting = ( + "Hello! I'm your weather assistant with access to current weather data. " + "I remember our previous conversations and can help you with weather information for any location. " + "What would you like to know?" + ) + + # Start the connection + print("🚀 Starting connection with history and function calling enabled...") + print("Configuration:") + print(f" - History enabled: {options.flags.history}") + print(f" - Provider: {options.agent.think.provider.type} ({options.agent.think.provider.model})") + print(f" - Functions: {len(options.agent.think.functions)} available") + print(f" - Initial context: {len(conversation_history)} messages") + print() + + if dg_connection.start(options) is False: + print("❌ Failed to start connection") + return + + print("✅ Connection started successfully!") + print() + print("🎯 Expected Features:") + print(" 📚 History events for conversation context") + print(" 📚 History events for function call context") + print(" 🔧 Function calling with weather data") + print(" 📞 Live function calls generate new history events") + print() + print("💡 Try saying: 'What's the weather in New York?' or 'How's the weather in Tokyo in Celsius?'") + print() + print("Press Enter to stop...") + input() + + # Close the connection + dg_connection.finish() + print("🔌 Connection closed. 
Demo finished!") + + except Exception as e: + print(f"❌ Unexpected error: {e}") + print(f"Error type: {type(e)}") + import traceback + traceback.print_exc() + + +if __name__ == "__main__": + main() diff --git a/tests/response_data/agent/websocket/agent_tags-e55ef69c-events.json b/tests/response_data/agent/websocket/agent_tags-e55ef69c-events.json index 43ba0066..0513bf8e 100644 --- a/tests/response_data/agent/websocket/agent_tags-e55ef69c-events.json +++ b/tests/response_data/agent/websocket/agent_tags-e55ef69c-events.json @@ -1,29 +1,29 @@ [ { "type": "Welcome", - "timestamp": 1754089254.059805, + "timestamp": 1754948270.721306, "data": { "type": "Welcome", - "request_id": "60cc0bbe-be55-4c34-b0c6-e9c138885967" + "request_id": "132eb644-166e-4800-802e-7e90ea636b21" } }, { "type": "Open", - "timestamp": 1754089254.060123, + "timestamp": 1754948270.72157, "data": { "type": "Open" } }, { "type": "SettingsApplied", - "timestamp": 1754089254.1029801, + "timestamp": 1754948270.7652748, "data": { "type": "SettingsApplied" } }, { "type": "ConversationText", - "timestamp": 1754089255.110622, + "timestamp": 1754948271.768172, "data": { "type": "ConversationText", "role": "user", @@ -32,15 +32,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089255.1114728, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"Hello, this is a test of agent tags functionality.\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089255.111763, + "timestamp": 1754948271.768724, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -48,7 +40,7 @@ }, { "type": "ConversationText", - "timestamp": 1754089256.122815, + "timestamp": 1754948272.522288, "data": { "type": "ConversationText", "role": "assistant", @@ -56,25 +48,26 @@ } }, { - "type": "Unhandled", - "timestamp": 1754089256.12335, + "type": "AgentStartedSpeaking", + "timestamp": 1754948272.5331469, "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"Hello!\"}" + "total_latency": 0.751554899, + "tts_latency": 0.368593096, + "ttt_latency": 0.382961593 } }, { - "type": "AgentStartedSpeaking", - "timestamp": 1754089256.12362, + "type": "ConversationText", + "timestamp": 1754948273.247007, "data": { - "total_latency": 0.962977896, - "tts_latency": 0.368340208, - "ttt_latency": 0.594637578 + "type": "ConversationText", + "role": "assistant", + "content": "It seems you're testing the agent tags functionality." } }, { "type": "ConversationText", - "timestamp": 1754089256.6148539, + "timestamp": 1754948273.330629, "data": { "type": "ConversationText", "role": "user", @@ -83,15 +76,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089256.615833, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"Can you confirm you are working with tags enabled?\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089256.616431, + "timestamp": 1754948273.331824, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -99,57 +84,41 @@ }, { "type": "AgentAudioDone", - "timestamp": 1754089256.616906, + "timestamp": 1754948273.480625, "data": { "type": "AgentAudioDone" } }, { "type": "ConversationText", - "timestamp": 1754089257.768304, + "timestamp": 1754948274.393117, "data": { "type": "ConversationText", "role": "assistant", - "content": "Yes, I can confirm that I am able to work with tags." 
- } - }, - { - "type": "Unhandled", - "timestamp": 1754089257.768838, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"Yes, I can confirm that I am able to work with tags.\"}" + "content": "Yes, I can confirm that I'm set up to work with tags enabled." } }, { "type": "AgentStartedSpeaking", - "timestamp": 1754089257.7692642, + "timestamp": 1754948274.3937778, "data": { - "total_latency": 1.157360975, - "tts_latency": 0.385327765, - "ttt_latency": 0.7720331 + "total_latency": 0.996282726, + "tts_latency": 0.314806037, + "ttt_latency": 0.681476589 } }, { "type": "ConversationText", - "timestamp": 1754089261.3335302, + "timestamp": 1754948277.976524, "data": { "type": "ConversationText", "role": "assistant", - "content": "How can I assist you with them?" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089261.334396, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"How can I assist you with them?\"}" + "content": "How can I assist you with the tags functionality?" } }, { "type": "AgentAudioDone", - "timestamp": 1754089261.371368, + "timestamp": 1754948277.981493, "data": { "type": "AgentAudioDone" } diff --git a/tests/response_data/agent/websocket/basic_conversation-a40b2785-events.json b/tests/response_data/agent/websocket/basic_conversation-a40b2785-events.json index e66cc0bb..0807148f 100644 --- a/tests/response_data/agent/websocket/basic_conversation-a40b2785-events.json +++ b/tests/response_data/agent/websocket/basic_conversation-a40b2785-events.json @@ -1,29 +1,29 @@ [ { "type": "Welcome", - "timestamp": 1754089151.209811, + "timestamp": 1754948166.3834019, "data": { "type": "Welcome", - "request_id": "3be5ecc1-8c30-42b8-a7a5-077bbe51bdc2" + "request_id": "9f1e4eef-2516-4c65-a570-222349facadc" } }, { "type": "Open", - "timestamp": 1754089151.209898, + "timestamp": 1754948166.38349, "data": { "type": "Open" } }, { "type": "SettingsApplied", - "timestamp": 1754089151.250269, + "timestamp": 1754948166.425482, "data": { "type": "SettingsApplied" } }, { "type": "ConversationText", - "timestamp": 1754089152.256063, + "timestamp": 1754948167.444435, "data": { "type": "ConversationText", "role": "user", @@ -32,15 +32,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089152.2563221, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"Hello, can you help me with a simple question?\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089152.256472, + "timestamp": 1754948167.444911, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -48,33 +40,25 @@ }, { "type": "ConversationText", - "timestamp": 1754089153.319703, + "timestamp": 1754948168.265812, "data": { "type": "ConversationText", "role": "assistant", "content": "Of course!" 
} }, - { - "type": "Unhandled", - "timestamp": 1754089153.3203561, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"Of course!\"}" - } - }, { "type": "AgentStartedSpeaking", - "timestamp": 1754089153.320954, + "timestamp": 1754948168.273765, "data": { - "total_latency": 1.062668161, - "tts_latency": 0.309797676, - "ttt_latency": 0.752869918 + "total_latency": 0.834087238, + "tts_latency": 0.305673514, + "ttt_latency": 0.528413404 } }, { "type": "ConversationText", - "timestamp": 1754089153.764373, + "timestamp": 1754948168.945366, "data": { "type": "ConversationText", "role": "user", @@ -83,15 +67,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089153.764599, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"What is 2 + 2?\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089153.764719, + "timestamp": 1754948168.9458811, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -99,47 +75,14 @@ }, { "type": "AgentAudioDone", - "timestamp": 1754089153.764807, - "data": { - "type": "AgentAudioDone" - } - }, - { - "type": "ConversationText", - "timestamp": 1754089154.517601, - "data": { - "type": "ConversationText", - "role": "assistant", - "content": "2 + 2 equals 4!" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089154.519148, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"2 + 2 equals 4!\"}" - } - }, - { - "type": "AgentStartedSpeaking", - "timestamp": 1754089154.519596, - "data": { - "total_latency": 0.755932164, - "tts_latency": 0.301259134, - "ttt_latency": 0.45467274 - } - }, - { - "type": "AgentAudioDone", - "timestamp": 1754089154.8745668, + "timestamp": 1754948168.9461288, "data": { "type": "AgentAudioDone" } }, { "type": "ConversationText", - "timestamp": 1754089155.273046, + "timestamp": 1754948170.451709, "data": { "type": "ConversationText", "role": "user", @@ -148,15 +91,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089155.2735202, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"Thank you for your help.\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089155.27373, + "timestamp": 1754948170.452619, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -164,50 +99,34 @@ }, { "type": "ConversationText", - "timestamp": 1754089156.129859, + "timestamp": 1754948171.554943, "data": { "type": "ConversationText", "role": "assistant", - "content": "You're welcome!" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089156.130631, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"You're welcome!\"}" + "content": "2 + 2 is 4!" } }, { "type": "AgentStartedSpeaking", - "timestamp": 1754089156.1312032, + "timestamp": 1754948171.563315, "data": { - "total_latency": 0.858516122, - "tts_latency": 0.32573959, - "ttt_latency": 0.532776132 + "total_latency": 1.101432299, + "tts_latency": 0.383479276, + "ttt_latency": 0.717952623 } }, { "type": "ConversationText", - "timestamp": 1754089157.306326, + "timestamp": 1754948173.394449, "data": { "type": "ConversationText", "role": "assistant", - "content": "If you have any more questions, feel free to ask!" 
- } - }, - { - "type": "Unhandled", - "timestamp": 1754089157.3069599, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"If you have any more questions, feel free to ask!\"}" + "content": "You're welcome!" } }, { "type": "AgentAudioDone", - "timestamp": 1754089157.370949, + "timestamp": 1754948173.433344, "data": { "type": "AgentAudioDone" } diff --git a/tests/response_data/agent/websocket/fallback_providers-e16542b1-events.json b/tests/response_data/agent/websocket/fallback_providers-e16542b1-events.json index 15d0150b..b529f010 100644 --- a/tests/response_data/agent/websocket/fallback_providers-e16542b1-events.json +++ b/tests/response_data/agent/websocket/fallback_providers-e16542b1-events.json @@ -1,29 +1,29 @@ [ { "type": "Welcome", - "timestamp": 1754089177.534934, + "timestamp": 1754948192.721843, "data": { "type": "Welcome", - "request_id": "a923abe9-006b-430a-a02c-c74411d9aac7" + "request_id": "cc632fa8-37a7-486f-8c2b-a6f94988c5c8" } }, { "type": "Open", - "timestamp": 1754089177.5350668, + "timestamp": 1754948192.721969, "data": { "type": "Open" } }, { "type": "SettingsApplied", - "timestamp": 1754089177.5740302, + "timestamp": 1754948192.764568, "data": { "type": "SettingsApplied" } }, { "type": "ConversationText", - "timestamp": 1754089178.5788832, + "timestamp": 1754948193.77013, "data": { "type": "ConversationText", "role": "user", @@ -32,15 +32,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089178.579328, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"Hello, can you test speaking with fallback providers?\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089178.5795121, + "timestamp": 1754948193.771481, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -48,33 +40,25 @@ }, { "type": "ConversationText", - "timestamp": 1754089179.623884, + "timestamp": 1754948194.7253609, "data": { "type": "ConversationText", "role": "assistant", "content": "Hello!" } }, - { - "type": "Unhandled", - "timestamp": 1754089179.624897, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"Hello!\"}" - } - }, { "type": "AgentStartedSpeaking", - "timestamp": 1754089179.6257951, + "timestamp": 1754948194.726901, "data": { - "total_latency": 1.043773834, - "tts_latency": 0.378058042, - "ttt_latency": 0.665715682 + "total_latency": 0.847075398, + "tts_latency": 0.366917177, + "ttt_latency": 0.480158007 } }, { "type": "ConversationText", - "timestamp": 1754089180.093214, + "timestamp": 1754948195.277183, "data": { "type": "ConversationText", "role": "user", @@ -83,15 +67,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089180.093871, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"Please say something else to test the fallback.\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089180.094195, + "timestamp": 1754948195.2784362, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -99,57 +75,41 @@ }, { "type": "AgentAudioDone", - "timestamp": 1754089180.0944588, + "timestamp": 1754948195.278949, "data": { "type": "AgentAudioDone" } }, { "type": "ConversationText", - "timestamp": 1754089180.754755, + "timestamp": 1754948196.056413, "data": { "type": "ConversationText", "role": "assistant", "content": "Sure!" 
} }, - { - "type": "Unhandled", - "timestamp": 1754089180.7552261, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"Sure!\"}" - } - }, { "type": "AgentStartedSpeaking", - "timestamp": 1754089180.7559838, + "timestamp": 1754948196.057754, "data": { - "total_latency": 0.539705597, - "tts_latency": 0.261027007, - "ttt_latency": 0.27867806 + "total_latency": 0.664855926, + "tts_latency": 0.306484619, + "ttt_latency": 0.358371064 } }, { "type": "ConversationText", - "timestamp": 1754089181.231325, + "timestamp": 1754948196.67066, "data": { "type": "ConversationText", "role": "assistant", "content": "How can I assist you today?" } }, - { - "type": "Unhandled", - "timestamp": 1754089181.232203, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"How can I assist you today?\"}" - } - }, { "type": "AgentAudioDone", - "timestamp": 1754089181.2641761, + "timestamp": 1754948196.7596312, "data": { "type": "AgentAudioDone" } diff --git a/tests/response_data/agent/websocket/function_call_conversation-ac8ed698-events.json b/tests/response_data/agent/websocket/function_call_conversation-ac8ed698-events.json index 0ab3ee07..071a760a 100644 --- a/tests/response_data/agent/websocket/function_call_conversation-ac8ed698-events.json +++ b/tests/response_data/agent/websocket/function_call_conversation-ac8ed698-events.json @@ -1,29 +1,29 @@ [ { "type": "Welcome", - "timestamp": 1754089229.198654, + "timestamp": 1754948245.3951008, "data": { "type": "Welcome", - "request_id": "b304cd18-7297-46c6-a441-b2fc4e5a2922" + "request_id": "c808d204-8bea-4d20-8514-9c69710aaaff" } }, { "type": "Open", - "timestamp": 1754089229.198943, + "timestamp": 1754948245.395226, "data": { "type": "Open" } }, { "type": "SettingsApplied", - "timestamp": 1754089229.240551, + "timestamp": 1754948245.434721, "data": { "type": "SettingsApplied" } }, { "type": "ConversationText", - "timestamp": 1754089230.2495959, + "timestamp": 1754948246.4364889, "data": { "type": "ConversationText", "role": "user", @@ -32,15 +32,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089230.250344, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"What's the weather like in New York?\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089230.250602, + "timestamp": 1754948246.4373288, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -48,12 +40,12 @@ }, { "type": "FunctionCallRequest", - "timestamp": 1754089230.9693499, + "timestamp": 1754948247.5643442, "data": { "type": "FunctionCallRequest", "functions": [ { - "id": "call_jYXNwi9tuPqhgrtiIJxoqsaz", + "id": "call_bbDFsGyBeInV2H26OAICjPhg", "name": "get_weather", "arguments": "{\"location\":\"New York\"}", "client_side": true @@ -61,17 +53,9 @@ ] } }, - { - "type": "Unhandled", - "timestamp": 1754089231.014071, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"function_calls\":[{\"id\":\"call_jYXNwi9tuPqhgrtiIJxoqsaz\",\"name\":\"get_weather\",\"client_side\":true,\"arguments\":\"{\\\"location\\\":\\\"New York\\\"}\",\"response\":\"{\\\"success\\\": true, \\\"result\\\": \\\"Mock function response\\\", \\\"timestamp\\\": 1754089230.9695292}\"}]}" - } - }, { "type": "ConversationText", - "timestamp": 1754089231.766838, + "timestamp": 1754948247.94738, "data": { "type": "ConversationText", "role": "user", @@ -80,15 +64,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089231.7678668, - "data": { - "type": 
"Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"Can you also check the weather in London?\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089231.769166, + "timestamp": 1754948247.948669, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -96,12 +72,12 @@ }, { "type": "FunctionCallRequest", - "timestamp": 1754089233.014444, + "timestamp": 1754948249.277836, "data": { "type": "FunctionCallRequest", "functions": [ { - "id": "call_YwxHZk3V0XhsWPt7JueJMMqe", + "id": "call_5kvNcSk1Pk3XxV07ZyasrDIQ", "name": "get_weather", "arguments": "{\"location\":\"New York\"}", "client_side": true @@ -109,22 +85,14 @@ ] } }, - { - "type": "Unhandled", - "timestamp": 1754089233.05795, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"function_calls\":[{\"id\":\"call_YwxHZk3V0XhsWPt7JueJMMqe\",\"name\":\"get_weather\",\"client_side\":true,\"arguments\":\"{\\\"location\\\": \\\"New York\\\"}\",\"response\":\"{\\\"success\\\": true, \\\"result\\\": \\\"Mock function response\\\", \\\"timestamp\\\": 1754089233.014666}\"}]}" - } - }, { "type": "FunctionCallRequest", - "timestamp": 1754089233.0595121, + "timestamp": 1754948249.32042, "data": { "type": "FunctionCallRequest", "functions": [ { - "id": "call_DveXgRjUM8ICh61bmHyyUMAA", + "id": "call_m5zpyzvzTSvc57ZLsj4l5DXj", "name": "get_weather", "arguments": "{\"location\":\"London\"}", "client_side": true @@ -132,60 +100,45 @@ ] } }, - { - "type": "Unhandled", - "timestamp": 1754089233.0992022, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"function_calls\":[{\"id\":\"call_DveXgRjUM8ICh61bmHyyUMAA\",\"name\":\"get_weather\",\"client_side\":true,\"arguments\":\"{\\\"location\\\": \\\"London\\\"}\",\"response\":\"{\\\"success\\\": true, \\\"result\\\": \\\"Mock function response\\\", \\\"timestamp\\\": 1754089233.0596192}\"}]}" - } - }, { "type": "ConversationText", - "timestamp": 1754089234.274155, + "timestamp": 1754948250.94256, "data": { "type": "ConversationText", "role": "assistant", - "content": "I checked the weather in New York and London, but it seems the responses are placeholder data." - } - }, - { - "type": "Unhandled", - "timestamp": 1754089234.275143, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"I checked the weather in New York and London, but it seems the responses are placeholder data.\"}" + "content": "I checked the weather for both New York and London, but the response returned is a mock function response." } }, { "type": "AgentStartedSpeaking", - "timestamp": 1754089234.2811031, + "timestamp": 1754948250.943639, "data": { - "total_latency": 2.5165708799999997, - "tts_latency": 0.37659356, - "ttt_latency": 2.139977173 + "total_latency": 2.875424046, + "tts_latency": 0.37155966, + "ttt_latency": 2.503864104 } }, { "type": "ConversationText", - "timestamp": 1754089239.476382, + "timestamp": 1754948256.58231, "data": { "type": "ConversationText", "role": "assistant", - "content": "Would you like me to try again?" + "content": "Unfortunately, it doesn't provide specific weather details." 
} }, { - "type": "Unhandled", - "timestamp": 1754089239.4781692, + "type": "ConversationText", + "timestamp": 1754948260.070521, "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"Would you like me to try again?\"}" + "type": "ConversationText", + "role": "assistant", + "content": "If you need information about a specific aspect of the weather, such as temperature, humidity, or conditions, please let me know!" } }, { "type": "AgentAudioDone", - "timestamp": 1754089239.5617201, + "timestamp": 1754948260.24, "data": { "type": "AgentAudioDone" } diff --git a/tests/response_data/agent/websocket/function_call_conversation-ac8ed698-function_calls.json b/tests/response_data/agent/websocket/function_call_conversation-ac8ed698-function_calls.json index ef139494..6e0c0d6f 100644 --- a/tests/response_data/agent/websocket/function_call_conversation-ac8ed698-function_calls.json +++ b/tests/response_data/agent/websocket/function_call_conversation-ac8ed698-function_calls.json @@ -4,7 +4,7 @@ "type": "FunctionCallRequest", "functions": [ { - "id": "call_jYXNwi9tuPqhgrtiIJxoqsaz", + "id": "call_bbDFsGyBeInV2H26OAICjPhg", "name": "get_weather", "arguments": "{\"location\":\"New York\"}", "client_side": true @@ -15,7 +15,7 @@ "type": "FunctionCallRequest", "functions": [ { - "id": "call_YwxHZk3V0XhsWPt7JueJMMqe", + "id": "call_5kvNcSk1Pk3XxV07ZyasrDIQ", "name": "get_weather", "arguments": "{\"location\":\"New York\"}", "client_side": true @@ -26,7 +26,7 @@ "type": "FunctionCallRequest", "functions": [ { - "id": "call_DveXgRjUM8ICh61bmHyyUMAA", + "id": "call_m5zpyzvzTSvc57ZLsj4l5DXj", "name": "get_weather", "arguments": "{\"location\":\"London\"}", "client_side": true diff --git a/tests/response_data/agent/websocket/inject_agent_message-3c5004a4-events.json b/tests/response_data/agent/websocket/inject_agent_message-3c5004a4-events.json index 0d42d290..f0003410 100644 --- a/tests/response_data/agent/websocket/inject_agent_message-3c5004a4-events.json +++ b/tests/response_data/agent/websocket/inject_agent_message-3c5004a4-events.json @@ -1,29 +1,29 @@ [ { "type": "Welcome", - "timestamp": 1754089202.366035, + "timestamp": 1754948218.05023, "data": { "type": "Welcome", - "request_id": "7ead99c4-740b-4f6f-af97-cff90869109c" + "request_id": "1751c7ac-9f91-423a-9304-b289978f8202" } }, { "type": "Open", - "timestamp": 1754089202.366368, + "timestamp": 1754948218.050529, "data": { "type": "Open" } }, { "type": "SettingsApplied", - "timestamp": 1754089202.412865, + "timestamp": 1754948218.0947149, "data": { "type": "SettingsApplied" } }, { "type": "ConversationText", - "timestamp": 1754089203.408017, + "timestamp": 1754948219.103036, "data": { "type": "ConversationText", "role": "user", @@ -32,15 +32,7 @@ }, { "type": "Unhandled", - "timestamp": 1754089203.4085178, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"user\",\"content\":\"Hello, I'm going to inject some agent messages.\"}" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089203.408761, + "timestamp": 1754948219.104838, "data": { "type": "Unhandled", "raw": "{\"type\":\"EndOfThought\"}" @@ -48,115 +40,75 @@ }, { "type": "ConversationText", - "timestamp": 1754089204.152384, + "timestamp": 1754948220.223428, "data": { "type": "ConversationText", "role": "assistant", "content": "Hello!" 
} }, - { - "type": "Unhandled", - "timestamp": 1754089204.1526778, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"Hello!\"}" - } - }, { "type": "AgentStartedSpeaking", - "timestamp": 1754089204.1550121, + "timestamp": 1754948220.224504, "data": { - "total_latency": 0.740471001, - "tts_latency": 0.373125499, - "ttt_latency": 0.36734529 + "total_latency": 1.046059852, + "tts_latency": 0.379347438, + "ttt_latency": 0.666712208 } }, { "type": "ConversationText", - "timestamp": 1754089204.889982, + "timestamp": 1754948220.940325, "data": { "type": "ConversationText", "role": "assistant", - "content": "Sounds interesting." - } - }, - { - "type": "Unhandled", - "timestamp": 1754089204.8905249, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"Sounds interesting.\"}" + "content": "Feel free to share the messages whenever you're ready." } }, { "type": "ConversationText", - "timestamp": 1754089206.034254, + "timestamp": 1754948223.392433, "data": { "type": "ConversationText", "role": "assistant", - "content": "What do you have in mind?" - } - }, - { - "type": "Unhandled", - "timestamp": 1754089206.035018, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"What do you have in mind?\"}" + "content": "I'm here to help!" } }, { "type": "AgentAudioDone", - "timestamp": 1754089206.119092, + "timestamp": 1754948223.4384499, "data": { "type": "AgentAudioDone" } }, { "type": "ConversationText", - "timestamp": 1754089206.3451462, + "timestamp": 1754948223.782128, "data": { "type": "ConversationText", "role": "assistant", "content": "Hello! I'm an agent message injected directly." } }, - { - "type": "Unhandled", - "timestamp": 1754089206.346717, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"Hello! I'm an agent message injected directly.\"}" - } - }, { "type": "AgentAudioDone", - "timestamp": 1754089206.983258, + "timestamp": 1754948224.4857159, "data": { "type": "AgentAudioDone" } }, { "type": "ConversationText", - "timestamp": 1754089207.362171, + "timestamp": 1754948224.8573608, "data": { "type": "ConversationText", "role": "assistant", "content": "This is another agent message to test the functionality." 
} }, - { - "type": "Unhandled", - "timestamp": 1754089207.363573, - "data": { - "type": "Unhandled", - "raw": "{\"type\":\"History\",\"role\":\"assistant\",\"content\":\"This is another agent message to test the functionality.\"}" - } - }, { "type": "AgentAudioDone", - "timestamp": 1754089208.153072, + "timestamp": 1754948225.6692321, "data": { "type": "AgentAudioDone" } diff --git a/tests/response_data/listen/rest/a231370d439312b1a404bb6ad8de955e900ec8eae9a906329af8cc672e6ec7ba-29e7c8100617f70da4ae9da1921cb5071a01219f4780ca70930b0a370ed2163a-response.json b/tests/response_data/listen/rest/a231370d439312b1a404bb6ad8de955e900ec8eae9a906329af8cc672e6ec7ba-29e7c8100617f70da4ae9da1921cb5071a01219f4780ca70930b0a370ed2163a-response.json index 451327cd..db8fe7ad 100644 --- a/tests/response_data/listen/rest/a231370d439312b1a404bb6ad8de955e900ec8eae9a906329af8cc672e6ec7ba-29e7c8100617f70da4ae9da1921cb5071a01219f4780ca70930b0a370ed2163a-response.json +++ b/tests/response_data/listen/rest/a231370d439312b1a404bb6ad8de955e900ec8eae9a906329af8cc672e6ec7ba-29e7c8100617f70da4ae9da1921cb5071a01219f4780ca70930b0a370ed2163a-response.json @@ -1 +1 @@ -{"metadata": {"transaction_key": "deprecated", "request_id": "1ef3b141-68af-4a3e-80c3-ee1f6ef05a35", "sha256": "5324da68ede209a16ac69a38e8cd29cee4d754434a041166cda3a1f5e0b24566", "created": "2025-07-22T16:56:44.589Z", "duration": 17.566313, "channels": 1, "models": ["3b3aabe4-608a-46ac-9585-7960a25daf1a"], "model_info": {"3b3aabe4-608a-46ac-9585-7960a25daf1a": {"name": "general-nova-3", "version": "2024-12-20.0", "arch": "nova-3"}}, "summary_info": {"model_uuid": "67875a7f-c9c4-48a0-aa55-5bdb8a91c34a", "input_tokens": 0, "output_tokens": 0}}, "results": {"channels": [{"alternatives": [{"transcript": "Yep. I said it before, and I'll say it again. Life moves pretty fast. 
You don't stop and look around once in a while, you could miss it.", "confidence": 0.999143, "words": [{"word": "yep", "start": 5.52, "end": 6.2400002, "confidence": 0.92342806, "punctuated_word": "Yep."}, {"word": "i", "start": 6.96, "end": 7.2799997, "confidence": 0.57757515, "punctuated_word": "I"}, {"word": "said", "start": 7.2799997, "end": 7.52, "confidence": 0.9052356, "punctuated_word": "said"}, {"word": "it", "start": 7.52, "end": 7.68, "confidence": 0.99797314, "punctuated_word": "it"}, {"word": "before", "start": 7.68, "end": 8.08, "confidence": 0.8933872, "punctuated_word": "before,"}, {"word": "and", "start": 8.08, "end": 8.16, "confidence": 0.99981827, "punctuated_word": "and"}, {"word": "i'll", "start": 8.16, "end": 8.4, "confidence": 0.99961716, "punctuated_word": "I'll"}, {"word": "say", "start": 8.4, "end": 8.48, "confidence": 0.99941766, "punctuated_word": "say"}, {"word": "it", "start": 8.48, "end": 8.639999, "confidence": 0.999597, "punctuated_word": "it"}, {"word": "again", "start": 8.639999, "end": 8.96, "confidence": 0.9528253, "punctuated_word": "again."}, {"word": "life", "start": 10.071313, "end": 10.311313, "confidence": 0.9990013, "punctuated_word": "Life"}, {"word": "moves", "start": 10.311313, "end": 10.631312, "confidence": 0.9996643, "punctuated_word": "moves"}, {"word": "pretty", "start": 10.631312, "end": 11.031313, "confidence": 0.99988604, "punctuated_word": "pretty"}, {"word": "fast", "start": 11.031313, "end": 11.671312, "confidence": 0.9989686, "punctuated_word": "fast."}, {"word": "you", "start": 12.071312, "end": 12.311313, "confidence": 0.92013294, "punctuated_word": "You"}, {"word": "don't", "start": 12.311313, "end": 12.551312, "confidence": 0.99986017, "punctuated_word": "don't"}, {"word": "stop", "start": 12.551312, "end": 12.791312, "confidence": 0.99976414, "punctuated_word": "stop"}, {"word": "and", "start": 12.791312, "end": 12.951312, "confidence": 0.99852246, "punctuated_word": "and"}, {"word": "look", "start": 12.951312, "end": 13.111313, "confidence": 0.9998677, "punctuated_word": "look"}, {"word": "around", "start": 13.111313, "end": 13.351313, "confidence": 0.9998548, "punctuated_word": "around"}, {"word": "once", "start": 13.351313, "end": 13.671312, "confidence": 0.999143, "punctuated_word": "once"}, {"word": "in", "start": 13.671312, "end": 13.831312, "confidence": 0.9976291, "punctuated_word": "in"}, {"word": "a", "start": 13.831312, "end": 13.911312, "confidence": 0.98508644, "punctuated_word": "a"}, {"word": "while", "start": 13.911312, "end": 14.391312, "confidence": 0.9349461, "punctuated_word": "while,"}, {"word": "you", "start": 14.711312, "end": 14.871312, "confidence": 0.99921596, "punctuated_word": "you"}, {"word": "could", "start": 14.871312, "end": 15.031313, "confidence": 0.99974436, "punctuated_word": "could"}, {"word": "miss", "start": 15.031313, "end": 15.271313, "confidence": 0.9997112, "punctuated_word": "miss"}, {"word": "it", "start": 15.271313, "end": 15.5113125, "confidence": 0.99891484, "punctuated_word": "it."}], "paragraphs": {"transcript": "\nYep. I said it before, and I'll say it again. Life moves pretty fast. 
You don't stop and look around once in a while, you could miss it.", "paragraphs": [{"sentences": [{"text": "Yep.", "start": 5.52, "end": 6.2400002}, {"text": "I said it before, and I'll say it again.", "start": 6.96, "end": 8.96}, {"text": "Life moves pretty fast.", "start": 10.071313, "end": 11.671312}, {"text": "You don't stop and look around once in a while, you could miss it.", "start": 12.071312, "end": 15.5113125}], "start": 5.52, "end": 15.5113125, "num_words": 28}]}}]}], "summary": {"result": "success", "short": "Yep. I said it before, and I'll say it again. Life moves pretty fast. You don't stop and look around once in a while, you could miss it."}}} \ No newline at end of file +{"metadata": {"transaction_key": "deprecated", "request_id": "7d757237-ae06-4e2f-8813-e1eb87a4614b", "sha256": "5324da68ede209a16ac69a38e8cd29cee4d754434a041166cda3a1f5e0b24566", "created": "2025-08-11T21:38:55.258Z", "duration": 17.566313, "channels": 1, "models": ["2187e11a-3532-4498-b076-81fa530bdd49"], "model_info": {"2187e11a-3532-4498-b076-81fa530bdd49": {"name": "general-nova-3", "version": "2025-07-31.0", "arch": "nova-3"}}, "summary_info": {"model_uuid": "67875a7f-c9c4-48a0-aa55-5bdb8a91c34a", "input_tokens": 0, "output_tokens": 0}}, "results": {"channels": [{"alternatives": [{"transcript": "Yep. I said it before, and I'll say it again. Life moves pretty fast. You don't stop and look around once in a while, you could miss it.", "confidence": 0.9991429, "words": [{"word": "yep", "start": 5.52, "end": 6.2400002, "confidence": 0.92345256, "punctuated_word": "Yep."}, {"word": "i", "start": 6.96, "end": 7.2799997, "confidence": 0.5776232, "punctuated_word": "I"}, {"word": "said", "start": 7.2799997, "end": 7.52, "confidence": 0.9052255, "punctuated_word": "said"}, {"word": "it", "start": 7.52, "end": 7.68, "confidence": 0.9979741, "punctuated_word": "it"}, {"word": "before", "start": 7.68, "end": 8.08, "confidence": 0.89339817, "punctuated_word": "before,"}, {"word": "and", "start": 8.08, "end": 8.16, "confidence": 0.99981827, "punctuated_word": "and"}, {"word": "i'll", "start": 8.16, "end": 8.4, "confidence": 0.99961734, "punctuated_word": "I'll"}, {"word": "say", "start": 8.4, "end": 8.48, "confidence": 0.99941754, "punctuated_word": "say"}, {"word": "it", "start": 8.48, "end": 8.639999, "confidence": 0.99959713, "punctuated_word": "it"}, {"word": "again", "start": 8.639999, "end": 8.96, "confidence": 0.95283747, "punctuated_word": "again."}, {"word": "life", "start": 10.071313, "end": 10.311313, "confidence": 0.9990012, "punctuated_word": "Life"}, {"word": "moves", "start": 10.311313, "end": 10.631312, "confidence": 0.9996643, "punctuated_word": "moves"}, {"word": "pretty", "start": 10.631312, "end": 11.031313, "confidence": 0.99988604, "punctuated_word": "pretty"}, {"word": "fast", "start": 11.031313, "end": 11.671312, "confidence": 0.99896836, "punctuated_word": "fast."}, {"word": "you", "start": 12.071312, "end": 12.311313, "confidence": 0.9201446, "punctuated_word": "You"}, {"word": "don't", "start": 12.311313, "end": 12.551312, "confidence": 0.99986017, "punctuated_word": "don't"}, {"word": "stop", "start": 12.551312, "end": 12.791312, "confidence": 0.99976414, "punctuated_word": "stop"}, {"word": "and", "start": 12.791312, "end": 12.951312, "confidence": 0.998522, "punctuated_word": "and"}, {"word": "look", "start": 12.951312, "end": 13.111313, "confidence": 0.9998677, "punctuated_word": "look"}, {"word": "around", "start": 13.111313, "end": 13.351313, "confidence": 0.9998548, 
"punctuated_word": "around"}, {"word": "once", "start": 13.351313, "end": 13.671312, "confidence": 0.9991429, "punctuated_word": "once"}, {"word": "in", "start": 13.671312, "end": 13.831312, "confidence": 0.9976286, "punctuated_word": "in"}, {"word": "a", "start": 13.831312, "end": 13.911312, "confidence": 0.9850873, "punctuated_word": "a"}, {"word": "while", "start": 13.911312, "end": 14.391312, "confidence": 0.9349425, "punctuated_word": "while,"}, {"word": "you", "start": 14.711312, "end": 14.871312, "confidence": 0.99921596, "punctuated_word": "you"}, {"word": "could", "start": 14.871312, "end": 15.031313, "confidence": 0.99974436, "punctuated_word": "could"}, {"word": "miss", "start": 15.031313, "end": 15.271313, "confidence": 0.9997111, "punctuated_word": "miss"}, {"word": "it", "start": 15.271313, "end": 15.5113125, "confidence": 0.9989148, "punctuated_word": "it."}], "paragraphs": {"transcript": "\nYep. I said it before, and I'll say it again. Life moves pretty fast. You don't stop and look around once in a while, you could miss it.", "paragraphs": [{"sentences": [{"text": "Yep.", "start": 5.52, "end": 6.2400002}, {"text": "I said it before, and I'll say it again.", "start": 6.96, "end": 8.96}, {"text": "Life moves pretty fast.", "start": 10.071313, "end": 11.671312}, {"text": "You don't stop and look around once in a while, you could miss it.", "start": 12.071312, "end": 15.5113125}], "start": 5.52, "end": 15.5113125, "num_words": 28}]}}]}], "summary": {"result": "success", "short": "Yep. I said it before, and I'll say it again. Life moves pretty fast. You don't stop and look around once in a while, you could miss it."}}} \ No newline at end of file diff --git a/tests/response_data/listen/rest/a231370d439312b1a404bb6ad8de955e900ec8eae9a906329af8cc672e6ec7ba-a17f4880c5b4cf124ac54d06d77c9f0ab7f3fe1052ff1c7b090f7eaf8ede5b76-response.json b/tests/response_data/listen/rest/a231370d439312b1a404bb6ad8de955e900ec8eae9a906329af8cc672e6ec7ba-a17f4880c5b4cf124ac54d06d77c9f0ab7f3fe1052ff1c7b090f7eaf8ede5b76-response.json index 730ad3cb..7a943945 100644 --- a/tests/response_data/listen/rest/a231370d439312b1a404bb6ad8de955e900ec8eae9a906329af8cc672e6ec7ba-a17f4880c5b4cf124ac54d06d77c9f0ab7f3fe1052ff1c7b090f7eaf8ede5b76-response.json +++ b/tests/response_data/listen/rest/a231370d439312b1a404bb6ad8de955e900ec8eae9a906329af8cc672e6ec7ba-a17f4880c5b4cf124ac54d06d77c9f0ab7f3fe1052ff1c7b090f7eaf8ede5b76-response.json @@ -1 +1 @@ -{"metadata": {"transaction_key": "deprecated", "request_id": "cb126cd9-f72f-4336-bf62-57b3015272bc", "sha256": "95dc40091b6a8456a1554ddfc4f163768217afd66bee70a10c74bb52805cd0d9", "created": "2025-07-22T16:56:38.656Z", "duration": 19.097937, "channels": 1, "models": ["3b3aabe4-608a-46ac-9585-7960a25daf1a"], "model_info": {"3b3aabe4-608a-46ac-9585-7960a25daf1a": {"name": "general-nova-3", "version": "2024-12-20.0", "arch": "nova-3"}}, "summary_info": {"model_uuid": "67875a7f-c9c4-48a0-aa55-5bdb8a91c34a", "input_tokens": 63, "output_tokens": 43}}, "results": {"channels": [{"alternatives": [{"transcript": "We, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "confidence": 0.9977381, "words": [{"word": "we", "start": 0.32, "end": 0.79999995, "confidence": 0.8624499, "punctuated_word": "We,"}, {"word": "the", 
"start": 0.79999995, "end": 0.96, "confidence": 0.9988005, "punctuated_word": "the"}, {"word": "people", "start": 0.96, "end": 1.1999999, "confidence": 0.9702857, "punctuated_word": "people"}, {"word": "of", "start": 1.1999999, "end": 1.4399999, "confidence": 0.9261158, "punctuated_word": "of"}, {"word": "the", "start": 1.4399999, "end": 1.5999999, "confidence": 0.9968951, "punctuated_word": "The"}, {"word": "united", "start": 1.5999999, "end": 1.92, "confidence": 0.99693906, "punctuated_word": "United"}, {"word": "states", "start": 1.92, "end": 2.56, "confidence": 0.9895239, "punctuated_word": "States,"}, {"word": "in", "start": 2.56, "end": 2.72, "confidence": 0.9984237, "punctuated_word": "in"}, {"word": "order", "start": 2.72, "end": 2.96, "confidence": 0.9999378, "punctuated_word": "order"}, {"word": "to", "start": 2.96, "end": 3.12, "confidence": 0.9960312, "punctuated_word": "to"}, {"word": "form", "start": 3.12, "end": 3.28, "confidence": 0.9993017, "punctuated_word": "form"}, {"word": "a", "start": 3.28, "end": 3.4399998, "confidence": 0.9991947, "punctuated_word": "a"}, {"word": "more", "start": 3.4399998, "end": 3.6799998, "confidence": 0.99967253, "punctuated_word": "more"}, {"word": "perfect", "start": 3.6799998, "end": 3.9199998, "confidence": 0.9996804, "punctuated_word": "perfect"}, {"word": "union", "start": 3.9199998, "end": 4.56, "confidence": 0.96660304, "punctuated_word": "union,"}, {"word": "establish", "start": 4.72, "end": 5.2, "confidence": 0.9780107, "punctuated_word": "establish"}, {"word": "justice", "start": 5.2, "end": 6.08, "confidence": 0.9962268, "punctuated_word": "justice,"}, {"word": "ensure", "start": 6.08, "end": 6.3999996, "confidence": 0.96901894, "punctuated_word": "ensure"}, {"word": "domestic", "start": 6.3999996, "end": 6.8799996, "confidence": 0.9797106, "punctuated_word": "domestic"}, {"word": "tranquility", "start": 6.8799996, "end": 7.52, "confidence": 0.99495304, "punctuated_word": "tranquility,"}, {"word": "provide", "start": 7.792875, "end": 8.352875, "confidence": 0.99955326, "punctuated_word": "provide"}, {"word": "for", "start": 8.352875, "end": 8.512875, "confidence": 0.99970573, "punctuated_word": "for"}, {"word": "the", "start": 8.512875, "end": 8.672874, "confidence": 0.99844545, "punctuated_word": "the"}, {"word": "common", "start": 8.672874, "end": 8.912875, "confidence": 0.9994067, "punctuated_word": "common"}, {"word": "defense", "start": 8.912875, "end": 9.6328745, "confidence": 0.9897037, "punctuated_word": "defense,"}, {"word": "promote", "start": 9.6328745, "end": 9.952875, "confidence": 0.99213505, "punctuated_word": "promote"}, {"word": "the", "start": 9.952875, "end": 10.192875, "confidence": 0.99441385, "punctuated_word": "the"}, {"word": "general", "start": 10.192875, "end": 10.512875, "confidence": 0.9995796, "punctuated_word": "general"}, {"word": "welfare", "start": 10.512875, "end": 11.152875, "confidence": 0.9714123, "punctuated_word": "welfare,"}, {"word": "and", "start": 11.152875, "end": 11.232875, "confidence": 0.9996729, "punctuated_word": "and"}, {"word": "secure", "start": 11.232875, "end": 11.552875, "confidence": 0.9994293, "punctuated_word": "secure"}, {"word": "the", "start": 11.552875, "end": 11.792875, "confidence": 0.99942905, "punctuated_word": "the"}, {"word": "blessings", "start": 11.792875, "end": 12.112875, "confidence": 0.99741995, "punctuated_word": "blessings"}, {"word": "of", "start": 12.112875, "end": 12.272875, "confidence": 0.99958605, "punctuated_word": "of"}, {"word": "liberty", "start": 
12.272875, "end": 12.672874, "confidence": 0.99673575, "punctuated_word": "liberty"}, {"word": "to", "start": 12.672874, "end": 12.912874, "confidence": 0.9903154, "punctuated_word": "to"}, {"word": "ourselves", "start": 12.912874, "end": 13.312875, "confidence": 0.99862087, "punctuated_word": "ourselves"}, {"word": "and", "start": 13.312875, "end": 13.552875, "confidence": 0.87773573, "punctuated_word": "and"}, {"word": "our", "start": 13.552875, "end": 13.712875, "confidence": 0.9971655, "punctuated_word": "our"}, {"word": "posterity", "start": 13.712875, "end": 14.592875, "confidence": 0.9914979, "punctuated_word": "posterity"}, {"word": "to", "start": 14.592875, "end": 14.832874, "confidence": 0.6025522, "punctuated_word": "to"}, {"word": "ordain", "start": 14.832874, "end": 15.312875, "confidence": 0.99851, "punctuated_word": "ordain"}, {"word": "and", "start": 15.312875, "end": 15.472875, "confidence": 0.9984882, "punctuated_word": "and"}, {"word": "establish", "start": 15.472875, "end": 15.952875, "confidence": 0.99775887, "punctuated_word": "establish"}, {"word": "this", "start": 15.952875, "end": 16.272875, "confidence": 0.998808, "punctuated_word": "this"}, {"word": "constitution", "start": 16.272875, "end": 16.912874, "confidence": 0.95854187, "punctuated_word": "constitution"}, {"word": "for", "start": 16.912874, "end": 17.152874, "confidence": 0.99841416, "punctuated_word": "for"}, {"word": "the", "start": 17.152874, "end": 17.312874, "confidence": 0.9980714, "punctuated_word": "The"}, {"word": "united", "start": 17.312874, "end": 17.632875, "confidence": 0.9977381, "punctuated_word": "United"}, {"word": "states", "start": 17.632875, "end": 17.952875, "confidence": 0.999585, "punctuated_word": "States"}, {"word": "of", "start": 17.952875, "end": 18.192875, "confidence": 0.99960726, "punctuated_word": "Of"}, {"word": "america", "start": 18.192875, "end": 18.592875, "confidence": 0.99715745, "punctuated_word": "America."}], "paragraphs": {"transcript": "\nWe, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "paragraphs": [{"sentences": [{"text": "We, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "start": 0.32, "end": 18.592875}], "start": 0.32, "end": 18.592875, "num_words": 52}]}}]}], "summary": {"result": "success", "short": "Speaker 1 discusses the goal of establishing a more perfect union, justice, and the common defense for the United States of America, in order to secure the blessings of liberty and establish the constitution for the country."}}} \ No newline at end of file +{"metadata": {"transaction_key": "deprecated", "request_id": "d07f77ba-b091-4fa9-9faa-d3b514784fa6", "sha256": "95dc40091b6a8456a1554ddfc4f163768217afd66bee70a10c74bb52805cd0d9", "created": "2025-08-11T21:38:53.737Z", "duration": 19.097937, "channels": 1, "models": ["2187e11a-3532-4498-b076-81fa530bdd49"], "model_info": {"2187e11a-3532-4498-b076-81fa530bdd49": {"name": "general-nova-3", "version": "2025-07-31.0", "arch": "nova-3"}}, "summary_info": 
{"model_uuid": "67875a7f-c9c4-48a0-aa55-5bdb8a91c34a", "input_tokens": 63, "output_tokens": 43}}, "results": {"channels": [{"alternatives": [{"transcript": "We, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "confidence": 0.9977379, "words": [{"word": "we", "start": 0.32, "end": 0.79999995, "confidence": 0.8624594, "punctuated_word": "We,"}, {"word": "the", "start": 0.79999995, "end": 0.96, "confidence": 0.9988009, "punctuated_word": "the"}, {"word": "people", "start": 0.96, "end": 1.1999999, "confidence": 0.9702921, "punctuated_word": "people"}, {"word": "of", "start": 1.1999999, "end": 1.4399999, "confidence": 0.92611325, "punctuated_word": "of"}, {"word": "the", "start": 1.4399999, "end": 1.5999999, "confidence": 0.99689424, "punctuated_word": "The"}, {"word": "united", "start": 1.5999999, "end": 1.92, "confidence": 0.99693954, "punctuated_word": "United"}, {"word": "states", "start": 1.92, "end": 2.56, "confidence": 0.98952484, "punctuated_word": "States,"}, {"word": "in", "start": 2.56, "end": 2.72, "confidence": 0.99842346, "punctuated_word": "in"}, {"word": "order", "start": 2.72, "end": 2.96, "confidence": 0.9999378, "punctuated_word": "order"}, {"word": "to", "start": 2.96, "end": 3.12, "confidence": 0.9960312, "punctuated_word": "to"}, {"word": "form", "start": 3.12, "end": 3.28, "confidence": 0.99930143, "punctuated_word": "form"}, {"word": "a", "start": 3.28, "end": 3.4399998, "confidence": 0.9991948, "punctuated_word": "a"}, {"word": "more", "start": 3.4399998, "end": 3.6799998, "confidence": 0.99967265, "punctuated_word": "more"}, {"word": "perfect", "start": 3.6799998, "end": 3.9199998, "confidence": 0.9996804, "punctuated_word": "perfect"}, {"word": "union", "start": 3.9199998, "end": 4.56, "confidence": 0.96661377, "punctuated_word": "union,"}, {"word": "establish", "start": 4.72, "end": 5.2, "confidence": 0.9780056, "punctuated_word": "establish"}, {"word": "justice", "start": 5.2, "end": 6.08, "confidence": 0.99622726, "punctuated_word": "justice,"}, {"word": "ensure", "start": 6.08, "end": 6.3999996, "confidence": 0.9690141, "punctuated_word": "ensure"}, {"word": "domestic", "start": 6.3999996, "end": 6.8799996, "confidence": 0.97970927, "punctuated_word": "domestic"}, {"word": "tranquility", "start": 6.8799996, "end": 7.52, "confidence": 0.9949531, "punctuated_word": "tranquility,"}, {"word": "provide", "start": 7.792875, "end": 8.352875, "confidence": 0.99955326, "punctuated_word": "provide"}, {"word": "for", "start": 8.352875, "end": 8.512875, "confidence": 0.99970573, "punctuated_word": "for"}, {"word": "the", "start": 8.512875, "end": 8.672874, "confidence": 0.9984457, "punctuated_word": "the"}, {"word": "common", "start": 8.672874, "end": 8.912875, "confidence": 0.9994067, "punctuated_word": "common"}, {"word": "defense", "start": 8.912875, "end": 9.6328745, "confidence": 0.989704, "punctuated_word": "defense,"}, {"word": "promote", "start": 9.6328745, "end": 9.952875, "confidence": 0.9921375, "punctuated_word": "promote"}, {"word": "the", "start": 9.952875, "end": 10.192875, "confidence": 0.9944133, "punctuated_word": "the"}, {"word": "general", "start": 10.192875, "end": 10.512875, "confidence": 0.9995796, "punctuated_word": "general"}, {"word": "welfare", "start": 10.512875, "end": 
11.152875, "confidence": 0.9714061, "punctuated_word": "welfare,"}, {"word": "and", "start": 11.152875, "end": 11.232875, "confidence": 0.999673, "punctuated_word": "and"}, {"word": "secure", "start": 11.232875, "end": 11.552875, "confidence": 0.9994294, "punctuated_word": "secure"}, {"word": "the", "start": 11.552875, "end": 11.792875, "confidence": 0.99942917, "punctuated_word": "the"}, {"word": "blessings", "start": 11.792875, "end": 12.112875, "confidence": 0.9974213, "punctuated_word": "blessings"}, {"word": "of", "start": 12.112875, "end": 12.272875, "confidence": 0.99958605, "punctuated_word": "of"}, {"word": "liberty", "start": 12.272875, "end": 12.672874, "confidence": 0.996736, "punctuated_word": "liberty"}, {"word": "to", "start": 12.672874, "end": 12.912874, "confidence": 0.99031293, "punctuated_word": "to"}, {"word": "ourselves", "start": 12.912874, "end": 13.312875, "confidence": 0.99862087, "punctuated_word": "ourselves"}, {"word": "and", "start": 13.312875, "end": 13.552875, "confidence": 0.87775034, "punctuated_word": "and"}, {"word": "our", "start": 13.552875, "end": 13.712875, "confidence": 0.997166, "punctuated_word": "our"}, {"word": "posterity", "start": 13.712875, "end": 14.592875, "confidence": 0.9914988, "punctuated_word": "posterity"}, {"word": "to", "start": 14.592875, "end": 14.832874, "confidence": 0.6025369, "punctuated_word": "to"}, {"word": "ordain", "start": 14.832874, "end": 15.312875, "confidence": 0.99850905, "punctuated_word": "ordain"}, {"word": "and", "start": 15.312875, "end": 15.472875, "confidence": 0.9984875, "punctuated_word": "and"}, {"word": "establish", "start": 15.472875, "end": 15.952875, "confidence": 0.99775887, "punctuated_word": "establish"}, {"word": "this", "start": 15.952875, "end": 16.272875, "confidence": 0.99880767, "punctuated_word": "this"}, {"word": "constitution", "start": 16.272875, "end": 16.912874, "confidence": 0.9585388, "punctuated_word": "constitution"}, {"word": "for", "start": 16.912874, "end": 17.152874, "confidence": 0.99841416, "punctuated_word": "for"}, {"word": "the", "start": 17.152874, "end": 17.312874, "confidence": 0.998071, "punctuated_word": "The"}, {"word": "united", "start": 17.312874, "end": 17.632875, "confidence": 0.9977379, "punctuated_word": "United"}, {"word": "states", "start": 17.632875, "end": 17.952875, "confidence": 0.999585, "punctuated_word": "States"}, {"word": "of", "start": 17.952875, "end": 18.192875, "confidence": 0.99960726, "punctuated_word": "Of"}, {"word": "america", "start": 18.192875, "end": 18.592875, "confidence": 0.99715734, "punctuated_word": "America."}], "paragraphs": {"transcript": "\nWe, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "paragraphs": [{"sentences": [{"text": "We, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "start": 0.32, "end": 18.592875}], "start": 0.32, "end": 18.592875, "num_words": 52}]}}]}], "summary": {"result": "success", "short": "Speaker 1 discusses the goal of establishing a more perfect union, 
justice, and the common defense for the United States of America, in order to secure the blessings of liberty and establish the constitution for the country."}}} \ No newline at end of file diff --git a/tests/response_data/listen/rest/c4e1c0031174878d8f0e3dbd87916ee16d56f1c610ac525af5712ea37226a455-29e7c8100617f70da4ae9da1921cb5071a01219f4780ca70930b0a370ed2163a-response.json b/tests/response_data/listen/rest/c4e1c0031174878d8f0e3dbd87916ee16d56f1c610ac525af5712ea37226a455-29e7c8100617f70da4ae9da1921cb5071a01219f4780ca70930b0a370ed2163a-response.json index 4a0ad0b9..55c1327f 100644 --- a/tests/response_data/listen/rest/c4e1c0031174878d8f0e3dbd87916ee16d56f1c610ac525af5712ea37226a455-29e7c8100617f70da4ae9da1921cb5071a01219f4780ca70930b0a370ed2163a-response.json +++ b/tests/response_data/listen/rest/c4e1c0031174878d8f0e3dbd87916ee16d56f1c610ac525af5712ea37226a455-29e7c8100617f70da4ae9da1921cb5071a01219f4780ca70930b0a370ed2163a-response.json @@ -1 +1 @@ -{"metadata": {"transaction_key": "deprecated", "request_id": "b9c2dec0-9239-4d80-8a53-75bb357a05b1", "sha256": "5324da68ede209a16ac69a38e8cd29cee4d754434a041166cda3a1f5e0b24566", "created": "2025-07-22T16:56:41.450Z", "duration": 17.566313, "channels": 1, "models": ["3b3aabe4-608a-46ac-9585-7960a25daf1a"], "model_info": {"3b3aabe4-608a-46ac-9585-7960a25daf1a": {"name": "general-nova-3", "version": "2024-12-20.0", "arch": "nova-3"}}}, "results": {"channels": [{"alternatives": [{"transcript": "Yep. I said it before, and I'll say it again. Life moves pretty fast. You don't stop and look around once in a while, you could miss it.", "confidence": 0.9991429, "words": [{"word": "yep", "start": 5.52, "end": 6.2400002, "confidence": 0.92344034, "punctuated_word": "Yep."}, {"word": "i", "start": 6.96, "end": 7.2799997, "confidence": 0.5774878, "punctuated_word": "I"}, {"word": "said", "start": 7.2799997, "end": 7.52, "confidence": 0.90520746, "punctuated_word": "said"}, {"word": "it", "start": 7.52, "end": 7.68, "confidence": 0.9979729, "punctuated_word": "it"}, {"word": "before", "start": 7.68, "end": 8.08, "confidence": 0.89339864, "punctuated_word": "before,"}, {"word": "and", "start": 8.08, "end": 8.16, "confidence": 0.99981827, "punctuated_word": "and"}, {"word": "i'll", "start": 8.16, "end": 8.4, "confidence": 0.99961716, "punctuated_word": "I'll"}, {"word": "say", "start": 8.4, "end": 8.48, "confidence": 0.99941754, "punctuated_word": "say"}, {"word": "it", "start": 8.48, "end": 8.639999, "confidence": 0.999597, "punctuated_word": "it"}, {"word": "again", "start": 8.639999, "end": 8.96, "confidence": 0.95282805, "punctuated_word": "again."}, {"word": "life", "start": 10.071313, "end": 10.311313, "confidence": 0.9990012, "punctuated_word": "Life"}, {"word": "moves", "start": 10.311313, "end": 10.631312, "confidence": 0.9996643, "punctuated_word": "moves"}, {"word": "pretty", "start": 10.631312, "end": 11.031313, "confidence": 0.99988604, "punctuated_word": "pretty"}, {"word": "fast", "start": 11.031313, "end": 11.671312, "confidence": 0.9989685, "punctuated_word": "fast."}, {"word": "you", "start": 12.071312, "end": 12.311313, "confidence": 0.92013574, "punctuated_word": "You"}, {"word": "don't", "start": 12.311313, "end": 12.551312, "confidence": 0.99986017, "punctuated_word": "don't"}, {"word": "stop", "start": 12.551312, "end": 12.791312, "confidence": 0.99976414, "punctuated_word": "stop"}, {"word": "and", "start": 12.791312, "end": 12.951312, "confidence": 0.99852234, "punctuated_word": "and"}, {"word": "look", "start": 12.951312, "end": 
13.111313, "confidence": 0.9998677, "punctuated_word": "look"}, {"word": "around", "start": 13.111313, "end": 13.351313, "confidence": 0.9998548, "punctuated_word": "around"}, {"word": "once", "start": 13.351313, "end": 13.671312, "confidence": 0.9991429, "punctuated_word": "once"}, {"word": "in", "start": 13.671312, "end": 13.831312, "confidence": 0.9976285, "punctuated_word": "in"}, {"word": "a", "start": 13.831312, "end": 13.911312, "confidence": 0.98508644, "punctuated_word": "a"}, {"word": "while", "start": 13.911312, "end": 14.391312, "confidence": 0.9349544, "punctuated_word": "while,"}, {"word": "you", "start": 14.711312, "end": 14.871312, "confidence": 0.99921596, "punctuated_word": "you"}, {"word": "could", "start": 14.871312, "end": 15.031313, "confidence": 0.99974436, "punctuated_word": "could"}, {"word": "miss", "start": 15.031313, "end": 15.271313, "confidence": 0.9997111, "punctuated_word": "miss"}, {"word": "it", "start": 15.271313, "end": 15.5113125, "confidence": 0.99891466, "punctuated_word": "it."}], "paragraphs": {"transcript": "\nYep. I said it before, and I'll say it again. Life moves pretty fast. You don't stop and look around once in a while, you could miss it.", "paragraphs": [{"sentences": [{"text": "Yep.", "start": 5.52, "end": 6.2400002}, {"text": "I said it before, and I'll say it again.", "start": 6.96, "end": 8.96}, {"text": "Life moves pretty fast.", "start": 10.071313, "end": 11.671312}, {"text": "You don't stop and look around once in a while, you could miss it.", "start": 12.071312, "end": 15.5113125}], "start": 5.52, "end": 15.5113125, "num_words": 28}]}}]}]}} \ No newline at end of file +{"metadata": {"transaction_key": "deprecated", "request_id": "821b28df-db1d-4125-b5f3-5312160e8dc7", "sha256": "5324da68ede209a16ac69a38e8cd29cee4d754434a041166cda3a1f5e0b24566", "created": "2025-08-11T21:38:54.604Z", "duration": 17.566313, "channels": 1, "models": ["2187e11a-3532-4498-b076-81fa530bdd49"], "model_info": {"2187e11a-3532-4498-b076-81fa530bdd49": {"name": "general-nova-3", "version": "2025-07-31.0", "arch": "nova-3"}}}, "results": {"channels": [{"alternatives": [{"transcript": "Yep. I said it before, and I'll say it again. Life moves pretty fast. 
You don't stop and look around once in a while, you could miss it.", "confidence": 0.9991429, "words": [{"word": "yep", "start": 5.52, "end": 6.2400002, "confidence": 0.92345256, "punctuated_word": "Yep."}, {"word": "i", "start": 6.96, "end": 7.2799997, "confidence": 0.5776232, "punctuated_word": "I"}, {"word": "said", "start": 7.2799997, "end": 7.52, "confidence": 0.9052255, "punctuated_word": "said"}, {"word": "it", "start": 7.52, "end": 7.68, "confidence": 0.9979741, "punctuated_word": "it"}, {"word": "before", "start": 7.68, "end": 8.08, "confidence": 0.89339817, "punctuated_word": "before,"}, {"word": "and", "start": 8.08, "end": 8.16, "confidence": 0.99981827, "punctuated_word": "and"}, {"word": "i'll", "start": 8.16, "end": 8.4, "confidence": 0.99961734, "punctuated_word": "I'll"}, {"word": "say", "start": 8.4, "end": 8.48, "confidence": 0.99941754, "punctuated_word": "say"}, {"word": "it", "start": 8.48, "end": 8.639999, "confidence": 0.99959713, "punctuated_word": "it"}, {"word": "again", "start": 8.639999, "end": 8.96, "confidence": 0.95283747, "punctuated_word": "again."}, {"word": "life", "start": 10.071313, "end": 10.311313, "confidence": 0.9990012, "punctuated_word": "Life"}, {"word": "moves", "start": 10.311313, "end": 10.631312, "confidence": 0.9996643, "punctuated_word": "moves"}, {"word": "pretty", "start": 10.631312, "end": 11.031313, "confidence": 0.99988604, "punctuated_word": "pretty"}, {"word": "fast", "start": 11.031313, "end": 11.671312, "confidence": 0.99896836, "punctuated_word": "fast."}, {"word": "you", "start": 12.071312, "end": 12.311313, "confidence": 0.9201446, "punctuated_word": "You"}, {"word": "don't", "start": 12.311313, "end": 12.551312, "confidence": 0.99986017, "punctuated_word": "don't"}, {"word": "stop", "start": 12.551312, "end": 12.791312, "confidence": 0.99976414, "punctuated_word": "stop"}, {"word": "and", "start": 12.791312, "end": 12.951312, "confidence": 0.998522, "punctuated_word": "and"}, {"word": "look", "start": 12.951312, "end": 13.111313, "confidence": 0.9998677, "punctuated_word": "look"}, {"word": "around", "start": 13.111313, "end": 13.351313, "confidence": 0.9998548, "punctuated_word": "around"}, {"word": "once", "start": 13.351313, "end": 13.671312, "confidence": 0.9991429, "punctuated_word": "once"}, {"word": "in", "start": 13.671312, "end": 13.831312, "confidence": 0.9976286, "punctuated_word": "in"}, {"word": "a", "start": 13.831312, "end": 13.911312, "confidence": 0.9850873, "punctuated_word": "a"}, {"word": "while", "start": 13.911312, "end": 14.391312, "confidence": 0.9349425, "punctuated_word": "while,"}, {"word": "you", "start": 14.711312, "end": 14.871312, "confidence": 0.99921596, "punctuated_word": "you"}, {"word": "could", "start": 14.871312, "end": 15.031313, "confidence": 0.99974436, "punctuated_word": "could"}, {"word": "miss", "start": 15.031313, "end": 15.271313, "confidence": 0.9997111, "punctuated_word": "miss"}, {"word": "it", "start": 15.271313, "end": 15.5113125, "confidence": 0.9989148, "punctuated_word": "it."}], "paragraphs": {"transcript": "\nYep. I said it before, and I'll say it again. Life moves pretty fast. 
You don't stop and look around once in a while, you could miss it.", "paragraphs": [{"sentences": [{"text": "Yep.", "start": 5.52, "end": 6.2400002}, {"text": "I said it before, and I'll say it again.", "start": 6.96, "end": 8.96}, {"text": "Life moves pretty fast.", "start": 10.071313, "end": 11.671312}, {"text": "You don't stop and look around once in a while, you could miss it.", "start": 12.071312, "end": 15.5113125}], "start": 5.52, "end": 15.5113125, "num_words": 28}]}}]}]}} \ No newline at end of file diff --git a/tests/response_data/listen/rest/c4e1c0031174878d8f0e3dbd87916ee16d56f1c610ac525af5712ea37226a455-a17f4880c5b4cf124ac54d06d77c9f0ab7f3fe1052ff1c7b090f7eaf8ede5b76-response.json b/tests/response_data/listen/rest/c4e1c0031174878d8f0e3dbd87916ee16d56f1c610ac525af5712ea37226a455-a17f4880c5b4cf124ac54d06d77c9f0ab7f3fe1052ff1c7b090f7eaf8ede5b76-response.json index 4fa6796c..f323ef24 100644 --- a/tests/response_data/listen/rest/c4e1c0031174878d8f0e3dbd87916ee16d56f1c610ac525af5712ea37226a455-a17f4880c5b4cf124ac54d06d77c9f0ab7f3fe1052ff1c7b090f7eaf8ede5b76-response.json +++ b/tests/response_data/listen/rest/c4e1c0031174878d8f0e3dbd87916ee16d56f1c610ac525af5712ea37226a455-a17f4880c5b4cf124ac54d06d77c9f0ab7f3fe1052ff1c7b090f7eaf8ede5b76-response.json @@ -1 +1 @@ -{"metadata": {"transaction_key": "deprecated", "request_id": "e33264a5-a72f-486f-99a1-85c630fa0191", "sha256": "95dc40091b6a8456a1554ddfc4f163768217afd66bee70a10c74bb52805cd0d9", "created": "2025-07-22T16:56:37.496Z", "duration": 19.097937, "channels": 1, "models": ["3b3aabe4-608a-46ac-9585-7960a25daf1a"], "model_info": {"3b3aabe4-608a-46ac-9585-7960a25daf1a": {"name": "general-nova-3", "version": "2024-12-20.0", "arch": "nova-3"}}}, "results": {"channels": [{"alternatives": [{"transcript": "We, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "confidence": 0.9978794, "words": [{"word": "we", "start": 0.32, "end": 0.79999995, "confidence": 0.8624085, "punctuated_word": "We,"}, {"word": "the", "start": 0.79999995, "end": 0.96, "confidence": 0.99879944, "punctuated_word": "the"}, {"word": "people", "start": 0.96, "end": 1.1999999, "confidence": 0.9702684, "punctuated_word": "people"}, {"word": "of", "start": 1.1999999, "end": 1.4399999, "confidence": 0.9261229, "punctuated_word": "of"}, {"word": "the", "start": 1.4399999, "end": 1.5999999, "confidence": 0.9968953, "punctuated_word": "The"}, {"word": "united", "start": 1.5999999, "end": 1.92, "confidence": 0.99693906, "punctuated_word": "United"}, {"word": "states", "start": 1.92, "end": 2.56, "confidence": 0.98952234, "punctuated_word": "States,"}, {"word": "in", "start": 2.56, "end": 2.72, "confidence": 0.9984249, "punctuated_word": "in"}, {"word": "order", "start": 2.72, "end": 2.96, "confidence": 0.9999379, "punctuated_word": "order"}, {"word": "to", "start": 2.96, "end": 3.12, "confidence": 0.9960312, "punctuated_word": "to"}, {"word": "form", "start": 3.12, "end": 3.28, "confidence": 0.9993011, "punctuated_word": "form"}, {"word": "a", "start": 3.28, "end": 3.4399998, "confidence": 0.99919444, "punctuated_word": "a"}, {"word": "more", "start": 3.4399998, "end": 3.6799998, "confidence": 0.99967253, "punctuated_word": "more"}, {"word": "perfect", "start": 3.6799998, "end": 3.9199998, "confidence": 
0.9996803, "punctuated_word": "perfect"}, {"word": "union", "start": 3.9199998, "end": 4.56, "confidence": 0.96659064, "punctuated_word": "union,"}, {"word": "establish", "start": 4.72, "end": 5.2, "confidence": 0.9779895, "punctuated_word": "establish"}, {"word": "justice", "start": 5.2, "end": 6.08, "confidence": 0.99622524, "punctuated_word": "justice,"}, {"word": "ensure", "start": 6.08, "end": 6.3999996, "confidence": 0.96898466, "punctuated_word": "ensure"}, {"word": "domestic", "start": 6.3999996, "end": 6.8799996, "confidence": 0.9797062, "punctuated_word": "domestic"}, {"word": "tranquility", "start": 6.8799996, "end": 7.52, "confidence": 0.99495554, "punctuated_word": "tranquility,"}, {"word": "provide", "start": 7.792875, "end": 8.352875, "confidence": 0.9995815, "punctuated_word": "provide"}, {"word": "for", "start": 8.352875, "end": 8.512875, "confidence": 0.9997501, "punctuated_word": "for"}, {"word": "the", "start": 8.512875, "end": 8.672874, "confidence": 0.9986143, "punctuated_word": "the"}, {"word": "common", "start": 8.672874, "end": 8.912875, "confidence": 0.99946636, "punctuated_word": "common"}, {"word": "defense", "start": 8.912875, "end": 9.6328745, "confidence": 0.9903844, "punctuated_word": "defense,"}, {"word": "promote", "start": 9.6328745, "end": 9.952875, "confidence": 0.9923873, "punctuated_word": "promote"}, {"word": "the", "start": 9.952875, "end": 10.192875, "confidence": 0.99456656, "punctuated_word": "the"}, {"word": "general", "start": 10.192875, "end": 10.512875, "confidence": 0.99963284, "punctuated_word": "general"}, {"word": "welfare", "start": 10.512875, "end": 11.152875, "confidence": 0.97356033, "punctuated_word": "welfare,"}, {"word": "and", "start": 11.152875, "end": 11.232875, "confidence": 0.99971634, "punctuated_word": "and"}, {"word": "secure", "start": 11.232875, "end": 11.552875, "confidence": 0.99946445, "punctuated_word": "secure"}, {"word": "the", "start": 11.552875, "end": 11.792875, "confidence": 0.99948335, "punctuated_word": "the"}, {"word": "blessings", "start": 11.792875, "end": 12.112875, "confidence": 0.9976579, "punctuated_word": "blessings"}, {"word": "of", "start": 12.112875, "end": 12.272875, "confidence": 0.99962795, "punctuated_word": "of"}, {"word": "liberty", "start": 12.272875, "end": 12.672874, "confidence": 0.996944, "punctuated_word": "liberty"}, {"word": "to", "start": 12.672874, "end": 12.912874, "confidence": 0.99080896, "punctuated_word": "to"}, {"word": "ourselves", "start": 12.912874, "end": 13.312875, "confidence": 0.9987331, "punctuated_word": "ourselves"}, {"word": "and", "start": 13.312875, "end": 13.552875, "confidence": 0.8811709, "punctuated_word": "and"}, {"word": "our", "start": 13.552875, "end": 13.712875, "confidence": 0.9974247, "punctuated_word": "our"}, {"word": "posterity", "start": 13.712875, "end": 14.592875, "confidence": 0.99179626, "punctuated_word": "posterity"}, {"word": "to", "start": 14.592875, "end": 14.832874, "confidence": 0.6069034, "punctuated_word": "to"}, {"word": "ordain", "start": 14.832874, "end": 15.312875, "confidence": 0.99867016, "punctuated_word": "ordain"}, {"word": "and", "start": 15.312875, "end": 15.472875, "confidence": 0.9986406, "punctuated_word": "and"}, {"word": "establish", "start": 15.472875, "end": 15.952875, "confidence": 0.99800986, "punctuated_word": "establish"}, {"word": "this", "start": 15.952875, "end": 16.272875, "confidence": 0.9990182, "punctuated_word": "this"}, {"word": "constitution", "start": 16.272875, "end": 16.912874, "confidence": 0.9666705, 
"punctuated_word": "constitution"}, {"word": "for", "start": 16.912874, "end": 17.152874, "confidence": 0.9986424, "punctuated_word": "for"}, {"word": "the", "start": 17.152874, "end": 17.312874, "confidence": 0.99821657, "punctuated_word": "The"}, {"word": "united", "start": 17.312874, "end": 17.632875, "confidence": 0.9978794, "punctuated_word": "United"}, {"word": "states", "start": 17.632875, "end": 17.952875, "confidence": 0.99960905, "punctuated_word": "States"}, {"word": "of", "start": 17.952875, "end": 18.192875, "confidence": 0.99967766, "punctuated_word": "Of"}, {"word": "america", "start": 18.192875, "end": 18.592875, "confidence": 0.9972925, "punctuated_word": "America."}], "paragraphs": {"transcript": "\nWe, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "paragraphs": [{"sentences": [{"text": "We, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "start": 0.32, "end": 18.592875}], "start": 0.32, "end": 18.592875, "num_words": 52}]}}]}]}} \ No newline at end of file +{"metadata": {"transaction_key": "deprecated", "request_id": "5342f70a-cfdd-46fc-ada5-ce2758adc6bb", "sha256": "95dc40091b6a8456a1554ddfc4f163768217afd66bee70a10c74bb52805cd0d9", "created": "2025-08-11T21:38:53.244Z", "duration": 19.097937, "channels": 1, "models": ["2187e11a-3532-4498-b076-81fa530bdd49"], "model_info": {"2187e11a-3532-4498-b076-81fa530bdd49": {"name": "general-nova-3", "version": "2025-07-31.0", "arch": "nova-3"}}}, "results": {"channels": [{"alternatives": [{"transcript": "We, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "confidence": 0.9977379, "words": [{"word": "we", "start": 0.32, "end": 0.79999995, "confidence": 0.8624594, "punctuated_word": "We,"}, {"word": "the", "start": 0.79999995, "end": 0.96, "confidence": 0.9988009, "punctuated_word": "the"}, {"word": "people", "start": 0.96, "end": 1.1999999, "confidence": 0.9702921, "punctuated_word": "people"}, {"word": "of", "start": 1.1999999, "end": 1.4399999, "confidence": 0.92611325, "punctuated_word": "of"}, {"word": "the", "start": 1.4399999, "end": 1.5999999, "confidence": 0.99689424, "punctuated_word": "The"}, {"word": "united", "start": 1.5999999, "end": 1.92, "confidence": 0.99693954, "punctuated_word": "United"}, {"word": "states", "start": 1.92, "end": 2.56, "confidence": 0.98952484, "punctuated_word": "States,"}, {"word": "in", "start": 2.56, "end": 2.72, "confidence": 0.99842346, "punctuated_word": "in"}, {"word": "order", "start": 2.72, "end": 2.96, "confidence": 0.9999378, "punctuated_word": "order"}, {"word": "to", "start": 2.96, "end": 3.12, "confidence": 0.9960312, "punctuated_word": "to"}, {"word": "form", "start": 3.12, "end": 3.28, "confidence": 0.99930143, "punctuated_word": "form"}, {"word": "a", 
"start": 3.28, "end": 3.4399998, "confidence": 0.9991948, "punctuated_word": "a"}, {"word": "more", "start": 3.4399998, "end": 3.6799998, "confidence": 0.99967265, "punctuated_word": "more"}, {"word": "perfect", "start": 3.6799998, "end": 3.9199998, "confidence": 0.9996804, "punctuated_word": "perfect"}, {"word": "union", "start": 3.9199998, "end": 4.56, "confidence": 0.96661377, "punctuated_word": "union,"}, {"word": "establish", "start": 4.72, "end": 5.2, "confidence": 0.9780056, "punctuated_word": "establish"}, {"word": "justice", "start": 5.2, "end": 6.08, "confidence": 0.99622726, "punctuated_word": "justice,"}, {"word": "ensure", "start": 6.08, "end": 6.3999996, "confidence": 0.9690141, "punctuated_word": "ensure"}, {"word": "domestic", "start": 6.3999996, "end": 6.8799996, "confidence": 0.97970927, "punctuated_word": "domestic"}, {"word": "tranquility", "start": 6.8799996, "end": 7.52, "confidence": 0.9949531, "punctuated_word": "tranquility,"}, {"word": "provide", "start": 7.792875, "end": 8.352875, "confidence": 0.99955326, "punctuated_word": "provide"}, {"word": "for", "start": 8.352875, "end": 8.512875, "confidence": 0.99970573, "punctuated_word": "for"}, {"word": "the", "start": 8.512875, "end": 8.672874, "confidence": 0.9984457, "punctuated_word": "the"}, {"word": "common", "start": 8.672874, "end": 8.912875, "confidence": 0.9994067, "punctuated_word": "common"}, {"word": "defense", "start": 8.912875, "end": 9.6328745, "confidence": 0.989704, "punctuated_word": "defense,"}, {"word": "promote", "start": 9.6328745, "end": 9.952875, "confidence": 0.9921375, "punctuated_word": "promote"}, {"word": "the", "start": 9.952875, "end": 10.192875, "confidence": 0.9944133, "punctuated_word": "the"}, {"word": "general", "start": 10.192875, "end": 10.512875, "confidence": 0.9995796, "punctuated_word": "general"}, {"word": "welfare", "start": 10.512875, "end": 11.152875, "confidence": 0.9714061, "punctuated_word": "welfare,"}, {"word": "and", "start": 11.152875, "end": 11.232875, "confidence": 0.999673, "punctuated_word": "and"}, {"word": "secure", "start": 11.232875, "end": 11.552875, "confidence": 0.9994294, "punctuated_word": "secure"}, {"word": "the", "start": 11.552875, "end": 11.792875, "confidence": 0.99942917, "punctuated_word": "the"}, {"word": "blessings", "start": 11.792875, "end": 12.112875, "confidence": 0.9974213, "punctuated_word": "blessings"}, {"word": "of", "start": 12.112875, "end": 12.272875, "confidence": 0.99958605, "punctuated_word": "of"}, {"word": "liberty", "start": 12.272875, "end": 12.672874, "confidence": 0.996736, "punctuated_word": "liberty"}, {"word": "to", "start": 12.672874, "end": 12.912874, "confidence": 0.99031293, "punctuated_word": "to"}, {"word": "ourselves", "start": 12.912874, "end": 13.312875, "confidence": 0.99862087, "punctuated_word": "ourselves"}, {"word": "and", "start": 13.312875, "end": 13.552875, "confidence": 0.87775034, "punctuated_word": "and"}, {"word": "our", "start": 13.552875, "end": 13.712875, "confidence": 0.997166, "punctuated_word": "our"}, {"word": "posterity", "start": 13.712875, "end": 14.592875, "confidence": 0.9914988, "punctuated_word": "posterity"}, {"word": "to", "start": 14.592875, "end": 14.832874, "confidence": 0.6025369, "punctuated_word": "to"}, {"word": "ordain", "start": 14.832874, "end": 15.312875, "confidence": 0.99850905, "punctuated_word": "ordain"}, {"word": "and", "start": 15.312875, "end": 15.472875, "confidence": 0.9984875, "punctuated_word": "and"}, {"word": "establish", "start": 15.472875, "end": 
15.952875, "confidence": 0.99775887, "punctuated_word": "establish"}, {"word": "this", "start": 15.952875, "end": 16.272875, "confidence": 0.99880767, "punctuated_word": "this"}, {"word": "constitution", "start": 16.272875, "end": 16.912874, "confidence": 0.9585388, "punctuated_word": "constitution"}, {"word": "for", "start": 16.912874, "end": 17.152874, "confidence": 0.99841416, "punctuated_word": "for"}, {"word": "the", "start": 17.152874, "end": 17.312874, "confidence": 0.998071, "punctuated_word": "The"}, {"word": "united", "start": 17.312874, "end": 17.632875, "confidence": 0.9977379, "punctuated_word": "United"}, {"word": "states", "start": 17.632875, "end": 17.952875, "confidence": 0.999585, "punctuated_word": "States"}, {"word": "of", "start": 17.952875, "end": 18.192875, "confidence": 0.99960726, "punctuated_word": "Of"}, {"word": "america", "start": 18.192875, "end": 18.592875, "confidence": 0.99715734, "punctuated_word": "America."}], "paragraphs": {"transcript": "\nWe, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "paragraphs": [{"sentences": [{"text": "We, the people of The United States, in order to form a more perfect union, establish justice, ensure domestic tranquility, provide for the common defense, promote the general welfare, and secure the blessings of liberty to ourselves and our posterity to ordain and establish this constitution for The United States Of America.", "start": 0.32, "end": 18.592875}], "start": 0.32, "end": 18.592875, "num_words": 52}]}}]}]}} \ No newline at end of file diff --git a/tests/response_data/listen/websocket/ed5bfd217988aa8cad492f63f79dc59f5f02fb9b85befe6f6ce404b8f19aaa0d-42fc5ed98cabc1fa1a2f276301c27c46dd15f6f5187cd93d944cc94fa81c8469-response.json b/tests/response_data/listen/websocket/ed5bfd217988aa8cad492f63f79dc59f5f02fb9b85befe6f6ce404b8f19aaa0d-42fc5ed98cabc1fa1a2f276301c27c46dd15f6f5187cd93d944cc94fa81c8469-response.json index fdd85761..66fd8288 100644 --- a/tests/response_data/listen/websocket/ed5bfd217988aa8cad492f63f79dc59f5f02fb9b85befe6f6ce404b8f19aaa0d-42fc5ed98cabc1fa1a2f276301c27c46dd15f6f5187cd93d944cc94fa81c8469-response.json +++ b/tests/response_data/listen/websocket/ed5bfd217988aa8cad492f63f79dc59f5f02fb9b85befe6f6ce404b8f19aaa0d-42fc5ed98cabc1fa1a2f276301c27c46dd15f6f5187cd93d944cc94fa81c8469-response.json @@ -1 +1 @@ -{"channel": {"alternatives": [{"transcript": "Ensure domestic tranquility.", "confidence": 0.9897461, "words": [{"word": "ensure", "start": 6.251818, "end": 6.6427274, "confidence": 0.9897461, "punctuated_word": "Ensure"}, {"word": "domestic", "start": 6.6427274, "end": 7.1427274, "confidence": 0.99658203, "punctuated_word": "domestic"}, {"word": "tranquility", "start": 7.19, "end": 7.4245453, "confidence": 0.9248047, "punctuated_word": "tranquility."}]}]}, "metadata": {"model_info": {"name": "general", "version": "2024-01-26.8851", "arch": "base"}, "request_id": "4b21fb93-0ece-46c8-a7c8-05709d2119dc", "model_uuid": "1ed36bac-f71c-4f3f-a31f-02fd6525c489"}, "type": "Results", "channel_index": [0, 1], "duration": 1.73, "start": 5.9, "is_final": true, "from_finalize": false, "speech_final": true} \ No newline at end of file +{"channel": {"alternatives": [{"transcript": "Ensure domestic tranquility.", "confidence": 0.9897461, "words": 
[{"word": "ensure", "start": 6.251818, "end": 6.6427274, "confidence": 0.9897461, "punctuated_word": "Ensure"}, {"word": "domestic", "start": 6.6427274, "end": 7.1427274, "confidence": 0.99658203, "punctuated_word": "domestic"}, {"word": "tranquility", "start": 7.19, "end": 7.4245453, "confidence": 0.9248047, "punctuated_word": "tranquility."}]}]}, "metadata": {"model_info": {"name": "general", "version": "2024-01-26.8851", "arch": "base"}, "request_id": "6f7bea3e-d496-4b99-b5f8-e929cd931ed5", "model_uuid": "1ed36bac-f71c-4f3f-a31f-02fd6525c489"}, "type": "Results", "channel_index": [0, 1], "duration": 1.73, "start": 5.9, "is_final": true, "from_finalize": false, "speech_final": true} \ No newline at end of file diff --git a/tests/response_data/listen/websocket/ed5bfd217988aa8cad492f63f79dc59f5f02fb9b85befe6f6ce404b8f19aaa0d-d7334c26cf6468c191e05ff5e8151da9b67985c66ab177e9446fd14bbafd70df-response.json b/tests/response_data/listen/websocket/ed5bfd217988aa8cad492f63f79dc59f5f02fb9b85befe6f6ce404b8f19aaa0d-d7334c26cf6468c191e05ff5e8151da9b67985c66ab177e9446fd14bbafd70df-response.json index f6e1682d..278fe648 100644 --- a/tests/response_data/listen/websocket/ed5bfd217988aa8cad492f63f79dc59f5f02fb9b85befe6f6ce404b8f19aaa0d-d7334c26cf6468c191e05ff5e8151da9b67985c66ab177e9446fd14bbafd70df-response.json +++ b/tests/response_data/listen/websocket/ed5bfd217988aa8cad492f63f79dc59f5f02fb9b85befe6f6ce404b8f19aaa0d-d7334c26cf6468c191e05ff5e8151da9b67985c66ab177e9446fd14bbafd70df-response.json @@ -1 +1 @@ -{"channel": {"alternatives": [{"transcript": "", "confidence": 0.0, "words": []}]}, "metadata": {"model_info": {"name": "general", "version": "2024-01-26.8851", "arch": "base"}, "request_id": "699ace2f-5c81-4cf7-ae95-81e39557d10f", "model_uuid": "1ed36bac-f71c-4f3f-a31f-02fd6525c489"}, "type": "Results", "channel_index": [0, 1], "duration": 0.74, "start": 0.0, "is_final": true, "from_finalize": false, "speech_final": true} \ No newline at end of file +{"channel": {"alternatives": [{"transcript": "", "confidence": 0.0, "words": []}]}, "metadata": {"model_info": {"name": "general", "version": "2024-01-26.8851", "arch": "base"}, "request_id": "ef1cb0b7-51bd-4fe0-806d-f16d2e6cab23", "model_uuid": "1ed36bac-f71c-4f3f-a31f-02fd6525c489"}, "type": "Results", "channel_index": [0, 1], "duration": 0.74, "start": 0.0, "is_final": true, "from_finalize": false, "speech_final": true} \ No newline at end of file diff --git a/tests/response_data/read/rest/3917a1c81c08e360c0d4bba0ff9ebd645e610e4149483e5f2888a2c5df388b37-23e873efdfd4d680286fda14ff8f10864218311e79efc92ecc82bce3e574c366-response.json b/tests/response_data/read/rest/3917a1c81c08e360c0d4bba0ff9ebd645e610e4149483e5f2888a2c5df388b37-23e873efdfd4d680286fda14ff8f10864218311e79efc92ecc82bce3e574c366-response.json index bb4c9501..01bfbaef 100644 --- a/tests/response_data/read/rest/3917a1c81c08e360c0d4bba0ff9ebd645e610e4149483e5f2888a2c5df388b37-23e873efdfd4d680286fda14ff8f10864218311e79efc92ecc82bce3e574c366-response.json +++ b/tests/response_data/read/rest/3917a1c81c08e360c0d4bba0ff9ebd645e610e4149483e5f2888a2c5df388b37-23e873efdfd4d680286fda14ff8f10864218311e79efc92ecc82bce3e574c366-response.json @@ -1 +1 @@ -{"metadata": {"request_id": "a7aac20e-ea39-4640-8c7b-0ece96170a33", "created": "2025-07-22T16:56:59.351Z", "language": "en", "summary_info": {"model_uuid": "67875a7f-c9c4-48a0-aa55-5bdb8a91c34a", "input_tokens": 1855, "output_tokens": 146}}, "results": {"summary": {"text": "The potential for voice-based interfaces in conversational AI applications is 
discussed, with a focus on voice-premises and wearable devices. The success of voice-first experiences and tools, including DeepgramQuad, is highlighted, with a focus on improving customer outcomes and speed and efficiency for everyday exchanges. The speakers emphasize the benefits of voice quality, including natural speech flow, and the potential for AI agents to be more human than humans in speech recognition. They also mention their involvement in machine learning and their plans to expand their waitlist for a speech-to-text model. They expect to release generally early next year, but if working on any real-time AI agent use cases, they can join their waitlist to jumpstart their development in production."}}} \ No newline at end of file +{"metadata": {"request_id": "b405a4ee-aafd-4bab-abf4-838df87ae10d", "created": "2025-08-11T21:39:06.772Z", "language": "en", "summary_info": {"model_uuid": "67875a7f-c9c4-48a0-aa55-5bdb8a91c34a", "input_tokens": 1855, "output_tokens": 146}}, "results": {"summary": {"text": "The potential for voice-based interfaces in conversational AI applications is discussed, with a focus on voice-premises and wearable devices. The success of voice-first experiences and tools, including DeepgramQuad, is highlighted, with a focus on improving customer outcomes and speed and efficiency for everyday exchanges. The speakers emphasize the benefits of voice quality, including natural speech flow, and the potential for AI agents to be more human than humans in speech recognition. They also mention their involvement in machine learning and their plans to expand their waitlist for a speech-to-text model. They expect to release generally early next year, but if working on any real-time AI agent use cases, they can join their waitlist to jumpstart their development in production."}}} \ No newline at end of file diff --git a/tests/response_data/speak/rest/1fe0ad339338a9d6cffbab2c7ace41ba5387b5fe7906854795702dce91034fd3-f8c3bf62a9aa3e6fc1619c250e48abe7519373d3edf41be62eb5dc45199af2ef-response.json b/tests/response_data/speak/rest/1fe0ad339338a9d6cffbab2c7ace41ba5387b5fe7906854795702dce91034fd3-f8c3bf62a9aa3e6fc1619c250e48abe7519373d3edf41be62eb5dc45199af2ef-response.json index fbbe55a5..2c736e09 100644 --- a/tests/response_data/speak/rest/1fe0ad339338a9d6cffbab2c7ace41ba5387b5fe7906854795702dce91034fd3-f8c3bf62a9aa3e6fc1619c250e48abe7519373d3edf41be62eb5dc45199af2ef-response.json +++ b/tests/response_data/speak/rest/1fe0ad339338a9d6cffbab2c7ace41ba5387b5fe7906854795702dce91034fd3-f8c3bf62a9aa3e6fc1619c250e48abe7519373d3edf41be62eb5dc45199af2ef-response.json @@ -1 +1 @@ -{"content_type": "audio/wav", "request_id": "5f42ff57-e0eb-4a50-8cc1-adf827146733", "model_uuid": "0bb159e1-5c0a-48fb-aa29-ed7c0401f116", "model_name": "aura-2-thalia-en", "characters": 13, "transfer_encoding": "chunked", "date": "Tue, 22 Jul 2025 16:56:59 GMT"} \ No newline at end of file +{"content_type": "audio/wav", "request_id": "6cd40c1b-e643-45f9-8596-5585b3e05a07", "model_uuid": "0bb159e1-5c0a-48fb-aa29-ed7c0401f116", "model_name": "aura-2-thalia-en", "characters": 13, "transfer_encoding": "chunked", "date": "Mon, 11 Aug 2025 21:39:06 GMT"} \ No newline at end of file diff --git a/tests/response_data/speak/rest/1fe0ad339338a9d6cffbab2c7ace41ba5387b5fe7906854795702dce91034fd3-f8c3bf62a9aa3e6fc1619c250e48abe7519373d3edf41be62eb5dc45199af2ef.wav b/tests/response_data/speak/rest/1fe0ad339338a9d6cffbab2c7ace41ba5387b5fe7906854795702dce91034fd3-f8c3bf62a9aa3e6fc1619c250e48abe7519373d3edf41be62eb5dc45199af2ef.wav 
index 9ef9d31f..8dcf5954 100644
Binary files a/tests/response_data/speak/rest/1fe0ad339338a9d6cffbab2c7ace41ba5387b5fe7906854795702dce91034fd3-f8c3bf62a9aa3e6fc1619c250e48abe7519373d3edf41be62eb5dc45199af2ef.wav and b/tests/response_data/speak/rest/1fe0ad339338a9d6cffbab2c7ace41ba5387b5fe7906854795702dce91034fd3-f8c3bf62a9aa3e6fc1619c250e48abe7519373d3edf41be62eb5dc45199af2ef.wav differ
diff --git a/tests/unit_test/test_unit_agent_history_context.py b/tests/unit_test/test_unit_agent_history_context.py
new file mode 100644
index 00000000..572305c8
--- /dev/null
+++ b/tests/unit_test/test_unit_agent_history_context.py
@@ -0,0 +1,814 @@
+# Copyright 2024 Deepgram SDK contributors. All Rights Reserved.
+# Use of this source code is governed by a MIT license that can be found in the LICENSE file.
+# SPDX-License-Identifier: MIT
+
+import json
+import pytest
+from deepgram.clients.agent.v1.websocket.options import (
+    SettingsOptions,
+    Agent,
+    Flags,
+    Context,
+    HistoryConversationMessage,
+    HistoryFunctionCallsMessage,
+    FunctionCallHistory,
+)
+
+
+class TestFlags:
+    """Unit tests for Flags class"""
+
+    def test_flags_default_history_value(self):
+        """Test that history defaults to True"""
+        flags = Flags()
+        assert flags.history is True
+
+    def test_flags_set_history_false(self):
+        """Test setting history to False"""
+        flags = Flags()
+        flags.history = False
+        assert flags.history is False
+
+    def test_flags_set_history_true(self):
+        """Test explicitly setting history to True"""
+        flags = Flags()
+        flags.history = True
+        assert flags.history is True
+
+    def test_flags_serialization(self):
+        """Test Flags JSON serialization"""
+        flags = Flags(history=True)
+        result = json.loads(flags.to_json())
+        expected = {"history": True}
+        assert result == expected
+
+        flags_false = Flags(history=False)
+        result_false = json.loads(flags_false.to_json())
+        expected_false = {"history": False}
+        assert result_false == expected_false
+
+    def test_flags_deserialization(self):
+        """Test Flags deserialization from dict"""
+        data = {"history": False}
+        flags = Flags.from_dict(data)
+        assert flags.history is False
+
+        data_true = {"history": True}
+        flags_true = Flags.from_dict(data_true)
+        assert flags_true.history is True
+
+    def test_flags_round_trip(self):
+        """Test serialization and deserialization round-trip"""
+        original = Flags(history=False)
+        serialized = original.to_dict()
+        restored = Flags.from_dict(serialized)
+        assert restored.history == original.history
+
+
+class TestHistoryConversationMessage:
+    """Unit tests for HistoryConversationMessage class"""
+
+    def test_history_conversation_message_creation(self):
+        """Test creating a HistoryConversationMessage object"""
+        message = HistoryConversationMessage(
+            role="user",
+            content="What's the weather like today?"
+        )
+
+        assert message.type == "History"
+        assert message.role == "user"
+        assert message.content == "What's the weather like today?"
+
+    def test_history_conversation_message_defaults(self):
+        """Test default values for HistoryConversationMessage"""
+        message = HistoryConversationMessage()
+
+        assert message.type == "History"
+        assert message.role == ""
+        assert message.content == ""
+
+    def test_history_conversation_message_serialization(self):
+        """Test HistoryConversationMessage JSON serialization"""
+        message = HistoryConversationMessage(
+            role="assistant",
+            content="Based on the current data, it's sunny with a temperature of 72°F."
+        )
+
+        result = json.loads(message.to_json())
+        expected = {
+            "type": "History",
+            "role": "assistant",
+            "content": "Based on the current data, it's sunny with a temperature of 72°F."
+        }
+        assert result == expected
+
+    def test_history_conversation_message_deserialization(self):
+        """Test HistoryConversationMessage deserialization from dict"""
+        data = {
+            "type": "History",
+            "role": "user",
+            "content": "Hello, how are you?"
+        }
+
+        message = HistoryConversationMessage.from_dict(data)
+        assert message.type == "History"
+        assert message.role == "user"
+        assert message.content == "Hello, how are you?"
+
+    def test_history_conversation_message_round_trip(self):
+        """Test serialization and deserialization round-trip"""
+        original = HistoryConversationMessage(
+            role="assistant",
+            content="I'm doing well, thank you for asking!"
+        )
+
+        serialized = original.to_dict()
+        restored = HistoryConversationMessage.from_dict(serialized)
+
+        assert restored.type == original.type
+        assert restored.role == original.role
+        assert restored.content == original.content
+
+
+class TestFunctionCallHistory:
+    """Unit tests for FunctionCallHistory class"""
+
+    def test_function_call_history_creation(self):
+        """Test creating a FunctionCallHistory object"""
+        function_call = FunctionCallHistory(
+            id="fc_12345678-90ab-cdef-1234-567890abcdef",
+            name="check_order_status",
+            client_side=True,
+            arguments='{"order_id": "ORD-123456"}',
+            response="Order #123456 status: Shipped - Expected delivery date: 2024-03-15"
+        )
+
+        assert function_call.id == "fc_12345678-90ab-cdef-1234-567890abcdef"
+        assert function_call.name == "check_order_status"
+        assert function_call.client_side is True
+        assert function_call.arguments == '{"order_id": "ORD-123456"}'
+        assert function_call.response == "Order #123456 status: Shipped - Expected delivery date: 2024-03-15"
+
+    def test_function_call_history_defaults(self):
+        """Test default values for FunctionCallHistory"""
+        function_call = FunctionCallHistory()
+
+        assert function_call.id == ""
+        assert function_call.name == ""
+        assert function_call.client_side is False
+        assert function_call.arguments == ""
+        assert function_call.response == ""
+
+    def test_function_call_history_serialization(self):
+        """Test FunctionCallHistory JSON serialization"""
+        function_call = FunctionCallHistory(
+            id="fc_123",
+            name="get_weather",
+            client_side=False,
+            arguments='{"location": "New York"}',
+            response="Sunny, 75°F"
+        )
+
+        result = json.loads(function_call.to_json())
+        expected = {
+            "id": "fc_123",
+            "name": "get_weather",
+            "client_side": False,
+            "arguments": '{"location": "New York"}',
+            "response": "Sunny, 75°F"
+        }
+        assert result == expected
+
+    def test_function_call_history_deserialization(self):
+        """Test FunctionCallHistory deserialization from dict"""
+        data = {
+            "id": "fc_456",
+            "name": "send_email",
+            "client_side": True,
+            "arguments": '{"to": "user@example.com", "subject": "Test"}',
+            "response": "Email sent successfully"
+        }
+
+        function_call = FunctionCallHistory.from_dict(data)
+        assert function_call.id == "fc_456"
+        assert function_call.name == "send_email"
+        assert function_call.client_side is True
+        assert function_call.arguments == '{"to": "user@example.com", "subject": "Test"}'
+        assert function_call.response == "Email sent successfully"
+
+
+class TestHistoryFunctionCallsMessage:
+    """Unit tests for HistoryFunctionCallsMessage class"""
+
+    def test_history_function_calls_message_creation(self):
+        """Test creating a HistoryFunctionCallsMessage object"""
+        function_call = FunctionCallHistory(
+            id="fc_123",
+            name="check_balance",
+            client_side=True,
+            arguments='{"account_id": "12345"}',
+            response="Current balance: $1,250.00"
+        )
+
+        message = HistoryFunctionCallsMessage(
+            function_calls=[function_call]
+        )
+
+        assert message.type == "History"
+        assert len(message.function_calls) == 1
+        assert isinstance(message.function_calls[0], FunctionCallHistory)
+        assert message.function_calls[0].name == "check_balance"
+
+    def test_history_function_calls_message_defaults(self):
+        """Test default values for HistoryFunctionCallsMessage"""
+        message = HistoryFunctionCallsMessage()
+
+        assert message.type == "History"
+        assert message.function_calls == []
+
+    def test_history_function_calls_message_multiple_calls(self):
+        """Test HistoryFunctionCallsMessage with multiple function calls"""
+        call1 = FunctionCallHistory(
+            id="fc_1",
+            name="get_weather",
+            client_side=True,
+            arguments='{"location": "NYC"}',
+            response="Sunny, 72°F"
+        )
+
+        call2 = FunctionCallHistory(
+            id="fc_2",
+            name="get_time",
+            client_side=False,
+            arguments='{"timezone": "EST"}',
+            response="2024-03-15 14:30:00 EST"
+        )
+
+        message = HistoryFunctionCallsMessage(function_calls=[call1, call2])
+
+        assert len(message.function_calls) == 2
+        assert message.function_calls[0].name == "get_weather"
+        assert message.function_calls[1].name == "get_time"
+
+    def test_history_function_calls_message_serialization(self):
+        """Test HistoryFunctionCallsMessage JSON serialization"""
+        function_call = FunctionCallHistory(
+            id="fc_789",
+            name="calculate_tip",
+            client_side=True,
+            arguments='{"bill_amount": 50.00, "tip_percentage": 18}',
+            response="Recommended tip: $9.00"
+        )
+
+        message = HistoryFunctionCallsMessage(function_calls=[function_call])
+        result = json.loads(message.to_json())
+
+        expected = {
+            "type": "History",
+            "function_calls": [
+                {
+                    "id": "fc_789",
+                    "name": "calculate_tip",
+                    "client_side": True,
+                    "arguments": '{"bill_amount": 50.00, "tip_percentage": 18}',
+                    "response": "Recommended tip: $9.00"
+                }
+            ]
+        }
+        assert result == expected
+
+    def test_history_function_calls_message_deserialization(self):
+        """Test HistoryFunctionCallsMessage deserialization from dict"""
+        data = {
+            "type": "History",
+            "function_calls": [
+                {
+                    "id": "fc_101",
+                    "name": "book_flight",
+                    "client_side": False,
+                    "arguments": '{"origin": "NYC", "destination": "LAX"}',
+                    "response": "Flight booked successfully"
+                }
+            ]
+        }
+
+        message = HistoryFunctionCallsMessage.from_dict(data)
+        assert message.type == "History"
+        assert len(message.function_calls) == 1
+        assert isinstance(message.function_calls[0], FunctionCallHistory)
+        assert message.function_calls[0].name == "book_flight"
+
+    def test_history_function_calls_message_post_init_conversion(self):
+        """Test that __post_init__ converts dict function_calls to FunctionCallHistory objects"""
+        # Create message with dict instead of FunctionCallHistory objects
+        message = HistoryFunctionCallsMessage()
+        message.function_calls = [
+            {
+                "id": "fc_202",
+                "name": "convert_currency",
+                "client_side": True,
+                "arguments": '{"from": "USD", "to": "EUR", "amount": 100}',
+                "response": "100 USD = 85.50 EUR"
+            }
+        ]
+
+        # Trigger __post_init__
+        message.__post_init__()
+
+        assert len(message.function_calls) == 1
+        assert isinstance(message.function_calls[0], FunctionCallHistory)
+        assert message.function_calls[0].name == "convert_currency"
+
+
+class TestContext:
+    """Unit tests for Context class"""
+
+    def test_context_creation_empty(self):
+        """Test creating an empty Context object"""
+        context = Context()
+        assert context.messages == []
+
+    def test_context_creation_with_conversation_messages(self):
+        """Test creating Context with conversation messages"""
+        msg1 = HistoryConversationMessage(
+            role="user",
+            content="Hello, I need help with my order"
+        )
+        msg2 = HistoryConversationMessage(
+            role="assistant",
+            content="I'd be happy to help! What's your order number?"
+        )
+
+        context = Context(messages=[msg1, msg2])
+
+        assert len(context.messages) == 2
+        assert isinstance(context.messages[0], HistoryConversationMessage)
+        assert isinstance(context.messages[1], HistoryConversationMessage)
+        assert context.messages[0].role == "user"
+        assert context.messages[1].role == "assistant"
+
+    def test_context_creation_with_function_call_messages(self):
+        """Test creating Context with function call messages"""
+        function_call = FunctionCallHistory(
+            id="fc_303",
+            name="lookup_order",
+            client_side=True,
+            arguments='{"order_number": "ORD-789"}',
+            response="Order found: Status is Processing"
+        )
+
+        func_msg = HistoryFunctionCallsMessage(function_calls=[function_call])
+        context = Context(messages=[func_msg])
+
+        assert len(context.messages) == 1
+        assert isinstance(context.messages[0], HistoryFunctionCallsMessage)
+        assert len(context.messages[0].function_calls) == 1
+
+    def test_context_creation_with_mixed_messages(self):
+        """Test creating Context with both conversation and function call messages"""
+        conv_msg = HistoryConversationMessage(
+            role="user",
+            content="What's my order status?"
+        )
+
+        function_call = FunctionCallHistory(
+            id="fc_404",
+            name="get_order_status",
+            client_side=True,
+            arguments='{"order_id": "12345"}',
+            response="Your order is shipped and will arrive tomorrow"
+        )
+        func_msg = HistoryFunctionCallsMessage(function_calls=[function_call])
+
+        response_msg = HistoryConversationMessage(
+            role="assistant",
+            content="Your order is shipped and will arrive tomorrow"
+        )
+
+        context = Context(messages=[conv_msg, func_msg, response_msg])
+
+        assert len(context.messages) == 3
+        assert isinstance(context.messages[0], HistoryConversationMessage)
+        assert isinstance(context.messages[1], HistoryFunctionCallsMessage)
+        assert isinstance(context.messages[2], HistoryConversationMessage)
+
+    def test_context_serialization(self):
+        """Test Context JSON serialization"""
+        conv_msg = HistoryConversationMessage(
+            role="user",
+            content="Test message"
+        )
+
+        context = Context(messages=[conv_msg])
+        result = json.loads(context.to_json())
+
+        expected = {
+            "messages": [
+                {
+                    "type": "History",
+                    "role": "user",
+                    "content": "Test message"
+                }
+            ]
+        }
+        assert result == expected
+
+    def test_context_deserialization(self):
+        """Test Context deserialization from dict using realistic construction approach"""
+        # Create message objects first, then construct Context
+        conv_msg = HistoryConversationMessage(
+            role="assistant",
+            content="How can I help you today?"
+        )
+
+        function_call = FunctionCallHistory(
+            id="fc_505",
+            name="greet_user",
+            client_side=False,
+            arguments='{}',
+            response="User greeted successfully"
+        )
+        func_msg = HistoryFunctionCallsMessage(function_calls=[function_call])
+
+        context = Context(messages=[conv_msg, func_msg])
+
+        assert len(context.messages) == 2
+        assert isinstance(context.messages[0], HistoryConversationMessage)
+        assert isinstance(context.messages[1], HistoryFunctionCallsMessage)
+        assert context.messages[0].content == "How can I help you today?"
+ assert len(context.messages[1].function_calls) == 1 + + def test_context_post_init_conversion(self): + """Test that __post_init__ converts dict messages to appropriate message objects""" + context = Context() + context.messages = [ + { + "type": "History", + "role": "user", + "content": "Hello" + }, + { + "type": "History", + "function_calls": [ + { + "id": "fc_606", + "name": "process_greeting", + "client_side": True, + "arguments": '{"greeting": "Hello"}', + "response": "Greeting processed" + } + ] + } + ] + + # Trigger __post_init__ + context.__post_init__() + + assert len(context.messages) == 2 + assert isinstance(context.messages[0], HistoryConversationMessage) + assert isinstance(context.messages[1], HistoryFunctionCallsMessage) + assert context.messages[0].content == "Hello" + assert len(context.messages[1].function_calls) == 1 + + +class TestAgentIntegration: + """Integration tests for Agent class with context""" + + def test_agent_with_context(self): + """Test Agent class with context field""" + conv_msg = HistoryConversationMessage( + role="user", + content="Previous conversation context" + ) + context = Context(messages=[conv_msg]) + + agent = Agent( + language="en", + context=context + ) + + assert agent.language == "en" + assert agent.context is not None + assert isinstance(agent.context, Context) + assert len(agent.context.messages) == 1 + + def test_agent_context_serialization(self): + """Test Agent serialization with context""" + function_call = FunctionCallHistory( + id="fc_707", + name="previous_action", + client_side=True, + arguments='{"action": "test"}', + response="Action completed" + ) + func_msg = HistoryFunctionCallsMessage(function_calls=[function_call]) + context = Context(messages=[func_msg]) + + agent = Agent(context=context) + result = agent.to_dict() + + assert "context" in result + assert "messages" in result["context"] + assert len(result["context"]["messages"]) == 1 + assert result["context"]["messages"][0]["type"] == "History" + + def test_agent_context_deserialization(self): + """Test Agent deserialization with context""" + data = { + "language": "es", + "context": { + "messages": [ + { + "type": "History", + "role": "assistant", + "content": "Hola, ¿cómo puedo ayudarte?" + } + ] + } + } + + agent = Agent.from_dict(data) + + assert agent.language == "es" + assert agent.context is not None + assert isinstance(agent.context, Context) + assert len(agent.context.messages) == 1 + assert isinstance(agent.context.messages[0], HistoryConversationMessage) + assert agent.context.messages[0].content == "Hola, ¿cómo puedo ayudarte?" 
+ + +class TestSettingsOptionsIntegration: + """Integration tests for SettingsOptions with flags and context""" + + def test_settings_options_with_flags(self): + """Test SettingsOptions with flags field""" + flags = Flags(history=True) + settings = SettingsOptions(flags=flags) + + assert settings.flags is not None + assert isinstance(settings.flags, Flags) + assert settings.flags.history is True + + def test_settings_options_with_flags_and_context(self): + """Test SettingsOptions with both flags and agent context""" + # Create flags + flags = Flags(history=True) + + # Create context with mixed messages + conv_msg = HistoryConversationMessage( + role="user", + content="I want to continue our previous conversation" + ) + + function_call = FunctionCallHistory( + id="fc_808", + name="retrieve_context", + client_side=True, + arguments='{"session_id": "sess_123"}', + response="Context retrieved successfully" + ) + func_msg = HistoryFunctionCallsMessage(function_calls=[function_call]) + + context = Context(messages=[conv_msg, func_msg]) + + # Create settings + settings = SettingsOptions( + flags=flags, + agent=Agent(context=context) + ) + + assert settings.flags.history is True + assert settings.agent.context is not None + assert len(settings.agent.context.messages) == 2 + + def test_settings_options_full_serialization(self): + """Test complete SettingsOptions serialization with all new features""" + flags = Flags(history=False) + + conv_msg = HistoryConversationMessage( + role="assistant", + content="Welcome back! I remember our last conversation." + ) + + function_call = FunctionCallHistory( + id="fc_909", + name="load_user_preferences", + client_side=False, + arguments='{"user_id": "user_456"}', + response="Preferences loaded: theme=dark, language=en" + ) + func_msg = HistoryFunctionCallsMessage(function_calls=[function_call]) + + context = Context(messages=[conv_msg, func_msg]) + + settings = SettingsOptions( + experimental=True, + flags=flags, + agent=Agent( + language="en", + context=context + ) + ) + + result = settings.to_dict() + + # Verify structure + assert result["experimental"] is True + assert result["flags"]["history"] is False + assert result["agent"]["language"] == "en" + assert "context" in result["agent"] + assert len(result["agent"]["context"]["messages"]) == 2 + + # Verify message types + messages = result["agent"]["context"]["messages"] + assert messages[0]["type"] == "History" + assert messages[0]["role"] == "assistant" + assert messages[1]["type"] == "History" + assert "function_calls" in messages[1] + + def test_settings_options_full_deserialization(self): + """Test complete SettingsOptions deserialization with all new features using realistic construction""" + # Create message objects programmatically + conv_msg = HistoryConversationMessage( + role="user", + content="¿Recuerdas nuestra conversación anterior?" 
+ ) + + function_call = FunctionCallHistory( + id="fc_010", + name="buscar_historial", + client_side=True, + arguments='{"usuario": "test"}', + response="Historial encontrado" + ) + func_msg = HistoryFunctionCallsMessage(function_calls=[function_call]) + + context = Context(messages=[conv_msg, func_msg]) + flags = Flags(history=True) + + settings = SettingsOptions( + experimental=False, + flags=flags, + agent=Agent( + language="es", + context=context + ) + ) + + assert settings.experimental is False + assert settings.flags.history is True + assert settings.agent.language == "es" + assert len(settings.agent.context.messages) == 2 + + # Verify message types are correctly set + assert isinstance(settings.agent.context.messages[0], HistoryConversationMessage) + assert isinstance(settings.agent.context.messages[1], HistoryFunctionCallsMessage) + + # Verify content + assert settings.agent.context.messages[0].content == "¿Recuerdas nuestra conversación anterior?" + assert len(settings.agent.context.messages[1].function_calls) == 1 + assert settings.agent.context.messages[1].function_calls[0].name == "buscar_historial" + + def test_settings_options_round_trip(self): + """Test complete round-trip serialization/deserialization using a hybrid approach""" + # Create original settings + flags = Flags(history=True) + + conv_msg = HistoryConversationMessage( + role="user", + content="This is a test message for round-trip testing" + ) + + function_call = FunctionCallHistory( + id="fc_roundtrip", + name="test_function", + client_side=True, + arguments='{"test": "data"}', + response="Test successful" + ) + func_msg = HistoryFunctionCallsMessage(function_calls=[function_call]) + + context = Context(messages=[conv_msg, func_msg]) + + original = SettingsOptions( + experimental=True, + flags=flags, + agent=Agent( + language="en", + context=context + ) + ) + + # Test serialization + serialized = original.to_dict() + + # Verify serialized structure + assert serialized["experimental"] is True + assert serialized["flags"]["history"] is True + assert serialized["agent"]["language"] == "en" + assert "context" in serialized["agent"] + assert len(serialized["agent"]["context"]["messages"]) == 2 + + # Test that we can reconstruct equivalent object + reconstructed_flags = Flags(history=serialized["flags"]["history"]) + + # Reconstruct messages manually (more realistic usage) + reconstructed_conv_msg = HistoryConversationMessage( + role=serialized["agent"]["context"]["messages"][0]["role"], + content=serialized["agent"]["context"]["messages"][0]["content"] + ) + + reconstructed_func_call = FunctionCallHistory( + id=serialized["agent"]["context"]["messages"][1]["function_calls"][0]["id"], + name=serialized["agent"]["context"]["messages"][1]["function_calls"][0]["name"], + client_side=serialized["agent"]["context"]["messages"][1]["function_calls"][0]["client_side"], + arguments=serialized["agent"]["context"]["messages"][1]["function_calls"][0]["arguments"], + response=serialized["agent"]["context"]["messages"][1]["function_calls"][0]["response"] + ) + reconstructed_func_msg = HistoryFunctionCallsMessage(function_calls=[reconstructed_func_call]) + + reconstructed_context = Context(messages=[reconstructed_conv_msg, reconstructed_func_msg]) + + restored = SettingsOptions( + experimental=serialized["experimental"], + flags=reconstructed_flags, + agent=Agent( + language=serialized["agent"]["language"], + context=reconstructed_context + ) + ) + + # Verify everything matches + assert restored.experimental == original.experimental 
+ assert restored.flags.history == original.flags.history + assert restored.agent.language == original.agent.language + assert len(restored.agent.context.messages) == len(original.agent.context.messages) + + # Verify message content + assert restored.agent.context.messages[0].content == original.agent.context.messages[0].content + assert len(restored.agent.context.messages[1].function_calls) == len(original.agent.context.messages[1].function_calls) + assert restored.agent.context.messages[1].function_calls[0].name == original.agent.context.messages[1].function_calls[0].name + + +class TestErrorHandling: + """Test error handling and edge cases""" + + def test_context_with_empty_messages(self): + """Test Context handles empty messages list correctly""" + context = Context(messages=[]) + assert context.messages == [] + + result = context.to_dict() + assert result["messages"] == [] + + def test_history_function_calls_message_empty_function_calls(self): + """Test HistoryFunctionCallsMessage handles empty function_calls list""" + message = HistoryFunctionCallsMessage(function_calls=[]) + assert message.function_calls == [] + + result = message.to_dict() + assert result["function_calls"] == [] + + def test_context_post_init_with_invalid_message_structure(self): + """Test Context.__post_init__ handles malformed message dicts gracefully""" + context = Context() + context.messages = [ + { + "type": "History", + "role": "user", + "content": "Valid conversation message" + }, + { + "type": "History" + # Missing both content and function_calls - should default to conversation + }, + { + "type": "History", + "function_calls": [] # Empty function calls + } + ] + + context.__post_init__() + + assert len(context.messages) == 3 + assert isinstance(context.messages[0], HistoryConversationMessage) + assert isinstance(context.messages[1], HistoryConversationMessage) + assert isinstance(context.messages[2], HistoryFunctionCallsMessage) + + def test_agent_context_none_handling(self): + """Test Agent handles None context correctly""" + agent = Agent(context=None) + assert agent.context is None + + result = agent.to_dict() + # context should be excluded from serialization when None + assert "context" not in result + + def test_settings_options_flags_none_handling(self): + """Test SettingsOptions handles None flags correctly""" + settings = SettingsOptions(flags=None) + assert settings.flags is None + + result = settings.to_dict() + # flags should be excluded from serialization when None + assert "flags" not in result \ No newline at end of file
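Taken together, the tests above pin down how callers are expected to wire the new options: enable history reporting through Flags on SettingsOptions, and pre-seed Agent.context with prior conversation turns and prior function-call records. The short sketch below restates that flow as plain usage. It is a minimal sketch only, built from the constructors and the to_dict() call exercised in this test file; the concrete values (language, ids, order text) are illustrative and not part of the diff, and the surrounding client wiring (API key, websocket connect, History event handler) is deliberately omitted.

    from deepgram.clients.agent.v1.websocket.options import (
        SettingsOptions,
        Agent,
        Flags,
        Context,
        HistoryConversationMessage,
        HistoryFunctionCallsMessage,
        FunctionCallHistory,
    )

    # Flags.history toggles history reporting; per TestFlags above it defaults to True.
    # Agent.context carries one prior user turn and one prior function-call record.
    options = SettingsOptions(
        flags=Flags(history=True),
        agent=Agent(
            language="en",  # illustrative value, as in the tests
            context=Context(
                messages=[
                    HistoryConversationMessage(
                        role="user",
                        content="What's my order status?",
                    ),
                    HistoryFunctionCallsMessage(
                        function_calls=[
                            FunctionCallHistory(
                                id="fc_1",  # illustrative id
                                name="get_order_status",
                                client_side=True,
                                arguments='{"order_id": "12345"}',
                                response="Shipped",
                            )
                        ]
                    ),
                ]
            ),
        ),
    )

    # to_dict() includes the new "flags" and "agent"/"context" fields alongside the
    # existing settings, as asserted in test_settings_options_full_serialization.
    print(options.to_dict())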