2 changes: 2 additions & 0 deletions src/openai/types/__init__.py
@@ -61,8 +61,10 @@
from .video_create_error import VideoCreateError as VideoCreateError
from .video_remix_params import VideoRemixParams as VideoRemixParams
from .batch_create_params import BatchCreateParams as BatchCreateParams
from .batch_request_input import BatchRequestInput as BatchRequestInput
from .video_create_params import VideoCreateParams as VideoCreateParams
from .batch_request_counts import BatchRequestCounts as BatchRequestCounts
from .batch_request_output import BatchRequestOutput as BatchRequestOutput
from .eval_create_response import EvalCreateResponse as EvalCreateResponse
from .eval_delete_response import EvalDeleteResponse as EvalDeleteResponse
from .eval_update_response import EvalUpdateResponse as EvalUpdateResponse
59 changes: 59 additions & 0 deletions src/openai/types/batch_request_input.py
@@ -0,0 +1,59 @@
from __future__ import annotations

from typing import Any, Dict, Union
from typing_extensions import Literal, Required, TypedDict

from .embedding_create_params import EmbeddingCreateParams
from .completion_create_params import CompletionCreateParams
from .moderation_create_params import ModerationCreateParams
from .chat.completion_create_params import CompletionCreateParams as ChatCompletionCreateParams
from .responses.response_create_params import ResponseCreateParams

__all__ = ["BatchRequestInput"]


# Union of all possible request body types for batch requests
BatchRequestBody = Union[
ChatCompletionCreateParams,
CompletionCreateParams,
EmbeddingCreateParams,
ModerationCreateParams,
ResponseCreateParams,
Dict[str, Any], # Fallback for any additional types
]


class BatchRequestInput(TypedDict, total=False):
"""The per-line object of the batch input file.

Each line in a batch input JSONL file represents a single request that will be
processed as part of the batch. The file can contain up to 50,000 requests and
can be up to 200 MB in size.
"""

custom_id: Required[str]
"""A developer-provided per-request id that will be used to match outputs to inputs.

Must be unique for each request in a batch.
"""

method: Required[Literal["POST"]]
"""The HTTP method to be used for the request.

Currently only `POST` is supported.
"""

url: Required[str]
"""The OpenAI API relative URL to be used for the request.

Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`,
`/v1/completions`, and `/v1/moderations` are supported.
"""

body: Required[BatchRequestBody]
"""The request body for the API endpoint specified in `url`.

The structure of the body must match the expected parameters for the endpoint.
For example, if `url` is `/v1/chat/completions`, the body should be a
`ChatCompletionCreateParams` object.
"""
95 changes: 95 additions & 0 deletions src/openai/types/batch_request_output.py
@@ -0,0 +1,95 @@
from __future__ import annotations

from typing import Union, Optional
from typing_extensions import Literal

from .._models import BaseModel
from .completion import Completion
from .responses.response import Response
from .chat.chat_completion import ChatCompletion
from .create_embedding_response import CreateEmbeddingResponse
from .moderation_create_response import ModerationCreateResponse

__all__ = ["BatchRequestOutput", "BatchRequestOutputResponse", "BatchRequestOutputError", "BatchResponseBody"]


# Union of all possible response body types for batch request outputs
BatchResponseBody = Union[
ChatCompletion,
Completion,
CreateEmbeddingResponse,
ModerationCreateResponse,
Response,
]


class BatchRequestOutputError(BaseModel):
"""For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure."""

code: Literal["batch_expired", "batch_cancelled", "request_timeout"]
"""A machine-readable error code.

Possible values:
- `batch_expired`: The request could not be executed before the
completion window ended.
- `batch_cancelled`: The batch was cancelled before this request
executed.
- `request_timeout`: The underlying call to the model timed out.
"""

message: str
"""A human-readable error message."""


class BatchRequestOutputResponse(BaseModel):
"""The response object for a successfully executed batch request."""

status_code: int
"""The HTTP status code of the response."""

request_id: str
"""An unique identifier for the OpenAI API request.

Please include this request ID when contacting support.
"""

body: BatchResponseBody
"""The JSON body of the response.

The structure depends on the endpoint that was called:
- `/v1/chat/completions` -> `ChatCompletion`
- `/v1/completions` -> `Completion`
- `/v1/embeddings` -> `CreateEmbeddingResponse`
- `/v1/moderations` -> `ModerationCreateResponse`
- `/v1/responses` -> `Response`
"""


class BatchRequestOutput(BaseModel):
"""The per-line object of the batch output and error files.

Each line in a batch output JSONL file represents the result of a single
request from the batch input file. The output file contains successfully
executed requests, while the error file contains requests that failed.
"""

id: str
"""The unique ID of the batch request."""

custom_id: str
"""A developer-provided per-request id that will be used to match outputs to inputs.

This matches the `custom_id` from the corresponding `BatchRequestInput`.
"""

response: Optional[BatchRequestOutputResponse] = None
"""The response object if the request was successfully executed.

Contains the HTTP status code, request ID, and the JSON body of the response.
"""

error: Optional[BatchRequestOutputError] = None
"""The error object if the request failed with a non-HTTP error.

Contains a machine-readable error code and a human-readable error message.
"""