Source code for openai.types.chat.chat_completion

# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from typing_extensions import Literal

from ..._models import BaseModel
from ..completion_usage import CompletionUsage
from .chat_completion_message import ChatCompletionMessage
from .chat_completion_token_logprob import ChatCompletionTokenLogprob

__all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]


class ChoiceLogprobs(BaseModel):
    content: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message content tokens with log probability information."""

    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message refusal tokens with log probability information."""


class Choice(BaseModel):
    finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
    """The reason the model stopped generating tokens.

    This will be `stop` if the model hit a natural stop point or a provided
    stop sequence, `length` if the maximum number of tokens specified in the
    request was reached, `content_filter` if content was omitted due to a flag
    from our content filters, `tool_calls` if the model called a tool, or
    `function_call` (deprecated) if the model called a function.
    """

    index: int
    """The index of the choice in the list of choices."""

    logprobs: Optional[ChoiceLogprobs] = None
    """Log probability information for the choice."""

    message: ChatCompletionMessage
    """A chat completion message generated by the model."""
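
# --- Illustrative example, not part of the generated file ---
# A minimal sketch of how `finish_reason` is commonly branched on;
# `needs_tool_dispatch` is a hypothetical helper, not part of this module.
def needs_tool_dispatch(choice: Choice) -> bool:
    """Whether the caller must execute a tool/function and send back its result."""
    # Both `tool_calls` and the deprecated `function_call` stop reasons mean
    # the model is waiting on the caller to run something.
    return choice.finish_reason in ("tool_calls", "function_call")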
class ChatCompletion(BaseModel):
    id: str
    """A unique identifier for the chat completion."""

    choices: List[Choice]
    """A list of chat completion choices.

    Can be more than one if `n` is greater than 1.
    """

    created: int
    """The Unix timestamp (in seconds) of when the chat completion was created."""

    model: str
    """The model used for the chat completion."""

    object: Literal["chat.completion"]
    """The object type, which is always `chat.completion`."""

    service_tier: Optional[Literal["scale", "default"]] = None
    """The service tier used for processing the request.

    This field is only included if the `service_tier` parameter is specified
    in the request.
    """

    system_fingerprint: Optional[str] = None
    """This fingerprint represents the backend configuration that the model runs with.

    Can be used in conjunction with the `seed` request parameter to understand
    when backend changes have been made that might impact determinism.
    """

    usage: Optional[CompletionUsage] = None
    """Usage statistics for the completion request."""
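
# --- Illustrative example, not part of the generated file ---
# A minimal end-to-end sketch: the client parses the API response into the
# ChatCompletion model defined above. Assumes the `openai` package is
# installed and OPENAI_API_KEY is set; the model name is only an example.
from openai import OpenAI

client = OpenAI()
completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello."}],
)

# `completion` is a ChatCompletion; each entry in `choices` is a Choice.
choice = completion.choices[0]
if choice.finish_reason == "length":
    # The reply hit the max-token limit and may be incomplete.
    print("Truncated reply:", choice.message.content)
else:
    print(choice.message.content)

if completion.usage is not None:
    print("Total tokens:", completion.usage.total_tokens)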