Source code for openai.types.completion_usage
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional

from .._models import BaseModel

__all__ = ["CompletionUsage", "CompletionTokensDetails", "PromptTokensDetails"]


class CompletionTokensDetails(BaseModel):
    accepted_prediction_tokens: Optional[int] = None
    """
    When using Predicted Outputs, the number of tokens in the prediction that
    appeared in the completion.
    """

    audio_tokens: Optional[int] = None
    """Audio tokens generated by the model."""

    reasoning_tokens: Optional[int] = None
    """Tokens generated by the model for reasoning."""

    rejected_prediction_tokens: Optional[int] = None
    """
    When using Predicted Outputs, the number of tokens in the prediction that did
    not appear in the completion. However, like reasoning tokens, these tokens are
    still counted in the total completion tokens for purposes of billing, output,
    and context window limits.
    """


class PromptTokensDetails(BaseModel):
    audio_tokens: Optional[int] = None
    """Audio input tokens present in the prompt."""

    cached_tokens: Optional[int] = None
"""Cached tokens present in the prompt."""


class CompletionUsage(BaseModel):
    completion_tokens: int
    """Number of tokens in the generated completion."""

    prompt_tokens: int
    """Number of tokens in the prompt."""

    total_tokens: int
    """Total number of tokens used in the request (prompt + completion)."""

    completion_tokens_details: Optional[CompletionTokensDetails] = None
    """Breakdown of tokens used in a completion."""

    prompt_tokens_details: Optional[PromptTokensDetails] = None
    """Breakdown of tokens used in the prompt."""