Source code for camel.models.azure_openai_model

# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import os
from typing import Any, Dict, List, Optional, Union

from openai import AzureOpenAI, Stream

from camel.configs import OPENAI_API_PARAMS
from camel.messages import OpenAIMessage
from camel.models.base_model import BaseModelBackend
from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required


class AzureOpenAIModel(BaseModelBackend):
    r"""Azure OpenAI API in a unified BaseModelBackend interface.

    Doc: https://learn.microsoft.com/en-us/azure/ai-services/openai/
    """

    def __init__(
        self,
        model_type: ModelType,
        model_config_dict: Dict[str, Any],
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        api_version: Optional[str] = None,
        azure_deployment_name: Optional[str] = None,
    ) -> None:
        r"""Constructor for Azure OpenAI backend.

        Args:
            model_type (ModelType): Model for which a backend is created,
                one of the GPT_* series.
            model_config_dict (Dict[str, Any]): A dictionary that will
                be fed into openai.ChatCompletion.create().
            api_key (Optional[str]): The API key for authenticating with
                the Azure OpenAI service. (default: :obj:`None`)
            url (Optional[str]): The URL to the Azure OpenAI service.
                (default: :obj:`None`)
            api_version (Optional[str]): The API version for the model.
                (default: :obj:`None`)
            azure_deployment_name (Optional[str]): The deployment name
                you chose when you deployed an Azure model.
                (default: :obj:`None`)
        """
        super().__init__(model_type, model_config_dict, api_key, url)
        self._url = url or os.environ.get("AZURE_OPENAI_ENDPOINT")
        self._api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
        self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
        self.azure_deployment_name = azure_deployment_name or os.environ.get(
            "AZURE_DEPLOYMENT_NAME"
        )
        if self._url is None:
            raise ValueError(
                "Must provide either the `url` argument "
                "or `AZURE_OPENAI_ENDPOINT` environment variable."
            )
        if self._api_key is None:
            raise ValueError(
                "Must provide either the `api_key` argument "
                "or `AZURE_OPENAI_API_KEY` environment variable."
            )
        if self.api_version is None:
            raise ValueError(
                "Must provide either the `api_version` argument "
                "or `AZURE_API_VERSION` environment variable."
            )
        if self.azure_deployment_name is None:
            raise ValueError(
                "Must provide either the `azure_deployment_name` argument "
                "or `AZURE_DEPLOYMENT_NAME` environment variable."
            )
        self.model = str(self.azure_deployment_name)

        self._client = AzureOpenAI(
            azure_endpoint=str(self._url),
            azure_deployment=self.azure_deployment_name,
            api_version=self.api_version,
            api_key=self._api_key,
            timeout=60,
            max_retries=3,
        )
        self._token_counter: Optional[BaseTokenCounter] = None

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        Returns:
            BaseTokenCounter: The token counter following the model's
                tokenization style.
        """
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(self.model_type)
        return self._token_counter

    @api_keys_required("AZURE_OPENAI_API_KEY", "AZURE_API_VERSION")
    def run(
        self,
        messages: List[OpenAIMessage],
    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
        r"""Runs inference of Azure OpenAI chat completion.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat
                history in OpenAI API format.

        Returns:
            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                `ChatCompletion` in the non-stream mode, or
                `Stream[ChatCompletionChunk]` in the stream mode.
        """
        response = self._client.chat.completions.create(
            messages=messages,
            model=self.model,
            **self.model_config_dict,
        )
        return response

    def check_model_config(self):
        r"""Check whether the model configuration contains any unexpected
        arguments to Azure OpenAI API.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments to Azure OpenAI API.
        """
        for param in self.model_config_dict:
            if param not in OPENAI_API_PARAMS:
                raise ValueError(
                    f"Unexpected argument `{param}` is "
                    "input into Azure OpenAI model backend."
                )

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
        results each time.

        Returns:
            bool: Whether the model is in stream mode.
        """
        return self.model_config_dict.get("stream", False)
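
A minimal usage sketch of the backend above. The endpoint, key, API version, and
deployment name here are hypothetical placeholders (the constructor reads them from
the corresponding environment variables when the arguments are omitted), and it
assumes ModelType.GPT_4 is available in camel.types and that `run` accepts messages
as plain OpenAI-format dicts:

import os

from camel.models.azure_openai_model import AzureOpenAIModel
from camel.types import ModelType

# Hypothetical values; in practice these point at a real Azure OpenAI
# resource and deployment.
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://<resource-name>.openai.azure.com/"
os.environ["AZURE_OPENAI_API_KEY"] = "<azure-api-key>"
os.environ["AZURE_API_VERSION"] = "2024-02-01"
os.environ["AZURE_DEPLOYMENT_NAME"] = "my-gpt4-deployment"

model = AzureOpenAIModel(
    model_type=ModelType.GPT_4,
    model_config_dict={"temperature": 0.0},
)

# Messages use the OpenAI chat format; in non-stream mode `run`
# returns a ChatCompletion.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello."},
]
response = model.run(messages)
print(response.choices[0].message.content)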
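
The `stream` property simply reflects the "stream" flag in `model_config_dict`; when
that flag is set, `run` returns a Stream[ChatCompletionChunk] that can be iterated. A
sketch under the same assumed environment variables as above:

streaming_model = AzureOpenAIModel(
    model_type=ModelType.GPT_4,
    model_config_dict={"temperature": 0.0, "stream": True},
)

for chunk in streaming_model.run(messages):
    # Each chunk carries a partial delta of the assistant message.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")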