Source code for camel.configs.aiml_config
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
from __future__ import annotations

from typing import Sequence, Type, Union

from pydantic import BaseModel, Field

from camel.configs.base_config import BaseConfig
from camel.types import NOT_GIVEN, NotGiven

class AIMLConfig(BaseConfig):
    r"""Defines the parameters for generating chat completions using the
    AIML API.

    Args:
        temperature (float, optional): Determines the degree of randomness
            in the response. (default: :obj:`0.7`)
        top_p (float, optional): The top_p (nucleus) parameter is used to
            dynamically adjust the number of choices for each predicted
            token based on the cumulative probabilities.
            (default: :obj:`0.7`)
        n (int, optional): Number of generations to return.
            (default: :obj:`1`)
        response_format (object, optional): An object specifying the format
            that the model must output. (default: :obj:`None`)
        stream (bool, optional): If set, tokens are returned as Server-Sent
            Events as they are made available. (default: :obj:`False`)
        stop (str or list, optional): Up to :obj:`4` sequences where the API
            will stop generating further tokens. (default: :obj:`None`)
        max_tokens (int, optional): The maximum number of tokens to
            generate. (default: :obj:`None`)
        logit_bias (dict, optional): Modify the likelihood of specified
            tokens appearing in the completion. Accepts a json object that
            maps tokens (specified by their token ID in the tokenizer) to
            an associated bias value from :obj:`-100` to :obj:`100`.
            Mathematically, the bias is added to the logits generated by
            the model prior to sampling. The exact effect will vary per
            model, but values between :obj:`-1` and :obj:`1` should
            decrease or increase likelihood of selection; values like
            :obj:`-100` or :obj:`100` should result in a ban or exclusive
            selection of the relevant token. (default: :obj:`{}`)
        frequency_penalty (float, optional): Number between :obj:`-2.0` and
            :obj:`2.0`. Positive values penalize new tokens based on their
            existing frequency in the text so far, decreasing the model's
            likelihood to repeat the same line verbatim. See more
            information about frequency and presence penalties.
            (default: :obj:`0.0`)
        presence_penalty (float, optional): Number between :obj:`-2.0` and
            :obj:`2.0`. Positive values penalize new tokens based on
            whether they appear in the text so far, increasing the model's
            likelihood to talk about new topics. See more information about
            frequency and presence penalties. (default: :obj:`0.0`)
        tools (list[FunctionTool], optional): A list of tools the model may
            call. Currently, only functions are supported as a tool. Use
            this to provide a list of functions the model may generate JSON
            inputs for. A maximum of :obj:`128` functions is supported.
    """
    temperature: float = 0.7
    top_p: float = 0.7
    n: int = 1
    stream: bool = False
    stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
    max_tokens: Union[int, NotGiven] = NOT_GIVEN
    logit_bias: dict = Field(default_factory=dict)
    response_format: Union[Type[BaseModel], dict, NotGiven] = NOT_GIVEN
    presence_penalty: float = 0.0
    frequency_penalty: float = 0.0


AIML_API_PARAMS = {param for param in AIMLConfig.model_fields.keys()}
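

# --- Usage sketch (illustrative; not part of the original module) ----------
# A minimal sketch assuming only standard Pydantic behavior (`model_dump`);
# the stop string and the token ID in `logit_bias` below are made up purely
# for demonstration and are not meaningful for any particular tokenizer.
if __name__ == "__main__":
    config = AIMLConfig(
        temperature=0.5,
        max_tokens=256,
        stop=["###"],  # hypothetical stop sequence
        logit_bias={50256: -100},  # hypothetical token ID; -100 ~ bans it
    )
    # Fields still holding the NOT_GIVEN sentinel (e.g. `response_format`
    # here) are placeholders rather than real values, so strip them before
    # forwarding the dict as keyword arguments to an API call.
    request_kwargs = {
        key: value
        for key, value in config.model_dump().items()
        if not isinstance(value, NotGiven)
    }
    # Every surviving key is a recognized AIML parameter by construction.
    assert set(request_kwargs) <= AIML_API_PARAMS
    print(request_kwargs)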