camel.types package#

Submodules#

camel.types.enums module#

class camel.types.enums.AudioModelType(value)[source]#

Bases: Enum

An enumeration.

TTS_1 = 'tts-1'#
TTS_1_HD = 'tts-1-hd'#
property is_openai: bool#

Returns whether this type of audio model is an OpenAI-released model.
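
A minimal usage sketch of the enum; the chosen member is illustrative.

```python
from camel.types import AudioModelType

# Pick a text-to-speech model; both members are OpenAI TTS models.
tts_model = AudioModelType.TTS_1_HD

print(tts_model.value)      # 'tts-1-hd'
print(tts_model.is_openai)  # expected True for both TTS_1 and TTS_1_HD
```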

class camel.types.enums.EmbeddingModelType(value)[source]#

Bases: Enum

An enumeration.

MISTRAL_EMBED = 'mistral-embed'#
TEXT_EMBEDDING_3_LARGE = 'text-embedding-3-large'#
TEXT_EMBEDDING_3_SMALL = 'text-embedding-3-small'#
TEXT_EMBEDDING_ADA_2 = 'text-embedding-ada-002'#
property is_mistral: bool#

Returns whether this type of model is a Mistral-released model.

property is_openai: bool#

Returns whether this type of model is an OpenAI-released model.

property output_dim: int#

Returns the output dimension of the embedding model.
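
A brief usage sketch; the dimension noted in the comment is the commonly published size for this embedding model, not a value taken from this documentation.

```python
from camel.types import EmbeddingModelType

model = EmbeddingModelType.TEXT_EMBEDDING_3_SMALL

print(model.is_openai)   # True
print(model.is_mistral)  # False
print(model.output_dim)  # expected 1536 for text-embedding-3-small
```
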
class camel.types.enums.JinaReturnFormat(value)[source]#

Bases: Enum

An enumeration.

DEFAULT = None#
HTML = 'html'#
MARKDOWN = 'markdown'#
TEXT = 'text'#
class camel.types.enums.ModelPlatformType(value)[source]#

Bases: Enum

An enumeration.

ANTHROPIC = 'anthropic'#
AZURE = 'azure'#
COHERE = 'cohere'#
DEFAULT = 'openai'#
GEMINI = 'gemini'#
GROQ = 'groq'#
LITELLM = 'litellm'#
MISTRAL = 'mistral'#
OLLAMA = 'ollama'#
OPENAI = 'openai'#
OPENAI_COMPATIBLE_MODEL = 'openai-compatible-model'#
QWEN = 'tongyi-qianwen'#
REKA = 'reka'#
SAMBA = 'samba-nova'#
TOGETHER = 'together'#
VLLM = 'vllm'#
YI = 'lingyiwanwu'#
ZHIPU = 'zhipuai'#
property is_anthropic: bool#

Returns whether this platform is Anthropic.

property is_azure: bool#

Returns whether this platform is Azure.

property is_cohere: bool#

Returns whether this platform is Cohere.

property is_gemini: bool#

Returns whether this platform is Gemini.

property is_groq: bool#

Returns whether this platform is Groq.

property is_litellm: bool#

Returns whether this platform is LiteLLM.

property is_mistral: bool#

Returns whether this platform is Mistral.

property is_ollama: bool#

Returns whether this platform is Ollama.

property is_openai: bool#

Returns whether this platform is OpenAI.

property is_openai_compatible_model: bool#

Returns whether this platform supports OpenAI API compatibility.

property is_qwen: bool#

Returns whether this platform is Qwen.

property is_reka: bool#

Returns whether this platform is Reka.

property is_samba: bool#

Returns whether this platform is SambaNova.

property is_together: bool#

Returns whether this platform is Together.

property is_vllm: bool#

Returns whether this platform is vLLM.

property is_yi: bool#

Returns whether this platform is Yi.

property is_zhipuai: bool#

Returns whether this platform is ZhipuAI.
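
A short sketch of querying the platform flags; the chosen platform is illustrative, and the aliasing note relies on standard Enum behavior for duplicate values.

```python
from camel.types import ModelPlatformType

platform = ModelPlatformType.OLLAMA

print(platform.value)      # 'ollama'
print(platform.is_ollama)  # True
print(platform.is_openai)  # False

# DEFAULT and OPENAI share the value 'openai'; with standard Enum
# aliasing for duplicate values they resolve to the same member.
print(ModelPlatformType.DEFAULT is ModelPlatformType.OPENAI)  # expected True
```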

class camel.types.enums.OpenAIBackendRole(value)[source]#

Bases: Enum

An enumeration.

ASSISTANT = 'assistant'#
FUNCTION = 'function'#
SYSTEM = 'system'#
TOOL = 'tool'#
USER = 'user'#
class camel.types.enums.OpenAIImageType(value)[source]#

Bases: Enum

Image types supported by OpenAI vision model.

GIF = 'gif'#
JPEG = 'jpeg'#
JPG = 'jpg'#
PNG = 'png'#
WEBP = 'webp'#
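
A small sketch showing a hypothetical helper (not part of camel.types) that maps a file extension onto the enum via value lookup.

```python
from pathlib import Path

from camel.types import OpenAIImageType


def image_type_from_path(path: str) -> OpenAIImageType:
    """Hypothetical helper: map a file extension to OpenAIImageType.

    Raises ValueError for extensions outside the supported set.
    """
    ext = Path(path).suffix.lstrip(".").lower()
    return OpenAIImageType(ext)


print(image_type_from_path("photo.PNG"))  # OpenAIImageType.PNG
print(image_type_from_path("scan.jpg"))   # OpenAIImageType.JPG
```
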
class camel.types.enums.OpenAIImageTypeMeta(cls, bases, classdict, **kwds)[source]#

Bases: EnumMeta

class camel.types.enums.OpenAIVisionDetailType(value)[source]#

Bases: Enum

An enumeration.

AUTO = 'auto'#
HIGH = 'high'#
LOW = 'low'#
class camel.types.enums.OpenAPIName(value)[source]#

Bases: Enum

An enumeration.

BIZTOC = 'biztoc'#
COURSERA = 'coursera'#
CREATE_QR_CODE = 'create_qr_code'#
KLARNA = 'klarna'#
NASA_APOD = 'nasa_apod'#
OUTSCHOOL = 'outschool'#
SPEAK = 'speak'#
WEB_SCRAPER = 'web_scraper'#
class camel.types.enums.RoleType(value)[source]#

Bases: Enum

An enumeration.

ASSISTANT = 'assistant'#
CRITIC = 'critic'#
DEFAULT = 'default'#
EMBODIMENT = 'embodiment'#
USER = 'user'#
class camel.types.enums.StorageType(value)[source]#

Bases: Enum

An enumeration.

MILVUS = 'milvus'#
QDRANT = 'qdrant'#
class camel.types.enums.TaskType(value)[source]#

Bases: Enum

An enumeration.

AI_SOCIETY = 'ai_society'#
CODE = 'code'#
DEFAULT = 'default'#
EVALUATION = 'evaluation'#
GENERATE_TEXT_EMBEDDING_DATA = 'generate_text_embedding_data'#
IMAGE_CRAFT = 'image_craft'#
MISALIGNMENT = 'misalignment'#
MULTI_CONDITION_IMAGE_CRAFT = 'multi_condition_image_craft'#
OBJECT_RECOGNITION = 'object_recognition'#
ROLE_DESCRIPTION = 'role_description'#
SOLUTION_EXTRACTION = 'solution_extraction'#
TRANSLATION = 'translation'#
VIDEO_DESCRIPTION = 'video_description'#
class camel.types.enums.TerminationMode(value)[source]#

Bases: Enum

An enumeration.

ALL = 'all'#
ANY = 'any'#
class camel.types.enums.VectorDistance(value)[source]#

Bases: Enum

Distance metrics used in a vector database.

COSINE = 'cosine'#

Cosine similarity. https://en.wikipedia.org/wiki/Cosine_similarity

DOT = 'dot'#

Dot product. https://en.wikipedia.org/wiki/Dot_product

EUCLIDEAN = 'euclidean'#

Euclidean distance. https://en.wikipedia.org/wiki/Euclidean_distance
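
A brief sketch of selecting a metric and round-tripping its string value, assuming a backend that consumes the plain string.

```python
from camel.types import VectorDistance

# Select a metric; the enum value is the plain string a vector store
# backend would typically consume.
metric = VectorDistance.COSINE
print(metric.value)  # 'cosine'

# Round-trip from the raw string back to the enum member.
assert VectorDistance("euclidean") is VectorDistance.EUCLIDEAN
```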

class camel.types.enums.VoiceType(value)[source]#

Bases: Enum

An enumeration.

ALLOY = 'alloy'#
ECHO = 'echo'#
FABLE = 'fable'#
NOVA = 'nova'#
ONYX = 'onyx'#
SHIMMER = 'shimmer'#
property is_openai: bool#

Returns whether this type of voice is an OpenAI-released voice.

camel.types.openai_types module#

Module contents#

class camel.types.AudioModelType(value)[source]#

Bases: Enum

An enumeration.

TTS_1 = 'tts-1'#
TTS_1_HD = 'tts-1-hd'#
property is_openai: bool#

Returns whether this type of audio model is an OpenAI-released model.

class camel.types.ChatCompletion(**data: Any)[source]#

Bases: BaseModel

choices: List[Choice]#

A list of chat completion choices.

Can be more than one if n is greater than 1.

created: int#

The Unix timestamp (in seconds) of when the chat completion was created.

id: str#

A unique identifier for the chat completion.

model: str#

The model used for the chat completion.

model_computed_fields: ClassVar[Dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {'defer_build': True, 'extra': 'allow'}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[Dict[str, FieldInfo]] = {'choices': FieldInfo(annotation=List[Choice], required=True), 'created': FieldInfo(annotation=int, required=True), 'id': FieldInfo(annotation=str, required=True), 'model': FieldInfo(annotation=str, required=True), 'object': FieldInfo(annotation=Literal['chat.completion'], required=True), 'service_tier': FieldInfo(annotation=Union[Literal['scale', 'default'], NoneType], required=False, default=None), 'system_fingerprint': FieldInfo(annotation=Union[str, NoneType], required=False, default=None), 'usage': FieldInfo(annotation=Union[CompletionUsage, NoneType], required=False, default=None)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo] objects.

This replaces Model.__fields__ from Pydantic V1.

object: Literal['chat.completion']#

The object type, which is always chat.completion.

service_tier: Literal['scale', 'default'] | None#

The service tier used for processing the request.

This field is only included if the service_tier parameter is specified in the request.

system_fingerprint: str | None#

This fingerprint represents the backend configuration that the model runs with.

Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.

usage: CompletionUsage | None#

Usage statistics for the completion request.
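
A minimal sketch of validating a raw, OpenAI-style response dict into this model; the field values are illustrative.

```python
from camel.types import ChatCompletion

# Illustrative OpenAI-style response payload.
raw = {
    "id": "chatcmpl-123",
    "object": "chat.completion",
    "created": 1700000000,
    "model": "gpt-4o-mini",
    "choices": [
        {
            "index": 0,
            "finish_reason": "stop",
            "message": {"role": "assistant", "content": "Hello!"},
        }
    ],
}

completion = ChatCompletion.model_validate(raw)
print(completion.choices[0].message.content)  # 'Hello!'
print(completion.usage)                       # None; usage was not supplied
```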

class camel.types.ChatCompletionAssistantMessageParam[source]#

Bases: TypedDict

audio: Audio | None#

Data about a previous audio response from the model.

[Learn more](https://platform.openai.com/docs/guides/audio).

content: str | Iterable[ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam] | None#

The contents of the assistant message.

Required unless tool_calls or function_call is specified.

function_call: FunctionCall | None#

Deprecated and replaced by tool_calls.

The name and arguments of a function that should be called, as generated by the model.

name: str#

An optional name for the participant.

Provides the model information to differentiate between participants of the same role.

refusal: str | None#

The refusal message by the assistant.

role: Required[Literal['assistant']]#

The role of the messages author, in this case assistant.

tool_calls: Iterable[ChatCompletionMessageToolCallParam]#

The tool calls generated by the model, such as function calls.

class camel.types.ChatCompletionChunk(**data: Any)[source]#

Bases: BaseModel

choices: List[Choice]#

A list of chat completion choices.

Can contain more than one element if n is greater than 1. Can also be empty for the last chunk if you set stream_options: {"include_usage": true}.

created: int#

The Unix timestamp (in seconds) of when the chat completion was created.

Each chunk has the same timestamp.

id: str#

A unique identifier for the chat completion. Each chunk has the same ID.

model: str#

The model used to generate the completion.

model_computed_fields: ClassVar[Dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {'defer_build': True, 'extra': 'allow'}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[Dict[str, FieldInfo]] = {'choices': FieldInfo(annotation=List[Choice], required=True), 'created': FieldInfo(annotation=int, required=True), 'id': FieldInfo(annotation=str, required=True), 'model': FieldInfo(annotation=str, required=True), 'object': FieldInfo(annotation=Literal['chat.completion.chunk'], required=True), 'service_tier': FieldInfo(annotation=Union[Literal['scale', 'default'], NoneType], required=False, default=None), 'system_fingerprint': FieldInfo(annotation=Union[str, NoneType], required=False, default=None), 'usage': FieldInfo(annotation=Union[CompletionUsage, NoneType], required=False, default=None)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo] objects.

This replaces Model.__fields__ from Pydantic V1.

object: Literal['chat.completion.chunk']#

The object type, which is always chat.completion.chunk.

service_tier: Literal['scale', 'default'] | None#

The service tier used for processing the request.

This field is only included if the service_tier parameter is specified in the request.

system_fingerprint: str | None#

This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.

usage: CompletionUsage | None#

An optional field that will only be present when you set stream_options: {"include_usage": true} in your request. When present, it contains a null value except for the last chunk, which contains the token usage statistics for the entire request.
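
A sketch of accumulating streamed text, assuming the standard OpenAI chunk schema in which each streamed choice carries a delta with partial content.

```python
from typing import List

from camel.types import ChatCompletionChunk


def collect_text(chunks: List[ChatCompletionChunk]) -> str:
    """Concatenate partial content from streamed chunks."""
    pieces = []
    for chunk in chunks:
        # The final chunk can be empty when stream_options
        # {"include_usage": true} is set.
        if not chunk.choices:
            continue
        delta = chunk.choices[0].delta  # assumed chunk-choice schema
        if delta.content:
            pieces.append(delta.content)
    return "".join(pieces)
```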

class camel.types.ChatCompletionFunctionMessageParam[source]#

Bases: TypedDict

content: Required[str | None]#

The contents of the function message.

name: Required[str]#

The name of the function to call.

role: Required[Literal['function']]#

The role of the messages author, in this case function.

class camel.types.ChatCompletionMessage(**data: Any)[source]#

Bases: BaseModel

audio: ChatCompletionAudio | None#

If the audio output modality is requested, this object contains data about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio).

content: str | None#

The contents of the message.

function_call: FunctionCall | None#

Deprecated and replaced by tool_calls.

The name and arguments of a function that should be called, as generated by the model.

model_computed_fields: ClassVar[Dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {'defer_build': True, 'extra': 'allow'}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[Dict[str, FieldInfo]] = {'audio': FieldInfo(annotation=Union[ChatCompletionAudio, NoneType], required=False, default=None), 'content': FieldInfo(annotation=Union[str, NoneType], required=False, default=None), 'function_call': FieldInfo(annotation=Union[FunctionCall, NoneType], required=False, default=None), 'refusal': FieldInfo(annotation=Union[str, NoneType], required=False, default=None), 'role': FieldInfo(annotation=Literal['assistant'], required=True), 'tool_calls': FieldInfo(annotation=Union[List[ChatCompletionMessageToolCall], NoneType], required=False, default=None)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo] objects.

This replaces Model.__fields__ from Pydantic V1.

refusal: str | None#

The refusal message generated by the model.

role: Literal['assistant']#

The role of the author of this message.

tool_calls: List[ChatCompletionMessageToolCall] | None#

The tool calls generated by the model, such as function calls.

class camel.types.ChatCompletionSystemMessageParam[source]#

Bases: TypedDict

content: Required[str | Iterable[ChatCompletionContentPartTextParam]]#

The contents of the system message.

name: str#

An optional name for the participant.

Provides the model information to differentiate between participants of the same role.

role: Required[Literal['system']]#

The role of the messages author, in this case system.

class camel.types.ChatCompletionUserMessageParam[source]#

Bases: TypedDict

content: Required[str | Iterable[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam | ChatCompletionContentPartInputAudioParam]]#

The contents of the user message.

name: str#

An optional name for the participant.

Provides the model information to differentiate between participants of the same role.

role: Required[Literal['user']]#

The role of the messages author, in this case user.
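
A short sketch of building message parameters; since these are TypedDicts, they are plain dicts at runtime and the values here are illustrative.

```python
from camel.types import (
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
)

# TypedDicts are plain dicts at runtime; the annotations guide type checkers.
system_msg: ChatCompletionSystemMessageParam = {
    "role": "system",
    "content": "You are a helpful assistant.",
}
user_msg: ChatCompletionUserMessageParam = {
    "role": "user",
    "content": "Summarize the camel.types module in one sentence.",
    "name": "alice",
}

messages = [system_msg, user_msg]
```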

class camel.types.Choice(**data: Any)[source]#

Bases: BaseModel

finish_reason: Literal['stop', 'length', 'tool_calls', 'content_filter', 'function_call']#

The reason the model stopped generating tokens.

This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.

index: int#

The index of the choice in the list of choices.

logprobs: ChoiceLogprobs | None#

Log probability information for the choice.

message: ChatCompletionMessage#

A chat completion message generated by the model.

model_computed_fields: ClassVar[Dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {'defer_build': True, 'extra': 'allow'}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[Dict[str, FieldInfo]] = {'finish_reason': FieldInfo(annotation=Literal['stop', 'length', 'tool_calls', 'content_filter', 'function_call'], required=True), 'index': FieldInfo(annotation=int, required=True), 'logprobs': FieldInfo(annotation=Union[ChoiceLogprobs, NoneType], required=False, default=None), 'message': FieldInfo(annotation=ChatCompletionMessage, required=True)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo] objects.

This replaces Model.__fields__ from Pydantic V1.

class camel.types.CompletionUsage(**data: Any)[source]#

Bases: BaseModel

completion_tokens: int#

Number of tokens in the generated completion.

completion_tokens_details: CompletionTokensDetails | None#

Breakdown of tokens used in a completion.

model_computed_fields: ClassVar[Dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {'defer_build': True, 'extra': 'allow'}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[Dict[str, FieldInfo]] = {'completion_tokens': FieldInfo(annotation=int, required=True), 'completion_tokens_details': FieldInfo(annotation=Union[CompletionTokensDetails, NoneType], required=False, default=None), 'prompt_tokens': FieldInfo(annotation=int, required=True), 'prompt_tokens_details': FieldInfo(annotation=Union[PromptTokensDetails, NoneType], required=False, default=None), 'total_tokens': FieldInfo(annotation=int, required=True)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo] objects.

This replaces Model.__fields__ from Pydantic V1.

prompt_tokens: int#

Number of tokens in the prompt.

prompt_tokens_details: PromptTokensDetails | None#

Breakdown of tokens used in the prompt.

total_tokens: int#

Total number of tokens used in the request (prompt + completion).
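
A minimal sketch with illustrative token counts.

```python
from camel.types import CompletionUsage

# Illustrative counts; total_tokens is the sum of prompt and completion tokens.
usage = CompletionUsage(
    prompt_tokens=120,
    completion_tokens=30,
    total_tokens=150,
)
print(usage.total_tokens)  # 150
```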

class camel.types.EmbeddingModelType(value)[source]#

Bases: Enum

An enumeration.

MISTRAL_EMBED = 'mistral-embed'#
TEXT_EMBEDDING_3_LARGE = 'text-embedding-3-large'#
TEXT_EMBEDDING_3_SMALL = 'text-embedding-3-small'#
TEXT_EMBEDDING_ADA_2 = 'text-embedding-ada-002'#
property is_mistral: bool#

Returns whether this type of model is a Mistral-released model.

property is_openai: bool#

Returns whether this type of model is an OpenAI-released model.

property output_dim: int#

Returns the output dimension of the embedding model.

class camel.types.ModelPlatformType(value)[source]#

Bases: Enum

An enumeration.

ANTHROPIC = 'anthropic'#
AZURE = 'azure'#
COHERE = 'cohere'#
DEFAULT = 'openai'#
GEMINI = 'gemini'#
GROQ = 'groq'#
LITELLM = 'litellm'#
MISTRAL = 'mistral'#
OLLAMA = 'ollama'#
OPENAI = 'openai'#
OPENAI_COMPATIBLE_MODEL = 'openai-compatible-model'#
QWEN = 'tongyi-qianwen'#
REKA = 'reka'#
SAMBA = 'samba-nova'#
TOGETHER = 'together'#
VLLM = 'vllm'#
YI = 'lingyiwanwu'#
ZHIPU = 'zhipuai'#
property is_anthropic: bool#

Returns whether this platform is Anthropic.

property is_azure: bool#

Returns whether this platform is Azure.

property is_cohere: bool#

Returns whether this platform is Cohere.

property is_gemini: bool#

Returns whether this platform is Gemini.

property is_groq: bool#

Returns whether this platform is Groq.

property is_litellm: bool#

Returns whether this platform is LiteLLM.

property is_mistral: bool#

Returns whether this platform is Mistral.

property is_ollama: bool#

Returns whether this platform is Ollama.

property is_openai: bool#

Returns whether this platform is OpenAI.

property is_openai_compatible_model: bool#

Returns whether this platform supports OpenAI API compatibility.

property is_qwen: bool#

Returns whether this platform is Qwen.

property is_reka: bool#

Returns whether this platform is Reka.

property is_samba: bool#

Returns whether this platform is SambaNova.

property is_together: bool#

Returns whether this platform is Together.

property is_vllm: bool#

Returns whether this platform is vLLM.

property is_yi: bool#

Returns whether this platform is Yi.

property is_zhipuai: bool#

Returns whether this platform is ZhipuAI.

class camel.types.ModelType(value)#

Bases: UnifiedModelType, Enum

An enumeration.

CLAUDE_2_0 = ModelType.CLAUDE_2_0#
CLAUDE_2_1 = ModelType.CLAUDE_2_1#
CLAUDE_3_5_SONNET = ModelType.CLAUDE_3_5_SONNET#
CLAUDE_3_HAIKU = ModelType.CLAUDE_3_HAIKU#
CLAUDE_3_OPUS = ModelType.CLAUDE_3_OPUS#
CLAUDE_3_SONNET = ModelType.CLAUDE_3_SONNET#
CLAUDE_INSTANT_1_2 = ModelType.CLAUDE_INSTANT_1_2#
COHERE_COMMAND = ModelType.COHERE_COMMAND#
COHERE_COMMAND_LIGHT = ModelType.COHERE_COMMAND_LIGHT#
COHERE_COMMAND_NIGHTLY = ModelType.COHERE_COMMAND_NIGHTLY#
COHERE_COMMAND_R = ModelType.COHERE_COMMAND_R#
COHERE_COMMAND_R_PLUS = ModelType.COHERE_COMMAND_R_PLUS#
DEFAULT = ModelType.GPT_4O_MINI#
GEMINI_1_5_FLASH = ModelType.GEMINI_1_5_FLASH#
GEMINI_1_5_PRO = ModelType.GEMINI_1_5_PRO#
GEMINI_EXP_1114 = ModelType.GEMINI_EXP_1114#
GLM_3_TURBO = ModelType.GLM_3_TURBO#
GLM_4 = ModelType.GLM_4#
GLM_4V = ModelType.GLM_4V#
GPT_3_5_TURBO = ModelType.GPT_3_5_TURBO#
GPT_4 = ModelType.GPT_4#
GPT_4O = ModelType.GPT_4O#
GPT_4O_MINI = ModelType.GPT_4O_MINI#
GPT_4_TURBO = ModelType.GPT_4_TURBO#
GROQ_GEMMA_2_9B_IT = ModelType.GROQ_GEMMA_2_9B_IT#
GROQ_GEMMA_7B_IT = ModelType.GROQ_GEMMA_7B_IT#
GROQ_LLAMA_3_1_405B = ModelType.GROQ_LLAMA_3_1_405B#
GROQ_LLAMA_3_1_70B = ModelType.GROQ_LLAMA_3_1_70B#
GROQ_LLAMA_3_1_8B = ModelType.GROQ_LLAMA_3_1_8B#
GROQ_LLAMA_3_70B = ModelType.GROQ_LLAMA_3_70B#
GROQ_LLAMA_3_8B = ModelType.GROQ_LLAMA_3_8B#
GROQ_MIXTRAL_8_7B = ModelType.GROQ_MIXTRAL_8_7B#
MISTRAL_3B = ModelType.MISTRAL_3B#
MISTRAL_7B = ModelType.MISTRAL_7B#
MISTRAL_8B = ModelType.MISTRAL_8B#
MISTRAL_CODESTRAL = ModelType.MISTRAL_CODESTRAL#
MISTRAL_CODESTRAL_MAMBA = ModelType.MISTRAL_CODESTRAL_MAMBA#
MISTRAL_LARGE = ModelType.MISTRAL_LARGE#
MISTRAL_MIXTRAL_8x22B = ModelType.MISTRAL_MIXTRAL_8x22B#
MISTRAL_MIXTRAL_8x7B = ModelType.MISTRAL_MIXTRAL_8x7B#
MISTRAL_NEMO = ModelType.MISTRAL_NEMO#
MISTRAL_PIXTRAL_12B = ModelType.MISTRAL_PIXTRAL_12B#
NEMOTRON_4_REWARD = ModelType.NEMOTRON_4_REWARD#
O1_MINI = ModelType.O1_MINI#
O1_PREVIEW = ModelType.O1_PREVIEW#
QWEN_2_5_14B = ModelType.QWEN_2_5_14B#
QWEN_2_5_32B = ModelType.QWEN_2_5_32B#
QWEN_2_5_72B = ModelType.QWEN_2_5_72B#
QWEN_2_5_CODER_32B = ModelType.QWEN_2_5_CODER_32B#
QWEN_CODER_TURBO = ModelType.QWEN_CODER_TURBO#
QWEN_LONG = ModelType.QWEN_LONG#
QWEN_MATH_PLUS = ModelType.QWEN_MATH_PLUS#
QWEN_MATH_TURBO = ModelType.QWEN_MATH_TURBO#
QWEN_MAX = ModelType.QWEN_MAX#
QWEN_PLUS = ModelType.QWEN_PLUS#
QWEN_TURBO = ModelType.QWEN_TURBO#
QWEN_VL_MAX = ModelType.QWEN_VL_MAX#
QWEN_VL_PLUS = ModelType.QWEN_VL_PLUS#
REKA_CORE = ModelType.REKA_CORE#
REKA_EDGE = ModelType.REKA_EDGE#
REKA_FLASH = ModelType.REKA_FLASH#
STUB = ModelType.STUB#
YI_LARGE = ModelType.YI_LARGE#
YI_LARGE_FC = ModelType.YI_LARGE_FC#
YI_LARGE_RAG = ModelType.YI_LARGE_RAG#
YI_LARGE_TURBO = ModelType.YI_LARGE_TURBO#
YI_LIGHTNING = ModelType.YI_LIGHTNING#
YI_MEDIUM = ModelType.YI_MEDIUM#
YI_MEDIUM_200K = ModelType.YI_MEDIUM_200K#
YI_SPARK = ModelType.YI_SPARK#
YI_VISION = ModelType.YI_VISION#
property is_anthropic: bool#

Returns whether this type of model is an Anthropic-released model.

Returns:

Whether this type of model is Anthropic.

Return type:

bool

property is_azure_openai: bool#

Returns whether this type of model is an OpenAI-released model from Azure.

property is_cohere: bool#

Returns whether this type of model is a Cohere model.

Returns:

Whether this type of model is Cohere.

Return type:

bool

property is_gemini: bool#

Returns whether this type of model is a Gemini model.

Returns:

Whether this type of model is Gemini.

Return type:

bool

property is_groq: bool#

Returns whether this type of model is served by Groq.

property is_mistral: bool#

Returns whether this type of model is served by Mistral.

property is_nvidia: bool#

Returns whether this type of model is an Nvidia-released model.

Returns:

Whether this type of model is Nvidia.

Return type:

bool

property is_openai: bool#

Returns whether this type of model is an OpenAI-released model.

property is_qwen: bool#

Returns whether the model is a Qwen model.

property is_reka: bool#

Returns whether this type of model is a Reka model.

Returns:

Whether this type of model is Reka.

Return type:

bool

property is_yi: bool#

Returns whether this type of model is a Yi model.

Returns:

Whether this type of model is Yi.

Return type:

bool

property is_zhipuai: bool#

Returns whether this type of model is a ZhipuAI model.

property support_native_tool_calling: bool#

Returns whether the model supports native tool calling.

property token_limit: int#

Returns the maximum token limit for a given model.

Returns:

The maximum token limit for the given model.

Return type:

int

property value_for_tiktoken: str#

Returns the model name for TikToken.
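
A brief usage sketch; the specific limit and capability flag noted in the comments are expectations for this model, not values quoted from this documentation.

```python
from camel.types import ModelType

model = ModelType.GPT_4O_MINI

print(model.is_openai)                    # True
print(model.support_native_tool_calling)  # expected True for GPT-4o-mini
print(model.token_limit)                  # context window, e.g. 128_000
print(model.value_for_tiktoken)           # model name handed to tiktoken
```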

class camel.types.NotGiven#

Bases: object

A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior).

For example:

```py
def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...

get(timeout=1)     # 1s timeout
get(timeout=None)  # No timeout
get()              # Default timeout behavior, which may not be statically
                   # known at the method definition.
```

class camel.types.OpenAIBackendRole(value)[source]#

Bases: Enum

An enumeration.

ASSISTANT = 'assistant'#
FUNCTION = 'function'#
SYSTEM = 'system'#
TOOL = 'tool'#
USER = 'user'#
class camel.types.OpenAIImageType(value)[source]#

Bases: Enum

Image types supported by OpenAI vision model.

GIF = 'gif'#
JPEG = 'jpeg'#
JPG = 'jpg'#
PNG = 'png'#
WEBP = 'webp'#
class camel.types.OpenAIVisionDetailType(value)[source]#

Bases: Enum

An enumeration.

AUTO = 'auto'#
HIGH = 'high'#
LOW = 'low'#
class camel.types.OpenAPIName(value)[source]#

Bases: Enum

An enumeration.

BIZTOC = 'biztoc'#
COURSERA = 'coursera'#
CREATE_QR_CODE = 'create_qr_code'#
KLARNA = 'klarna'#
NASA_APOD = 'nasa_apod'#
OUTSCHOOL = 'outschool'#
SPEAK = 'speak'#
WEB_SCRAPER = 'web_scraper'#
class camel.types.ParsedChatCompletion(*, id: str, choices: List[ParsedChoice], created: int, model: str, object: Literal['chat.completion'], service_tier: Literal['scale', 'default'] | None = None, system_fingerprint: str | None = None, usage: CompletionUsage | None = None, **extra_data: Any)[source]#

Bases: ChatCompletion, GenericModel, Generic[ContentType]

choices: List[ParsedChoice]#

A list of chat completion choices.

Can be more than one if n is greater than 1.

model_computed_fields: ClassVar[Dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {'defer_build': True, 'extra': 'allow'}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[Dict[str, FieldInfo]] = {'choices': FieldInfo(annotation=List[ParsedChoice], required=True), 'created': FieldInfo(annotation=int, required=True), 'id': FieldInfo(annotation=str, required=True), 'model': FieldInfo(annotation=str, required=True), 'object': FieldInfo(annotation=Literal['chat.completion'], required=True), 'service_tier': FieldInfo(annotation=Union[Literal['scale', 'default'], NoneType], required=False, default=None), 'system_fingerprint': FieldInfo(annotation=Union[str, NoneType], required=False, default=None), 'usage': FieldInfo(annotation=Union[CompletionUsage, NoneType], required=False, default=None)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo] objects.

This replaces Model.__fields__ from Pydantic V1.

class camel.types.RoleType(value)[source]#

Bases: Enum

An enumeration.

ASSISTANT = 'assistant'#
CRITIC = 'critic'#
DEFAULT = 'default'#
EMBODIMENT = 'embodiment'#
USER = 'user'#
class camel.types.StorageType(value)[source]#

Bases: Enum

An enumeration.

MILVUS = 'milvus'#
QDRANT = 'qdrant'#
class camel.types.TaskType(value)[source]#

Bases: Enum

An enumeration.

AI_SOCIETY = 'ai_society'#
CODE = 'code'#
DEFAULT = 'default'#
EVALUATION = 'evaluation'#
GENERATE_TEXT_EMBEDDING_DATA = 'generate_text_embedding_data'#
IMAGE_CRAFT = 'image_craft'#
MISALIGNMENT = 'misalignment'#
MULTI_CONDITION_IMAGE_CRAFT = 'multi_condition_image_craft'#
OBJECT_RECOGNITION = 'object_recognition'#
ROLE_DESCRIPTION = 'role_description'#
SOLUTION_EXTRACTION = 'solution_extraction'#
TRANSLATION = 'translation'#
VIDEO_DESCRIPTION = 'video_description'#
class camel.types.TerminationMode(value)[source]#

Bases: Enum

An enumeration.

ALL = 'all'#
ANY = 'any'#
class camel.types.UnifiedModelType(value: ModelType | str)[source]#

Bases: str

Class used to support both ModelType and str as representations of a model type in a unified way. This class is a subclass of str so that it can be used as a string seamlessly.

Parameters:

value (Union[ModelType, str]) – The value of the model type.

property is_anthropic: bool#

Returns whether the model is an Anthropic model.

property is_azure_openai: bool#

Returns whether the model is an Azure OpenAI model.

property is_cohere: bool#

Returns whether the model is a Cohere model.

property is_gemini: bool#

Returns whether the model is a Gemini model.

property is_groq: bool#

Returns whether the model is a Groq served model.

property is_mistral: bool#

Returns whether the model is a Mistral model.

property is_openai: bool#

Returns whether the model is an OpenAI model.

property is_qwen: bool#

Returns whether the model is a Qwen model.

property is_reka: bool#

Returns whether the model is a Reka model.

property is_yi: bool#

Returns whether the model is a Yi model.

property is_zhipuai: bool#

Returns whether the model is a ZhipuAI model.

property support_native_tool_calling: bool#

Returns whether the model supports native tool calling.

property token_limit: int#

Returns the token limit for the model. The default value of 999_999_999 is used if no limit is provided in model_config_dict.

property value_for_tiktoken: str#

Returns the model name for TikToken.
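
A short sketch of wrapping a backend-specific model name (the name is hypothetical); the token-limit fallback follows the docstring above.

```python
from camel.types import UnifiedModelType

# 'my-org/custom-model' is a hypothetical backend-specific model name.
custom = UnifiedModelType("my-org/custom-model")

print(isinstance(custom, str))          # True; it subclasses str
print(custom == "my-org/custom-model")  # True
print(custom.token_limit)               # 999_999_999 fallback per the docstring
```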

class camel.types.VectorDistance(value)[source]#

Bases: Enum

Distance metrics used in a vector database.

COSINE = 'cosine'#

Cosine similarity. https://en.wikipedia.org/wiki/Cosine_similarity

DOT = 'dot'#

Dot product. https://en.wikipedia.org/wiki/Dot_product

EUCLIDEAN = 'euclidean'#

Euclidean distance. https://en.wikipedia.org/wiki/Euclidean_distance

class camel.types.VoiceType(value)[source]#

Bases: Enum

An enumeration.

ALLOY = 'alloy'#
ECHO = 'echo'#
FABLE = 'fable'#
NOVA = 'nova'#
ONYX = 'onyx'#
SHIMMER = 'shimmer'#
property is_openai: bool#

Returns whether this type of voice is an OpenAI-released voice.