class ModelScopeModel(OpenAICompatibleModel):
    """ModelScope API in a unified :obj:`OpenAICompatibleModel` interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into :obj:`openai.ChatCompletion.create()`. If
            :obj:`None`, :obj:`ModelScopeConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating
            with the ModelScope service. (default: :obj:`None`)
        url (Optional[str], optional): The url to the ModelScope service.
            Refer to the following link for more details:
            https://api-inference.modelscope.cn/v1/
            (default: :obj:`None`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter
            to use for the model. If not provided,
            :obj:`OpenAITokenCounter(ModelType.GPT_4O_MINI)` will be used.
            (default: :obj:`None`)
        timeout (Optional[float], optional): The timeout value in seconds
            for API calls. (default: :obj:`None`)
        max_retries (int, optional): Maximum number of retries for API
            calls. (default: :obj:`3`)
        **kwargs (Any): Additional arguments to pass to the client
            initialization.
    """

    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
        max_retries: int = 3,
        **kwargs: Any
    ):
        """Construct the ModelScope backend.

        See the class docstring for parameter semantics.

        NOTE(review): the constructor body was not visible in this chunk;
        confirm initialization details against the full implementation.
        """

    def _post_handle_response(
        self,
        response: Union[ChatCompletion, Stream[ChatCompletionChunk]]
    ):
        """Handle the response to remove ``<think>`` tags at the beginning.

        Args:
            response (Union[ChatCompletion, Stream[ChatCompletionChunk]]):
                The raw response returned by the ModelScope endpoint.

        NOTE(review): body not visible in this chunk — the summary above is
        reconstructed from the original docstring fragment; confirm the
        exact stripping behavior (streaming vs. non-streaming) against the
        implementation.
        """

    def _request_chat_completion(
        self,
        messages: List[OpenAIMessage],
        tools: Optional[List[Dict[str, Any]]] = None
    ):
        """Send a chat-completion request to the ModelScope endpoint.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat
                history in OpenAI API format.
            tools (Optional[List[Dict[str, Any]]], optional): Tool schemas
                to expose to the model. (default: :obj:`None`)

        NOTE(review): body not visible in this chunk — confirm return type
        and streaming handling against the implementation.
        """

    def check_model_config(self):
        """Check whether the model configuration is valid for the
        ModelScope API.

        NOTE(review): body not visible in this chunk — confirm which
        configuration keys are validated and what exception is raised on
        failure.
        """