# This file was auto-generated by Fern from our API Definition.

import typing
from json.decoder import JSONDecodeError

from ..core.api_error import ApiError
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.http_response import AsyncHttpResponse, HttpResponse
from ..core.jsonable_encoder import jsonable_encoder
from ..core.request_options import RequestOptions
from ..core.unchecked_base_model import construct_type
from ..errors.internal_server_error import InternalServerError
from ..types.ml_backend import MlBackend
from .types.create_ml_request_auth_method import CreateMlRequestAuthMethod
from .types.list_model_versions_ml_response import ListModelVersionsMlResponse
from .types.update_ml_request_auth_method import UpdateMlRequestAuthMethod

# Sentinel used as the default for optional request parameters, so that an
# explicitly-passed ``None`` can be distinguished from "argument not provided".
# Fields left at OMIT are handed to the client wrapper together with
# ``omit=OMIT`` (see the request calls below), which filters them out.
OMIT = typing.cast(typing.Any, ...)


class RawMlClient:
    """Synchronous low-level client for the ML-backend endpoints.

    Every method issues one HTTP request through the shared client wrapper and
    returns an ``HttpResponse`` pairing the raw ``httpx`` response with the
    parsed payload (or ``None`` for endpoints without a response body).
    Non-2xx responses raise ``ApiError``.
    """

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # The wrapper owns the configured httpx client (base URL, auth headers).
        self._client_wrapper = client_wrapper

    def list(
        self, *, project: typing.Optional[int] = None, request_options: typing.Optional[RequestOptions] = None
    ) -> HttpResponse[typing.List[MlBackend]]:
        """
        List all configured ML backends for a specific project by ID.
        Use the following cURL command:
        ```bash
        curl http://localhost:8000/api/ml?project={project_id} -H 'Authorization: Token abc123'
        ```

        Parameters
        ----------
        project : typing.Optional[int]
            Project ID

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[typing.List[MlBackend]]
            Parsed list of ML backends.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _response = self._client_wrapper.httpx_client.request(
            "api/ml/",
            method="GET",
            params={
                "project": project,
            },
            request_options=request_options,
        )
        try:
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    typing.List[MlBackend],
                    construct_type(
                        type_=typing.List[MlBackend],  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return HttpResponse(response=_response, data=_data)
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body was not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

    def create(
        self,
        *,
        auth_method: typing.Optional[CreateMlRequestAuthMethod] = OMIT,
        basic_auth_pass: typing.Optional[str] = OMIT,
        basic_auth_user: typing.Optional[str] = OMIT,
        description: typing.Optional[str] = OMIT,
        extra_params: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        is_interactive: typing.Optional[bool] = OMIT,
        project: typing.Optional[int] = OMIT,
        timeout: typing.Optional[int] = OMIT,
        title: typing.Optional[str] = OMIT,
        url: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> HttpResponse[MlBackend]:
        """
        Add an ML backend to a project using the Label Studio UI or by sending a POST request using the following cURL
        command:
        ```bash
        curl -X POST -H 'Content-type: application/json' http://localhost:8000/api/ml -H 'Authorization: Token abc123'\\
        --data '{"url": "http://localhost:9090", "project": {project_id}}'
        ```

        Parameters
        ----------
        auth_method : typing.Optional[CreateMlRequestAuthMethod]
            Auth method

        basic_auth_pass : typing.Optional[str]
            Basic auth password

        basic_auth_user : typing.Optional[str]
            Basic auth user

        description : typing.Optional[str]
            Description

        extra_params : typing.Optional[typing.Dict[str, typing.Any]]
            Extra parameters

        is_interactive : typing.Optional[bool]
            Is interactive

        project : typing.Optional[int]
            Project ID

        timeout : typing.Optional[int]
            Response model timeout

        title : typing.Optional[str]
            Title

        url : typing.Optional[str]
            ML backend URL

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[MlBackend]
            The newly created ML backend.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _response = self._client_wrapper.httpx_client.request(
            "api/ml/",
            method="POST",
            json={
                "auth_method": auth_method,
                "basic_auth_pass": basic_auth_pass,
                "basic_auth_user": basic_auth_user,
                "description": description,
                "extra_params": extra_params,
                "is_interactive": is_interactive,
                "project": project,
                "timeout": timeout,
                "title": title,
                "url": url,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            # NOTE(review): omit=OMIT presumably tells the client wrapper to
            # drop OMIT-valued fields from the body — confirm in the core
            # http client implementation.
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    MlBackend,
                    construct_type(
                        type_=MlBackend,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return HttpResponse(response=_response, data=_data)
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body was not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

    def get(self, id: int, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[MlBackend]:
        """
        Get details about a specific ML backend connection by ID. For example, make a GET request using the
        following cURL command:
        ```bash
        curl http://localhost:8000/api/ml/{ml_backend_ID} -H 'Authorization: Token abc123'
        ```

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[MlBackend]
            The requested ML backend.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}",
            method="GET",
            request_options=request_options,
        )
        try:
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    MlBackend,
                    construct_type(
                        type_=MlBackend,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return HttpResponse(response=_response, data=_data)
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body was not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

    def delete(self, id: int, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
        """
        Remove an existing ML backend connection by ID. For example, use the
        following cURL command:
        ```bash
        curl -X DELETE http://localhost:8000/api/ml/{ml_backend_ID} -H 'Authorization: Token abc123'
        ```

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[None]
            Empty response on success.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}",
            method="DELETE",
            request_options=request_options,
        )
        try:
            if 200 <= _response.status_code < 300:
                return HttpResponse(response=_response, data=None)
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body was not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

    def update(
        self,
        id: int,
        *,
        auth_method: typing.Optional[UpdateMlRequestAuthMethod] = OMIT,
        basic_auth_pass: typing.Optional[str] = OMIT,
        basic_auth_user: typing.Optional[str] = OMIT,
        description: typing.Optional[str] = OMIT,
        extra_params: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        is_interactive: typing.Optional[bool] = OMIT,
        project: typing.Optional[int] = OMIT,
        timeout: typing.Optional[int] = OMIT,
        title: typing.Optional[str] = OMIT,
        url: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> HttpResponse[MlBackend]:
        """
        Update ML backend parameters using the Label Studio UI or by sending a PATCH request using the following cURL command:
        ```bash
        curl -X PATCH -H 'Content-type: application/json' http://localhost:8000/api/ml/{ml_backend_ID} -H 'Authorization: Token abc123'\\
        --data '{"url": "http://localhost:9091"}'
        ```

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        auth_method : typing.Optional[UpdateMlRequestAuthMethod]
            Auth method

        basic_auth_pass : typing.Optional[str]
            Basic auth password

        basic_auth_user : typing.Optional[str]
            Basic auth user

        description : typing.Optional[str]
            Description

        extra_params : typing.Optional[typing.Dict[str, typing.Any]]
            Extra parameters

        is_interactive : typing.Optional[bool]
            Is interactive

        project : typing.Optional[int]
            Project ID

        timeout : typing.Optional[int]
            Response model timeout

        title : typing.Optional[str]
            Title

        url : typing.Optional[str]
            ML backend URL

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[MlBackend]
            The updated ML backend.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}",
            method="PATCH",
            json={
                "auth_method": auth_method,
                "basic_auth_pass": basic_auth_pass,
                "basic_auth_user": basic_auth_user,
                "description": description,
                "extra_params": extra_params,
                "is_interactive": is_interactive,
                "project": project,
                "timeout": timeout,
                "title": title,
                "url": url,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            # Fields left at OMIT are excluded from the PATCH body.
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    MlBackend,
                    construct_type(
                        type_=MlBackend,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return HttpResponse(response=_response, data=_data)
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body was not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

    def predict_interactive(
        self,
        id: int,
        *,
        task: int,
        context: typing.Optional[typing.Any] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> HttpResponse[None]:
        """
        Send a request to the machine learning backend set up to be used for interactive preannotations to retrieve a
        predicted region based on annotator input.
        See [set up machine learning](https://labelstud.io/guide/ml.html#Get-interactive-preannotations) for more.

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        task : int
            ID of task to annotate

        context : typing.Optional[typing.Any]
            Context for ML model

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[None]
            Empty response on success.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}/interactive-annotating",
            method="POST",
            json={
                "context": context,
                "task": task,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return HttpResponse(response=_response, data=None)
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body was not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

    def predict_all_tasks(
        self,
        id: int,
        *,
        batch_size: typing.Optional[int] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> HttpResponse[None]:
        """
        <Card href="https://humansignal.com/goenterprise">
                <img style="pointer-events: none; margin-left: 0px; margin-right: 0px;" src="https://docs.humansignal.com/images/badge.svg" alt="Label Studio Enterprise badge"/>
                <p style="margin-top: 10px; font-size: 14px;">
                    This endpoint is not available in Label Studio Community Edition. [Learn more about Label Studio Enterprise](https://humansignal.com/goenterprise)
                </p>
            </Card>

        Create predictions for all tasks using a specific ML backend so that you can set up an active learning strategy based on the confidence or uncertainty scores associated with the predictions. Creating predictions requires a Label Studio ML backend set up and configured for your project.

        See [Set up machine learning](https://labelstud.io/guide/ml.html) for more details about a Label Studio ML backend.

        Reference the ML backend ID in the path of this API call. Get the ML backend ID by [listing the ML backends for a project](https://labelstud.io/api/#operation/api_ml_list).

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        batch_size : typing.Optional[int]
            Computed number of tasks without predictions that the ML backend needs to predict.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[None]
            Empty response on success.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}/predict",
            method="POST",
            params={
                "batch_size": batch_size,
            },
            request_options=request_options,
        )
        try:
            if 200 <= _response.status_code < 300:
                return HttpResponse(response=_response, data=None)
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body was not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

    def train(
        self,
        id: int,
        *,
        use_ground_truth: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> HttpResponse[None]:
        """
        After you add an ML backend, call this API with the ML backend ID to start training with
        already-labeled tasks.

        Get the ML backend ID by [listing the ML backends for a project](https://labelstud.io/api/#operation/api_ml_list).

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        use_ground_truth : typing.Optional[bool]
            Whether to include ground truth annotations in training

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[None]
            Empty response on success.

        Raises
        ------
        InternalServerError
            If the server responds with status 500.
        ApiError
            If the server responds with any other non-2xx status code.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}/train",
            method="POST",
            json={
                "use_ground_truth": use_ground_truth,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return HttpResponse(response=_response, data=None)
            # 500 gets a dedicated exception type; other errors fall through
            # to the generic ApiError below.
            if _response.status_code == 500:
                raise InternalServerError(
                    headers=dict(_response.headers),
                    body=typing.cast(
                        typing.Any,
                        construct_type(
                            type_=typing.Any,  # type: ignore
                            object_=_response.json(),
                        ),
                    ),
                )
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body was not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)

    def list_model_versions(
        self, id: int, *, request_options: typing.Optional[RequestOptions] = None
    ) -> HttpResponse[ListModelVersionsMlResponse]:
        """
        Get available versions of the model.

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        HttpResponse[ListModelVersionsMlResponse]
            List of available versions.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}/versions",
            method="GET",
            request_options=request_options,
        )
        try:
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    ListModelVersionsMlResponse,
                    construct_type(
                        type_=ListModelVersionsMlResponse,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return HttpResponse(response=_response, data=_data)
            _response_json = _response.json()
        except JSONDecodeError:
            # Error body was not JSON; surface the raw text instead.
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)


class AsyncRawMlClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # The wrapper owns the configured async httpx client (base URL, auth headers).
        self._client_wrapper = client_wrapper

    async def list(
        self, *, project: typing.Optional[int] = None, request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncHttpResponse[typing.List[MlBackend]]:
        """
        List all configured ML backends for a specific project by ID.
        Use the following cURL command:
        ```bash
        curl http://localhost:8000/api/ml?project={project_id} -H 'Authorization: Token abc123'
        ```

        Parameters
        ----------
        project : typing.Optional[int]
            Project ID

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[typing.List[MlBackend]]
            Parsed list of ML backends.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _resp = await self._client_wrapper.httpx_client.request(
            "api/ml/",
            method="GET",
            params={"project": project},
            request_options=request_options,
        )
        try:
            if not (200 <= _resp.status_code < 300):
                _error_body = _resp.json()
            else:
                _backends = typing.cast(
                    typing.List[MlBackend],
                    construct_type(
                        type_=typing.List[MlBackend],  # type: ignore
                        object_=_resp.json(),
                    ),
                )
                return AsyncHttpResponse(response=_resp, data=_backends)
        except JSONDecodeError:
            # Body was not valid JSON; report the raw text.
            raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_resp.text)
        raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_error_body)

    async def create(
        self,
        *,
        auth_method: typing.Optional[CreateMlRequestAuthMethod] = OMIT,
        basic_auth_pass: typing.Optional[str] = OMIT,
        basic_auth_user: typing.Optional[str] = OMIT,
        description: typing.Optional[str] = OMIT,
        extra_params: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        is_interactive: typing.Optional[bool] = OMIT,
        project: typing.Optional[int] = OMIT,
        timeout: typing.Optional[int] = OMIT,
        title: typing.Optional[str] = OMIT,
        url: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[MlBackend]:
        """
        Add an ML backend to a project using the Label Studio UI or by sending a POST request using the following cURL
        command:
        ```bash
        curl -X POST -H 'Content-type: application/json' http://localhost:8000/api/ml -H 'Authorization: Token abc123'\\
        --data '{"url": "http://localhost:9090", "project": {project_id}}'
        ```

        Parameters
        ----------
        auth_method : typing.Optional[CreateMlRequestAuthMethod]
            Auth method

        basic_auth_pass : typing.Optional[str]
            Basic auth password

        basic_auth_user : typing.Optional[str]
            Basic auth user

        description : typing.Optional[str]
            Description

        extra_params : typing.Optional[typing.Dict[str, typing.Any]]
            Extra parameters

        is_interactive : typing.Optional[bool]
            Is interactive

        project : typing.Optional[int]
            Project ID

        timeout : typing.Optional[int]
            Response model timeout

        title : typing.Optional[str]
            Title

        url : typing.Optional[str]
            ML backend URL

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[MlBackend]
            The newly created ML backend.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        # Fields still set to OMIT are stripped from the body by the wrapper.
        _payload = {
            "auth_method": auth_method,
            "basic_auth_pass": basic_auth_pass,
            "basic_auth_user": basic_auth_user,
            "description": description,
            "extra_params": extra_params,
            "is_interactive": is_interactive,
            "project": project,
            "timeout": timeout,
            "title": title,
            "url": url,
        }
        _resp = await self._client_wrapper.httpx_client.request(
            "api/ml/",
            method="POST",
            json=_payload,
            headers={"content-type": "application/json"},
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if not (200 <= _resp.status_code < 300):
                _error_body = _resp.json()
            else:
                _backend = typing.cast(
                    MlBackend,
                    construct_type(
                        type_=MlBackend,  # type: ignore
                        object_=_resp.json(),
                    ),
                )
                return AsyncHttpResponse(response=_resp, data=_backend)
        except JSONDecodeError:
            # Body was not valid JSON; report the raw text.
            raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_resp.text)
        raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_error_body)

    async def get(
        self, id: int, *, request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncHttpResponse[MlBackend]:
        """
        Get details about a specific ML backend connection by ID. For example, make a GET request using the
        following cURL command:
        ```bash
        curl http://localhost:8000/api/ml/{ml_backend_ID} -H 'Authorization: Token abc123'
        ```

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[MlBackend]
            The requested ML backend.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _resp = await self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}",
            method="GET",
            request_options=request_options,
        )
        try:
            if not (200 <= _resp.status_code < 300):
                _error_body = _resp.json()
            else:
                _backend = typing.cast(
                    MlBackend,
                    construct_type(
                        type_=MlBackend,  # type: ignore
                        object_=_resp.json(),
                    ),
                )
                return AsyncHttpResponse(response=_resp, data=_backend)
        except JSONDecodeError:
            # Body was not valid JSON; report the raw text.
            raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_resp.text)
        raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_error_body)

    async def delete(
        self, id: int, *, request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncHttpResponse[None]:
        """
        Remove an existing ML backend connection by ID. For example, use the
        following cURL command:
        ```bash
        curl -X DELETE http://localhost:8000/api/ml/{ml_backend_ID} -H 'Authorization: Token abc123'
        ```

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[None]
            Empty response on success.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        _resp = await self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}",
            method="DELETE",
            request_options=request_options,
        )
        try:
            if not (200 <= _resp.status_code < 300):
                _error_body = _resp.json()
            else:
                return AsyncHttpResponse(response=_resp, data=None)
        except JSONDecodeError:
            # Body was not valid JSON; report the raw text.
            raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_resp.text)
        raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_error_body)

    async def update(
        self,
        id: int,
        *,
        auth_method: typing.Optional[UpdateMlRequestAuthMethod] = OMIT,
        basic_auth_pass: typing.Optional[str] = OMIT,
        basic_auth_user: typing.Optional[str] = OMIT,
        description: typing.Optional[str] = OMIT,
        extra_params: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
        is_interactive: typing.Optional[bool] = OMIT,
        project: typing.Optional[int] = OMIT,
        timeout: typing.Optional[int] = OMIT,
        title: typing.Optional[str] = OMIT,
        url: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[MlBackend]:
        """
        Update ML backend parameters using the Label Studio UI or by sending a PATCH request using the following cURL command:
        ```bash
        curl -X PATCH -H 'Content-type: application/json' http://localhost:8000/api/ml/{ml_backend_ID} -H 'Authorization: Token abc123'\\
        --data '{"url": "http://localhost:9091"}'
        ```

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        auth_method : typing.Optional[UpdateMlRequestAuthMethod]
            Auth method

        basic_auth_pass : typing.Optional[str]
            Basic auth password

        basic_auth_user : typing.Optional[str]
            Basic auth user

        description : typing.Optional[str]
            Description

        extra_params : typing.Optional[typing.Dict[str, typing.Any]]
            Extra parameters

        is_interactive : typing.Optional[bool]
            Is interactive

        project : typing.Optional[int]
            Project ID

        timeout : typing.Optional[int]
            Response model timeout

        title : typing.Optional[str]
            Title

        url : typing.Optional[str]
            ML backend URL

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[MlBackend]
            The updated ML backend.

        Raises
        ------
        ApiError
            If the server responds with a non-2xx status code.
        """
        # Fields still set to OMIT are stripped from the PATCH body by the wrapper.
        _payload = {
            "auth_method": auth_method,
            "basic_auth_pass": basic_auth_pass,
            "basic_auth_user": basic_auth_user,
            "description": description,
            "extra_params": extra_params,
            "is_interactive": is_interactive,
            "project": project,
            "timeout": timeout,
            "title": title,
            "url": url,
        }
        _resp = await self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}",
            method="PATCH",
            json=_payload,
            headers={"content-type": "application/json"},
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if not (200 <= _resp.status_code < 300):
                _error_body = _resp.json()
            else:
                _backend = typing.cast(
                    MlBackend,
                    construct_type(
                        type_=MlBackend,  # type: ignore
                        object_=_resp.json(),
                    ),
                )
                return AsyncHttpResponse(response=_resp, data=_backend)
        except JSONDecodeError:
            # Body was not valid JSON; report the raw text.
            raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_resp.text)
        raise ApiError(status_code=_resp.status_code, headers=dict(_resp.headers), body=_error_body)

    async def predict_interactive(
        self,
        id: int,
        *,
        task: int,
        context: typing.Optional[typing.Any] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[None]:
        """
        Ask the ML backend configured for interactive preannotations to produce a
        predicted region from the annotator's input.
        See [set up machine learning](https://labelstud.io/guide/ml.html#Get-interactive-preannotations) for more.

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        task : int
            ID of task to annotate

        context : typing.Optional[typing.Any]
            Context for ML model

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[None]
        """
        # Request body keys must match the server contract exactly.
        _body = {
            "context": context,
            "task": task,
        }
        _response = await self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}/interactive-annotating",
            method="POST",
            json=_body,
            headers={"content-type": "application/json"},
            request_options=request_options,
            omit=OMIT,
        )
        # Success carries no payload; surface errors as ApiError with the
        # parsed JSON body when available, raw text otherwise.
        if 200 <= _response.status_code < 300:
            return AsyncHttpResponse(response=_response, data=None)
        try:
            _error_body = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_error_body)

    async def predict_all_tasks(
        self,
        id: int,
        *,
        batch_size: typing.Optional[int] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[None]:
        """
        <Card href="https://humansignal.com/goenterprise">
                <img style="pointer-events: none; margin-left: 0px; margin-right: 0px;" src="https://docs.humansignal.com/images/badge.svg" alt="Label Studio Enterprise badge"/>
                <p style="margin-top: 10px; font-size: 14px;">
                    This endpoint is not available in Label Studio Community Edition. [Learn more about Label Studio Enterprise](https://humansignal.com/goenterprise)
                </p>
            </Card>


        Generate predictions for every task via the given ML backend, enabling
        active-learning strategies driven by prediction confidence/uncertainty
        scores. A Label Studio ML backend must already be set up and configured
        for the project.

        See [Set up machine learning](https://labelstud.io/guide/ml.html) for more details about a Label Studio ML backend.

        The ML backend ID goes in the request path; obtain it by [listing the ML backends for a project](https://labelstud.io/api/#operation/api_ml_list).

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        batch_size : typing.Optional[int]
            Computed number of tasks without predictions that the ML backend needs to predict.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[None]
        """
        _response = await self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}/predict",
            method="POST",
            params={"batch_size": batch_size},
            request_options=request_options,
        )
        # No payload on success; on failure prefer the JSON error body and
        # fall back to raw text when the body is not valid JSON.
        if 200 <= _response.status_code < 300:
            return AsyncHttpResponse(response=_response, data=None)
        try:
            _error_body = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_error_body)

    async def train(
        self,
        id: int,
        *,
        use_ground_truth: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[None]:
        """
        Kick off training on an ML backend using the tasks that are already
        labeled. Call this with the backend's ID after the backend is added.

        Get the ML backend ID by [listing the ML backends for a project](https://labelstud.io/api/#operation/api_ml_list).

        Parameters
        ----------
        id : int
            A unique integer value identifying this ML backend.

        use_ground_truth : typing.Optional[bool]
            Whether to include ground truth annotations in training

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[None]
        """
        _response = await self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}/train",
            method="POST",
            json={"use_ground_truth": use_ground_truth},
            headers={"content-type": "application/json"},
            request_options=request_options,
            omit=OMIT,
        )
        # 2xx carries no payload. A 500 is mapped to InternalServerError with
        # its decoded body; anything else becomes a generic ApiError.
        try:
            if 200 <= _response.status_code < 300:
                return AsyncHttpResponse(response=_response, data=None)
            if _response.status_code == 500:
                _err_body = typing.cast(
                    typing.Any,
                    construct_type(
                        type_=typing.Any,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                raise InternalServerError(headers=dict(_response.headers), body=_err_body)
            _error_body = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_error_body)

    async def list_model_versions(
        self, id: int, *, request_options: typing.Optional[RequestOptions] = None
    ) -> AsyncHttpResponse[ListModelVersionsMlResponse]:
        """
        Fetch the versions available for this ML backend's model.

        Parameters
        ----------
        id : int

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[ListModelVersionsMlResponse]
            List of available versions.
        """
        _response = await self._client_wrapper.httpx_client.request(
            f"api/ml/{jsonable_encoder(id)}/versions",
            method="GET",
            request_options=request_options,
        )
        # Both the success payload and the error body come from the same JSON
        # parse; a body that is not JSON always raises ApiError with raw text.
        try:
            _payload = _response.json()
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    ListModelVersionsMlResponse,
                    construct_type(
                        type_=ListModelVersionsMlResponse,  # type: ignore
                        object_=_payload,
                    ),
                )
                return AsyncHttpResponse(response=_response, data=_data)
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
        raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_payload)
